Dataset schema:

  query            string, 9 to 9.05k characters
  document         string, 10 to 222k characters
  metadata         dict
  negatives        list, 30 items
  negative_scores  list, 30 items
  document_score   string, 4 to 10 characters
  document_rank    string, 2 distinct values
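The rows below follow this schema. As a minimal sketch of how a row might be consumed, the snippet loads the data with the Hugging Face `datasets` library and inspects the fields of the first example; the repository path is a hypothetical placeholder, since the actual dataset name is not given in this dump.

# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "user/code-retrieval-triplets" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")  # hypothetical path
row = ds[0]

print(row["query"])                 # short natural-language description of the code
print(row["document"][:200])        # the matching code snippet (positive document)
print(len(row["negatives"]))        # 30 hard-negative code snippets
print(len(row["negative_scores"]))  # 30 similarity scores, aligned with negatives
print(row["document_score"], row["document_rank"])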
Test that read_mapping_file can read more than one molecule.
def test_read_mapping_file_multiple(reference_multi):
    content, reference = reference_multi
    from_names = list(reference.keys())
    to_names = []
    block_names = []
    for k in reference:
        to_names.extend(reference[k].keys())
        for to in reference[k]:
            block_names.extend(reference[k][to].keys())
    force_fields = case_to_dummy_ffs(
        from_names + to_names,
        block_names,
        {(0, 'X1'): [(0, 'A')], (0, 'X2'): [(0, 'B')], (0, 'X3'): [(0, 'D')]},
        {(0, 'A'): {(0, 'X1'): 1.0}, (0, 'B'): {(0, 'X2'): 1.0},
         (0, 'C'): {(0, 'X2'): 1.0}, (0, 'D'): {(0, 'X3'): 1.0}},
        [],
    )
    mappings = vermouth.map_input.read_backmapping_file(content, force_fields)
    compare_old_new_mappings(mappings, reference)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_read_mapping_file(case):\n reference = collections.defaultdict(lambda: collections.defaultdict(dict))\n for from_ff, to_ff in itertools.product(case.from_ff, case.to_ff):\n reference[from_ff][to_ff][case.name] = (\n case.mapping, case.weights, case.extra\n )\n\n ffs = case_to_dummy_ffs(case.from_ff + case.to_ff, [case.name], case.mapping,\n case.weights, case.extra)\n\n reference = vermouth.map_input._default_to_dict(reference)\n\n mappings = vermouth.map_input.read_backmapping_file(\n ['[ molecule ]'] + case.string.split('\\n'),\n ffs\n )\n compare_old_new_mappings(mappings, reference)", "def test_mapping(self):\n\n # Input PatternDS has constraints fake, file and pattern.\n # Use fake from first input as animal constraint.\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/a/new/pattern/%animal%/%file%/%pattern%.file',\n 'echo', map_dict={'animal': ('fake', 0)})\n output = the_process_unit.execute(simulate=True)\n\n all_files = [thing for thing in output.files]\n\n self.assertEqual(len(all_files), 1)\n self.assertEqual(all_files[0].full_path, '/a/new/pattern/fake_1/file_1/pattern_1.file')", "def test_tb_full_mapping_iter_02():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_2 = resource_path + \"tb.Human.SRR1658573_2.fastq\"\n\n files = [\n gem_file,\n fastq_file_2\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm2 = tbFullMappingTool()\n tfm2_files, tfm2_meta = tfm2.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_2_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_2_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_2_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_2_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def test_tb_full_mapping_iter_01():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_1 = resource_path + \"tb.Human.SRR1658573_1.fastq\"\n\n files = [\n gem_file,\n fastq_file_1\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm1 = tbFullMappingTool()\n tfm1_files, tfm1_meta = tfm1.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_1_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_1_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_1_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_1_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def test_read_mapping_directory(ref_mapping_directory):\n dirpath, ref_mappings = ref_mapping_directory\n from_names = 
list(ref_mappings.keys())\n to_names = []\n block_names = []\n mapping = {}\n weights = {}\n\n\n for k in ref_mappings:\n to_names.extend(ref_mappings[k].keys())\n for to in ref_mappings[k]:\n block_names.extend(ref_mappings[k][to].keys())\n for block_name in ref_mappings[k][to]:\n m, w, _ = ref_mappings[k][to][block_name]\n mapping.update(m)\n weights.update(w)\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n mapping, weights, [])\n\n\n mappings = vermouth.map_input.read_mapping_directory(dirpath, force_fields)\n compare_old_new_mappings(mappings, ref_mappings)", "def test_read_mapping_errors(content):\n with pytest.raises(IOError):\n vermouth.map_input._read_mapping_partial(content.split('\\n'), 1)", "def test_from_mapped_smiles(self):\n\n # there should be no undefined sterochmeistry error when making the molecule\n mol = Molecule.from_mapped_smiles(\n \"[H:14][c:1]1[c:3]([c:7]([c:11]([c:8]([c:4]1[H:17])[H:21])[C:13]([H:24])([H:25])[c:12]2[c:9]([c:5]([c:2]([c:6]([c:10]2[H:23])[H:19])[H:15])[H:18])[H:22])[H:20])[H:16]\"\n )\n assert mol.n_atoms == 25\n # make sure the atom map is not exposed\n with pytest.raises(KeyError):\n mapping = mol._properties[\"atom_map\"]", "def test_from_mapped_smiles(self):\n\n # there should be no undefined sterochmeistry error when making the molecule\n mol = Molecule.from_mapped_smiles(\n \"[H:14][c:1]1[c:3]([c:7]([c:11]([c:8]([c:4]1[H:17])[H:21])[C:13]([H:24])\"\n \"([H:25])[c:12]2[c:9]([c:5]([c:2]([c:6]([c:10]2[H:23])[H:19])[H:15])[H:18])[H:22])[H:20])[H:16]\"\n )\n assert mol.n_atoms == 25\n # make sure the atom map is not exposed\n with pytest.raises(KeyError):\n mol._properties[\"atom_map\"]", "def test_fastq_map():\n cluster = clust.Clustering.from_fastq(TMP + 'map.fastq', 4, 'ACGT',\n threshold=2, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid1_expect].size == 5, \"%r != %r\" % (cluster[uid1_expect].size, 5)\n assert cluster[uid2_expect].size == 5, \"%r != %r\" % (cluster[uid2_expect].size, 5)", "def test_read_multiple(self):\n meshes = stlreader.get_data(self.stl_multi_file)\n for name, vertices, polygons in meshes:\n self.assertEqual(name, \"{}#{}\".format(os.path.basename(self.stl_multi_file), 0))\n self.assertTrue(len(vertices) > 0)\n self.assertTrue(len(polygons) > 0)\n polygon_ids = list()\n for a, b, c in polygons.itervalues():\n polygon_ids += [a, b, c]\n self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))", "def test_check_mapping_file_multiple_problems(self):\r\n\r\n check_mapping_file(mapping_fp=self.errors_warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n added_demultiplex_field=\"DoesNotExist\",\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt',\r\n '_corrected.txt'))\r\n 
output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_warnings_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_errors_warnings)\r\n self.assertEqual(log_data, self.expected_log_errors_warnings_output)", "def test_check_map_single_sample(self):\r\n\r\n header, mapping_data = check_map(\r\n valid_mapping_data_no_bcs_no_added_demultiplex,\r\n barcode_type=0)\r\n\r\n expected_header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n expected_mapping_data =\\\r\n [['s1', '', '', 's1_description']]\r\n\r\n self.assertEquals(header, expected_header)\r\n self.assertEquals(mapping_data, expected_mapping_data)", "def test_molecule_loop(self):\n\n # Loop over the molecules.\n for mol in mol_res_spin.molecule_loop('#RNA'):\n # Test the molecule name.\n self.assertEqual(mol.name, 'RNA')\n\n # Test loop length.\n self.assertEqual(len(list(mol_res_spin.molecule_loop('#RNA'))), 1)", "def test_check_mapping_data_dups(self):\r\n\r\n mapping_data = ['Sample3\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG\\tFile2\\ts.2'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n self.assertRaises(ValueError, check_mapping_data, mapping_data,\r\n headers, filename_column)", "def test_check_mapping_data_dups(self):\r\n\r\n mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG\\tFile2\\ts.2'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile2\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n self.assertRaises(ValueError, check_mapping_data, mapping_data,\r\n headers, filename_column)", "def test_store_mapping(self):\r\n\r\n expected = [\"1:\\t0\\t2\\t5\\t6\\n\",\r\n \"3:\\n\",\r\n \"4:\\n\",\r\n \"8:\\t7\\n\"]\r\n\r\n self.files_to_remove.append(\"/tmp/test_store_mapping_mapping.txt\")\r\n store_mapping(self.mapping, \"/tmp/\", prefix=\"test_store_mapping\")\r\n observed = list(open(\"/tmp/test_store_mapping_mapping.txt\", \"U\"))\r\n self.assertItemsEqual(observed, expected)", "def test_combine_mappings(self):\r\n\r\n self.tmp_dir = mkdtemp(dir=\"./\", suffix=\"/\")\r\n\r\n combine_mappings(\r\n fasta,\r\n denoiser_mapping,\r\n denoised_seqs,\r\n otu_picker_map,\r\n self.tmp_dir)\r\n\r\n observed_otu_map = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_otu_map.txt\")))\r\n\r\n expected_otu_map = \"\"\"1:\\tS1_1\\tS1_2\\tS2_4\\tS2_5\r\n2:\\tS2_3\\tS1_6\r\n\"\"\"\r\n self.assertEqual(observed_otu_map, expected_otu_map)\r\n\r\n observed_fasta = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_all.fasta\")))\r\n expected_fasta 
= \"\"\">S1_1 Read1\r\nAAA\r\n>S1_2 Read2\r\nTTT\r\n>S2_3 Read3\r\nGGG\r\n\"\"\"\r\n self.assertEqual(observed_fasta, expected_fasta)", "def test_check_map(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])", "def test_conceptmap_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"cm-address-use-v3.json\"\n inst = conceptmap.ConceptMap.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ConceptMap\" == inst.resource_type\n\n impl_conceptmap_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ConceptMap\" == data[\"resourceType\"]\n\n inst2 = conceptmap.ConceptMap(**data)\n impl_conceptmap_1(inst2)", "def test_get_interesting_mapping_fields(self):\r\n # all columns are completely unique\r\n d = parse_mapping_file(self.mapping_f1)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # all columns are completely identical\r\n d = parse_mapping_file(self.mapping_f2)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # some columns retained\r\n d = parse_mapping_file(self.mapping_f3)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = ['Something', 'days_since_epoch']\r\n self.assertEqual(actual, expected)", "def test_format_mapping_file(self):\r\n headers = ['SampleID', 'col1', 'col0', 'Description']\r\n samples =\\\r\n [['bsample', 'v1_3', 'v0_3', 'd1'],\r\n ['asample', 'aval', 'another', 'd2']]\r\n comments = ['this goes after headers', 'this too']\r\n self.assertEqual(format_mapping_file(headers, samples, comments),\r\n example_mapping_file)\r\n # need file or stringIO for roundtrip test\r\n # roundtrip = parse_mapping_file(format_mapping_file(headers,samples,comments))\r\n # self.assertEqual(roundtrip, [headers,samples,comments])\r", "def test_filter_mapping_file(self):\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\\\n ['a','b','c','d','e','f']), (self.map_headers, self.map_data))\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers, ['a']),\n (['SampleID','Description'],['a\\tx'.split('\\t')]))", "def test_create_replicated_mapping_file(self):\r\n # 3 replicates, with two extra samples in the mapping file.\r\n obs = qiime.simsam.create_replicated_mapping_file(self.map_f, 3,\r\n self.otu_table.SampleIds)\r\n self.assertEqual(obs, exp_rep_map_lines)\r\n\r\n # Must specify at least one replicate.\r\n self.assertRaises(ValueError,\r\n qiime.simsam.create_replicated_mapping_file, self.map_f, 0,\r\n self.otu_table.SampleIds)", "def test_filter_mapping_file(self):\r\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\r\n ['a', 'b', 'c', 'd', 'e', 'f']), (self.map_headers, self.map_data))\r\n self.assertEqual(\r\n filter_mapping_file(self.map_data, self.map_headers, ['a']),\r\n (['SampleID', 'Description'], ['a\\tx'.split('\\t')]))", "def 
test_parse_mapping_file_handles_file_handle(self):\r\n fd, fp = mkstemp(prefix='test_parse_mapping_file',\r\n suffix='.txt')\r\n close(fd)\r\n self.files_to_remove.append(fp)\r\n open(fp, 'w').write('\\n'.join(['#sample\\ta\\tb',\r\n '#comment line to skip',\r\n 'x \\t y \\t z ', ' ',\r\n '#more skip',\r\n 'i\\tj\\tk']))\r\n obs = parse_mapping_file(open(fp))\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n self.assertEqual(obs, exp)", "def test_sample_ids_from_metadata_description(self):\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"Treatment:Foo\")\n self.tutorial_mapping_f.seek(0)\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"DOB:!20061218,!20070314,!20071112,\"\n \"!20080116\")", "def test_conceptmap_6(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"cm-contact-point-use-v2.json\"\n inst = conceptmap.ConceptMap.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ConceptMap\" == inst.resource_type\n\n impl_conceptmap_6(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ConceptMap\" == data[\"resourceType\"]\n\n inst2 = conceptmap.ConceptMap(**data)\n impl_conceptmap_6(inst2)", "def test_check_mapping_data_valid_data(self):\r\n\r\n mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG\\tFile2\\ts.2'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n expected_data = {'File3': 'Sample3',\r\n 'File2': 'Sample2',\r\n 'File1': 'Sample1'}\r\n\r\n actual_data = check_mapping_data(\r\n mapping_data,\r\n headers,\r\n filename_column)\r\n\r\n self.assertEqual(actual_data, expected_data)", "def test_read_mapping_partial(case):\n full_mapping = vermouth.map_input._read_mapping_partial(case.string.split('\\n'), 1)\n name, from_ff, to_ff, mapping, weights, extra, _ = full_mapping\n assert name == case.name\n assert from_ff == case.from_ff\n assert to_ff == case.to_ff\n assert mapping == case.mapping\n assert extra == case.extra\n assert weights == case.weights", "def test_check_map(self):\r\n\r\n header, mapping_data = check_map(self.valid_mapping_data_golay)\r\n\r\n expected_header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n expected_mapping_data =\\\r\n [['s1', 'AACTCGTCGATG', 'ATTCGATART', 's1_description'],\r\n ['s2', 'agcAGCACTTGT', 'ATTCGATART', 's2_description'],\r\n ['s3', 'ACCGCAGAGTCA', 'YATGCTGCCTCCCGTAGGAGT', 's3_description']]\r\n\r\n self.assertEquals(header, expected_header)\r\n self.assertEquals(mapping_data, expected_mapping_data)" ]
[ "0.67420113", "0.6703428", "0.6658432", "0.6653267", "0.6620562", "0.6576531", "0.65403", "0.6393296", "0.6315044", "0.6295086", "0.6235337", "0.6168162", "0.61647826", "0.61638695", "0.6154931", "0.6074728", "0.6069374", "0.60642123", "0.605357", "0.5964693", "0.59500426", "0.59455866", "0.592864", "0.592253", "0.5920485", "0.5906319", "0.5900412", "0.5885617", "0.5877605", "0.58436507" ]
0.7212817
0
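The score fields appear self-consistent: in this row the document scores 0.7212817 and every negative scores lower, giving rank 0, while in the next row one negative (0.7007131) outranks a document scored 0.6665727, giving rank 1. This suggests document_rank counts the negatives that score above the positive document. A small sketch under that assumption:

# Sketch, assuming document_rank counts negatives scoring above the positive document.
# Scores are stored as strings in this dump, so cast to float before comparing.
def infer_rank(document_score, negative_scores):
    doc = float(document_score)
    return sum(float(score) > doc for score in negative_scores)

# Example with this row's values (list abbreviated):
# infer_rank("0.7212817", ["0.67420113", "0.6703428", ...]) -> 0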
Build a file tree with mapping files.
def ref_mapping_directory(tmpdir_factory):
    basedir = tmpdir_factory.mktemp('data')
    mapdir = basedir.mkdir('mappings')
    template = textwrap.dedent("""
        [ molecule ]
        dummy_{0}
        [ from ]
        {1}
        [ to ]
        {2}
        [ atoms ]
        0 X1{0} A{0} B{0}
        1 X2{0} C{0} D{0}
    """)
    mappings = collections.defaultdict(lambda: collections.defaultdict(dict))
    force_fields_from = ['ff{}'.format(i) for i in range(4)]
    force_fields_to = force_fields_from + ['only_to']
    force_fields_from = force_fields_from + ['only_from']
    iterate_on = itertools.product(force_fields_from, force_fields_to, range(3))
    for idx, (from_ff, to_ff, _) in enumerate(iterate_on):
        mapfile = mapdir / 'file{}.map'.format(idx)
        with open(str(mapfile), 'w') as outfile:
            outfile.write(template.format(idx, from_ff, to_ff))
        mapping = {
            (0, 'X1{}'.format(idx)): [(0, 'A{}'.format(idx)), (0, 'B{}'.format(idx))],
            (0, 'X2{}'.format(idx)): [(0, 'C{}'.format(idx)), (0, 'D{}'.format(idx))],
        }
        weights = {
            (0, 'A{}'.format(idx)): {(0, 'X1{}'.format(idx)): 0.5},
            (0, 'B{}'.format(idx)): {(0, 'X1{}'.format(idx)): 0.5},
            (0, 'C{}'.format(idx)): {(0, 'X2{}'.format(idx)): 0.5},
            (0, 'D{}'.format(idx)): {(0, 'X2{}'.format(idx)): 0.5},
        }
        extra = []
        mappings[from_ff][to_ff]['dummy_{}'.format(idx)] = (mapping, weights, extra)
    mappings = {from_ff: dict(to_ff) for from_ff, to_ff in mappings.items()}
    return Path(str(basedir)), mappings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_file_tree(self):\n # Build file tree with packmode and weigth info (# of file in the packmode)\n root = {\"packmode\": None, \"weight\": None, \"children\": {}}\n for filepath, packmode in self.override_packmode_map.items():\n node = root\n for part in filepath:\n node = node[\"children\"].setdefault(\n part, {\"packmode\": None, \"weight\": None, \"children\": {}}\n )\n node[\"weight\"] = 1\n node[\"packmode\"] = packmode\n return root", "def maps():\n flatmap_list = []\n root_path = pathlib.Path(settings['FLATMAP_ROOT'])\n if root_path.is_dir():\n for flatmap_dir in root_path.iterdir():\n index = os.path.join(settings['FLATMAP_ROOT'], flatmap_dir, 'index.json')\n mbtiles = os.path.join(settings['FLATMAP_ROOT'], flatmap_dir, 'index.mbtiles')\n if os.path.isdir(flatmap_dir) and os.path.exists(index) and os.path.exists(mbtiles):\n with open(index) as fp:\n index = json.loads(fp.read())\n version = index.get('version', 1.0)\n reader = MBTilesReader(mbtiles)\n if version >= 1.3:\n metadata = read_metadata(reader, 'metadata')\n if (('id' not in metadata or flatmap_dir.name != metadata['id'])\n and ('uuid' not in metadata or flatmap_dir.name != metadata['uuid'].split(':')[-1])):\n app.logger.error(f'Flatmap id mismatch: {flatmap_dir}')\n continue\n flatmap = {\n 'id': metadata['id'],\n 'source': metadata['source'],\n 'version': version\n }\n if 'created' in metadata:\n flatmap['created'] = metadata['created']\n if 'taxon' in metadata:\n flatmap['taxon'] = normalise_identifier(metadata['taxon'])\n flatmap['describes'] = metadata['describes'] if 'describes' in metadata else flatmap['taxon']\n elif 'describes' in metadata:\n flatmap['taxon'] = normalise_identifier(metadata['describes'])\n flatmap['describes'] = flatmap['taxon']\n if 'biological-sex' in metadata:\n flatmap['biologicalSex'] = metadata['biological-sex']\n if 'uuid' in metadata:\n flatmap['uuid'] = metadata['uuid']\n if 'name' in metadata:\n flatmap['name'] = metadata['name']\n else:\n try:\n source_row = reader._query(\"SELECT value FROM metadata WHERE name='source'\").fetchone()\n except (InvalidFormatError, sqlite3.OperationalError):\n flask.abort(404, 'Cannot read tile database: {}'.format(mbtiles))\n if source_row is None:\n continue\n flatmap = {\n 'id': flatmap_dir.name,\n 'source': source_row[0]\n }\n created = reader._query(\"SELECT value FROM metadata WHERE name='created'\").fetchone()\n if created is not None:\n flatmap['created'] = created[0]\n describes = reader._query(\"SELECT value FROM metadata WHERE name='describes'\").fetchone()\n if describes is not None and describes[0]:\n flatmap['describes'] = normalise_identifier(describes[0])\n flatmap_list.append(flatmap)\n return flask.jsonify(flatmap_list)", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 
0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])", "def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if 
\"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if 
n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update", "def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]", "def create_map(\n datapointsPath: Union[Path, str],\n linksPath: Union[Path, str],\n datapointAttrPath: Union[Path, str],\n node_attr_map: Dict[str, str],\n link_attr_map: Dict[str, str],\n snapshots: List[Dict] = [],\n playerSettings: Dict[str, Any] = {},\n outFolder: Union[Path, str] = \"data_out\",\n):\n\n # create folders and copy the index file\n print(f\">> creating folders\")\n out_dir = Path(outFolder)\n out_data_path = out_dir / \"data\"\n if not out_data_path.exists():\n print(f\"\\t- new folder - {out_data_path}\")\n out_data_path.mkdir(parents=True, exist_ok=True)\n else:\n print(f\"\\t- found existing. 
overwriting - {out_data_path}\")\n\n # copy the index and run scripts to out directory\n shutil.copy(\"src/index.html\", out_dir)\n print(f\"\\t- copied {out_dir}/index.html\")\n\n shutil.copy(\"src/run_local.sh\", out_dir)\n print(f\"\\t- copied {out_dir}/run_local.sh\\n\")\n\n # write the files\n print(f\">> building dataset\")\n __write_dataset_file(datapointsPath, datapointAttrPath, out_data_path)\n print(f\"\\t- new dataset file written to {out_data_path / 'nodes.json'}.\\n\")\n\n print(f\">> building network\")\n __write_network_file(datapointsPath, linksPath, node_attr_map, link_attr_map, out_data_path)\n print(f\"\\t- new network file written to {out_data_path / 'links.json'}.\\n\")\n\n print(f\">> building settings\")\n __write_settings_file(snapshots, playerSettings, out_data_path)\n print(f\"\\t- new settings file written to {out_data_path / 'settings.json'}.\\n\")", "def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths", "def write_map( file_lists, target_dir, output_dir ):\n tld_to_volumes = {}\n for i, group in enumerate( file_lists ):\n for node in group:\n tld = toplevel_subdir( node, target_dir )\n tld_to_volumes.setdefault( tld, set() ).add( i )\n with open( os.path.join( output_dir, \"map.txt\" ), \"w\" ) as fout:\n for tld, volumes in tld_to_volumes.items():\n fout.write( \"{:24s}: {}\\n\".format( tld, \" \".join( [ str( x ) for x in volumes ] ) ) )", "def file_ids_as_tree(file_ids, start_path):\n relative_path = normdirpath(start_path)\n\n paths = {file_id.filename.replace(relative_path, '') for file_id in file_ids}\n base_depth = max([path.count('../') for path in paths])\n\n main_dict = {FILE_MARKER: []}\n for file_id in file_ids:\n attach(\n file_id,\n main_dict,\n expand_path(file_id.filename, relative_path, base_depth)\n )\n\n return main_dict", "def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder and piece\n\t\t\tself.addNode(self.tree,data.path(),data)", "def create_map(self, data_file):\n mapping = []\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n path_to_file = os.path.join(root, DATA_PATH, data_file)\n with open(path_to_file, 'r') as file:\n lines = file.readlines()\n list_array = [x.strip('\\n') for x in lines]\n for line in list_array:\n mapping.append(list(line))\n\n self.map = mapping", "def map(self, clean=False):\n self.files = {}\n curs = DatabaseManager.Instance().cursor\n if clean:\n curs.execute(\"DELETE FROM DestinationsFilesList WHERE `destinationName`=%s;\", (self.name, ))\n DatabaseManager.Instance().connector.commit()\n for root, directory, files in os.walk(self.path):\n\n for file_ in files:\n path = os.path.join(root, file_)\n relative_path = self.get_relative_path(path)\n if self.filter.test(File(path)):\n fwh = None\n if not clean:\n sql = \"SELECT * FROM DestinationsFilesList WHERE `path`=%s AND `destinationName`=%s LIMIT 1;\"\n 
curs.execute(sql, (relative_path, self.name))\n res = curs.fetchone()\n # curs.fetchall()\n if res is not None:\n # file already in DB, so use it\n fwh = FileWithHash.from_sql_query(res)\n if fwh is None:\n fwh = FileWithHash(path, self.name, None, relative_path)\n sql2 = \"INSERT INTO DestinationsFilesList (`hash`, `path`, `destinationName`) VALUES(%s, %s, %s);\"\n # self.logger.info(\"%s add: %s\", [self.name, relative_path]\n curs.execute(sql2, (fwh.hash, relative_path, fwh.destination_name))\n DatabaseManager.Instance().connector.commit()\n self.files[fwh.hash] = fwh", "def create_master_mapping_file(options, bcos_to_map):\n mapping_dict = {\n \"missing\": {\n\n },\n\n \"additional\": {\n\n }\n }\n\n for bco in bcos_to_map:\n\n filename = os.path.splitext(bco)[0] + \"mapping.txt\"\n\n options.bco = bco\n create_mapping_file(options)\n\n addition_mode = False\n\n with open(filename) as mapping_file:\n for line in mapping_file:\n if line.startswith(\"#\"): # Skip comments explaining mapping file\n continue\n if line.startswith(\"====\"): # Switch modes. Starts false -> true -> false\n addition_mode = not addition_mode\n continue\n key_path = line.split(\"-->\")[0]\n value = line.split(\"-->\")[1].strip(\":\")\n\n if addition_mode:\n if key_path not in mapping_dict[\"missing\"]:\n mapping_dict[\"missing\"][key_path] = [value, [bco]]\n else:\n mapping_dict[\"missing\"][key_path][1].append(bco)\n else:\n if key_path not in mapping_dict[\"additional\"]:\n mapping_dict[\"additional\"][key_path] = [value, [bco]]\n else:\n mapping_dict[\"additional\"][key_path][1].append(bco)\n os.remove(filename)\n\n\n with open(\"mastermappingfile.txt\", \"w\") as masterfile: # write dict to\n # single file with\n masterfile.writelines(\n\"\"\"# Use this file to provide mapping values for multiple bcos.\n# fill out the values for each bco listed.\n# MISSING PROPERTIES/FIELDS lists properties/fields that are missing from bco\n# NONALLOWED PROPERTIES/FIELDS shows properties that are not allowed\n# Syntax for specifying values\n# To delete a value\n# PATH --> FIELD: DELETE\n# To add a value\n# PATH --> FIELD: ADD-value_to_add\n# To rename a field name\n# PATH --> FIELD: RENAME-new_field_name\n# To swap a field name with another current field name\n# PATH --> FIELD: SWAP-other_field_name\n# Blank values will be skipped. Data does not need to be double represented\n# For example, \n# if <bco_id> needs renamed to <object_id>, either\n# ['object_id'] --> object_id: \n# SWAP-bco_id\n# OR \n# ['bco_id'] --> bco_id: RENAME:object_id \n# will work. 
No need to fill out both values.\n\"\"\")\n masterfile.write(\"====MISSING PROPERTIES/FIELDS=====\\n\")\n for key in mapping_dict[\"missing\"]:\n attribute = mapping_dict[\"missing\"][key]\n value = attribute[0]\n\n masterfile.write(key + \"-->\" + value)\n for bco_name in attribute[1]:\n masterfile.write(\" \" + os.path.basename(bco_name) + \":\\n\")\n masterfile.write(\"-----------------------\\n\")\n masterfile.write(\"=====ADDITONAL PROPERTIES/FIELDS=====\\n\")\n for key in mapping_dict[\"additional\"]:\n attribute = mapping_dict[\"additional\"][key]\n value = attribute[0]\n masterfile.write(key + \"-->\" + value)\n for bco_name in attribute[1]:\n masterfile.write(\" \" + os.path.basename(bco_name) + \":\\n\")\n masterfile.write(\"-----------------------\\n\")", "def create_rooted_trees_from_dir(paths, fout, outgroup):\n #pdb.set_trace()\n fout = open(fout, 'w')\n for count, path in enumerate(paths):\n base_path, tree_file_name = os.path.split(path)\n #pdb.set_trace()\n fin = open(path)\n for tree in fin:\n tree = tree.strip()\n tree = Tree(tree)\n tree.set_outgroup(outgroup)\n newick = tree.write(format=5) + '\\n'\n fout.write(newick)\n print count+1\n fout.close()", "def __create_dir_structure_file__(self):\n # | - __create_dir_structure_file__\n\n dir_structure_data = {}\n dir_structure_data[\"tree_level_labels\"] = self.tree_level_labels\n dir_structure_data[\"level_entries_dict\"] = self.level_entries_list\n # TEMP\n dir_structure_data[\"skip_dirs\"] = self.skip_dirs_lst\n\n fle_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/dir_structure.json\",\n )\n\n with open(fle_name, \"w\") as fle:\n json.dump(dir_structure_data, fle, indent=2)\n # __|", "def generate_metadata_files(self):\n\n data_folder = self.get_data_folder(mode='absolute')\n\n parents = (data_folder / '_').parents\n\n for mfile in self.mdata:\n for regex, level in METADATA_LEVEL_BY_NAME.items():\n if re.compile(regex).match(mfile.name):\n create_file(mfile, parents[(3-level)] / mfile.name,\n mode='copy')", "def build(root):", "def Init(self):\n # First iteration over all the files in root searching for symlinks and\n # non-regular files.\n seen_inodes = {}\n for basepath, _, filenames in sorted(os.walk(self._root)):\n for filename in sorted(filenames):\n full_path = os.path.join(basepath, filename)\n rel_path = full_path[len(self._root):]\n st = os.lstat(full_path)\n\n file_data = {\n 'size': st.st_size,\n }\n self._files[rel_path] = file_data\n\n # Track symlinks.\n if stat.S_ISLNK(st.st_mode):\n link_path = os.readlink(full_path)\n # lddtree's normpath handles a little more cases than the os.path\n # version. In particular, it handles the '//' case.\n self._symlinks[rel_path] = (\n link_path.lstrip('/') if link_path and link_path[0] == '/' else\n lddtree.normpath(os.path.join(os.path.dirname(rel_path),\n link_path)))\n file_data['deps'] = {\n 'symlink': [self._symlinks[rel_path]]\n }\n\n # Track hardlinks.\n if st.st_ino in seen_inodes:\n self._hardlinks[rel_path] = seen_inodes[st.st_ino]\n continue\n seen_inodes[st.st_ino] = rel_path", "def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' 
not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict", "def filetree(self) -> P:\n ...", "def generate_build_file(startpath, outfilename='build.yml'):\n buildfiles = {}\n buildtables = {}\n\n def file_node(ext, fullpath):\n return fullpath\n\n def table_node(ext, fullpath):\n return [ext.lower(), fullpath]\n\n def add_to_contents(contents, nodefunc, path, files):\n try:\n safepath = [_pythonize_name(d) if d != '.' else '.' for d in path]\n except BuildException:\n warning = \"Warning: could not determine a Python-legal name for {path}; skipping.\"\n print(warning.format(path=os.sep.join(path)))\n return\n\n ptr = contents\n for folder in safepath:\n ptr = ptr.setdefault(folder, {})\n\n for file in files:\n fullpath = os.path.join(os.path.join(*path), file)\n name, ext = splitext_no_dot(file)\n ptr[_pythonize_name(name)] = nodefunc(ext, fullpath)\n\n for root, dirs, files in os.walk(startpath):\n # skip hidden directories\n for d in dirs:\n if d.startswith('.') or d == PACKAGE_DIR_NAME:\n dirs.remove(d)\n\n rel_path = os.path.relpath(root, startpath)\n path = rel_path.split(os.sep)\n\n tablefiles = []\n rawfiles = []\n for file in files:\n # skip hidden files\n if file.startswith('.'):\n continue\n\n name, ext = splitext_no_dot(file)\n # separate files into tables and raw\n if ext.lower() in TARGET['pandas']:\n tablefiles.append(file)\n else:\n rawfiles.append(file)\n\n if rawfiles:\n add_to_contents(buildfiles, file_node, path, rawfiles)\n\n if tablefiles:\n add_to_contents(buildtables, table_node, path, tablefiles)\n\n for contents in [buildfiles, buildtables]:\n for node in ['.', '..']:\n if node in contents:\n for key in contents[node]:\n contents[key] = contents[node][key]\n del contents[node]\n\n contents = dict(files=buildfiles, tables=buildtables)\n buildfilepath = os.path.join(startpath, outfilename)\n with open(buildfilepath, 'w') as outfile:\n yaml.dump(contents, outfile)\n return buildfilepath", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we 
can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def test_read_mapping_directory(ref_mapping_directory):\n dirpath, ref_mappings = ref_mapping_directory\n from_names = list(ref_mappings.keys())\n to_names = []\n block_names = []\n mapping = {}\n weights = {}\n\n\n for k in ref_mappings:\n to_names.extend(ref_mappings[k].keys())\n for to in ref_mappings[k]:\n block_names.extend(ref_mappings[k][to].keys())\n for block_name in ref_mappings[k][to]:\n m, w, _ = ref_mappings[k][to][block_name]\n mapping.update(m)\n weights.update(w)\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n mapping, weights, [])\n\n\n mappings = vermouth.map_input.read_mapping_directory(dirpath, force_fields)\n compare_old_new_mappings(mappings, ref_mappings)", "def build_single_file_index(cls, index_path, d):\n index = json.load(open(index_path))\n info_list = cls.list_from_index_path(index_path)\n\n sub_d = d\n for entry in info_list:\n if entry[0] not in sub_d:\n sub_d[entry[0]] = {}\n if entry[1] not in sub_d[entry[0]]:\n sub_d[entry[0]][entry[1]] = {}\n sub_d = sub_d[entry[0]][entry[1]]\n\n current_dir = os.path.dirname(index_path)\n rel_dirname = os.path.relpath(current_dir, paths.db_root)\n if 'files' in index:\n for name, file in list(index['files'].items()):\n sub_d[name] = os.path.join(rel_dirname, file)\n if 'info' in index:\n sub_d.update(index['info'])", "def make_structure(file_name):\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if 'ppt' not in loc and (file_name not in loc):\n shutil.rmtree(f'{output_path}/{fld}')\n shutil.copytree(f'{tmp_path}/{file_name}/{i[0].split(file_name)[-1]}', f'{output_path}/{fld}')\n return", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace(\"\\n\", os.linesep).encode())", "def get_objects(dirname):\n\n objects = os.listdir(dirname)\n temp_map = []\n\n for obj in objects:\n\n fpath = os.path.join(dirname, obj)\n\n if fpath[0:2] == \"./\":\n fpath = fpath[2:]\n\n # if the 
object is a file, store it as a file\n if os.path.isfile(fpath):\n\n temp_map.append({\"name\": fpath,\n \"is_file\": True,\n \"children\": []})\n\n # else, assume the object is a directory\n else:\n\n children_map = get_objects(fpath)\n temp_map.append({\"name\": fpath,\n \"is_file\": False,\n \"children\": children_map})\n\n return temp_map", "def __init__(self, mapper=None, relative_to=None):\n\n if mapper and relative_to:\n raise ValueError(\"Must specify exactly one of 'mapper' or 'relative_to'\")\n\n if relative_to:\n base = os.path.abspath(relative_to)\n if not os.path.isdir(base):\n raise ValueError('Could not find a directory to bundle relative to at %s' % base)\n self.mapper = RelativeToMapper(base)\n else:\n self.mapper = mapper or RelativeToMapper(os.getcwd())\n\n self.filemap = {}", "def __init__(self, dirname, defmode='r'):\n self.name = dirname\n self.defmode = defmode\n\n self.items = []\n\n for i in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, i)):\n self.items.append(Tree(os.path.join(dirname, i), defmode))\n\n else:\n self.items.append(open(os.path.join(dirname, i), defmode))\n\n self._dict = self.to_dict()" ]
[ "0.7007131", "0.6332988", "0.6162577", "0.606019", "0.6058516", "0.60204667", "0.59296227", "0.5903134", "0.5871665", "0.5856109", "0.5854468", "0.5800572", "0.57764846", "0.57486165", "0.5710741", "0.57098264", "0.5703374", "0.5689811", "0.5654435", "0.56431305", "0.5602831", "0.5601961", "0.55871856", "0.5582947", "0.5576584", "0.55731237", "0.5564886", "0.5558487", "0.5552906", "0.5543044" ]
0.6665727
1
Test that mapping files from a directory are properly found and read.
def test_read_mapping_directory(ref_mapping_directory):
    dirpath, ref_mappings = ref_mapping_directory
    from_names = list(ref_mappings.keys())
    to_names = []
    block_names = []
    mapping = {}
    weights = {}
    for k in ref_mappings:
        to_names.extend(ref_mappings[k].keys())
        for to in ref_mappings[k]:
            block_names.extend(ref_mappings[k][to].keys())
            for block_name in ref_mappings[k][to]:
                m, w, _ = ref_mappings[k][to][block_name]
                mapping.update(m)
                weights.update(w)
    force_fields = case_to_dummy_ffs(from_names + to_names, block_names,
                                     mapping, weights, [])
    mappings = vermouth.map_input.read_mapping_directory(dirpath, force_fields)
    compare_old_new_mappings(mappings, ref_mappings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_map(self, audio_store_and_expected_files):\n audio_store = audio_store_and_expected_files[0]\n expected_files = audio_store_and_expected_files[1]\n\n # Check number of files.\n assert len(audio_store.file_map) == expected_files\n\n # Ensure the keys are as expected.\n key_list = list(audio_store.file_map.keys())\n assert key_list == [x + 1 for x in range(len(key_list))]\n\n # Ensure the values are as expected.\n for key, file in audio_store.file_map.items():\n\n # Check the extension.\n ext = os.path.splitext(file)[1].replace('.', '')\n assert ext in audio_store.audio_extensions\n\n # File should exist.\n assert os.path.exists(\n os.path.join(\n audio_store.top_dir, audio_store.audio_dir, file))", "def test_mapping(self):\n\n # Input PatternDS has constraints fake, file and pattern.\n # Use fake from first input as animal constraint.\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/a/new/pattern/%animal%/%file%/%pattern%.file',\n 'echo', map_dict={'animal': ('fake', 0)})\n output = the_process_unit.execute(simulate=True)\n\n all_files = [thing for thing in output.files]\n\n self.assertEqual(len(all_files), 1)\n self.assertEqual(all_files[0].full_path, '/a/new/pattern/fake_1/file_1/pattern_1.file')", "def test_get_file_map_type():\n for file in sd.get_file_map(\"resources/\"):\n assert file.endswith(\".txt\")", "def test_tb_full_mapping_iter_01():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_1 = resource_path + \"tb.Human.SRR1658573_1.fastq\"\n\n files = [\n gem_file,\n fastq_file_1\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm1 = tbFullMappingTool()\n tfm1_files, tfm1_meta = tfm1.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_1_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_1_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_1_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_1_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def test_get_file_map_len():\n file_map = sd.get_file_map(\"resources/\")\n files = glob.glob(\"resources/\" + \"/**\" + \".txt\", recursive=True)\n assert len(file_map) == len(files)", "def test_tb_full_mapping_iter_02():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_2 = resource_path + \"tb.Human.SRR1658573_2.fastq\"\n\n files = [\n gem_file,\n fastq_file_2\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm2 = tbFullMappingTool()\n tfm2_files, tfm2_meta = tfm2.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_2_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_2_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_2_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_2_full_1-100.map\"\n\n assert 
os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def test_check_mapping_file_errors(self):\r\n\r\n # Use data with errors, default parameters\r\n check_mapping_file(mapping_fp=self.errors_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_data_errors_corrected_output)\r\n self.assertEqual(log_data, self.expected_data_log_errors_output)", "def test_check_mapping_file_correct_file(self):\r\n\r\n # Use valid data, default parameters\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)\r\n\r\n # With additional parameters added should not change results using\r\n # same valid input data\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n has_barcodes=True,\r\n char_replace=\"A\",\r\n verbose=False,\r\n variable_len_barcodes=True,\r\n disable_primer_check=True,\r\n added_demultiplex_field=None)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n 
join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)", "def test_ifFileExists():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"file\" in testConfig.config and \"file_locations\" in testConfig.config:\n print \"File In Location: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExistsInPossibleLocations, testConfig.config\n elif \"file\" in testConfig.config:\n print \"File: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExists, testConfig.config", "def read_mapfiles():\n mappings = []\n\n # matches stuff like\n # \"/GLOW/*\" glow\n # \"/cms/Role=pilot/Capability=NULL\" cmspilot\n # and extracts the stuff between the quotes, and the username in the second field\n regex = re.compile(r'^\\s*[\"](/[^\"]+)[\"]\\s+([A-Za-z0-9_]+)\\s*(?:$|[#])')\n for filepath in [DEFAULT_VOMS_MAPFILE, VOMS_MAPFILE]:\n try:\n with open(filepath, \"r\", encoding=\"latin-1\") as filehandle:\n for line in filehandle:\n match = regex.match(line)\n if not match:\n continue\n else:\n mappings.append(Mapping(match.group(1), match.group(2)))\n except EnvironmentError as err:\n if err.errno == errno.ENOENT:\n continue\n else:\n raise\n\n return mappings", "def test_find_tests_by_test_mapping_include_subdir(self):\n os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}\n with mock.patch.dict('os.environ', os_environ_mock, clear=True):\n tests, all_tests = self.ctr._find_tests_by_test_mapping(\n path=TEST_MAPPING_TOP_DIR, file_name='test_mapping_sample',\n include_subdirs=True, checked_files=set())\n expected = set([TEST_1, TEST_2, TEST_5, TEST_7, TEST_9])\n expected_all_tests = {'presubmit': expected,\n 'postsubmit': set([\n TEST_3, TEST_6, TEST_8, TEST_10]),\n 'other_group': set([TEST_4])}\n self.assertEqual(expected, tests)\n self.assertEqual(expected_all_tests, all_tests)", "def test_read_mapping_file(case):\n reference = collections.defaultdict(lambda: collections.defaultdict(dict))\n for from_ff, to_ff in itertools.product(case.from_ff, case.to_ff):\n reference[from_ff][to_ff][case.name] = (\n case.mapping, case.weights, case.extra\n )\n\n ffs = case_to_dummy_ffs(case.from_ff + case.to_ff, [case.name], case.mapping,\n case.weights, case.extra)\n\n reference = vermouth.map_input._default_to_dict(reference)\n\n mappings = vermouth.map_input.read_backmapping_file(\n ['[ molecule ]'] + case.string.split('\\n'),\n ffs\n )\n compare_old_new_mappings(mappings, reference)", "def test_lookup(self):\n env = pike.Environment()\n self.make_files('foo')\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n ret = 
env.lookup('foo')\n self.assertEqual(ret, os.path.join('.', 'foo'))", "def test_search_file(self):\n base_dir = join(get_current_path(), 'samples', 'base_dir1')\n output_dir = join(get_current_path(), 'samples', 'base_dir1', 'result')\n files = search_files(base_dir, output_dir)\n self.assertTrue(self.verify_sub_folders(list(files.keys())))\n\n # sub folders under Concord is not counted, only files\n self.assertEqual(len(files['Concord']), 5)\n self.assertEqual(len(files['ListCo Equity']), 1)\n self.assertEqual(len(files['CLO Equity']), 2)\n self.assertEqual(files['ListCo Equity'][0], join(base_dir, 'ListCo Equity', 'Positions1219.xlsx'))", "def test_files(self, location):\n for filename in os.listdir(location):\n with open(location + '/' + filename) as json_file:\n data = json.load(json_file)\n self.test_data(data)", "def test_scan_dir(self):\n dir_path = tempfile.mkdtemp()\n\n try:\n # Create a bzip, gzip and normal file in turn in the temp directory\n for method, suffix in ((bz2.BZ2File, '.bzip2'),\n (gzip.open, '.gzip'),\n (open, '.normal')):\n handle, path = tempfile.mkstemp(suffix, dir=dir_path)\n os.close(handle)\n file_obj = method(path, 'wb')\n # Write three lines to the file\n file_obj.write(\"Line one.\\nLine two.\\nLine three.\")\n file_obj.close()\n records = bin.parser.scan_dir(self.mock_parser, dir_path, False,\n re.compile('(.*)'), self.mock_db, [])\n for record in records:\n # Check that all three lines have been read\n self.assertEqual(record.get_field('StopLine'), 3,\n \"Unable to read %s file\"\n % record.get_field('FileName').split('.')[1])\n finally:\n shutil.rmtree(dir_path)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def test_filter_mapping_file(self):\r\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\r\n ['a', 'b', 'c', 'd', 'e', 'f']), (self.map_headers, self.map_data))\r\n self.assertEqual(\r\n filter_mapping_file(self.map_data, self.map_headers, ['a']),\r\n (['SampleID', 'Description'], ['a\\tx'.split('\\t')]))", "def test_check_mapping_file_warnings(self):\r\n\r\n check_mapping_file(mapping_fp=self.warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.warnings_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_output_warnings)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_warnings_output)\r\n self.assertEqual(log_data, self.expected_log_warnings_output)", "def test_parse_mapping_file_handles_filepath(self):\r\n fd, fp = mkstemp(prefix='test_parse_mapping_file',\r\n suffix='.txt')\r\n close(fd)\r\n self.files_to_remove.append(fp)\r\n open(fp, 
'w').write('\\n'.join(['#sample\\ta\\tb',\r\n '#comment line to skip',\r\n 'x \\t y \\t z ', ' ',\r\n '#more skip',\r\n 'i\\tj\\tk']))\r\n obs = parse_mapping_file(fp)\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n self.assertEqual(obs, exp)", "def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]", "def test_filter_mapping_file_from_mapping_f(self):\r\n actual = filter_mapping_file_from_mapping_f(\r\n self.tutorial_mapping_f, [\"PC.354\", \"PC.355\"])\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\r\n self.assertEqual(actual, expected)", "def testDirExists(self, mock_dir, mock_exists, mock_listdir):\n mock_dir.return_value = True\n mock_exists.return_value = True\n mock_listdir.return_value = self.files\n\n self.assertEqual(\n self.is_seq,\n self.mr.is_seq\n )\n\n if len(self.seqs) > 0:\n self.assertEqual(\n self.seqs[0],\n self.mr.seq\n )\n else:\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n self.assertEqual(\n self.seqs,\n self.mr.seqs\n )\n\n mock_listdir.assert_called_once_with(self.mr.path)", "def inferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping,\n sequences_mapping, protein_mapping, verbose=False, throwOnMismatch=False, fileType=None):\n import csv, os\n\n if fileType == \"simple\":\n return simpleInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n elif fileType == \"traml\":\n return tramlInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n elif fileType == \"sqmass\":\n return sqlInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n\n nomatch_found = set([])\n for file_nr, f in enumerate(aligned_pg_files):\n header_dict = {}\n if f.endswith('.gz'):\n import gzip \n filehandler = gzip.open(f,'rb')\n else:\n filehandler = open(f)\n reader = csv.reader(filehandler, delimiter=\"\\t\")\n header = next(reader)\n for i,n in enumerate(header):\n header_dict[n] = i\n\n if not \"align_origfilename\" in header_dict or not \"align_runid\" in header_dict:\n\n # Check whether we have a single mzML file and a single result\n # file. 
If so, simply map these to each other.\n if len(rawdata_files) == 1 and len(aligned_pg_files) == 1:\n mapping[\"0_0\"] = rawdata_files\n return\n\n print (header_dict)\n raise Exception(\"need column header align_origfilename and align_runid\")\n\n for this_row in reader:\n\n if len(this_row) == 0: \n continue\n\n # Get the transition mapping ... \n mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping)\n\n # 1. Get the original filename (find a non-NA entry) and the corresponding run id\n aligned_fname, aligned_id = getAlignedFilename(this_row, header_dict)\n\n if aligned_id is None or aligned_id in mapping:\n continue \n\n # 2. Go through all chromatogram input files and try to find\n # one that matches the one from align_origfilename\n for rfile in rawdata_files:\n\n # 2.1 remove common file endings from the raw data\n rfile_base = os.path.basename(rfile)\n for ending in [\".sqMass\", \".filter\", \".mzML\", \".chrom\"]:\n rfile_base = rfile_base.split(ending)[0]\n\n # 2.3 Check if we have a match\n if aligned_fname == rfile_base:\n if verbose: \n print(\"- Found match:\", os.path.basename(rfile), \"->\", os.path.basename(this_row[ header_dict[\"align_origfilename\"] ]))\n mapping[aligned_id] = [rfile]\n\n if not aligned_id in mapping:\n if True:\n nomatch_found.update( [aligned_fname] )\n if throwOnMismatch:\n raise Exception(\"Mismatch, alignment filename could not be matched to input chromatogram\")\n\n if verbose:\n print(\"- No match found for :\", list(nomatch_found), \"in any of\", \\\n [os.path.basename(rfile) for rfile in rawdata_files])\n print(\"- This may be a bad sign if you expected a match here. You might have \" +\\\n \"to either rename your files to have matching filenames \" +\\\n \"or provide an input yaml file describing the matching in detail.\")", "def test_format_mapping_file(self):\r\n headers = ['SampleID', 'col1', 'col0', 'Description']\r\n samples =\\\r\n [['bsample', 'v1_3', 'v0_3', 'd1'],\r\n ['asample', 'aval', 'another', 'd2']]\r\n comments = ['this goes after headers', 'this too']\r\n self.assertEqual(format_mapping_file(headers, samples, comments),\r\n example_mapping_file)\r\n # need file or stringIO for roundtrip test\r\n # roundtrip = parse_mapping_file(format_mapping_file(headers,samples,comments))\r\n # self.assertEqual(roundtrip, [headers,samples,comments])\r", "def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)", "def test_check_mapping_file_multiple_problems(self):\r\n\r\n check_mapping_file(mapping_fp=self.errors_warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n added_demultiplex_field=\"DoesNotExist\",\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt',\r\n '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in 
open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_warnings_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_errors_warnings)\r\n self.assertEqual(log_data, self.expected_log_errors_warnings_output)", "def test_store_mapping(self):\r\n\r\n expected = [\"1:\\t0\\t2\\t5\\t6\\n\",\r\n \"3:\\n\",\r\n \"4:\\n\",\r\n \"8:\\t7\\n\"]\r\n\r\n self.files_to_remove.append(\"/tmp/test_store_mapping_mapping.txt\")\r\n store_mapping(self.mapping, \"/tmp/\", prefix=\"test_store_mapping\")\r\n observed = list(open(\"/tmp/test_store_mapping_mapping.txt\", \"U\"))\r\n self.assertItemsEqual(observed, expected)", "def test_check_mapping_data_valid_data(self):\r\n\r\n mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG\\tFile2\\ts.2'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n expected_data = {'File3': 'Sample3',\r\n 'File2': 'Sample2',\r\n 'File1': 'Sample1'}\r\n\r\n actual_data = check_mapping_data(\r\n mapping_data,\r\n headers,\r\n filename_column)\r\n\r\n self.assertEqual(actual_data, expected_data)", "def test_reader(file_dir,\n word2id_dict,\n label2id_dict,\n word_replace_dict,\n filename_feature=\"\"):\n word_dict_len = max(map(int, word2id_dict.values())) + 1\n label_dict_len = max(map(int, label2id_dict.values())) + 1\n\n def reader():\n \"\"\"\n the data generator\n \"\"\"\n index = 0\n for root, dirs, files in os.walk(file_dir):\n for filename in files:\n if not filename.startswith(filename_feature):\n continue\n for line in io.open(\n os.path.join(root, filename), 'r', encoding='utf8'):\n index += 1\n bad_line = False\n line = line.strip(\"\\n\")\n if len(line) == 0:\n continue\n seg_tag = line.rfind(\"\\t\")\n if seg_tag == -1:\n seg_tag = len(line)\n word_part = line[0:seg_tag]\n label_part = line[seg_tag + 1:]\n word_idx = []\n words = word_part\n for word in words:\n if ord(word) < 0x20:\n word = ' '\n if word in word_replace_dict:\n word = word_replace_dict[word]\n if word in word2id_dict:\n word_idx.append(int(word2id_dict[word]))\n else:\n word_idx.append(int(word2id_dict[\"OOV\"]))\n yield word_idx, words\n\n return reader" ]
[ "0.6989255", "0.67256457", "0.6499422", "0.64963084", "0.64944816", "0.64576644", "0.63912785", "0.62879676", "0.62493104", "0.6245174", "0.62226516", "0.61930525", "0.6185058", "0.60630137", "0.6058861", "0.6024957", "0.5999067", "0.59754974", "0.5968207", "0.596742", "0.59522647", "0.5936938", "0.59153455", "0.5908508", "0.5908088", "0.59005845", "0.58943355", "0.5891607", "0.5888302", "0.58835894" ]
0.7653856
0
Sets the text of a Plone document if it exists and reindexes the document. The text comes from a browser view template tag.
def setPageText(portal, page, viewName): if page is None: return request = getattr(portal, 'REQUEST', None) if request is not None: view = queryMultiAdapter((portal, request), name=viewName) if view is not None: text = bodyfinder(view.index()).strip() page.setText(text, mimetype='text/html') page.reindexObject()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_text(self, text):\n self.response['text'] = text", "def edit_document():", "def SetText(self, text):\n self.Clear()\n self.__context.builder.DocumentInsert(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n text)\n self._blip_data.content = text", "def _set_text(self, text):\n self.clear()\n self.paragraphs[0].text = _to_unicode(text)", "def set_text(self, new_text):\n\n self.output['text'] = new_text", "def settext(self, text):\n self.__text = text\n self.__nonzero = True", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()", "def set_text(self):\n pass", "def set_text(self):\n\n if not self.text and len(self.get_files()) > 0:\n self.text = self.files[0].get_title()\n # if \"_\" in str(self.text):\n if re.match(\"[0-9]_[0-9]\", self.text) is not None:\n self.text = self.files[0].get_parent()[\"title\"]\n else:\n try: \n int(self.text)\n # is a simple int\n if int(self.text) > 20:\n self.text = self.files[0].get_parent()[\"title\"]\n except Exception as e:\n # not a simple int\n # do nothing cause probably set already\n pass\n self.text = self.text.replace(\"_\", \" \")\n self.set_keywords()", "def update_document(self):\n pass", "def reindex_page(self, page, title, writer, text=None):\n\n if text is None:\n get_text = getattr(page, 'plain_text', lambda: u'')\n try:\n text = get_text()\n except error.NotFoundErr:\n text = None\n\n extract_links = getattr(page, 'extract_links', None)\n links = []\n wanted = []\n if extract_links and text:\n for link, label in extract_links(text):\n qlink = link.replace(u' ', u'%20')\n label = label.replace(u' ', u'%20')\n links.append(u'%s:%s' % (qlink, label))\n if link[0] != '+' and link not in wanted and link not in self.storage:\n wanted.append(qlink)\n else:\n links = []\n doc = {'title': str(title)}\n if links:\n doc['links'] = u' '.join(links)\n doc['has_links'] = True\n if wanted:\n doc['wanted'] = u' '.join(wanted)\n if text:\n doc['content'] = text\n writer.add_document(**doc)\n else:\n writer.delete_by_term('title', title)", "def addContent(text):", "def setText(self, text=\"\"):\n self._text = text\n self._text_item.setHtml(self._compile_text())", "def _set_text(self, text):\n self.clear()\n r = self.add_run()\n r.text = _to_unicode(text)", "def set_text(self, text):\n\n self.text = text", "def setText(self,text,index=0):\n self.rb[index].setText(text)", "def SetText(self, text):\r\n\r\n self._text = text", "def update(self, text: str) -> None:\n raise NotImplementedError", "def add_text(self, text):\n text_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/text.html')\n text_output = text_template.render(text=text)\n self.contents.append(text_output)", "def save_text(self):\n content = self.get_content()\n if content != '':\n self.text.append((content, self.context, self.ancestor))", "def update_text(self: object, widget: Text, new_text: str) -> None:\n widget.delete(\"1.0\", END) #Clear the text window so we can write.\n widget.insert(END,new_text)", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and 
pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def update(self, db):\n self.text = self.form.text.data\n self.save(db)", "def set_lic_text(self, doc, text):\n if self.has_extr_lic(doc):\n if not self.extr_text_set:\n self.extr_text_set = True\n self.extr_lic(doc).text = text\n return True\n else:\n raise CardinalityError('ExtractedLicense::text')\n else:\n raise OrderError('ExtractedLicense::text')" ]
[ "0.66143626", "0.66091734", "0.6442834", "0.63463277", "0.63095236", "0.6092229", "0.6057413", "0.6057413", "0.6057413", "0.6057413", "0.6057413", "0.6057413", "0.60565853", "0.60544866", "0.60536844", "0.6018306", "0.59941316", "0.5978407", "0.59762806", "0.5954543", "0.59179926", "0.58493024", "0.58306086", "0.5796543", "0.57808256", "0.57565624", "0.5750887", "0.5742798", "0.5733539", "0.57097274" ]
0.7150492
0
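A minimal usage sketch for the setPageText helper in the record above, e.g. called from a site setup step. The page id ('front-page') and the view name ('frontpage-view') are illustrative assumptions, not values taken from the record.

# Assumed call site for setPageText; the page id and view name are hypothetical.
# The page is looked up on the portal; if it is missing, setPageText returns early.
front_page = getattr(portal, 'front-page', None)
setPageText(portal, front_page, 'frontpage-view')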
Add a MaildropHost if Products.MaildropHost is available... If MaildropHost exists, PloneGazette will use it to send mails. This avoids duplicate emails being sent, as reported by
def addMaildropHost(self): portal = getToolByName(self, 'portal_url').getPortalObject() if not hasattr(portal, "MaildropHost"): try: portal.manage_addProduct['MaildropHost'].manage_addMaildropHost('MaildropHost', title='MaildropHost') except AttributeError: # if MaildropHost is not available, we pass... pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manage_addMailServer( self, id='MailServer', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailServer( id, title, host, port ) )\n\n if REQUEST is not None:\n REQUEST.RESPONSE.redirect( REQUEST.URL1 )", "def manage_addMailSender( self, id='MailHost', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailSender( id, title, host, port ) )\n\n if REQUEST is not None:\n REQUEST.RESPONSE.redirect( REQUEST.URL1 )", "def get_mail_host():\n portal = getSite()\n if portal is None:\n return None\n request = portal.REQUEST\n ctrlOverview = getMultiAdapter((portal, request), name='overview-controlpanel')\n mail_settings_correct = not ctrlOverview.mailhost_warning()\n if mail_settings_correct:\n mail_host = getToolByName(portal, 'MailHost', None)\n return mail_host", "def add_host(self, group, host):\n if group not in self.inventory:\n self.add_inventory_group(group)\n\n if host not in self.inventory[group]['hosts']:\n self.inventory[group]['hosts'].append(host)\n return", "def test_smtp_host(self):\n key = api.portal.get_registry_record(\n 'plone.smtp_host'\n )\n self.assertEqual(u'smtp.gmail.com', key)", "def qa():\n env.hosts = ['[email protected]']\n env.directory = '/var/www/swordpushweb'", "def admin_host(self, admin_host):\n\n self._admin_host = admin_host", "def __add_host(self, host_form):\n try:\n host_object = Host.objects.get(\n host_name=host_form.cleaned_data['host_name']\n )\n for field in host_form.cleaned_data:\n setattr(\n host_object, field, host_form.cleaned_data[field]\n )\n host_object.save()\n return HttpResponseRedirect(reverse('log_collector:index'))\n except errors.ObjectDoesNotExist:\n return self.form_valid(host_form)", "def configure_remote_hostname_override(target='prod'):\n config = setup_env('etc/ep.remote.cfg')\n env.update(config._sections['energyportal_%s' % target])\n\n upload_template('remote.template.py',\n '/home/ubuntu/ep_site/settings/components/env/%s.py' % target,\n use_sudo=True, template_dir='fabfile/templates', use_jinja=True, context=env)", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")", "def check_add_hosts(self, export_details):\n\n playbook_host_dict = self.create_current_host_dict_playbook()\n add_host_dict = dict()\n host_type_list = ['no_access_hosts', 'read_only_hosts',\n 'read_write_hosts', 'read_only_root_hosts',\n 'read_write_root_hosts']\n\n for host_type in host_type_list:\n if playbook_host_dict[host_type]:\n hosts_to_add = list()\n ipv4_hosts, ipv6_hosts, fqdn_hosts = \\\n self.get_export_hosts(export_details[host_type])\n for host in playbook_host_dict[host_type]:\n version = check_ipv4_ipv6_fqdn(host)\n\n # Check if host is FQDN/Netgroup or IP\n if version:\n if version == 4:\n # IPv4 host is provided\n ipv4_host = self.get_ipv4_host(host)\n # Check if given host is member of already added\n # network\n if ipv4_host not in ipv4_hosts:\n if str(ipv4_host) not in hosts_to_add:\n hosts_to_add.append(str(ipv4_host))\n else:\n # IPv6 host is provided\n ipv6_host = self.get_ipv6_host(host)\n # Check if given host is member of already added\n # network\n if ipv6_host not in ipv6_hosts:\n if str(ipv6_host) not in hosts_to_add:\n hosts_to_add.append(str(ipv6_host))\n else:\n # FQDN/Netgroup is provided\n if 
host not in fqdn_hosts:\n if host not in hosts_to_add:\n hosts_to_add.append(host)\n if hosts_to_add:\n if host_type == \"read_only_root_hosts\":\n export_details[host_type].extend(hosts_to_add)\n add_host_dict['read_only_root_hosts'] = \\\n export_details[host_type]\n else:\n add_host_dict['add_' + host_type] = hosts_to_add\n\n LOG.info(\"Host list to add: %s\", add_host_dict)\n return add_host_dict", "def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")", "def append_allowed_hosts(self, hostname):\r\n settings.ALLOWED_HOSTS.append(hostname)\r\n self.addCleanup(settings.ALLOWED_HOSTS.pop)", "def add_or_remove_host(self, event):\n try:\n host = self.caller.search(self.lhs).Dominion\n except AttributeError:\n return\n if event:\n if host == event.main_host:\n raise self.CalCmdError(\"The main host cannot be removed.\")\n if host in event.hosts:\n event.change_host_to_guest(host)\n msg = \"Changed host to a regular guest. Use /uninvite to remove them completely.\"\n else:\n event.add_host(host)\n msg = \"%s added to hosts.\" % host\n else:\n hosts = self.project[\"hosts\"]\n if host.id in hosts:\n hosts.remove(host.id)\n if host.id not in self.project[\"invites\"]:\n self.project[\"invites\"].append(host.id)\n msg = \"Changed host to a regular guest. Use /uninvite to remove them completely.\"\n else:\n hosts.append(host.id)\n if host.id in self.project[\"invites\"]:\n self.project[\"invites\"].remove(host.id)\n msg = \"%s added to hosts.\" % host\n self.msg(msg)", "def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts", "def getHost(self): #$NON-NLS-1$\r", "def add(self, hostname, hostalias, hostip, hosttemplate, pollername=None, hgname=None):\n return super(HostTemplate, self).add(hostname, hostalias, hostip, hosttemplate, '', [])", "def production():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]']\n env.host_type = 'production'\n env.user = 'ombu'\n env.host_webserver_user = 'nginx'\n env.host_site_path = '/home/ombu/webapps/ombuweb'", "def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e", "def add_host_entry(ip, hostname, domain):\n host_line = ip+\" \"+hostname+\".\"+domain+\" \"+hostname\n\n # Only add entry if it does not exist already. We don't want warnings about\n # grep not finding the entry, as that's to be expected.\n with hide(\"warnings\"), settings(warn_only=True):\n if run(\"grep \\\"\"+host_line+\"\\\" /etc/hosts\").failed:\n sudo(\"echo \"+host_line+\" >> /etc/hosts\")", "def _install(self, host):\n pass", "def AddHost(parser):\n parser.add_argument(\n '--host',\n help=(\n \"Cloud SQL user's hostname expressed as a specific IP address or\"\n ' address range. `%` denotes an unrestricted hostname. Applicable'\n ' flag for MySQL instances; ignored for all other engines. 
Note, if'\n ' you connect to your instance using IP addresses, you must add your'\n ' client IP address as an authorized address, even if your hostname'\n ' is unrestricted. For more information, see [Configure'\n ' IP](https://cloud.google.com/sql/docs/mysql/configure-ip).'\n ),\n )", "def enhost(host):\n available = find(host,DNSMASQ_AVAILABLE)\n if not available:\n print(\"%s not found in available directory -- \" % host + DNSMASQ_AVAILABLE)\n return \n\n enabled = find(host,DNSMASQ_ENABLED)\n if enabled:\n print(\"%s is already enabled\" % host)\n return\n src = available\n dest = DNSMASQ_ENABLED + '/' + host + DNSMASQ_CFG_SUFFIX\n print(\"Linking %s to %s\" % (src,dest))\n os.symlink(src,dest)\n restart()\n listhosts()", "def add_hosts(self, hosts):\n for host in hosts:\n if host not in self.__hosts__:\n self.__hosts__.append(KnownHostsHost(host))", "def init_host(self):\n\n LOG.debug(_('XManager init_host...'))\n\n pass", "def postprocess():\n if ERRORS:\n address = '[email protected]'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )", "def setup_domain_for_droplet(self, droplet, name):\n domain = self.manager.get_domain(self.domain)\n domain.load()\n droplet.load()\n domain.create_new_domain_record(type=\"A\", name=\"%s.net\" % name,\n data=droplet.ip_address)", "def test_default_domain(self, mock_getfqdn):\n # (This avoids problems with ESPs that re-use Content-ID as attachment\n # filename: if the local hostname ends in \".com\", you can end up with\n # an inline attachment filename that causes Gmail to reject the message.)\n mock_getfqdn.return_value = \"server.example.com\"\n cid = attach_inline_image(self.message, sample_image_content())\n self.assertRegex(\n cid,\n r\"[\\w.]+@inline\",\n \"Content-ID should be a valid Message-ID, \" \"but _not_ @server.example.com\",\n )" ]
[ "0.57076377", "0.5688542", "0.54326963", "0.5342844", "0.522222", "0.5184435", "0.5170344", "0.5141721", "0.51176596", "0.5113623", "0.5091956", "0.5051609", "0.5037057", "0.5037057", "0.50238883", "0.49762768", "0.49572164", "0.49479112", "0.48867533", "0.48693794", "0.48661914", "0.48653543", "0.48627666", "0.48496935", "0.48369452", "0.4824847", "0.48199376", "0.48029366", "0.48027465", "0.48022905" ]
0.7845082
0
Method to add our wanted indexes to the portal_catalog.
def addCatalogIndexes(portal): catalog = getToolByName(portal, 'portal_catalog') indexes = catalog.indexes() wanted = (('standardTags', 'KeywordIndex'), ('iamTags', 'KeywordIndex'), ('isearchTags', 'KeywordIndex'), ('hiddenTags', 'KeywordIndex')) indexables = [] for name, meta_type in wanted: if name not in indexes: catalog.addIndex(name, meta_type) indexables.append(name) logger.info("Added %s for field %s.", meta_type, name) if len(indexables) > 0: logger.info("Indexing new indexes %s.", ', '.join(indexables)) catalog.manage_reindexIndex(ids=indexables)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_catalog_indexes(context, logger):\n if logger is None:\n logger = logging.getLogger('bungenicms.membershipdirectory')\n \n # Run the catalog.xml step as that may have defined new metadata columns. \n # We could instead add <depends name=\"catalog\"/> to the registration of our \n # import step in zcml, but doing it in code makes this method usable as \n # upgrade step as well. Note that this silently does nothing when there is \n # no catalog.xml, so it is quite safe.\n setup = getToolByName(context, 'portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'catalog')\n \n catalog = getToolByName(context, 'portal_catalog')\n indexes = catalog.indexes()\n \n # Specify the indexes you want, with ('index_name', 'index_type')\n wanted = (('county', 'FieldIndex'),\n ('constituency', 'FieldIndex'),\n ('priority_number', 'FieldIndex'), \n ('political_party', 'FieldIndex'),\n ('elected_nominated', 'FieldIndex'),\n ('member_status', 'FieldIndex'),\n ('special_interest', 'FieldIndex'),\n ('other_names', 'FieldIndex'),\n ('member_role', 'FieldIndex'),\n ('member_title', 'FieldIndex'),\n ('body_text', 'FieldIndex'),\n ('member_full_names', 'ZCTextIndex'),\n )\n\n indexables = []\n for (name, meta_type) in wanted:\n if meta_type and name not in indexes:\n if meta_type == 'ZCTextIndex':\n item_extras = Empty()\n item_extras.doc_attr = name\n item_extras.index_type = 'Okapi BM25 Rank'\n item_extras.lexicon_id = 'plone_lexicon'\n catalog.addIndex(name, meta_type, item_extras)\n else:\n catalog.addIndex(name, meta_type)\n \n indexables.append(name)\n logger.info('Added %s for field %s.', meta_type, name)\n if len(indexables) > 0:\n logger.info('Indexing new indexes %s.', ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def create_indexes(self) -> None:\n self.collection.create_index(\"traceId\")\n self.collection.create_index(\"process.serviceName\")", "def createIndex(self):\n\n super(COCO_PLUS, self).createIndex()\n catNameToId = dict()\n pointclouds = dict()\n imgToPc = dict()\n\n if 'pointclouds' in self.dataset:\n for pc in self.dataset['pointclouds']:\n imgToPc[pc['img_id']] = pc\n pointclouds[pc['id']] = pc\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n catNameToId[cat['name']] = cat['id']\n\n self.catNameToId = catNameToId\n self.pointclouds = pointclouds\n self.imgToPc = imgToPc\n self.logger.info('index created.')", "def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for 
{}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")", "def addVars(self, *indexes, **kwargs):\n ...", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def index_siteroot(context):\n portal = getSite()\n portal.reindexObject()", "def insert_index(self):\n pass", "def _SetupIndexes(self, _open=open):\n pass", "def manage_addPloneAnnuaireCatalog(self, REQUEST=None):\n\n c = PloneAnnuaireCatalog()\n self._setObject(c.getId(), c)\n\n cat = getattr(self, c.getId())\n\n # Add Lexicon\n cat.manage_addProduct['ZCTextIndex'].manage_addLexicon(\n 'annuaire_lexicon',\n elements=[\n args(group=\"Annuaire Latin normalizer and splitter\",\n name=\"Annuaire Latin normalizer and splitter\"),\n ]\n )\n\n # Add indexes and metadatas\n for index_name, index_type in cat.enumerateIndexes():\n try: # ugly try catch XXX FIXME\n if index_name not in cat.indexes():\n if index_type == 'ZCTextIndex':\n extra = args(doc_attr=index_name,\n lexicon_id='annuaire_lexicon',\n index_type='Okapi BM25 Rank')\n cat.addIndex(index_name, index_type, extra=extra)\n else:\n cat.addIndex(index_name, index_type)\n\n if not index_name in cat.schema():\n cat.addColumn(index_name)\n except:\n pass\n\n if REQUEST is not None:\n return self.manage_main(self, REQUEST, update_menu=1)", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def create_index():", "def create_indices(self) -> None:\n self.client.indices.create(\n index=\"business\",\n body=BUSINESS_MAPPINGS\n )\n self.client.indices.create(\n index=\"review\",\n body=REVIEW_MAPPINGS\n )\n self.client.indices.create(\n index=\"tip\",\n body=TIP_MAPPINGS\n )", "def store_index(self, index, doc_type, source_list, init_id):\n\n bulk_actions = []\n doc_id = init_id\n\n for source in source_list:\n data_body = ElasticSearchUtility.__index_data_body(index, doc_type, doc_id, source[\"_source\"])\n bulk_actions.append(data_body)\n doc_id += 1\n\n print 'inserting - ', len(bulk_actions)\n helpers.bulk(self.es, bulk_actions)", "def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n self.index_all(target_index)", "def build_index(self):\n self.rebuild_index()", "def solr_index(self, **kwargs):\n solr_dict = self.solr_dict()\n solr_dict['uuid'] = str(self.uuid)\n if kwargs.get('solrconn'):\n solrconn = kwargs.get('solrconn')\n else:\n solrconn = solr.SolrConnection(settings.SOLR_SERVER)\n solrconn.add(**solr_dict)\n\n if kwargs.get('commit', True):\n solrconn.commit()", "def on_new_site(self, files):\n init_index()", "def build_index():\n pass", "def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS 
geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))", "def checkCatalogIndexes(self, trans):\n for lang in trans.keys():\n index_id = 'objecttrans_%s' % lang\n if not index_id in self.indexes():\n self.__createObjecttransIndex(index_id)\n index_id = 'objectname_%s' % lang\n if not index_id in self.indexes():\n self.__createObjectnameIndex(index_id)", "def _create_indexes(self):\r\n # WARNING: The collection will be locked during the index\r\n # creation. If the collection has a large number of\r\n # documents in it, the operation can take a long time.\r\n\r\n # TODO: The creation of indexes can be moved to a Django\r\n # management command or equivalent. There is also an option to\r\n # run the indexing on the background, without locking.\r\n self.collection.ensure_index([('time', pymongo.DESCENDING)])\r\n self.collection.ensure_index('event_type')", "def add_not_yet_indexed_photos(self):\n # Define which photos have not been indexed yet by this index.\n # TODO: How? CBIRIndex should store information about which photos from database it has indexed.\n # new ones = Database photos - indexed\n # ManyToManyRelation?\n\n # cbir_instance.compute_descriptors(list_paths, to_index=True, for_training_clusterer=False)\n # cbir_instance.add_photos_to_index()\n\n raise NotImplementedError", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def AddIndex(self, target):\n if \"w\" not in self.mode:\n raise IOError(\"FileStoreImage %s is not in write mode.\", self.urn)\n predicate = (\"index:target:%s\" % target).lower()\n data_store.DB.MultiSet(self.urn, {predicate: target}, token=self.token,\n replace=True, sync=False)", "def init(self):\n self._es.create_index_template(\n name=DATASETS_INDEX_NAME,\n template=DATASETS_INDEX_TEMPLATE,\n force_recreate=True,\n )\n self._es.create_index(DATASETS_INDEX_NAME)", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)" ]
[ "0.819925", "0.6714551", "0.66770244", "0.6349882", "0.6222696", "0.6083812", "0.6036434", "0.6023171", "0.60225964", "0.60145235", "0.60141164", "0.6001239", "0.59967524", "0.5949453", "0.5890639", "0.5824087", "0.57495576", "0.5746176", "0.5742815", "0.57316446", "0.5660347", "0.5639385", "0.56046766", "0.5578768", "0.5577449", "0.5577057", "0.55769384", "0.555676", "0.55358285", "0.55213827" ]
0.8188832
1
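A sketch of how the addCatalogIndexes helper from the record above might be wired into a GenericSetup post-install handler; the handler name and the assumption that it lives alongside the helper are illustrative, not taken from the record.

# Hypothetical setuphandlers-style wiring for the addCatalogIndexes helper above.
def post_install(context):
    # GenericSetup hands the import context to the handler; getSite() yields the portal.
    portal = context.getSite()
    addCatalogIndexes(portal)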
Extracts and parses data from a sqlite3.Cursor object
def cursor_data(c): # pull column description d = [] for i in range(len(c.description)): d.append(c.description[i][0]) # fetch column entries c = c.fetchall() # compile list info = [] for i in range(len(c)): # compile dictionary entry entry = {} for j in range(len(d)): entry[d[j]] = c[i][j] info.append(entry) # success return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_list(cursor):\n header = [h[0] for h in cursor.description]\n data = cursor.fetchall()\n return header, data", "def get_text_data(db, table, col):\n con = lite.connect(db)\n with con:\n cur = con.cursor()\n cur.execute(\"SELECT \" + col + \" FROM \" + table)\n text_data = cur.fetchall() #list of tuples\n text_data = strip_tuple(text_data)\n text_data = clean_text(text_data)\n return(text_data)", "def get_file_contents(db_cursor):\n\n db_cursor.execute(\"\"\"SELECT * FROM data\"\"\")\n db_rows = db_cursor.fetchall()\n return {row[0]: row[1] for row in db_rows if row != []}", "def dictfetchall(cursor):\n desc = cursor.description\n for row in cursor.fetchall():\n yield dict(zip([col[0] for col in desc], row))", "def construct_dict(cursor):\n rows = cursor.fetchall()\n return [dict((cursor.description[i][0], value) for i, value in enumerate(row))\n for row in rows]", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def read_from_db(cursor):\n\tcursor.execute('''SELECT * FROM (\n\t\tSELECT * FROM Kombucha_data ORDER BY Time DESC LIMIT 20)\n\t\tORDER BY Time ASC;''')\n\n\trows = cursor.fetchall()\n\treturn rows", "def sqlite3_dict_factory(cursor, row):\n dict_row = dict()\n for idx, col in enumerate(cursor.description):\n dict_row[col[0]] = row[idx]\n dict_row[idx] = row[idx]\n return dict_row", "def map_row_to_dict(cursor: sqlite3.Cursor, row_data):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row_data[idx]\n return d", "def __toListOfDict(self, cursor):\n lst = []\n for row in cursor.fetchall():\n # first convert row to a dictionary\n rowdict={}\n for idx, col in enumerate(cursor.description):\n rowdict[col[0]] = row[idx]\n lst.append(rowdict)\n return lst", "def fetch_data_from_db(query):\n cursor.execute(query)\n result = cursor.fetchall()\n return result", "def dictfetchall(cursor):\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]", "def dictfetchall(cursor):\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]", "def read(self,s,v):\n self.cur.execute(s,v)\n data = self.cur.fetchall()\n return data", "def cursor(cls) -> Iterator[sqlite3.Cursor]:\n with closing(cls.db.cursor()) as cur:\n yield cur", "def parseIntoDB(self, filehandle, cursor, alignTab, sequenceTab=None,\n update=None):\n c = filehandle.tell()\n filehandle.seek(0, 2)\n filesize = filehandle.tell()\n filehandle.seek(c)\n l = filehandle.readline()\n rc = 0\n count = 0\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n count+=1\n self.readalign(la[1:], filehandle)\n self._dump(alignTab, sequenceTab)\n if(update and not count % 1000):\n cursor.execute(update % (int(filehandle.tell() * 100.\n / filesize)))\n else:\n## print \"end of records\"\n return\n l=filehandle.readline()", "def _process_data_for_inv_db(row=None, sets=None, parts=None, colors=None):\n # print(\"Getting data for row {}\".format(row[0]))\n row[0] = secondary_sets.get_set_id(row[0], sets=sets, add=True) # Set Id\n # print(\"Got ID {}\".format(row[0]))\n if row[0] is not None:\n row[1] = get_re_piece_id(row[1], parts=parts, 
add=False) # Re_piece Id\n # print(\"Got Piece {}\".format(row[1]))\n row[2] = syt.int_zero(row[2]) # Quantity\n row[3] = info.get_color_id(row[3], colors=colors) # Color ID\n # print(\"Got Color {}\".format(row[3]))\n\n del row[-1]\n return row\n\n else:\n return None", "def getCursor(self) -> sqlite3:\n return self.cursor", "def parse_row_stats(self, cursor):\n rows = list(cursor)\n if len(rows) > 0:\n column_names = [desc[0] for desc in cursor.description]\n # assumed to be a single row returned\n # convert the column names to lowercase\n return dict(zip(column_names, rows[0]))\n return dict()", "def cursor_with_rows(condition, database, table):\n connection = sqlite3.connect(database)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM ' + table + ' WHERE ' + condition)\n return cursor, connection", "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "def fetch_data(cursor, *tables):\r\n final = []\r\n\r\n for table in tables:\r\n\r\n cursor.execute(f\"SELECT * FROM {table};\")\r\n data = cursor.fetchall()\r\n final.append(data)\r\n\r\n try:\r\n return final if len(final) > 1 else final[0]\r\n except:\r\n return []", "def initialize_from_sql_cursor(self, sqlcursor):\n # initialize the result\n tuples = 0\n\n # add the SQL result to the time series\n data = sqlcursor.fetchmany()\n while 0 < len(data):\n for entry in data:\n self.add_entry(str(entry[0]), entry[1])\n\n data = sqlcursor.fetchmany()\n\n # set the normalization level\n self._normalized = self._check_normalization\n\n # return the number of tuples added to the timeseries.\n return tuples", "def query(cursor, query):\n out = []\n c = cursor.execute(query)\n out.append(c.fetchall())\n # will return list of tuples for each query\n return out", "def initialize_from_sql_cursor(self, sqlcursor):\n # initialize the result\n tuples = 0\n\n # add the SQL result to the timeseries\n data = sqlcursor.fetchmany()\n while 0 < len(data):\n for entry in data:\n self.add_entry(str(entry[0]), [item for item in entry[1:]])\n\n data = sqlcursor.fetchmany()\n\n # set the normalization level\n self._normalized = self._check_normalization()\n\n # return the number of tuples added to the timeseries.\n return tuples", "def cursor():\n dbh = handle()\n return dbh.cursor()", "def parse(self, sql):\n # Get a temporary file name for sqlite\n db_file = tempfile.NamedTemporaryFile('w')\n # Connect to the temporary file.\n self.db = sqlite3.connect(db_file.name)\n # Enable foreign keys.\n self.db.execute('pragma foreign_keys=ON')\n # Get a cursor instance.\n self.cursor = self.db.cursor()\n\n # If sql is not a string assume it is a file.\n if not isinstance(sql, str):\n # Read the file into sql.\n sql = str(sql.read())\n\n # Execute the SQL statements from the input.\n self.cursor.executescript(sql)\n\n # Get all table names.\n self.cursor.execute(\n \"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self.cursor.fetchall()\n\n # Initialise the variable containing the parsed tables.\n self.tables = OrderedDict()\n # Run through all tables.\n for table in tables:\n # Create an entry for each table.\n self.tables[table[0]] = OrderedDict()\n\n # Get info on columns and primary keys.\n self.cursor.execute('PRAGMA table_info({})'.format(table[0]))\n # For each column\n for sql_column in self.cursor.fetchall():\n # Create an empty column entry.\n column = dict()\n # Set the name.\n column['name'] = sql_column[1]\n 
# Set the type\n column['type'] = sql_column[2]\n # Determine if this is a primary key\n column['primary'] = False\n if sql_column[5] == 1:\n column['primary'] = True\n # We do not know if this key has a reference yet.\n column['foreign'] = False\n\n # Add the column to the table.\n self.tables[table[0]][sql_column[1]] = column\n\n # Get information on foreign keys.\n self.cursor.execute('PRAGMA foreign_key_list({});'.format(table[0]))\n # Run through all foreign keys\n for foreign_key in self.cursor.fetchall():\n # Find the column by its name.\n for name, column in self.tables[table[0]].items():\n # Search for the name of the source column.\n if name == foreign_key[3]:\n # Add the referenced table and column in dot notation.\n self.tables[table[0]][name]['foreign'] = '{}.{}'.format(foreign_key[2], foreign_key[4])\n\n # Close the database connection\n self.db.close()\n # Make the cursor unusable for good measure.\n self.cursor = None\n\n # Run through the parsed tables and dispatch to the related call backs.\n for table_name, columns in self.tables.items():\n # New table.\n self.add_table(table_name)\n\n # Table columns.\n for column in columns.values():\n # Primary key.\n if column['primary'] is True:\n self.add_column_primary(column['name'], column['type'])\n # Foreign key.\n if column['foreign'] is not False:\n self.add_column_foreign(column['name'], column['type'], column['foreign'])\n # Just a column.\n if ((column['primary'] is not True) and\n (column['foreign'] is False)):\n self.add_column(column['name'], column['type'])", "def serialize(self, cursor, fh):\n description = cursor.description\n rows = cursor.fetchall()\n pickle.dump((description, rows), fh)", "def cursor_to_dataframe(cur):\n description = cur.description\n column_names = [item.name for item in description]\n data = cur.fetchall()\n df = pandas.DataFrame(data, columns=column_names)\n cur.close()\n return df", "def dictfetchall(cursor):\n desc = cursor.description\n return [dict(itertools.izip([col[0] for col in desc], row))\n for row in cursor.fetchall()]" ]
[ "0.64374334", "0.62329024", "0.6100417", "0.5942611", "0.5928529", "0.59160304", "0.59160304", "0.58683413", "0.58111495", "0.5792052", "0.5788436", "0.577716", "0.577716", "0.5717413", "0.56549394", "0.56148547", "0.5608014", "0.5591911", "0.5588732", "0.55835444", "0.55728376", "0.5537774", "0.55307674", "0.5527743", "0.5527111", "0.55188674", "0.55120367", "0.55044913", "0.55020124", "0.54750276" ]
0.7030328
0
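A self-contained sketch exercising the cursor_data helper from the record above against an in-memory SQLite database; the table name and columns are made up for illustration, and cursor_data is assumed to be in scope exactly as defined above.

import sqlite3

# cursor_data is assumed to be the helper defined in the record above.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE readings (time TEXT, value REAL)')
conn.execute("INSERT INTO readings VALUES ('2024-01-01 00:00:00', 1.5)")
cur = conn.execute('SELECT * FROM readings')
print(cursor_data(cur))  # -> [{'time': '2024-01-01 00:00:00', 'value': 1.5}]
conn.close()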
Retrieves and parses the current timestamp from the 'datetime' module.
def get_time(): dt = datetime.datetime.now() dt_parsed = dt.strftime("%Y-%m-%d %H:%M:%S") return dt_parsed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\r\n elif 'observation_time' in self.data:\r\n return datetime.strptime(self.data['observation_time'] \\\r\n +' %s'%datetime.now().year,\r\n 'Last Updated on %b %d, %H:%M %p %Z %Y')\r\n return ''", "def get_timestamp(self):\n p = self._get_sub_text('timestamp')\n if not p:\n return None\n else:\n return xep_0082.datetime(p)", "def read_datetime(self):\n with GPIOTimingContentManager(self.gpio, start=self._start_tx, end=self._end_tx):\n self._write_byte(self.REG_BURST_READ)\n\n regs = list()\n for _ in range(self.REG_SIZE):\n regs.append(self._read_byte())\n\n # Decode bytes to datetime\n return datetime.datetime.strptime(\" \".join([\"{:x}\".format(x) for x in regs]), self.DT_STR_FMT)", "def time(self):\n return parse_time(self['timestamp'])", "def datetime(self, datetime=None):\n if datetime is None:\n buf = self.i2c.readfrom_mem(self.addr, DATETIME_REG, 7)\n return (\n self._bcd2dec(buf[6]) + 2000, # year\n self._bcd2dec(buf[5]), # month\n self._bcd2dec(buf[4]), # day\n self._bcd2dec(buf[3] - self.weekday_start), # weekday\n self._bcd2dec(buf[2]), # hour\n self._bcd2dec(buf[1]), # minute\n self._bcd2dec(buf[0] & 0x7F), # second\n 0 # subseconds\n )\n buf = bytearray(7)\n buf[0] = self._dec2bcd(datetime[6]) & 0x7F # second, msb = CH, 1=halt, 0=go\n buf[1] = self._dec2bcd(datetime[5]) # minute\n buf[2] = self._dec2bcd(datetime[4]) # hour\n buf[3] = self._dec2bcd(datetime[3] + self.weekday_start) # weekday\n buf[4] = self._dec2bcd(datetime[2]) # day\n buf[5] = self._dec2bcd(datetime[1]) # month\n buf[6] = self._dec2bcd(datetime[0] - 2000) # year\n if (self._halt):\n buf[0] |= (1 << 7)\n self.i2c.writeto_mem(self.addr, DATETIME_REG, buf)", "async def get_time(self) -> DateTime:\n return await DateTime.get(self._api)", "def getdatetime(timedateformat='complete'):\n\n timedateformat = timedateformat.lower()\n if timedateformat == 'day':\n return ((str(datetime.datetime.now())).split(' ')[0]).split('-')[2]\n elif timedateformat == 'month':\n return ((str(datetime.datetime.now())).split(' ')[0]).split('-')[1]\n elif timedateformat == 'year':\n return ((str(datetime.datetime.now())).split(' ')[0]).split('-')[0]\n elif timedateformat == 'hour':\n return (((str(datetime.datetime.now())).split(' ')[1]).split('.')[0]\n ).split(':')[0]\n elif timedateformat == 'minute':\n return (((str(datetime.datetime.now())).split(' ')[1]).split('.')[0]\n ).split(':')[1]\n elif timedateformat == 'second':\n return (((str(datetime.datetime.now())).split(' ')[1]).split('.')[0]\n ).split(':')[2]\n elif timedateformat == 'millisecond':\n return (str(datetime.datetime.now())).split('.')[1]\n elif timedateformat == 'yearmonthday':\n return (str(datetime.datetime.now())).split(' ')[0]\n elif timedateformat == 'daymonthyear':\n return ((str(datetime.datetime.now())).split(' ')[0]).split(\n '-')[2] + '-' + ((str(\n datetime.datetime.now())).split(' ')[0]).split('-')[1] + '-' + (\n (str(datetime.datetime.now())).split(' ')[0]).split('-')[0]\n elif timedateformat == 'hourminutesecond':\n return ((str(datetime.datetime.now())).split(' ')[1]).split('.')[0]\n elif timedateformat == 'secondminutehour':\n return (((str(datetime.datetime.now())).split(' ')[1]).split('.')[0]\n ).split(':')[2] + ':' + (((str(datetime.datetime.now())).split(\n ' 
')[1]).split('.')[0]).split(':')[1] + ':' + (\n ((str(datetime.datetime.now())).split(' ')[1]\n ).split('.')[0]).split(':')[0]\n elif timedateformat == 'complete':\n return str(datetime.datetime.now())\n elif timedateformat == 'datetime':\n return (str(datetime.datetime.now())).split('.')[0]\n elif timedateformat == 'timedate':\n return ((str(\n datetime.datetime.now())).split('.')[0]).split(' ')[1] + ' ' + (\n (str(datetime.datetime.now())).split('.')[0]).split(' ')[0]\n else:\n raise ValueError(\"Invalid time date format used.\")", "def __get_current_time(self) -> datetime:\n #return datetime.strptime(\"11:30\", '%H:%M')\n return datetime.now()", "def get_datetime(self, record):\n value = RecordValue(self.timestamp_attribute).render(record)\n return datetime.datetime.fromtimestamp(value)", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def timestamp(self):\n return parser.get_timestamp(self)", "def nowdt():\n from datetime import datetime\n\n now = datetime.now()\n return now.strftime(\"%d/%m/%Y %H:%M:%S\")", "def fetch_timestamp(self):\r\n return self.__public_request('GET', '/api/v1/timestamp')", "def get_time_stamp():\n with open(TIME_STAMP_FILE_NAME, 'r') as f:\n s = f.readline()\n return s", "def timestamp(self) -> dt.datetime:\n ts = self.json_data['timestamp']\n # Extract the datetime object from the `ts` string\n ts = dt.datetime.strptime(ts, '%Y-%m-%d %H:%M:%S')\n # Localise to Eastern time (Formstack returns Eastern times)\n ts = pytz.timezone('US/Eastern').localize(ts)\n # Convert to UTC time\n return ts.astimezone(pytz.timezone('UTC'))", "def _get_time(self): \n\t\t# need to variable-ize the version ??? 
\n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)", "def stime(self):\n try:\n return self['datetime_1']\n except TypeError:\n return None", "def time(self, the_datetime=None):\n if the_datetime:\n if self._request(\n 'S1',\n the_datetime.strftime('%y'), str(the_datetime.month), str(the_datetime.day),\n str(the_datetime.hour), str(the_datetime.minute), str(the_datetime.second)\n )[0]:\n return the_datetime\n else:\n done, data = self._request('GT')\n if done:\n if data == ['165', '165', '165', '165', '165', '85']:\n raise NoClock\n return datetime.datetime(\n year=int(data[0])+2000, month=int(data[1]), day=int(data[2]),\n hour=int(data[3]), minute=int(data[4]), second=int(data[5])\n )\n\n raise EvseError", "def get_datetime() -> Tuple[str, str]:\n datetime_obj = datetime.now()\n return datetime_obj.strftime(\"%Y-%m-%d %H:%M:%S\"), datetime_obj.strftime(\"%Y-%m-%d\")", "def get_current_time():\n dateTime = datetime.datetime.now()\n # \"%Y-%m-%d %H:%M:%S:%f\" is default formatting with everything\n dateTime = dateTime.strftime(\"%m-%d-%y %H:%M:%S\")\n\n logger.logger.debug(\"Getting current time: {}\".format(dateTime))\n\n return dateTime", "def _parse_datetime(self, data):\n d = data.find('./itdDate').attrib\n t = data.find('./itdTime').attrib\n\n # -1 means nope, there is no time known\n if d['weekday'] == '-1' or d['day'] == '-1' or t['minute'] == '-1':\n return None\n\n # convert time – the EFA API likes to talk about 24:00, so we have to correct that.\n result = datetime(int(d['year']), int(d['month']), int(d['day']), min(int(t['hour']), 23), int(t['minute']))\n if int(t['hour']) == 24:\n result += timedelta(hours=1)\n return result", "def etime(self):\n try:\n return self['datetime_2']\n except TypeError:\n return None", "def get_time():\n return datetime.datetime.now()", "def parse_time_stamp(self, time_stamp):\n # return iso8601.parse_date(time_stamp).replace(tzinfo=datetime.timezone.utc)\n return iso8601.parse_date(time_stamp)", "def get_current_datetime ( ) :\n return datetime.datetime.now( )", "def getdate():\r\n import datetime\r\n return datetime.datetime.now()", "def get_current_time(self):\n return self.time", "def extract_datetime(self, text):\n time = text.split('Time:')[1].split(' ')[0].strip()\n date = text.split('Date:')[1].split('\\n')[0].strip()\n date = self._sanitize_date(date)\n\n dst = False\n if \"PDT\" in time:\n dst = True\n\n momentstr = \" \".join([date, time.split(' ')[0]])\n fmt = \"%m/%d/%Y %H:%M:%S\"\n naive_moment = datetime.datetime.strptime(momentstr, fmt)\n\n zone = pytz.timezone('US/Pacific')\n moment = zone.localize(naive_moment, is_dst=dst)\n\n return datetime_to_epoch(moment)", "def get_datetime(self):\n timestamp = self.SendTime + self.SendTimeNS / 1000000000.0\n return datetime.datetime.utcfromtimestamp(timestamp)" ]
[ "0.69299984", "0.6702524", "0.63582027", "0.63440084", "0.63380075", "0.63115704", "0.6280957", "0.6251938", "0.62222415", "0.61944693", "0.61944693", "0.61590403", "0.6087337", "0.60751706", "0.60636747", "0.6060406", "0.6042445", "0.6027001", "0.6009286", "0.59921634", "0.5991707", "0.5980629", "0.59789014", "0.59779936", "0.5957173", "0.59216326", "0.59129393", "0.5902135", "0.5899005", "0.58962965" ]
0.6883768
1
Fetches the ten most recent news articles from an RSS feed.
def get_news(url):

    # parse RSS feed into list of dictionaries
    feed = feedparser.parse(url)

    # no RSS feed articles for url
    if len(feed['entries']) == 0:
        return []

    # get first ten articles from the RSS feed
    news = []
    i = 0
    while True:

        if i == len(feed['entries']) or i > 30:
            break

        try:
            # get link to article
            link = feed["entries"][i]["link"]

            # get title of article
            title = feed["entries"][i]["title"]

            try:
                # get raw summary of article
                summary_raw = feed["entries"][i]["summary"]

                # format summary
                summary = ""
                for c in summary_raw:
                    if c == "<":
                        summary += "..."
                        break
                    summary += c
            except KeyError as e:
                logging.error("no summary for RSS feed article: {}".format(link))
                summary = "read more here..."

            # get raw date
            date_raw = feed["entries"][i]["published_parsed"]
            if date_raw is None:
                date = feed["entries"][i]["published"]
            else:
                # format date
                year = str(date_raw.tm_year)
                months = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
                month = months[date_raw.tm_mon - 1]
                day = str(date_raw.tm_mday)
                weekdays = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
                wday = weekdays[date_raw.tm_wday]
                hour = str(date_raw.tm_hour)
                hour = "{:2}".format(hour).format(' ','0')
                min = str(date_raw.tm_min)
                min = "{:2}".format(min).replace(' ','0')
                date = hour + ":" + min + " - " + wday + " " + month + " " + day + ", " + year

            # compile entry and append to news list
            entry = {"link":link, "title":title, "date":date, "summary":summary}

            # sanitize entry
            for key in entry:
                # apostrophe
                entry[key] = entry[key].replace("&#39;", "'")
                # right single quotation mark
                entry[key] = entry[key].replace("’", "&#8217;")
                # left single quotation mark
                entry[key] = entry[key].replace("‘", "&#8216;")
                # right double quotation mark
                entry[key] = entry[key].replace("”", "&#8221;")
                # left double quotation mark
                entry[key] = entry[key].replace("“", "&#8220;")
                # Weird ampersand formatting
                entry[key] = entry[key].replace("&amp;", "&")
                # prepare entry for sqlite queries
                entry[key] = surround(entry[key])

            # add entry to news list
            news.append(entry)

            # max 10 entries
            if len(news) == 10:
                break

            i += 1

        except Exception as e:
            logging.error(e)
            i += 1
            pass

    # success
    return news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def get_recently_articles(cls, num):\n return cls.objects.values('title', 'view_times', 'update_time', 'author')\\\n .filter(status=0).order_by('-update_time')[:num]", "def get_news(rss_feed):\r\n\r\n class _CurrentData(object):\r\n \"\"\"Class holding a set of current attributes.\"\"\"\r\n item = None\r\n text = None\r\n\r\n def _start_element_handler(name, attrs):\r\n \"\"\"Handle XML start-elements.\"\"\"\r\n if name == 'item':\r\n # Allocate a new item.\r\n current.item = NewsItem()\r\n\r\n def _end_element_handler(name):\r\n \"\"\"Handle XML end-elements.\"\"\"\r\n if name == 'item':\r\n news_items.append(current.item)\r\n elif name in ('title', 'description', 'link', 'category'):\r\n try:\r\n setattr(current.item, name, current.text)\r\n except AttributeError:\r\n # The parser has run into a non-news item.\r\n pass\r\n\r\n def _char_data_handler(data):\r\n \"\"\"Handle XML element character data.\"\"\"\r\n current.text = data\r\n\r\n news_items = list()\r\n current = _CurrentData()\r\n\r\n parser = expat.ParserCreate()\r\n parser.StartElementHandler = _start_element_handler\r\n parser.EndElementHandler = _end_element_handler\r\n parser.CharacterDataHandler = _char_data_handler\r\n\r\n news_handle = urllib2.urlopen(rss_feed)\r\n xml_data = news_handle.read()\r\n \r\n parser.Parse(xml_data)\r\n\r\n return news_items", "def get_rss(limit):\n rss_data = feedparser.parse(URL)\n if limit == 1:\n title = rss_data.entries[0].title\n link = rss_data.entries[0].link\n rss_print(title, link)\n else:\n for i in range(0, limit):\n title = rss_data.entries[i].title\n link = rss_data.entries[i].link\n\n print(Back.CYAN + str(i + 1) + \"\\t\")\n rss_print(title, link)", "def latestEntriesRss():\n now = datetime.now()\n latestEntries = session.query(Pokemon).order_by(desc(Pokemon.date_entered))\\\n .limit(20)\n rss = render_template('rss.xml', lastBuildDate=now, entries=latestEntries)\n response = make_response(rss)\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n 
limit,\n date\n )\n\n return res", "def get_recent_news_items():\n news_item_count = request.args.get('newsItemCount') or 3\n try:\n animal_news = AnimalNews.get_printable_news_items_all_animals(news_item_count)\n return jsonify(message=animal_news), 200\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501", "def parse_rss(database, feed, depth=1):\n # Get the updates article count, and article urls and publish dates.\n rss_a = rss_feed(feed)\n \n # Get all (article urls, publish dates) pairs\n articles = []\n pairs = rss_a[1].items()\n for url, pubdate in pairs: \n articles += crawl_url(database, url, date=pubdate, depth=depth)\n \n return articles", "def rss(request, blog):\n\tblog = Blog.objects.get(urlname=blog)\n\tarticles = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:RSS_COUNT]\n\treturn render_to_response('rss/blog.html', {'blog': blog, 'articles': articles}, context_instance=RequestContext(request))", "def get_rss(address, website):\n #print address\n try:\n results = pattern.web.Newsfeed().search(address, count=100,\n cached=False, timeout=30)\n logger.debug('There are {} results from {}'.format(len(results),\n website))\n \n #print \"Results found\"\n except Exception as e:\n print 'There was an error. Check the log file for more information.'\n logger.warning('Problem fetching RSS feed for {}. {}'.format(address,\n e))\n results = None\n\n return results", "def fetch(feed):\n # Fetch the feed data.\n data = feedparser.parse(feed.ext_url)\n new_articles = []\n\n # If the `bozo` value is anything\n # but 0, there was an error parsing (or connecting) to the feed.\n if data.bozo:\n # Some errors are ok.\n if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n raise data.bozo_exception\n\n for entry in data.entries:\n\n # URL for this entry.\n url = entry['links'][0]['href']\n\n # Check for an existing Article.\n # If one exists, skip.\n if Article.objects(ext_url=url).first():\n continue\n\n data = extractor.extract(url, existing_data=entry)\n\n if data is None:\n continue\n\n # Secondary check for an existing Article,\n # by checking the title and source.\n existing = Article.objects(title=data['title']).first()\n if existing and existing.feed.source == feed.source:\n continue\n\n data['feed'] = feed\n\n article = Article(**data)\n article.save()\n new_articles.append(article)\n\n return new_articles", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def getLatestStories(self, newest, alreadyReadList):\n\t\turl = \"http://news.ycombinator.com\"\n\t\tif newest == \"newest\":\n\t\t\turl += \"/newest\"\n\t\tsource = self.getSource(url)\n\t\tstories = self.getStories(source, 
alreadyReadList)\n\t\treturn stories", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def top_ten(subreddit):\n req = get(\n \"https://www.reddit.com/r/{}/hot.json\".format(subreddit),\n headers={\n \"User-Agent\": \"alx_app\"},\n params={\n \"limit\": 10},\n allow_redirects=False)\n if req.status_code != 200:\n print(None)\n else:\n posts = req.json().get(\"data\").get(\"children\")\n for post in posts:\n print(post.get(\"data\").get(\"title\"))", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def getTopStories(self):\n source = self.getSource(\"http://news.ycombinator.com\")\n stories = self.getStories(source)\n return stories", "def get_top_news_and_the_rest(self):\n queryset = self.news.order_by('-marked', '-publication_date')\n return queryset.first(), queryset[1:]", "def parse_rss(link, mode):\n\n one_feed = []\n news_counter = 0\n app.logger.info(f'Parsing feed: {link}')\n # Get file from internet, open it with xml-parser\n rss = feedparser.parse(link)\n\n for entry in rss.entries:\n\n if mode == 'latest':\n news_item_date = get_timestamp(entry.published)\n\n # Stop reading RSS if current news is already older than time\n # when user last got the news feed\n if news_item_date < last_time_user_got_news:\n return one_feed\n\n post = {'title': entry.title,\n 'published': get_timestamp(entry.published)}\n\n # Try to get link to image from one of a place where it can be\n try:\n pic = entry.enclosures[0].href\n except(IndexError, AttributeError):\n pic = get_img_source(entry.summary)\n\n post['image'] = pic if pic else url_for('static',\n filename=\"400x400.jpg\")\n\n link = entry.link\n post['link'] = link\n domain_name = re.search(r'://(.+?)/', link).group(1)\n post['domain_name'] = domain_name if domain_name else 'unknown'\n\n one_feed.append(post)\n\n if mode != 'latest':\n return one_feed\n else:\n print('There are no new news at all.')\n return []", "def get_news(url, n_pages=1):\r\n news = []\r\n while n_pages:\r\n print(\"Collecting data from page: {}\".format(url))\r\n\r\n delay = 2\r\n max_retries = 5\r\n backoff_factor = 0.3\r\n for tryes in range(max_retries):\r\n try:\r\n response = requests.get(url)\r\n except requests.exceptions.RequestException:\r\n if tryes == max_retries - 1:\r\n raise\r\n else:\r\n break\r\n time.sleep(delay)\r\n delay = backoff_factor * (2 ** tryes)\r\n\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n news_list = extract_news(soup)\r\n next_page = extract_next_page(soup)\r\n url = \"https://news.ycombinator.com/\" + next_page\r\n news.extend(news_list)\r\n n_pages -= 1\r\n return news", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. 
Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. \n updates[str(item.link)] = a_time \n return (total, updates)", "def top_ten(subreddit):\n BASE_URL = 'http://www.reddit.com/r/{}/hot.json'.format(subreddit)\n head = {'User-Agent': 'Mozilla/5.0'}\n par = {'limit': 10}\n\n r = requests.get(BASE_URL, params=par, headers=head).json()\n\n res = r.get('data', {}).get('children', None)\n\n if res:\n for post in res:\n print(post.get('data').get('title'))\n else:\n print(None)", "def get_news(url, n_pages=1):\n news = []\n while n_pages:\n print(\"Collecting data from page: {}\".format(url))\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n news_list = extract_news(soup)\n next_page = extract_next_page(soup)\n url = \"https://news.ycombinator.com/\" + next_page\n news.extend(news_list)\n n_pages -= 1\n return news", "def top_ten(subreddit):\n\n limit = \"10\"\n\n url = \"https://www.reddit.com/r/{}/hot.json?limit={}\".format(subreddit,\n limit)\n\n user_agent = {\"User-Agent\": \"Python\"}\n response = requests.get(url, headers=user_agent, allow_redirects=False)\n if response.status_code >= 300:\n print(\"None\")\n else:\n for elem in response.json().get(\"data\").get(\"children\"):\n print(elem.get(\"data\").get(\"title\"))", "def get_custom_feeds(request):\n start = int(request.paginate_number) * 10\n end = start + 10\n feeds = Feed.objects.all().order_by('-id')[start: end]\n return get_feed_list(feeds)", "def get_news(url, n_pages=1):\n news = []\n while n_pages:\n print(\"Collecting data from page: {}\".format(url))\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n news_list = extract_news(soup)\n print(news_list)\n next_page = extract_next_page(soup)\n url = 'https://news.ycombinator.com/' + next_page\n news.extend(news_list)\n n_pages -= 1\n return news", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data" ]
[ "0.721724", "0.6942011", "0.6921786", "0.68017155", "0.680133", "0.67669815", "0.6721919", "0.66387874", "0.6592782", "0.65631974", "0.64701366", "0.63726705", "0.6331431", "0.6329042", "0.6328483", "0.63121986", "0.6246357", "0.6245741", "0.6224736", "0.6222687", "0.6214443", "0.6186483", "0.61791617", "0.61296105", "0.6126121", "0.61258334", "0.6123617", "0.61229414", "0.61150444", "0.6110842" ]
0.7554712
0
Calls the new_transformations.sql script on a date by date basis
def run_sql_transformations(self):
    conn = pg2.connect(user='postgres', dbname='penny', host='localhost', port='5432', password='password')
    for d in self.get_list_of_dates():
        print(d)
        df = pd.read_sql("Select count(*) as acount from auctions where auctiontime < '" + d + "' and qauctionID not in (SELECT DISTINCT AuctionID from bid_transform)", conn)
        print (df.acount[0])
        if (df.acount[0] > 0):
            bashCommand = "sudo -u postgres psql -d penny -f new_transformations.sql -v auction_date='" + d + "'"
            process = subprocess.Popen(bashCommand.split())
            output, error = process.communicate()
    conn.close
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_new_sql(self):\n\n pass", "def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()", "def upgrade():\n\n op.execute(\"\"\"\n UPDATE task_group_tasks\n SET start_date = CURDATE(), end_date=CURDATE()\n WHERE (start_date IS NOT NULL AND start_date < \"1900-01-01\") OR\n (end_date IS NOT NULL AND end_date < \"1900-01-01\")\n \"\"\")", "def create_gt_database_template_old(self):\n pass\n with self.connection as cursor:\n fn = os.path.join(os.path.dirname(__file__), 'gtlogold.sql')\n self.cursor.execute(open(fn, \"r\").read())", "def migration():", "def _generate_sql(self, keys, changed_keys):\n for key in reversed(keys):\n app_label, sql_name = key\n new_item = self.to_sql_graph.nodes[key]\n sql_deps = [n.key for n in self.to_sql_graph.node_map[key].parents]\n reverse_sql = new_item.reverse_sql\n\n if key in changed_keys:\n operation_cls = AlterSQL\n kwargs = {}\n # in case of replace mode, AlterSQL will hold sql, reverse_sql and\n # state_reverse_sql, the latter one will be used for building state forward\n # instead of reverse_sql.\n if new_item.replace:\n kwargs['state_reverse_sql'] = reverse_sql\n reverse_sql = self.from_sql_graph.nodes[key].sql\n else:\n operation_cls = CreateSQL\n kwargs = {'dependencies': list(sql_deps)}\n\n operation = operation_cls(\n sql_name, new_item.sql, reverse_sql=reverse_sql, **kwargs)\n sql_deps.append(key)\n self.add_sql_operation(app_label, sql_name, operation, sql_deps)", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def update_table(table_name):\n for filename in table_name_to_funcs[table_name][\"filename\"]:\n choose_file_to_get(table_name_to_funcs[table_name][\"file_type\"], filename)\n\n for process_func in table_name_to_funcs[table_name][\"process\"]:\n process_func()\n for to_sql_func in table_name_to_funcs[table_name][\"to_sql\"]:\n to_sql_func(update=True)", "def execute(self, context): \n aws_hook = AwsHook(self.aws_credentials)\n credentials = aws_hook.get_credentials()\n redshift = PostgresHook(self.redshift_conn_id)\n execution_date = context['execution_date']\n \n self.log.info(f\"Truncating {self.table}\")\n redshift.run(f\"TRUNCATE TABLE {self.table}\")\n \n \n self.log.info(f\"Inserting data into {self.table}\")\n s3_path = f\"s3://{self.s3_bucket}/{self.s3_key}\"\n\n if self.s3_key == \"log_data\":\n year = execution_date.year\n month = execution_date.month\n \n s3_path = '/'.join([s3_path, str(year), str(month)])\n \n formatted_sql = StageToRedshiftOperator.copy_sql.format(\n self.table,\n s3_path,\n credentials.access_key,\n credentials.secret_key,\n self.file_format,\n 
self.format_path\n )\n \n redshift.run(formatted_sql)", "def execute_upgrade():\n\n def migrate_review_policy(community_record):\n review_policy = community_record[\"access\"].get(\n \"review_policy\", ReviewPolicyEnum.CLOSED.value\n )\n community_record[\"access\"][\"review_policy\"] = review_policy\n\n secho(\"Starting data migration...\", fg=\"green\")\n communities = Community.model_cls.query.all()\n\n for community_data in communities:\n community = Community(community_data.data, model=community_data)\n migrate_review_policy(community)\n community.commit()\n\n secho(f\"Committing to DB\", fg=\"green\")\n db.session.commit()\n secho(\"Data migration completed.\", fg=\"green\")", "def transform_schedule(keywords, parameters, input_file, output_file):\n\treturn", "def run_migrations(self, migrations):\n for migration in migrations:\n name = migration[\"name\"]\n migration[\"script\"] = self.get_sql_script(name)\n\n if self.dry_run:\n for migration in migrations:\n print(f'---------------- {migration[\"name\"]} ----------------')\n print(migration[\"script\"])\n return\n\n if not self.accept_all and not self.prompt_for_migrations(migrations):\n return\n\n applied_migrations = []\n with self.target_db.begin() as conn:\n for migration in migrations:\n name = migration[\"name\"]\n script = migration[\"script\"]\n if self.apply_migrations:\n print(f\"Applying {name}\")\n conn.execute(script)\n applied_migrations.append(name)\n if self.register:\n self.register_migrations(applied_migrations)", "def get_transformations_lookups(foreign_keys: list, source_db: str, df_name: str):\n #template = \"\"\"[{{\"name\": \"{lookup_name}\",\"script\": \"{source_table}, {lookup_table} lookup(user_id == {lookup_table}@id,\\n\\tbroadcast: 'none')~> {lookup_name}\"}},{{\"name\": \"DerivedColumn1\",\"script\": \"{lookup_name} derive(migration_date = currentTimestamp(),\\n\\t\\tid_old = {source_table}@id,\\n\\t\\tsource_db = '{source_db}') ~> DerivedColumn1\"}}]\"\"\".encode(\"unicode_escape\").decode('utf-8')\n\n script = \"{0}, {1} lookup({2} == {1}@id_old,\\n\\tbroadcast: 'none')~> {3}\"\n transformation_list = []\n\n source_table = \"Input\" +df_name.replace(\"_\", \"\")\n orginal_table = source_table\n for idx, foreign_key in enumerate(foreign_keys):\n\n lookup_table = \"Output\" + foreign_key['referenced_table_name'].replace(\"_\", \"\")\n lookup_col = foreign_key['column_name']\n lookup_name = 'LKP{0}'.format(lookup_table)\n\n print(lookup_name)\n print(source_table)\n print(lookup_table)\n print(source_db)\n\n script = script.format(source_table, lookup_table, lookup_col, lookup_name, orginal_table)\n transformation_dict = {\"name\": lookup_name, \"script\": script}\n transformation_list.append(transformation_dict)\n source_table = lookup_name\n\n transformation_list += get_transformations_no_lookup(source_table)\n return transformation_list", "def step060():\n logger.logMessage('Begin: updating database')\n update_sql = 'update weather_work set tsa=$1, esDocId = $2 where time = $3;'\n pgConn = pg.connect(host=host,user=user,password=password,database=database) \n c = pgConn.cursor()\n# c.execute('drop table weather_work')\n# c.execute('create table weather_work (like weather excluding constraints)')\n# c.execute('insert into weather_work select * from weather_dupes')\n# c.execute('create index weather_work_time on weather_work(time)')\n pgConn.commit()\n c.execute('prepare updtDocid as {0}'.format(update_sql))\n numUpdates = 0\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line 
!= '':\n fields = line.split(';')\n tsa = int(fields[0])\n time = fields[1].rstrip() \n docid = fields[2].rstrip()\n try:\n dic = { 'esDocId': docid, 'tsa': tsa , 'time': time+\"+00:00\" }\n c.execute('execute updtDocid (%(tsa)s,%(esDocId)s,%(time)s)',dic)\n numUpdates += 1\n if numUpdates % 250 == 0:\n pgConn.commit()\n logger.logMessage(level='DEBUG',message=\"{0:9d} commited updates\".format(numUpdates))\n except:\n logger.logException('Exception while updating database')\n pgConn.rollback()\n raise\n line = f.readline().rstrip()\n pgConn.commit()\n logger.logMessage(\"Total updates: {0:d}\".format(numUpdates))\n c.close()\n pgConn.close()\n logger.logMessage('End : updating database')", "def move_to_new_database(sql):\n substitutions = {'nasdaq_fundamentaldb_poc': 'prime',\n 'nasdaq_corp_actions_research': 'corporate_actions_research',\n 'nasdaq_dividend': 'equity_dividend',\n 'nasdaq_dividend_adj_factor': 'equity_dividend_adj_factor',\n 'nasdaq_pit_fundamentals': 'equity_fundamentals',\n 'nasdaq_security_content': 'equity_security_content',\n 'nasdaq_security_master': 'equity_security_master',\n 'nasdaq_split_or_spin_events': 'equity_split_or_spin_events',\n 'nasdaq_gic_fx_rates': 'gic_fx_rate',\n 'nasdaq_giw_index_summary': 'giw_index_summary',\n 'nasdaq_giw_index_weight': 'giw_index_weight',\n 'nasdaq_reuters_fx_rates': 'reuters_fx_rate',\n 'nasdaq_saxtat': 'saxtat',\n 'nasdaq_security_mapping': 'security_mapping',\n 'nasdaq_tso_research': 'tso_research',\n 'nsc': 'esc',\n 'nsm': 'esm',\n 'sec_content_date': 'evaluation_date',\n 'giw_trade_date': 'index_composure_date'}\n sql = multi_replace(sql, substitutions)\n return sql", "def load_dim_date(cur,table):\n logging.info(f\"loading {table} table\")\n cur.execute(etl.insert_dim_date)\n logging.info(f\"data loaded in table {table}\")", "def main():\n aurora_secret = get_aurora_secret()\n aurora_creds = {\n 'host': aurora_secret['host'],\n 'port': aurora_secret['port'],\n 'dbname': aurora_secret['engine'],\n 'user': aurora_secret['username'],\n 'password': aurora_secret['password']\n }\n with UseDatabase(aurora_creds) as cursor:\n sql_anomaly = \"\"\"SELECT grow_table, \n MAX(GREATEST(soil_date, light_date, air_date)) \n FROM grow_anomalies\n GROUP BY grow_table;\"\"\"\n cursor.execute(sql_anomaly)\n anomaly_dates = cursor.fetchall()\n all_deltas = []\n for i in anomaly_dates:\n sql_select = sql.SQL(\"\"\"SELECT MAX(datetime)\n FROM {}\"\"\").format(sql.Identifier(i[0]))\n cursor.execute(sql_select)\n result_datetime = cursor.fetchone()\n all_deltas.append([i[0], result_datetime[0] - i[1]])\n for i in all_deltas:\n sql_update = sql.SQL(\"\"\"UPDATE public.grow_anomalies\n SET days_since_anomaly = {}\n WHERE grow_table = {}\"\"\").format(\n sql.Literal(i[1].days),\n sql.Literal(i[0])\n )\n cursor.execute(sql_update)", "def save_alter_sql(self, table_name):\n\t\talter_sql = ''\n\t\tscripted_alters = []\n\t\tcolNames,sql = self._generate_sql_parts(table_name)\n\t\tfor elem in self.joins:\n\t\t\tcolName = '{}.{}'.format(elem.Parent,elem.Column)\n\t\t\tprint(colName)\n\t\t\tif colName in colNames and colName not in scripted_alters:\n\t\t\t\tif colName == 'CommunityMart.dbo.PersonFact.SourceSystemClientID' and 'CommunityMart.dbo.PersonFact.SourceSystemClientID' not in scripted_alters:\n\t\t\t\t\tscripted_alters.append(colName)\n\t\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF NOT EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ADD {} {} NULL;\\n\\tPRINT 
'{}';\\nEND\\n\".format('PatientID',table_name.split('.')[2],table_name,'PatientID','int','PatientID')\n\t\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ALTER COLUMN {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format('PatientID',table_name.split('.')[2],table_name,'PatientID','int','PatientID')\n\t\t\t\t\talter_sql += alter\n\t\t\t\tscripted_alters.append(colName)\n\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF NOT EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ADD {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format(elem.Column,table_name.split('.')[2],table_name,elem.Column,elem.Datatype,colName)\n\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ALTER COLUMN {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format(elem.Column,table_name.split('.')[2],table_name,elem.Column,elem.Datatype,colName)\n\n\t\t\t\talter_sql += alter\n\t\t\t\tprint(alter)\n\t\twith open('Table/ALTER-{}.sql'.format(table_name),'w') as fout:\n\t\t\tself.altered_tables.append(table_name)\n\t\t\tfout.write(alter_sql)\n\t\treturn alter_sql", "def create_date_features(df = None, date = None):\n #TODO", "def _postgres_install(engine):\n\n sql = []\n\n # postgres trigger function template\n # need to make separate functions purely because the incoming history_id field name will be\n # different for different source tables. There may be a fancier way to dynamically choose\n # between incoming fields, but having 2 triggers fns seems straightforward\n\n def statement_trigger_fn(id_field):\n fn = f\"{fn_prefix}_{id_field}\"\n\n return f\"\"\"\n CREATE OR REPLACE FUNCTION {fn}()\n RETURNS TRIGGER\n LANGUAGE 'plpgsql'\n AS $BODY$\n BEGIN\n INSERT INTO history_audit (history_id, update_time)\n SELECT DISTINCT {id_field}, CURRENT_TIMESTAMP AT TIME ZONE 'UTC'\n FROM new_table\n WHERE {id_field} IS NOT NULL\n ON CONFLICT DO NOTHING;\n RETURN NULL;\n END;\n $BODY$\n \"\"\"\n\n def row_trigger_fn(id_field):\n fn = f\"{fn_prefix}_{id_field}\"\n\n return f\"\"\"\n CREATE OR REPLACE FUNCTION {fn}()\n RETURNS TRIGGER\n LANGUAGE 'plpgsql'\n AS $BODY$\n BEGIN\n INSERT INTO history_audit (history_id, update_time)\n VALUES (NEW.{id_field}, CURRENT_TIMESTAMP AT TIME ZONE 'UTC')\n ON CONFLICT DO NOTHING;\n RETURN NULL;\n END;\n $BODY$\n \"\"\"\n\n def statement_trigger_def(source_table, id_field, operation, when=\"AFTER\", function_keyword=\"FUNCTION\"):\n fn = f\"{fn_prefix}_{id_field}\"\n\n # Postgres supports many triggers per operation/table so the label can\n # be indicative of what's happening\n label = f\"history_audit_by_{id_field}\"\n trigger_name = get_trigger_name(label, operation, when, statement=True)\n\n return f\"\"\"\n CREATE TRIGGER {trigger_name}\n {when} {operation} ON {source_table}\n REFERENCING NEW TABLE AS new_table\n FOR EACH STATEMENT EXECUTE {function_keyword} {fn}();\n \"\"\"\n\n def row_trigger_def(source_table, id_field, operation, when=\"AFTER\", function_keyword=\"FUNCTION\"):\n fn = f\"{fn_prefix}_{id_field}\"\n\n label = f\"history_audit_by_{id_field}\"\n trigger_name = get_trigger_name(label, operation, when, statement=True)\n\n return f\"\"\"\n CREATE TRIGGER {trigger_name}\n {when} {operation} ON {source_table}\n FOR EACH ROW\n WHEN (NEW.{id_field} IS NOT NULL)\n EXECUTE 
{function_keyword} {fn}();\n \"\"\"\n\n # pick row or statement triggers depending on postgres version\n version = engine.dialect.server_version_info[0]\n trigger_fn = statement_trigger_fn if version > 10 else row_trigger_fn\n trigger_def = statement_trigger_def if version > 10 else row_trigger_def\n # In the syntax of CREATE TRIGGER, the keywords FUNCTION and PROCEDURE are equivalent,\n # but the referenced function must in any case be a function, not a procedure.\n # The use of the keyword PROCEDURE here is historical and deprecated (https://www.postgresql.org/docs/11/sql-createtrigger.html).\n function_keyword = \"FUNCTION\" if version > 10 else \"PROCEDURE\"\n\n for id_field in [\"history_id\", \"id\"]:\n sql.append(trigger_fn(id_field))\n\n for source_table, id_field in trigger_config.items():\n for operation in [\"UPDATE\", \"INSERT\"]:\n sql.append(trigger_def(source_table, id_field, operation, function_keyword=function_keyword))\n\n return sql", "def copy_csv_to_lectures_table(conn, csv_file):\n COPY_LECTURES = \"10_copy_lectures_to_table.psql\"\n copy_expert_psql_script(conn, COPY_LECTURES, csv_file)", "def migrate(cls)->None:\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS rsvps (\n creatd_date varchar,\n meetup integer,\n user_id integer,\n response varchar,\n PRIMARY KEY(meetup,user_id)\n )\"\"\")\n database.connection.commit()", "def run(self):\n with self.get_connection() as conn:\n #self._create_new_batch_and_step_audit(conn)\n \"\"\"Kick off the transformation job.\"\"\"\n if self.run_all_steps:\n self.run_all()\n else:\n self.skipped_steps = []\n self.skipped_tables = []\n step_runner = self.step_lambda_map[self.etl_step]\n step_runner()", "def force_update():\n # TODO: IS THERE A WAY TO ONLY REFRESH FOR A GIVEN YEAR?\n # TODO: FIND A WAY TO DO THIS ASYNCHRONOUSLY\n print('Starting update...')\n # TODO: THIS IS A PRETTY BAD WORKAROUND. 
WE SHOULD FIND A WAY TO PROVIDE THE SCRIPTS WITH THE 'LANDTAGSWAHLDB' PACKAGE\n sql_path = pathlib.Path(current_app.instance_path).parent.parent / 'sql-scripts' / 'UpdateViews.sql'\n with open(sql_path) as sql_file:\n script = sql_file.read()\n db = db_context.get_db()\n db.run_script(script)\n db.commit()\n return 'Success'", "def finish_translation_list_update():\n\n with _conn.cursor() as cur:\n cur.execute(\"ALTER TABLE translation_stats ENABLE TRIGGER update_translation_stats_timestamp;\")\n _conn.commit()", "def run(self, edit):\n logger.debug('Schema Hacker: open file')\n for sel in self.view.sel():\n schema, tbl = get_names(self.view, sel)\n schema = schema or 'public'\n folders = self.view.window().folders()\n script_path = ['misc', 'create_insert_function.pl']\n script = find_file(folders, script_path)[0]\n env = os.environ.copy()\n for key, val in (settings.get('env_var') or {}).items():\n env[key] = val\n fnc = subprocess.check_output(\n ['/usr/bin/perl', script, schema, tbl], env=env\n ).decode('utf8')\n file_name = fnc[\n fnc.index('CREATE OR REPLACE FUNCTION ') + 27 : fnc.index('(')\n ].lower() + '.sql'\n main_dir = os.path.dirname(os.path.dirname(script))\n fnc_path = os.path.join(main_dir, schema, 'FUNCTIONS', file_name)\n # Else: Create the file and open it\n if os.path.isfile(fnc_path):\n # Open existing file and put generated func in output panel\n view = self.view.window().open_file(fnc_path)\n panel = self.view.window().create_output_panel('schema_hacker')\n panel.run_command('append', {'characters': fnc, 'pos': 0})\n self.view.window().run_command(\n 'show_panel', {'panel': 'output.schema_hacker'})\n else: # Create function file and then open it\n with open(fnc_path, mode='w') as file:\n file.write(fnc)\n view = self.view.window().open_file(fnc_path)", "def trusted_zone(engine_db, table: str):\n schema = 'trusted'\n drop_old_table = f\"DROP TABLE IF EXISTS {schema}.{table};\"\n new_table = f\"\"\"\n CREATE TABLE {schema}.{table} AS \n SELECT * \n FROM raw.{table} \n WHERE refdate = (SELECT MAX(refdate) FROM raw.{table}) \n AND reftime = (SELECT MAX(reftime) FROM raw.{table})\n \"\"\"\n\n engine_db.execute(drop_old_table)\n engine_db.execute(new_table)\n print(f\"===> Success to save {schema}.{table}.\")", "def structure_and_repopulate_db() -> None:\n with open('db.sql', encoding=\"utf-8\") as f:\n commands = f.read().strip().split(';')\n commands = [command.strip() for command in commands]\n for command in commands:\n my_cursor.execute(command)\n my_db.commit()\n print('Source structure created, data repopulated')", "def store_transformation_into_DB(\n transID: int,\n baseNode: Node,\n addedNodes: List[Node],\n replacedNodes: List[Node],\n cursor: sqlite3.Cursor,\n fullScopeName: str,\n) -> None:\n\n cursor.execute(\n \"\"\"INSERT INTO Log_Transformation VALUES (\n ?,\n 'OPERATOR_BASE',\n ?,\n ?,\n ?\n )\"\"\",\n (transID, baseNode.get_name(), baseNode.get_kind_name(), fullScopeName),\n )\n for an in addedNodes:\n cursor.execute(\n \"\"\"INSERT INTO Log_Transformation VALUES (\n ?,\n 'ADD_OPERAND',\n ?,\n ?,\n ?\n )\"\"\",\n (transID, an.get_name(), an.get_kind_name(), fullScopeName),\n )\n\n for rn in replacedNodes:\n cursor.execute(\n \"\"\"INSERT INTO Log_Transformation VALUES (\n ?,\n 'REMOVE_OPERAND',\n ?,\n ?,\n ?\n )\"\"\",\n (transID, rn.get_name(), rn.get_kind_name(), fullScopeName),\n )", "def db_migrate():\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = 
MYSQL_UP.format(f\"upgrade-{when}\", when, MIGRATION_TABLE)\n down = MYSQL_DOWN.format(f\"downgrade-{when}\", when, MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: {os.path.join('migrations', sql_file)}\")" ]
[ "0.63391304", "0.55349416", "0.5524126", "0.5391393", "0.5282291", "0.5150908", "0.51406926", "0.5104045", "0.5097644", "0.5060103", "0.50325495", "0.50310206", "0.5027701", "0.49788183", "0.4962675", "0.4934943", "0.49243897", "0.490533", "0.48683342", "0.4866766", "0.48619705", "0.48227054", "0.4817434", "0.4810267", "0.48020527", "0.47991022", "0.47874495", "0.47832236", "0.4759482", "0.47324276" ]
0.6584904
0
Players may join a game of Pig
def test_join(self):
    pig = game.pig.Pig('PlayerA', 'PlayerB', 'PlayerC')
    self.assertEqual(pig.get_players(), ('PlayerA', 'PlayerB', 'PlayerC'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enough_players():\n return True", "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "async def join(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n player = ctx.message.author.name\n if player.lower() in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}... you're already playing Truth or Dare here!\".format(room))\n else:\n tod_games[room]['participants'][player.lower()] = {'spins': 0}\n await amor_manager.say(\"{} has joined Truth or Dare!\".format(player))", "def join_game(game):\n game = int(game)\n if 0 > game or game > len(games):\n return \"Not a valid gameBike\"\n if games.join_game(game):\n return \"Registration done\"\n else:\n return \"Not valid registration\"", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def playerCanPlay(game, situation, player):\r\n return True", "async def join(self, ctx):\n if lobby.count(f\"{ctx.author.mention}\") == 0:\n add(lobby, ctx.author.mention)\n await ctx.channel.send(\"You've been added to the queue!\")\n else:\n await ctx.channel.send(\"You're already queued for a match!\")\n await ctx.channel.send(embed=lobby_list())\n if len(lobby) == teamSizeMax:\n if roster:\n await ctx.channel.send(\n \"There is currently a match being picked right now, please try again after picking is finished\")\n else:\n assign_captains()", "async def join(self, interaction: discord.Interaction, button: discord.ui.Button):\n\t\tif interaction.user.id == self.ctx.author.id:\n\t\t\tawait interaction.response.send_message(\n\t\t\t\tcontent='You have already joined the game. 
You can add AI players or start the game early with the other two buttons.',\n\t\t\t\tephemeral=True,\n\t\t\t)\n\t\t\treturn\n\t\tself.players.append(interaction.user)\n\t\tself.start.disabled = False\n\t\tif len(self.players) >= self.max_players:\n\t\t\tview = None\n\t\t\tself.stop()\n\t\telse:\n\t\t\tview = self\n\t\tawait interaction.response.edit_message(content=self.generate_message(), view=view)", "def do_join_game(self):\n\t\titem = self.li_servers.get_selected()[0]\n\n\t\tself.nickname = self.e_nickname.text\n\t\tself.server_uuid = item.server.uuid\n\t\tself.game_name = item.server.name\n\t\tself.num_players = item.server.num_players\n\t\tself.boardsize = item.server.boardsize\n\n\t\td = {\"state\": be.S_JOIN,\n\t\t\t\t\"uuid\": self.server_uuid,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def can_exist_outside_of_game(self):\n return True", "def can_exist_outside_of_game(self):\n return True", "def player_join(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tplayer_ID = args[0] # IndexError\r\n\t\t\tteam_name = args[1] # IndexError\r\n\t\t\tteam_type = self.team_get_type_by_name(team_name) # ValueError\r\n\t\texcept IndexError:\t# Invaild arguments\r\n\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\"The arguments for join the game are invaild.\")\r\n\t\texcept ValueError:\t# Invaild team name\r\n\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\"Specified team name {0} is not found.\".format(team_name))\r\n\t\telse:\r\n\t\t\t# If the player has already joined\r\n\t\t\tif self._teammates.get(player_ip) is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\t\"IP {0} has already joined the game.\".format(player_ip))\r\n\t\t\t\treturn\r\n\r\n\t\t\t# Check if the player ID is used in the team\r\n\t\t\tplayer_info = self._teams[team_type].get_player_info_by_ID(player_ID)\r\n\t\t\tif player_info is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"join fail\")\r\n\t\t\t\t_logger.error(\"player-join: \" \\\r\n\t\t\t\t\t\"Player \\\"{0}\\\" is already in the team.\".format(player_ID))\r\n\t\t\t\treturn\r\n\r\n\t\t\tplayer_info = self._teams[team_type] \\\r\n\t\t\t\t.add_player_info(player_ip, player_ID, team_name)\r\n\r\n\t\t\tself._teammates[player_ip] = team_type\r\n\t\t\tself._handlers[\"player-join\"].invoke(player_info, team_type)\r\n\r\n\t\t\tself._comm_server.send_message(player_ip, \"join ok\")\r\n\r\n\t\t\t_logger.info(\"Player \\\"{0}\\\" from {1} joins the team \\\"{2}\\\".\" \\\r\n\t\t\t\t.format(player_info.ID, player_info.IP, player_info.team_name))", "def join_in_play(self):\n return self._join_in_play", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def _join(self, req):\n orig_game = None\n if self.game:\n orig_game = self.game\n game_id = req.pop(0)\n self.game, self.player = self.server.join_game(game_id, self)\n if orig_game:\n orig_game.leave(self)", "def collision(cubes, player):\n if player in cubes:\n return True\n else:\n return False", "def join_game(players_cursor, states_cursor, user, room_id):\n # Make sure player isn't already in the game\n joined_query = '''SELECT * FROM players_table WHERE user = ? 
AND room_id = ?;'''\n joined = players_cursor.execute(joined_query, (user, room_id)).fetchall()\n if len(joined) > 0:\n # TODO: Return proper message for already in game\n raise KeyError\n\n # Check if the game is already full\n players_query = '''SELECT * FROM players_table WHERE room_id = ?;'''\n players = players_cursor.execute(players_query, (room_id,)).fetchall()\n if len(players) == MAX_PLAYERS:\n # TODO: Return proper message for joining full game\n raise ValueError\n\n # Since the game is not full, add the player to the game\n insert_player = '''INSERT into players_table VALUES (?,?,?,?,?,?,?);'''\n players_cursor.execute(insert_player,\n (user, STARTING_STACK, 0, 0, \"\", len(players), room_id))\n \n FRAMES.append(display_game(players_cursor, states_cursor, user, room_id))", "def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n self.endGame()", "def check_trying_using(self):\r\n if self.opportunity or 'key' in inventory:\r\n if self.rect.colliderect(player):\r\n music_acceptor.usingPortalSound()\r\n player.rect.x = random.randrange(75, WIDTH - 125)\r\n player.rect.y = random.randrange(25, HEIGHT - 100)", "def gameOver(self):\n\t\treturn self.lives == 0", "def player_collision(self, player):\n return True", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "async def join(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if player.is_connected:\n return await ctx.send(\"I'm already in a voice channel :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"You are not in a voice channel :no_entry:\")\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n await ctx.send(\"Summoned to `{}` <:done:403285928233402378>\".format(ctx.author.voice.channel.name))", "async def tod_join(self, ctx, *args):\n if ctx.author not in self.players:\n self.players.append(ctx.author)\n message = f\"{ctx.author.mention} has been added to the game!\"\n await ctx.send(message)\n else:\n message = f\"{ctx.author.mention} has already joined!\"\n await ctx.send(message)\n\n # Updates the role if channel exists\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)\n return\n\n # Creates the channel if it doesn't exist\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n bots = discord.utils.get(ctx.guild.roles, name=\"Bots\")\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False),\n bots: discord.PermissionOverwrite(read_messages=True, send_messages=True),\n role: discord.PermissionOverwrite(read_messages=True, send_messages=True, connect=True, speak=True)\n }\n await ctx.guild.create_text_channel('truth-or-dare', overwrites=overwrites)\n await ctx.guild.create_voice_channel('secret-voice', overwrites=overwrites)\n\n # Adds the role\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await 
ctx.author.add_roles(role)", "def win(player1, player2):\n if(player1 == 1 and player2 == 3) or (player1 == 2 and player2 == 1) \\\n or (player1 == 3 and player2 == 2):\n return True", "def join(self, game):\n self.game = game\n self.game.join(self)\n return self.game", "def is_game_won(self):\n return True", "async def react_join(a: Message):\n if a.action.member_id == club_id:\n await a.answer(r_register_help)\n stats.jincr()", "async def ign_whoplays(self, ctx, game):\n supported_game = self.get_supported_game(game)\n if not supported_game:\n return await self.bot.say(self.format_supported_games())\n\n gamers = {}\n for player, games in self.names.items():\n if supported_game in games.keys():\n gamers.setdefault(player, games[supported_game])\n\n msg = \"Here are the members that play {0}:\\n\".format(bold(supported_game))\n for discord_member, ign in gamers.items():\n msg += \"{0}\\t\\t{1}\\n\".format(discord_member, italics(ign))\n await self.bot.say(msg)" ]
[ "0.69568753", "0.6629181", "0.6586598", "0.6556356", "0.6515452", "0.64138293", "0.6406495", "0.63846964", "0.63828003", "0.6336657", "0.6336657", "0.62983274", "0.62676483", "0.6216054", "0.6119754", "0.6101144", "0.6100801", "0.6068492", "0.6054281", "0.60209066", "0.6003471", "0.59735465", "0.5925612", "0.5922546", "0.5919751", "0.590872", "0.5902364", "0.5899153", "0.5898913", "0.5898498" ]
0.7154914
0
db cursor as a "self closing" context manager
def cursor(cls) -> Iterator[sqlite3.Cursor]:
    with closing(cls.db.cursor()) as cur:
        yield cur
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cursor(self):\n with self.connection() as conn:\n cursor = conn.cursor(prepared=True)\n try:\n yield cursor\n finally:\n cursor.close()", "def cursor():\n dbh = handle()\n return dbh.cursor()", "def cursor(self):\n with self.conn as c:\n yield c.cursor()", "def cursor(self):\n with self.conn as c:\n yield c.cursor()", "def with_cursor(fn):\n @functools.wraps(fn)\n def wrapped_fn(self, *args, **kwargs):\n with contextlib.closing(self.connect()) as connection:\n with contextlib.closing(connection.cursor()) as cursor:\n fn(self, cursor, *args, **kwargs)\n return wrapped_fn", "def getCursor(self) -> sqlite3:\n return self.cursor", "def close(self):\r\n if self.cursor is None:\r\n return\r\n\r\n self.cursor.close()\r\n self.cursor = None\r\n self.app_id = None\r\n self.iden_id = None", "def close(self):\n# self.cursor.close()\n\tself.db.close()", "def cursor(self):\r\n if self._closed:\r\n raise Error('The connection to the database has been closed.')\r\n return Cursor(self)", "def __enter__(self) -> 'DBcursor':\n self.conn = connector.connect(**self.dbconfig)\n self.cursor = self.conn.cursor()\n return self.cursor", "def close_cursor(self, cursor=None):\n if cursor != None:\n cursor.close()\n else:\n self.cur.close()\n\n self.dbcon.commit()", "def close(conn, cursor):\n conn.commit()\n cursor.close()\n conn.close()", "def managed_cursor(self, cursor_factory=None):\n\n self.conn_url = (f'postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}')\n self.conn = psycopg2.connect(self.conn_url)\n self.conn.autocommit = True\n self.curr = self.conn.cursor(cursor_factory=cursor_factory)\n try:\n yield self.curr\n finally:\n self.curr.close()\n self.conn.close()", "def _closeConnection(cursor, db):\n cursor.commit()\n cursor.close()\n db.close()", "def _close_cursor(self, cursor):\n\t\twith warnings.catch_warnings():\n\t\t\twarnings.simplefilter(\"ignore\")\n\t\t\tconnection = cursor.connection\n\t\tcursor.close()\n\t\tconnection.close()", "def close(cursor, conn):\n cursor.close()\n conn.close()", "def __enter__(self) -> 'cursor':\n self.conn = cx_Oracle.connect(self.configuration)\n self.cursor = self.conn.cursor()\n return self.cursor", "def __enter__(self):\n return self.cursor", "def dbcursor(self):\n return self.__dbcursor", "def close(self):\n self.cursor.close()", "def cursor(self):\n return self._conn.cursor()", "def close(connection, cursor):\n cursor.close()\n connection.close()", "def __cursor(cls):\n print('|-- Richiesta cursore da:'+str(cls.__dbCon))\n return cls.__dbCon.cursor( cursor_factory = psycopg2.extras.DictCursor )", "def close_db_while_running(self):\n self.close()\n try:\n yield\n finally:\n self.dismod_file.engine = get_engine(self._filename)", "def close(self):\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n if self._closed:\r\n raise Error('The cursor has already been closed.')\r\n else:\r\n self._closed = True", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.conn.commit()\n self.cursor.close()\n self.conn.close()", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.conn.commit()\n self.cursor.close()\n self.conn.close()", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.conn.commit()\n self.cursor.close()\n self.conn.close()", "def get_cursor(self):\n self.cur = self.dbcon.cursor()\n return self.cur", "def _cursor(self):\n cursor = self.conn.cursor()\n\n return cursor" ]
[ "0.7609911", "0.7451035", "0.73391515", "0.73391515", "0.7098719", "0.70769083", "0.7069774", "0.70652825", "0.70240337", "0.69698375", "0.6927731", "0.69276625", "0.6879431", "0.6833794", "0.6830395", "0.6811575", "0.6784924", "0.6755978", "0.67383385", "0.6733037", "0.67103493", "0.67053646", "0.66713184", "0.65870523", "0.65633297", "0.65612435", "0.65612435", "0.65612435", "0.6552534", "0.6548891" ]
0.77426666
0
Get the available pollutants per country from the summary.
def pollutants_per_country(cls) -> dict[str, dict[str, int]]: with cls.cursor() as cur: cur.execute( "SELECT country_code, pollutant, pollutant_id FROM summary" ) output: dict[str, dict[str, int]] = defaultdict(dict) for country_code, pollutant, pollutant_id in cur: output[country_code][pollutant] = pollutant_id return dict(output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_top_country(self):\n tabular_format_countries_list = [['Canada', 66, '20'], ['United States', 33, '10']]\n\n result = InstallationStatistics.get_statistics_top_country(tabular_format_countries_list)\n\n self.assertEqual('Canada', result)", "def get_countries():\n call = build_call('attr', 'country')\n return request_data(call)", "def get_possible_preferences():\n df = pd.read_csv(\"../data/restaurant_info.csv\")\n foods = set(list(df[\"food\"]))\n areas = set(list(df[\"area\"].dropna()))\n price_ranges = set(list(df[\"pricerange\"]))\n\n return foods, areas, price_ranges", "def getCases(self, country):\n return self.cases_per_country[country]", "def get_available_polls(game_type_id):\n\n poll_response = requests.get(\n url=f'{settings.GAME_SETUP_URL}/all-polls/{game_type_id}/',\n timeout=5 # in sec\n )\n if poll_response.status_code == 200:\n return poll_response.json()\n return {}", "def topBrandsandCountries(df, countries_unique):\n top_countries = {}\n for x in countries_unique:\n if df[df.geo_country==x].device_brand_name.count() > 500:\n top_countries[x] = df[df.geo_country==x].device_brand_name.count()\n\n top_3_brands = ['Apple','Samsung','Huawei']\n\n apple = []\n samsung = []\n huawei = []\n for x in top_countries.keys():\n apple.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[0]].device_brand_name.count())\n samsung.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[1]].device_brand_name.count())\n huawei.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[2]].device_brand_name.count()) \n\n return top_countries,apple,samsung,huawei", "def get_listings_for_country(self, country, per_month=True):\n assert isinstance(country, str) and country in self.countries\n\n return self.get_listings_for_city(self.country_to_city_map[country], per_month)", "def get_country_distribution(self):\n countries = []\n probabilities = []\n rows = self.cursor.execute(\"SELECT * FROM country ORDER BY id\")\n for row in rows:\n countries.append(int(row['id']))\n probabilities.append(float(row['probability']))\n \n return [rows, countries, probabilities]", "def AllCountries():\n print(\"TOUS LES PAYS\\n\")\n for x in countries:\n nom = countries[x]['name']\n capital = countries[x]['capital']\n continent = countries[x]['location']\n independance = countries[x]['independance']\n president = countries[x]['president']\n langue = countries[x]['langue']\n superficie = countries[x]['superficie']\n haditant = countries[x]['population']\n pib = countries[x]['pib']\n\n print(f\"Nom ==> {nom}\")\n print(f\"Capital ==> {capital}\")\n print(f\"Continent ==> {continent}\")\n print(f\"Date Independance ==> {independance}\")\n print(f\"Nom President Actuel ==> {president}\")\n print(f\"Langue Offielle ==> {langue}\")\n print(f\"Superficie ==> {superficie}\")\n print(f\"Nombre d'habitants ==> {haditant}\")\n print(f\"PIB ==> {pib}\")", "def summary_table(countries: List[str]):\n \n df_list = []\n \n for country in countries:\n acceleration_figures = acceleration(country)\n pop = COUNTRY_DATA[country]['population']\n df_list.append(\n [\n country,\n COUNTRY_DATA[country]['data'].confirmed[-1],\n int(acceleration_figures[0] * pop),\n COUNTRY_DATA[country]['data'].deaths[-1],\n int(acceleration_figures[1] * pop),\n ]\n )\n\n return df_list", "def get_country(self, data: dict):\n country_entries = data.get(\"P27\")\n if country_entries is None or len(country_entries) == 0:\n country_entries = data.get(\"P19\")\n if country_entries is None or len(country_entries) == 0:\n 
return [{\"country\": \"Unknown\", \"region\": \"Unknown\"}]\n countries = []\n for entry in country_entries:\n country = entry.get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n countries.append(self._reference.get_country(country))\n return countries", "def getAllCases(self):\n return self.cases_per_country", "def _get_pollutants_for_vehicle(self):\n from . import session\n\n category = models.Category.get_for_type(self._vehicle)\n if not category:\n raise ValueError(\"Unable to find Category for vehicle: {}\".format(category))\n\n fuel = session.query(models.Fuel).filter_by(name=self._vehicle.fuel_type).first()\n if not fuel:\n raise ValueError(\"Unable to find Fuel in database: name={}\".format(self._vehicle.fuel_type))\n\n segment = session.query(models.Segment).filter_by(name=self._vehicle.segment).first()\n if not segment:\n raise ValueError(\"Unable to find segment in database: name={}\".format(str(self._vehicle.segment)))\n\n filter_parms = {\n \"category\": category,\n \"fuel\": fuel,\n \"segment\": segment\n }\n\n euro_std = session.query(models.EuroStd).filter_by(name=self._vehicle.euro_std).first()\n if euro_std:\n filter_parms.update({\"eurostd\": euro_std})\n\n mode = session.query(models.Mode).filter_by(name=self._vehicle.mode).first()\n if mode:\n filter_parms.update({\"mode\": mode})\n\n if self._vehicle.load > -1.0:\n filter_parms.update({\"load\": self._vehicle.load})\n\n # Get Parameters based on the other items found above\n params = session.query(models.Parameter).filter_by(**filter_parms)\n return params.all()", "def get_population(country_name):\n country_population=set_country_populations_dict()\n country_data=country_population[country_name]\n population_data=country_data[0]\n return population_data", "def get_all_poll_data():\n\trcp_poll_race_dict = get_rcp_poll_data('http://www.realclearpolitics.com/epolls/latest_polls/') # realclearpolotics poll data\n\treturn rcp_poll_race_dict", "def french_habits_trackings():\n return analytics.select_rows(\n analytics.habits_trackings_table(),\n 0,\n 5)", "def get_locations_by_country(df, country):\n locations = list(df[df.country == country].location.values)\n return locations", "def pays():\r\n df = ouvrir_fichier()\r\n choix_pays = set(df['country'].tolist())\r\n return choix_pays", "def get_top_five_countries():\n countries=country_populations.split('\\n')\n top_5=[]\n count=0\n for country in countries:\n if count<6:\n data= country.split('\\t')\n top_5.append(data[1])\n count+=1\n top_5.remove('Country')\n return top_5", "def get_country_lessthan_percent(db, percent):\n # Hint: Find out countries that do contain percent population of total\n # population.\n # then exclude those countries using nested query.\n # refer to the example showed in class for nested query\n # Nested Query\n pass", "def get_available_companies_and_people(team):", "def countries(self, unit=None, units=None, timezone=None,\r\n rollup=None, limit=None, unit_reference_ts=None):\r\n params = base.get_params(None, locals())\r\n return self._get('countries', params)", "def lookup(self,user_preferences):\n res = list()\n \n fit_area = set()\n fit_price = set()\n fit_food = set()\n \n if user_preferences[0] == \"any\" or user_preferences[0] == 0:\n fit_area = set(range(len(self.area)))\n else:\n for i,a in enumerate(self.area):\n if a == user_preferences[0]:\n fit_area.add(i)\n if user_preferences[1] == \"any\" or user_preferences[1] == 0:\n fit_price = set(range(len(self.price_range)))\n else:\n for j,p in 
enumerate(self.price_range):\n if p == user_preferences[1]:\n fit_price.add(j)\n if user_preferences[2] == \"any\" or user_preferences[2] == 0:\n fit_food = set(range(len(self.food_types)))\n else:\n for k,f in enumerate(self.food_types):\n if f == user_preferences[2]:\n fit_food.add(k)\n option_numbers = fit_area.intersection(fit_price, fit_food)\n if option_numbers:\n for i in option_numbers:\n res.append(self.restaurant_names[i])\n \n return res", "async def get_hotels(query: str, limit: int, lang: str) -> list:\n data = {'query': query,\n 'lang': lang,\n 'lookFor': 'hotel',\n 'limit': limit}\n resp = await booking_instance.get_hotels(data)\n results = resp.get('results')\n return results.get('hotels')", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def get_top_nationalities(result, n=5):\n nat_freq=pd.DataFrame(result['country'].value_counts())\n ratios=nat_freq[:n]/nat_freq.sum()*100\n res='The most common visitors are from'\n for i in range(0,len(ratios)):\n if i!=len(ratios)-1:\n res=res+f' {ratios.index[i]} ({np.round(ratios.country[i],2)}%),'\n else:\n res=res+f' and {ratios.index[i]} ({np.round(ratios.country[i],2)}%).'\n return res", "def get_country_data():\n\n parser = argparse.ArgumentParser(\n description='Retrieve aggregated stats by aggregation type, metric, and region.',\n )\n parser.add_argument(\n '--aggregation',\n required=True,\n choices=[\n 'avg',\n 'count',\n 'max',\n 'min',\n 'sum',\n ],\n help='Aggregation type',\n )\n parser.add_argument(\n '--field',\n required=True,\n choices=[\n 'area',\n 'borders',\n 'countries',\n 'currencies',\n 'gini',\n 'languages',\n 'latlng',\n 'population',\n ],\n help='Metric to aggregate',\n )\n parser.add_argument(\n '--by',\n required=True,\n choices=[\n 'region',\n 'subregion',\n ],\n help='Field to group aggregates by',\n )\n\n args = parser.parse_args()\n params = {\n 'aggregation': args.aggregation,\n 'field': args.field,\n 'by': args.by,\n }\n return process_aggregation_request(params)", "def pollutants(cls) -> dict[str, str]:\n\n with cls.cursor() as cur:\n cur.execute(\"SELECT pollutant, pollutant_id FROM pollutants;\")\n return dict(cur.fetchall())", "def test_all_available(self):\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(context[\"username\"], self.user)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n self.assertEqual(context[\"display_first_week\"], True)\n\n self.assertEqual(bookings[0].calendar_week,\n self.current_week.calendar_week)\n self.assertEqual(bookings[1].calendar_week,\n self.current_week.calendar_week + 1)\n self.assertEqual(bookings[2].calendar_week,\n self.current_week.calendar_week + 2)\n self.assertEqual(bookings[3].calendar_week,\n self.current_week.calendar_week + 3)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n self.assertEqual(type(block), BlockAvailable)", "def get_all_jh_countries():\n download_jh_data()\n file_path = os.path.join(JH_DATA_DIR, \"covid_confirmed.csv\")\n data = pd.read_csv(file_path)\n countries = data[\"Country/Region\"].to_list()\n countries = list(dict.fromkeys(countries))\n return countries" ]
[ "0.58904153", "0.56610507", "0.5657063", "0.54212266", "0.54170454", "0.53987294", "0.53270453", "0.5311308", "0.52836245", "0.5188203", "0.51842904", "0.51799196", "0.5174705", "0.516637", "0.51484", "0.5143588", "0.5081845", "0.5025447", "0.50168353", "0.49986464", "0.499537", "0.4989885", "0.49650738", "0.4961277", "0.4951884", "0.49505252", "0.4947596", "0.4943553", "0.49393883", "0.4937178" ]
0.6624939
0
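The row above groups (country, pollutant, id) tuples from a cursor into a nested dict via collections.defaultdict(dict). A minimal standalone sketch of that aggregation pattern, with made-up sample rows standing in for the database cursor:

from collections import defaultdict

# Made-up sample rows standing in for the summary-table cursor.
rows = [("DE", "NO2", 8), ("DE", "O3", 7), ("FR", "NO2", 8)]

output = defaultdict(dict)
for country_code, pollutant, pollutant_id in rows:
    output[country_code][pollutant] = pollutant_id

print(dict(output))  # {'DE': {'NO2': 8, 'O3': 7}, 'FR': {'NO2': 8}}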
Darkens the given color by multiplying its luminosity by the given amount. Input can be a matplotlib color string, a hex string, or an RGB tuple.
def darken_color(color, amount=0.5): import matplotlib.colors as mc import colorsys try: c = mc.cnames[color] except: c = color c = colorsys.rgb_to_hls(*mc.to_rgb(c)) return colorsys.hls_to_rgb(c[0], amount * c[1], c[2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_lightness(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])", "def _lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n c = color\n amount += 0.5\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def darken(color):\n hue, saturation, value = rgb_to_hsv(color.red, color.green, color.blue)\n value /= 1.5\n saturation /= 1.25\n return hsv_to_rgb(hue, saturation, value) + (color.alpha,)", "def darken(hex_color: str) -> str:\n amount = 0.2\n hex_color = hex_color.replace(\"#\", \"\")\n red = max(0, int(hex_color[0:2], 16) - int(255 * amount))\n green = max(0, int(hex_color[2:4], 16) - int(255 * amount))\n blue = max(0, int(hex_color[4:6], 16) - int(255 * amount))\n darker_color = (\n \"#%s\" % hex(red)[2:].zfill(2) + hex(green)[2:].zfill(2) + hex(blue)[2:].zfill(2)\n )\n return darker_color", "def saturate(self, amount):\n h, l, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n\n s = s + amount\n\n if s < 0.0:\n s = 0.0\n if s > 1.0:\n s = 1.0\n\n r, g, b = colorsys.hls_to_rgb(h, l, s)\n return Color(from_rgba=(c(r), c(g), c(b), c(self.a)))", "def lighten(self, amount):\n h, light, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n\n light = light + amount\n\n if light < 0.0:\n light = 0.0\n if light > 1.0:\n light = 1.0\n\n r, g, b = colorsys.hls_to_rgb(h, light, s)\n return Color(from_rgba=(c(r), c(g), c(b), c(self.a)))", "def adjust_saturation(self, amount):\n h, s, v = self.as_hsv()\n s *= 1.0 + amount\n return ScreenColor.from_hsv(h, s, v)", "def light(color, dist):\n return tuple( float(x*dist*dist) for x in color )", "def led(color: Tuple[int, int, int], /) -> None:", "def dark(r, d):\n return d * 1.0 / (r + d) + d * r * 1.0 / ((r + d) ** 2)", "def multiply_rgb(color, alter):\n rgb = color[1:]\n chunks, chunk_size = 
len(rgb), len(rgb)/3\n r, g, b = [ int(int('0x%s' % rgb[i:i+chunk_size], 0) * alter) for i in range(0, chunks, chunk_size) ]\n return '#%.2x%.2x%.2x' % (r, g, b)", "def darker(image):\r\n # Demonstrate looping over all the pixels of an image,\r\n # changing each pixel to be half its original intensity.\r\n for pixel in image:\r\n pixel.red = pixel.red // 2\r\n pixel.green = pixel.green // 2\r\n pixel.blue = pixel.blue // 2", "def lightness(color):\n\n strongest = max(color.red, color.green, color.blue)\n weakest = min(color.red, color.green, color.blue)\n return 0.5 * (strongest + weakest) / 255", "def LightContrastColour(c):\r\n\r\n amount = 120\r\n\r\n # if the colour is especially dark, then\r\n # make the contrast even lighter\r\n if c.Red() < 128 and c.Green() < 128 and c.Blue() < 128:\r\n amount = 160\r\n\r\n return StepColour(c, amount)", "def luminance(self, color):\n return 0.2426 * color[2] + 0.7152 * color[1] + 0.0722 * color[0]", "def modify_color(color, sigma):\n\t# get values of individual colors, convert to hex integers, and modify\n\tred, green, blue = (vary(int(color[i:i+2], 16), sigma) for i in (1, 3, 5))\n\t# return combined hex representation of new color values\n\treturn '#%02X%02X%02X' % (red, green, blue)", "def dark_color(color):\n rgb = hex_to_rgb(color)\n if rgb:\n return rgb_to_hls(*rgb)[1] < 128\n else: # default to False\n return False", "def led(color: int, /) -> None:", "def compute_luminosity(red, green, blue):\r\n return (0.299 * red) + (0.587 * green) + (0.114 * blue)", "def color_val_matplotlib(color):\n color = mmcv.color_val(color)\n color = [color / 255 for color in color[::-1]]\n return tuple(color)", "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n color = _color_to_rgb(color, input)\n gray = \"#222222\"\n colors = [color, gray] if reverse else [gray, color]\n return blend_palette(colors, n_colors, as_cmap)", "def offsetColor(self, input_color, amount, clamp=None):\n\t\tif amount == 0: # Do nothing\n\t\t\treturn input_color\n\n\t\telif amount > 0: # Lighten\n\t\t\tif clamp is None:\n\t\t\t\tmin_clamp = 0\n\t\t\telse:\n\t\t\t\tmin_clamp = clamp\n\t\t\tmax_clamp = 255\n\n\t\telif amount < 0: # Darken\n\t\t\tmin_clamp = 0\n\t\t\tif clamp is None:\n\t\t\t\tmax_clamp = 255\n\t\t\telse:\n\t\t\t\tmax_clamp = clamp\n\n\t\tlum = max(min_clamp, min(input_color.lightness()+amount, max_clamp))\n\t\treturn QtGui.QColor(lum, lum, lum)", "def reduceColor(channel,levels):\n channel = channel.astype(numpy.double)\n return cutils.reduceColor(channel,levels)", "def lighten(color):\n hue, saturation, value = rgb_to_hsv(color.red, color.green, color.blue)\n value = 1 - (1 - value) / 1.5\n if saturation:\n saturation = 1 - (1 - saturation) / 1.25\n return hsv_to_rgb(hue, saturation, value) + (color.alpha,)" ]
[ "0.725701", "0.70771843", "0.7057821", "0.7057821", "0.7057821", "0.7057821", "0.7057821", "0.7057821", "0.6648363", "0.6628755", "0.65275615", "0.6517876", "0.65105426", "0.62327015", "0.61098164", "0.6095843", "0.6092338", "0.60866326", "0.6059547", "0.6035964", "0.6009311", "0.59968716", "0.5986704", "0.5983439", "0.59522843", "0.59234893", "0.58934516", "0.58669573", "0.5848537", "0.5828619" ]
0.81315416
0
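The darken_color document above scales a color's lightness in HLS space. A simplified sketch of the same idea, assuming matplotlib is available; matplotlib.colors.to_rgb already accepts named colors, hex strings, and RGB tuples, so this sketch relies on it directly:

import colorsys
import matplotlib.colors as mc

def darken(color, amount=0.5):
    # Convert any matplotlib-understood color spec to RGB, then scale lightness in HLS space.
    h, l, s = colorsys.rgb_to_hls(*mc.to_rgb(color))
    return colorsys.hls_to_rgb(h, amount * l, s)

print(darken("navy", 0.6))          # named color
print(darken("#ff8800", 0.5))       # hex string
print(darken((0.2, 0.8, 0.4)))      # RGB tuple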
Function needed for backwards compatibility with the old "col" argument in plt functions. It returns the default color 'C0' if both arguments are None. If 'color' is not None, it always uses that. If 'color' is None and 'col' is an integer, it returns the corresponding 'CN' color. If 'col' is neither None nor an integer, an error is raised.
def get_color(col, color): if color is None and col is None: return 'C0' if col is None: return color if not isinstance(col, int): raise ValueError("`col` must be an integer. Consider using `color` instead.") return 'C{}'.format(col)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _color_for_labels(label_color, default_color, seq_index):\n if label_color is None:\n if hasattr(default_color, '__getitem__'):\n c = default_color[seq_index]\n else:\n c = default_color\n else:\n c = label_color\n\n return c or 'black'", "def getColor(col=None,caption=None):\n if type(col) == tuple:\n col = QtGui.QColor.fromRgb(*col)\n else:\n col = QtGui.QColor(col)\n dia = QtGui.QColorDialog\n #myButton = QtGui.QPushButton('MY')\n #dia.layout()\n col = dia.getColor(col)\n if col.isValid():\n return str(col.name())\n else:\n return None", "def shortcolour(c):\n return c if c == \"none\" or c[0] == 'u' else repr2col(col2repr(c))", "def _get_color(self, color_name):\n if not color_name:\n return 0\n\n if color_name == 'ORANGE':\n color = self.COLOR_ORANGE\n else:\n color = getattr(curses, 'COLOR_' + color_name)\n return curses.color_pair(color)", "def IntToColor(number):\n color = COLORS_INDEX.get(number)\n return color if color else 'default'", "def color(self) -> Optional[str]:\n return self.colour", "def uniqueish_color(color_data):\n # return plt.cm.gist_ncar(color_data)\n # return plt.cm.binary(color_data)\n return plt.cm.bwr(color_data)", "def get_color(key):\n if _with_colors:\n return _color_map.get(key, None)\n return None", "def pickAColor():\n color = _tkCall(tkColorChooser.askcolor)\n if color[0] != None:\n return Color(color[0][0], color[0][1], color[0][2])", "def fl_bk_color(colr):\n _fl_bk_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_bk_color\",\\\n None, [xfdata.FL_COLOR],\\\n \"\"\"void fl_bk_color(FL_COLOR col)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, ul_colr)\n _fl_bk_color(ul_colr)", "def get_color(self) -> Optional[str]:\n return self.color", "def getColorFlag(color):\n if color == 0: # MONO\n return 0\n elif color == 1: # BAYER\n return -1\n elif color == 2: # AS IS RBG\n return 1", "def label_color(label):\n if label < len(colors):\n return colors[label]\n else:\n warnings.warn('Label {} has no color, returning default.'.format(label))\n return (0, 255, 0)", "def get_color(node, color_map):\r\n if node in color_map:\r\n return color_map[node]\r\n return \"black\"", "def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"", "def noise_or_color(col, noise) :\n if random.random() < noise :\n red = (col[0] + random.randrange(-50,50)) % 256\n green = (col[1] + random.randrange(-50,50)) % 256\n blue = (col[2] + random.randrange(-50,50)) % 256\n return (red,green,blue)\n else :\n return col", "def add_colours(self, colour):\n col = np.zeros(self.size)\n \n is_cen = self.get(\"is_cen\")\n is_sat = self.get(\"is_sat\")\n abs_mag = self.get(\"abs_mag\")\n z = self.get(\"zcos\")\n\n col[is_cen] = colour.get_central_colour(abs_mag[is_cen], z[is_cen])\n col[is_sat] = colour.get_satellite_colour(abs_mag[is_sat], z[is_sat])\n\n self.add(\"col\", col)", "def color_negative_red_positive_green(val):\n if val < 0:\n color = 'red'\n elif val > 0:\n color = 'green'\n else:\n color = 'black'\n\n return 'color: %s' % color", "def fl_color(colr):\n _fl_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_color\",\\\n None, [xfdata.FL_COLOR],\\\n \"\"\"void fl_color(FL_COLOR col)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, 
ul_colr)\n _fl_color(ul_colr)", "def defaultColor(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def get_continuous_color(colorscale, intermed):\r\n if len(colorscale) < 1:\r\n raise ValueError(\"colorscale must have at least one color\")\r\n\r\n if intermed <= 0 or len(colorscale) == 1:\r\n return colorscale[0][1]\r\n if intermed >= 1:\r\n return colorscale[-1][1]\r\n\r\n for cutoff, color in colorscale:\r\n if intermed > cutoff:\r\n low_cutoff, low_color = cutoff, color\r\n else:\r\n high_cutoff, high_color = cutoff, color\r\n break\r\n\r\n # noinspection PyUnboundLocalVariable\r\n return plotly.colors.find_intermediate_color(\r\n lowcolor=low_color, highcolor=high_color,\r\n intermed=((intermed - low_cutoff) / (high_cutoff - low_cutoff)),\r\n colortype=\"rgb\")", "def which_color(node):\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))", "def colorize(lead, num, color):\n if num != 0 and ANSIBLE_COLOR and color is not None:\n return \"%s%s%-15s\" % (stringc(lead, color), stringc(\"=\", color), stringc(str(num), color))\n else:\n return \"%s=%-4s\" % (lead, str(num))", "def fl_get_icm_color(colr):\n _fl_get_icm_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_icm_color\",\\\n None, [xfdata.FL_COLOR, cty.POINTER(cty.c_int),\n cty.POINTER(cty.c_int), cty.POINTER(cty.c_int)],\\\n \"\"\"void fl_get_icm_color(FL_COLOR col, int * r, int * g, int * b)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n i_red, ptr_red = library.make_intc_and_pointer()\n i_green, ptr_green = library.make_intc_and_pointer()\n i_blue, ptr_blue = library.make_intc_and_pointer()\n library.keep_elem_refs(colr, ul_colr, i_red, i_green, i_blue, \\\n ptr_red, ptr_green, ptr_blue)\n _fl_get_icm_color(ul_colr, ptr_red, ptr_green, ptr_blue)\n return i_red.value, i_green.value, i_blue.value", "def _color_name(self, color_name):\n try:\n for color in self.config[\"Defaults\"][\"Colors\"]:\n if color == color_name:\n return self.config[\"Defaults\"][\"Colors\"][color]\n print(\n f\"ERROR: Color {color_name} not found in default color list, returning Black\"\n )\n return \"#000000\"\n except KeyError as e:\n print(f\"Error: {e}\")\n print(\n \"config.yml requires minimal config section, please see documentation\"\n )\n sys.exit(1)", "def default_colour(self):\n colour = self.DEFAULT_COLOUR\n return colour", "def fl_bk_textcolor(colr):\n _fl_bk_textcolor = library.cfuncproto(\n library.load_so_libforms(), \"fl_bk_textcolor\",\\\n None, [xfdata.FL_COLOR],\\\n \"\"\"void fl_bk_textcolor(FL_COLOR col)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, ul_colr)\n _fl_bk_textcolor(ul_colr)", "def colour(self) -> Optional[str]:\n return self._colour", "def get_colorstring(color) -> str:\n return f\"#{int(color[0]*255):02x}{int(color[1]*255):02x}{int(color[2]*255):02x}\"", "def color(self):\n if \"color\" in self._prop_dict:\n return self._prop_dict[\"color\"]\n else:\n return None" ]
[ "0.69194543", "0.65208554", "0.61030096", "0.58912325", "0.5869798", "0.58372813", "0.5826115", "0.58039564", "0.57599896", "0.57347935", "0.5713629", "0.56824017", "0.5678672", "0.5661202", "0.5658636", "0.56532097", "0.5652371", "0.5651904", "0.56345254", "0.5624993", "0.56208843", "0.5608985", "0.56024593", "0.5564539", "0.55595183", "0.5559476", "0.55536944", "0.5544192", "0.55417484", "0.5530024" ]
0.8174092
0
Render suborganizations as they are originally rendered in collective.contact.core.
def render_original_suborgs(self): original_suborgs_view = self.context.restrictedTraverse('@@original-suborganizations') return original_suborgs_view()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def organizations(self):\n self.elements('organizations')", "def sub_organization(self) -> object:\n return self._sub_organization", "def organization(request, id):\n org = get_object_or_404(Organization, pk=id)\n context = {\n 'org': org,\n 'cover': modify_image_url(str(org.cover), 'cover'),\n 'logo': modify_image_url(str(org.logo), 'logo'),\n 'mission': \"\",\n 'values': \"\",\n 'members': \"\",\n }\n\n context['mission'] = org.mission\n\n if org.value_set.exists():\n context['values'] = org.value_set.all()\n\n if org.membership_set.exists():\n membership = org.membership_set.all()\n context['members'] = []\n for member in membership:\n m = User.objects.get(pk=member.user_id)\n context['members'].append(m)\n\n return render(request, 'wantedly_app/org.html', context)", "def atlas_organizations():\n pass", "def test_templates_person_detail_organizations_empty(self):\n person = PersonFactory(should_publish=True)\n\n # The \"organizations\" section should not be present on the public page\n url = person.public_extension.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertContains(response, person.extended_object.get_title())\n self.assertNotContains(response, \"organization\")\n\n # But it should be present on the draft page\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n url = person.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertContains(response, person.extended_object.get_title())\n self.assertContains(response, \"organization-glimpse-list\")", "def organizations(self):\r\n return organizations.Organizations(self)", "def test_organizations_list(self):\n pass", "def organizations(self):\r\n return Organizations(self)", "def test_retrieve_l_organizations(self):\n pass", "def sub_organization(self, sub_organization: object):\n\n self._sub_organization = sub_organization", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def _createOrganizationsCollections(folder):\n collections = [\n {'id': 'all_orgs', 'tit': _('all_orgs'), 'subj': (u'search', ), 'query': [\n {'i': 'portal_type',\n 'o': 'plone.app.querystring.operation.selection.is',\n 'v': ['organization']}],\n 'cond': u\"\", 'bypass': [],\n 'flds': (u'select_row', u'org_pretty_link_with_additional_infos',\n u'SelectedInPlonegroupColumn', u'PloneGroupUsersGroupsColumn',\n u'review_state', u'CreationDate', u'actions'),\n 'sort': u'sortable_title', 'rev': False, 'count': False},\n ]\n _createDashboardCollections(folder, collections)", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def generate_test_organizations(self):\n def generate_organizations_for_parent(org_names, parent_name=None):\n item_dict = {}\n for name in org_names:\n if parent_name:\n item_dict['{}_{}'.format(name, parent_name)] = {\n 'name': name,\n 'parent': parent_name\n }\n else:\n item_dict['{}'.format(name)] = {\n 'name': name\n }\n return item_dict\n\n self.os_dict = \\\n generate_organizations_for_parent(\n ['o1', 'o1', 'o2', 'o3', 'o4_del', 'o5_del'])\n\n # generate organizations in database\n self.orgs = self.create_orgs_from_data(self.os_dict)\n\n # generate sub organizations\n self.subs_o1_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3_del', 'sub4_del', 'sub5_del'], 'o1')\n\n self.subs_o2_dict = \\\n 
generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3', 'sub4_del', 'sub5_del'], 'o2')\n\n self.subs_o3_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3_del'], 'o3')\n\n # generate sub organizations dictionary\n self.subs_dict = {\n **self.subs_o1_dict,\n **self.subs_o2_dict,\n **self.subs_o3_dict,\n }\n\n # update organizations list with sub_organizations in database\n self.orgs.update(\n self.create_orgs_from_data(self.subs_dict, self.orgs))", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def transform_organisations(self, instance):\n return self.transform_entity(instance, 'Organisation')", "def localOrganization(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'localOrganizingCommittee.html',\n context_instance=RequestContext(request, {})\n )", "def organizations(self):\n return sorted(set([team.org for team in self.teams]), key=lambda o: o.title)", "def test_getorganizations_item(self):\n pass", "def organization_list(request):\n return [o.slug for o in Organization.objects.all()]", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, \"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def export_organizations(self):\n print('\\n=== Exporting all organization data...')\n\n for organization in self.client.organizations:\n print('- Exporting organizations:', organization.name)\n\n json = {\n 'id': self.get_id(organization),\n 'href': organization.href,\n 'name': organization.name,\n 'nameKey': 
organization.name_key,\n 'description': organization.description,\n 'status': organization.status,\n 'createdAt': organization.created_at.isoformat(),\n 'modifiedAt': organization.modified_at.isoformat(),\n 'customData': self.get_custom_data(organization),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n }\n\n default_account_store_mapping = organization.default_account_store_mapping\n default_group_store_mapping = organization.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': organization.default_account_store_mapping.href.split('/')[-1],\n 'href': organization.default_account_store_mapping.href,\n 'type': organization.default_account_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_account_store_mapping.account_store.name,\n 'list_index': organization.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': organization.default_group_store_mapping.href.split('/')[-1],\n 'href': organization.default_group_store_mapping.href,\n 'type': organization.default_group_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_group_store_mapping.account_store.name,\n 'list_index': organization.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in organization.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(organization.tenant)\n self.write('%s/%s/organizations/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def process_organizations(self, organizations):\n self.process_elements(\n organizations,\n self.organization_table,\n self.extract_organization,\n ['organization_data', 'member', 'organization']\n )", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def subCruise(self, footprints):\n sub = Cruise()\n sub.V = [v for v in self.V if v.uid in footprints]\n sub.E = []\n sub.neighbours = {}\n sub.resorts = {}\n for key, value in self.neighbours.items():\n sub.neighbours[key] = []\n if key in footprints:\n sub.V.extend([self.V[v] for v in value if self.V[v] not in sub.V])\n sub.E.extend([(key, v) for v in value])\n sub.neighbours.get(key).extend(value)\n for v in value:\n sub.neighbours[v] = [key]\n\n p = set(sub.V)\n #sorted_resorts = sorted(self.resorts, key=len, reverse=True)\n for key, resort in self.resorts.items():\n s = set(resort.footprints)\n if len(tuple(s & p)) > 0:\n sub.resorts[key] = Resort(list(tuple(s & p)))\n p -= s\n if len(tuple(p)) == 0:\n break\n\n addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in sub.V])\n 
sub.upperleft = list(map(min, zip(*addresstamp)))\n sub.bottomright = list(map(max, zip(*addresstamp)))\n sub.upperright = [sub.bottomright[0], sub.upperleft[1]]\n sub.bottomleft = [sub.upperleft[0], sub.bottomright[1]]\n (sub.W, sub.H) = (sub.upperright[0] - sub.bottomleft[0], sub.bottomleft[1] - sub.upperright[1])\n sub.grid = self.grid[np.ix_(list(range(sub.upperleft[1],sub.bottomleft[1])), list(range(sub.upperleft[0],sub.bottomright[0])))]\n\n assert sub.W >= 0\n assert sub.H >= 0\n return sub", "def test_organizations_read(self):\n pass", "def render(self, request):\n return self.leaf.render(request)", "def test_get_organization(self):\n pass" ]
[ "0.6251725", "0.6150702", "0.594913", "0.5577881", "0.5523968", "0.55234843", "0.5503316", "0.54320455", "0.53701484", "0.5356018", "0.53543377", "0.53503627", "0.52751493", "0.52619004", "0.5256815", "0.5246703", "0.52364534", "0.51886845", "0.5175095", "0.5121588", "0.51048803", "0.5104657", "0.50420874", "0.50409865", "0.49984685", "0.49902952", "0.49418074", "0.49184144", "0.48829037", "0.48729032" ]
0.71478117
0
Return group title. If self.short, turn "My config (My suffix)" into "My suffix".
def group_title(self, group): group_title = group.getProperty('title') if self.short: splitted = group_title.split('(') if len(splitted) > 1: group_title = group_title.split('(')[-1][:-1] return html.escape(group_title)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return f\"{self._group.friendly_name} {GROUP_SUFFIX}\"", "def abbreviate_title(s):\n if u'Group ' in s:\n return s.replace(u'Group ', u'')\n else:\n parts = s.split(None, 1)\n if len(parts) < 2:\n return s\n genus, rest = s.split(None, 1)\n return u'%s. %s' % (genus[0], rest)", "def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"", "def getShortName(self) -> str:\n return self.short_name", "def short_displayname(self):\n return self.get_short_displayname()", "def short_title(self):\r\n return truncate_words(self.title, settings.CAMPAIGN_SHORT_TITLE_WORDS)", "def get_short_name(self):\r\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def shortname(self):\n return self.get(\"shortName\")", "def get_short_name(self):\n return self.full_name.split(' ')[0]", "def get_short_name(self):\n\n return self.name", "def get_short_name(self):\n\n return self.name", "def shortTitle(self, newShortTitle=None):\n pass", "def get_group_name(name: str) -> str:\n if is_shortcut_name(name):\n return name.split(config.name_separator)[0]\n raise CHCShortCutNameError(name)", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def get_short_name(self):\n\n return self.name", "def short_name_or_full(self):\n return self.short_name or self.title", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def short_name(self) -> str:\n return self.name_components[-1]", "def configured_title(self):\n return self.get('title', self.DEFAULT_SPACE_TITLE)", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def name(self):\n return self.config.get('name') or f\"{self.id.replace('_', ' ').title()}\"", "def short_name(self):\n return self.get(\"short_name\", decode=True)" ]
[ "0.6838066", "0.6740804", "0.6598238", "0.6544966", "0.64837915", "0.6482191", "0.6465803", "0.6433619", "0.6433619", "0.6433619", "0.6433619", "0.6433619", "0.6433619", "0.6433619", "0.6433619", "0.63969857", "0.6358775", "0.6357756", "0.6357756", "0.6346445", "0.633116", "0.631726", "0.631382", "0.63023293", "0.6279207", "0.6278057", "0.6246979", "0.62041694", "0.62041694", "0.6197689" ]
0.75570285
0
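A standalone sketch of the suffix-extraction behaviour described in the query above ("My config (My suffix)" becomes "My suffix"), with html.escape applied as in the row; the function name and signature here are illustrative only:

import html

def short_group_title(title, short=True):
    # Keep only the parenthesised suffix when `short` is requested.
    if short and "(" in title:
        title = title.split("(")[-1][:-1]
    return html.escape(title)

print(short_group_title("My config (My suffix)"))   # -> My suffix
print(short_group_title("Plain title"))             # no suffix -> Plain title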
Returns a list of the top 10 video IDs from the VIDEOS table, in descending order of view count.
def get_most_viewed(self): # WORKS self.cur.execute("SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10") most_viewed_video_IDs = [] for ID in self.cur.fetchall(): most_viewed_video_IDs.append(ID[0]) return most_viewed_video_IDs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def popular():\r\n d = data_loader.vid_patient_tuples_dict\r\n most_popular_videos = []\r\n for k in sorted(d, key=lambda k: len(d[k]), reverse=True):\r\n most_popular_videos.append(k)\r\n return most_popular_videos", "def get_most_popular_talks_by_views(videos):\r\n return sorted(videos, key=lambda x: int(x.metrics['viewCount']), reverse=True)", "def get_most_popular_talks_by_views(videos: list) -> List[Video]:\n return sorted(videos,\n key=lambda vid: get_vid_stat(vid, 'viewCount'),\n reverse=True)", "def get_latest_videos(self, count = 30, page = 1):\n uri = 'videos/latest'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def video_ids(self):\n return self._sorted_ids", "def get_vr_videos(self, count = 30, page = 1):\n uri = 'videos/vr'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_five_random_IDs(self):\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY RAND() LIMIT 5\")\n IDs = []\n for ID in self.cur.fetchall():\n IDs.append(ID[0])\n return IDs", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def top_controversial(self, n):\n return top_movies", "def viewedVideo(videoId):\n\n if videoId in movieViewCounts:\n movieViewCounts['videoId'] += 1\n rearrangeMovieArray()\n else:\n movieViewCounts[videoId] = movieViewCounts.get(videoId, 0) + 1\n moviesRanked.append(videoId)", "def get_channel_videos(self, channel_id, count = 30, page = 1):\n uri = 'channels/' + channel_id + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def top_by_num_of_ratings(self, n):\n return top_movies", "def topTags(db, topN=1000):\n c=db.cursor()\n c.execute(\"\"\"\n SELECT\n tag\n FROM tags\n GROUP BY tag\n ORDER BY COUNT(*) DESC\n LIMIT %d\n \"\"\" % topN)\n tops = [tag0[0] for tag0 in c.fetchall()]\n c.close()\n return tops", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def top_ten(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__lte=datetime.date.today())\n movies = movies.order_by('-rating')[:10]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)", "def get_video_views(self, response):\n return response.css(\".watch-view-count::text\")\\\n .extract_first(default='')", "def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n top_10_tweets['top_tweets'] = []\n for tweet in Tweet.top_tweets[:10]:\n top_10_tweets['top_tweets'].append({'hashtag': \"#\"+tweet[0], 'count': tweet[1]})\n return top_10_tweets", "def get_youtube_video_ids(keyword: str, limit: int = 10) -> List[str]:\n video_search = VideosSearch(keyword, limit=limit)\n results = video_search.result()['result']\n return [r['id'] for r in results]", "def get_most_popular_talks_by_like_ratio(videos):\n return sorted(videos, key=get_ratio, reverse=True)", "def count_videos(self):\n return len(self.videos)", "def get_featured_videos(self, count = 30, page = 1):\n uri = 'videos/featured'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def top_by_ratings(self, n, 
metric=average):\n return top_movies", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "async def top_10_specs(self):\r\n players = await self.get_players()\r\n specs = []\r\n for player in players:\r\n specs.append(player['specId'])\r\n del specs[10:]\r\n await ctx.message.channel.send('Top 10 3v3 Composition:')\r\n for key in self.specs:\r\n if specs.count(int(key)) > 0:\r\n await ctx.message.channel.send('{:s}: {:d}'.format(self.specs[key], specs.count(int(key))))", "def get_all_videos(self):\n\n return list(self._videos.values())", "def video_thumbnails(self, video_id):\n thumbs = glob.glob(os.path.join(self._id_to_path[video_id], 'thumbnail_*.*'))\n pairs = []\n id_index = self.video_ids.index(video_id)\n for thumb_path in thumbs:\n timestamp = int(thumb_path.split('_')[-1].split('.')[0])\n pairs.append((id_index, timestamp))\n return sorted(pairs, key=lambda x: x[1])", "def get_all_videos(channel_ids_list, latest_date, oldest_date, api_service):\n log_str = str()\n all_video_ids = []\n ignored_report = []\n\n nb_channels = len(channel_ids_list)\n\n for count, channel_id in enumerate(channel_ids_list):\n channel_selection = video_selection(api_get_channel_videos(channel_id, api_service), latest_date, oldest_date,\n channel_id, api_service)\n\n to_print = f\"Channel {count + 1} out of {nb_channels} ({(count + 1) * 100 / nb_channels:.2f} %).\\n\\n\" \\\n f\"Channel ID: {channel_id}\\n\" \\\n f\"Channel Name: {channel_selection['channel_name']}\\n\\n\" \\\n f\"Number of selected videos: {len(channel_selection['selection_list'])}\\n\"\n\n if channel_id in channels_ignored or len(channel_selection['selection_list']) * 50 > 2000:\n to_print += f\"Number of videos uploaded in a year: {channel_selection['a_year_ago_count']}\\n\" \\\n f\"STATUS: IGNORED\\n\"\n if channel_id in channels_ignored:\n ignored_report.append({'channel_id': channel_id,\n 'channel_name': channel_selection['channel_name'],\n 'cause': 'In ignored set.',\n 'n': len(channel_selection['selection_list'])})\n else:\n ignored_report.append({'channel_id': channel_id,\n 'channel_name': channel_selection['channel_name'],\n 'cause': 'To many videos selected',\n 'n': len(channel_selection['selection_list'])})\n\n elif channel_selection[\"a_year_ago_count\"] != 0:\n to_print += f\"Number of videos uploaded in a year: {channel_selection['a_year_ago_count']}\\n\" \\\n f\"STATUS: ACTIVE\\n\"\n all_video_ids += channel_selection[\"selection_list\"]\n\n else:\n to_print += \"Number of videos uploaded in a year: 0\\n\" \\\n \"STATUS: INACTIVE\\n\"\n\n if channel_id not in channels_url_exception:\n # Ignore exceptions.\n webbrowser.open(f\"https://www.youtube.com/channel/{channel_id}\")\n\n to_print += f\"\\nTotal number of videos selected so far: {len(all_video_ids)}\\n{'/' * 50}\\n\"\n\n log_str += f\"{to_print}\\n\"\n\n for ignored_elem in ignored_report:\n log_str += f'Channel \"{ignored_elem[\"channel_name\"]}\"' \\\n f' ({ignored_elem[\"channel_id\"]}) ignored - ' \\\n f'Cause: {ignored_elem[\"cause\"]} - N_Videos: {ignored_elem[\"n\"]}\\n'\n\n print(to_print)\n\n if len(all_video_ids) * 50 > 8000:\n print(\"Warning! 
API cost could be higher than 8000.\")\n log_str += \"\\nWarning! API cost could be higher than 8000.\"\n\n return {\"all_video_ids\": all_video_ids, \"log_str\": log_str}", "def video_list(request):\n mongodb = get_db() \n data = video_list_query()\n videos = video_info_query()\n # from edinsights.core.render import render\n return render(request, \"list-view.html\", {\n 'data': data, 'videos': videos\n })" ]
[ "0.7012187", "0.6970375", "0.69525117", "0.64749527", "0.6374998", "0.6189566", "0.6091879", "0.59435105", "0.58874273", "0.58376026", "0.5828731", "0.5826369", "0.58129585", "0.57645524", "0.57359713", "0.57298183", "0.5721471", "0.5713073", "0.57077444", "0.56908333", "0.5644075", "0.5638614", "0.56361663", "0.5613804", "0.5595398", "0.5579876", "0.5563129", "0.5558829", "0.55524486", "0.55233574" ]
0.80609137
0
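The query in the row above casts view_count to a number before ordering, which matters when counts are stored as text. A small illustration of lexicographic versus numeric ordering with sample values:

counts = ["9", "100", "25"]
print(sorted(counts, reverse=True))           # ['9', '25', '100'] -- string (lexicographic) order
print(sorted(counts, key=int, reverse=True))  # ['100', '25', '9'] -- numeric order, as the CAST provides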
Checks if the entered username and password correspond to a valid user in the USERS and ADMINS tables.
def is_valid_user(self, username, password): # WORKS done1 = self.cur.execute("SELECT password FROM users WHERE username=\"{}\"".format(username)) done2 = self.cur.execute("SELECT username FROM admins WHERE username=\"{}\"".format(username)) if done1 == 0 and done2 == 0: # If both queries are unsuccessful, username doesn't exist in both tables. return False else: if done1 == 1: # If username exists in USERS table. self.cur.execute("SELECT password FROM users WHERE username=\"{}\"".format(username)) stored_password = self.cur.fetchone()[0] return check_password_hash(stored_password, password) # Returns True if the hashes match. else: # If username exists in ADMINS table. self.cur.execute("SELECT password FROM admins WHERE username=\"{}\"".format(username)) stored_password = self.cur.fetchone()[0] return check_password_hash(stored_password, password) # Returns True if the hashes match.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def validate(self) -> bool:\n if not super().validate():\n return False\n\n # Does the user exist\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('Invalid username or password')\n return False\n\n # Does given password match user's password\n if not user.check_password(self.password.data):\n self.username.errors.append('Invalid username or password')\n return False\n\n return True", "def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password", "def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def is_logged_in_user_valid(user_name, password):\n if user_name.upper() == \"HELLO\" and password == \"World\":\n return True # User input matches user name and password.\n else:\n return False # User input does not match user name and password.s", "def is_correct_user(self, login, password):\n pass", "def check_user(self, username, password):\n user = [user for user in self.db if user['username'] == username]\n if user:\n if check_password_hash(user[0][\"password\"], password):\n return True\n return False\n return False", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def check_my_users(user):\n user_data = my_users.get(user['username'])\n if not user_data:\n return False # <--- invalid credentials\n elif user_data.get('password') == user['password']:\n return True # <--- user is logged in!\n\n return False # <--- invalid credentials", "def check_valid(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE username = %s\", (username,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if sha256_crypt.verify(password, credentials[1]):\n return True\n return False", "def validate_user(db, username, pw):\n print('\\n\\nVALIDATING USER\\n\\n')\n user = db['user'].find_one({'username': username})\n\n update_timestamp = {}\n update_timestamp['login_timestamp'] = str(datetime.datetime.utcnow())\n\n # Check if user exists\n if user is None:\n print(username + \" does not exist!\")\n return False\n\n user_id = str(user['_id'])\n print(\"User ID: \" + user_id)\n\n # Check user's password against what is stored in the database\n db_pw = db['security'].find_one({'user_id': user_id})\n\n # Check if the password 
for the user exists\n if db_pw is None:\n print(\"User password does not exist!\")\n return False\n\n # For backwards compatibility, treat password as unhashed first\n if db_pw['password'] == pw:\n db['user'].update_one({'_id': user_id}, {'$set': update_timestamp})\n print(\"Valid user!\")\n return True\n\n # Check for hashed password\n if check_password_hash(db_pw['password'], pw):\n db['user'].update_one({'_id': user_id}, {'$set': update_timestamp})\n print(\"Valid user!\")\n return True\n\n print(\"Invalid user!\")\n return False", "def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw", "def check_auth(username, password):\n ADMIN_USER = config.CONFIG_VARS['ADMIN_USER']\n ADMIN_PASS = config.CONFIG_VARS['ADMIN_PASS']\n return username == ADMIN_USER and password == ADMIN_PASS", "def check_auth(username, password):\n user = User.query.filter(User.username == username).first()\n\n if user:\n return user.password == password\n else:\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'admin'", "def validate_user(self, username, password, client, request, *args, **kwargs):\n log.debug('Validating username %r and its password', username)\n if self._usergetter is not None:\n user = self._usergetter(username, password, client, request, *args, **kwargs)\n if user:\n log.debug('Successfully validated username %r', username)\n request.user = user\n return True\n return False\n log.debug('Password credential authorization is disabled.')\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'worcester'", "def check_auth(username, password):\n return username == 'admin' and password == 'password'", "def is_login_valid(email, password):\n user_data = Database.find_one(UserConstants.COLLECTION, {\"email\": email})\n admin_created_user = Database.find_one(UserConstants.COLLECTION, {\"email\": email, \"admin_created\": \"Yes\"})\n if user_data is None:\n # Tell the user that their e-mail doesn't exist\n raise UserErrors.UserNotExistsError(\n \"Email is not recognized. Please use link below to sign-up if you have not created an account.\")\n if admin_created_user is not None:\n # Tell the user to sign up\n raise UserErrors.AdminCreatedUserError(\n \"Your account was created by an admin. Please register with HHT to enjoy the full functionality of the site.\")\n if not sha512_crypt.verify(password, user_data['password']):\n # Tell the user that their password is wrong\n raise UserErrors.IncorrectPasswordError(\"Password does not match the one registered.\")\n\n return True", "def check_user(self, login, password):\n user = self.cursor.execute(\n '''SELECT * FROM users WHERE login = ?''', login).fetchone()\n if user is not None:\n if user[3] == password:\n return Message('response', 'User exists')\n else:\n return Message('response',\n 'Users exists. 
Check password')\n else:\n return Message('response', 'User does not exists')", "def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'", "def check_auth(username, password):\n return username == 'admin' and password == 'pebble'", "def is_valid_username(self, username): # WORKS\n done1 = self.cur.execute(\"SELECT username FROM users WHERE username=\\\"{}\\\"\".format(username))\n done2 = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done1 == 0 and done2 == 0: # If both queries are unsuccessful, username doesn't exist in both tables.\n return False\n else:\n return True", "def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def clean(self):\n cleaned_data = super(LoginForm, self).clean()\n username = cleaned_data['username']\n password = cleaned_data['password']\n\n error_message = \"Username/password combination does not match\"\n\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist:\n raise forms.ValidationError(error_message)\n\n # Check the password\n if not user.check_password(password):\n raise forms.ValidationError(error_message)\n\n return cleaned_data", "def check_auth(username, password):\n return username == USERNAME and password == PASSWORD", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'" ]
[ "0.73799044", "0.73681056", "0.7302386", "0.7233658", "0.7211134", "0.71928614", "0.7071615", "0.7062441", "0.70173335", "0.698397", "0.6976572", "0.6945523", "0.6932524", "0.6915142", "0.68958944", "0.6830708", "0.6821252", "0.6783548", "0.67477894", "0.6739477", "0.6738232", "0.6682403", "0.66783637", "0.6639771", "0.66235435", "0.6623419", "0.65957487", "0.65925217", "0.658986", "0.6578519" ]
0.79998374
0
Updates the password of the user in the USERS table.
def update_password(self, username, password): #WORKS password_hash = generate_password_hash(password) try: self.cur.execute("UPDATE users SET password = \"{}\" WHERE username = \"{}\"".format(password_hash, username)) self.db.commit() except: self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def update_user_password(self, username):\n parser_password.add_argument('password',\n type=validate_password, required=True,\n nullable=False,\n help=\"Password must be at least 6 characters\"\n )\n args = parser_password.parse_args()\n password = self.set_password(request.json.get('password'))\n\n query = \"\"\"UPDATE users SET password=%s WHERE username=%s\"\"\"\n values = password, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True", "def update_password(self, username, password):\n self.update(('Password', password), username)", "def update_user_password(context, params):\n\n user = User.objects.filter(id=params.get('id')).first()\n if not user:\n raise ValueError(\"user not found\")\n # user.edited_by = context.user\n if params.get('password'):\n user.set_password(params.get('password'))\n\n user.save()\n return user", "def updatePassword(con, options, dbName, userName, userInfo):\n if checkUsername(userName):\n trace(\"For dbName='%s', alter user '%s' password\" % (dbName, userName))\n userPassword = userInfo[\"password\"]\n optionalDbExecute(con, options, \"alter user %s with password '%s'\" % (userName, userPassword))", "def change_user_password(self, user, new_pass):\n return self.update(user, password=new_pass)", "def update_password(self, user_id, password):\n user = self.get(user_id, raise_error=True)\n if user is None:\n raise KeyError\n salt_hashedpassword = ''.join(self.get_salt_hashedpassword(password))\n user.password = salt_hashedpassword\n self.session.add(user)", "def set_password(self, user, password):\n hashed_password = self.hash_password(password)\n server_name = self.get_server_name()\n hookenv.log(\"Storing hash: {}\".format(hashed_password), hookenv.DEBUG)\n result = self.pgsql_query(\n \"UPDATE users SET password_hash = '{}' WHERE name = '@{}:{}';\".format(\n hashed_password, user, server_name\n )\n )\n return result", "def view_update_user(self, user, username, password):\r\n user.realm._checker.passwd(username, password, True)", "def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def _change_password(self, user, password):\r\n user.set_password(password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def change_db_user_password(self, username, password):\n\n self.sql(\"ALTER USER %s WITH PASSWORD '%s'\" % (username, password))", "def change_user_password(self, instance, user, new_pass):\n return instance.change_user_password(user, new_pass)", "def updateWebAppUserPwd( self, username, password ):\n try:\n crypt_pass = crypt(password, username)\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n con.cursor().callproc('update_web_app_user_password', [username, crypt_pass])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def _update_password(self, email, new_password):\r\n user = User.objects.get(email=email)\r\n user.set_password(new_password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def test_update_password(self):\n\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = UsersAndGroups()\n auag.add_user(\n 
User(name=\"userx\", mail=\"[email protected]\", display_name=\"User X\", password=\"password1\")\n )\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n sync.update_user_password(\n userid=\"userx\", currentpassword=TS_PASSWORD, password=\"password2\"\n )", "def update_password(self, pwd):\n self.password = bcrypt.generate_password_hash(pwd).decode('utf8')", "def changeUserPassword(self, name, password):\n self._client.changeUserPassword(name, password)", "def set_password(username, new_password):\n if not validate_password(new_password):\n return \"salasana on väärää muotoa\"\n new_password_hash = generate_password_hash(new_password)\n sql = \"UPDATE users \" \\\n \"SET password=:new_pw \" \\\n \"WHERE username=:username\"\n db.session.execute(sql, {\"new_pw\": new_password_hash, \"username\": username})\n db.session.commit()\n return \"ok\"", "def update_user_and_pw(cls, userid, user_email, user_password, user_phone):\n\n user_to_edit = User.query.filter_by(user_id=userid).one()\n\n user_to_edit.email = user_email\n user_to_edit.password = user_password\n user_to_edit.mobile_phone = user_phone\n\n db.session.commit()\n return user_to_edit", "def update_password(self, new_password=None):\n\n self.password = generate_password_hash(new_password)\n\n if self.save(verbose=False):\n self.logger.warn('Updated password! %s' % self)\n else:\n raise AttributeError('Password update failed!')", "def change_password(self, user, current_password, password):\n\n if not password:\n raise DoorstepError('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise DoorstepError('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def change_password(self, user, current_password, password):\n\n if not password:\n raise Exception('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise Exception('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def update_password(self, username, old_password, new_password):\n\n return self.user_manager.update_password(username, old_password, new_password)", "def update_Password(UserName,Password):\r\n\r\n try:\r\n conn = sql.connect('database.db')\r\n cur = conn.cursor()\r\n update_query=f\"update users1 set Password='{Password}' where UserName='{UserName}'\"\r\n update = cur.execute(update_query)\r\n conn.commit()\r\n msg=\"Password updated successfully in the database based on UserName.\"\r\n except:\r\n msg=\"Password Updation problem exists\"\r\n finally:\r\n conn.close()\r\n return msg", "def update_user_password(self, user_id, password, original_password):\n update_user = {\n 'password': password,\n 'original_password': original_password\n }\n update_user = json.dumps({'user': update_user})\n resp, _ = self.post('users/%s/password' % user_id, update_user)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp)", "def change_password(self, user):\n if not self.is_valid():\n return None\n password = self.clean_password2()\n user.set_password(password)\n user.save()\n return user", "def testUpdateUser(self):\n UserAPI().create([(u'test', u'secret', u'name', u'[email protected]')])\n user = getUser(u'test')\n passwordHash = user.passwordHash\n self.store.commit()\n info = TUserUpdate(u'test', u'password', u'new-name',\n 
u'[email protected]')\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n yield self.facade.updateUser(session, info)\n\n self.store.rollback()\n self.assertEqual(u'test', user.username)\n self.assertNotEqual(passwordHash, user.passwordHash)\n self.assertEqual(u'new-name', user.fullname)\n self.assertEqual(u'[email protected]', user.email)", "def doChangeUser(self, login, password, **kwargs):\n IUserChanger(self.context).setPassword(password)" ]
[ "0.8231751", "0.78945357", "0.7791965", "0.77891487", "0.7711647", "0.76875335", "0.76627403", "0.7620926", "0.7619506", "0.7600196", "0.7599043", "0.7467108", "0.7440784", "0.7408987", "0.7402097", "0.73598385", "0.7357057", "0.73238444", "0.7320691", "0.7301765", "0.72800183", "0.726323", "0.7252192", "0.7251468", "0.7238844", "0.72274584", "0.7224365", "0.7216154", "0.71032476", "0.7099949" ]
0.83124804
0
Updates the VIDEOS table with the video ID, uploader username and video title.
def upload_video(self, video_ID, username, title): #WORKS try: view_count = 0 self.cur.execute("INSERT INTO videos VALUES(\"{}\", \"{}\", \"{}\", {}, NULL)".format(video_ID, title, username, view_count)) self.db.commit() except: self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_watched(self, username, video_ID): #WORKS\n try:\n done = self.cur.execute(\"SELECT * FROM watched WHERE username = \\\"{}\\\" AND video_ID = \\\"{}\\\"\".format(username, video_ID))\n if done == 1: # If the query was successful, one row exists\n self.cur.execute(\"UPDATE watched SET count = count + 1 WHERE username = \\\"{}\\\" AND video_ID = \\\"{}\\\"\".format(username, video_ID))\n try:\n self.cur.execute(\"CALL add_to_fav(\\\"{}\\\", \\\"{}\\\")\".format(video_ID, username))\n except:\n pass\n if done == 0: # If the query was unsuccessful, row does not exist.\n count = 1\n self.cur.execute(\"INSERT INTO watched VALUES(\\\"{}\\\", \\\"{}\\\", {})\".format(video_ID, username, count))\n self.db.commit()\n except:\n self.db.rollback()", "def update_video(conn: sqlite3.Connection, cols_vals: dict, verbose=False):\n video_id = cols_vals.pop('id')\n query_string = generate_unconditional_update_query(list(cols_vals.keys()))\n values = list(cols_vals.values())\n values.append(video_id)\n if execute_query(conn, query_string, tuple(values)):\n if verbose:\n logger.info(f'Updated video {video_id!r}')\n return True", "def test_api_video_update_detail_token_user_other_video(self):\n video_token = factories.VideoFactory()\n video_update = factories.VideoFactory(title=\"my title\")\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video_token.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n data = {\"title\": \"my new title\"}\n response = self.client.put(\n f\"/api/videos/{video_update.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 403)\n video_update.refresh_from_db()\n self.assertEqual(video_update.title, \"my title\")", "def test_api_video_put_by_playlist_admin(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n video = factories.VideoFactory(playlist=playlist, title=\"existing title\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n json.dumps({\"title\": \"updated title\"}),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.title, \"updated title\")", "def test_api_video_update_detail_token_user_title(self):\n video = factories.VideoFactory(title=\"my title\")\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n data = {\"title\": \"my new title\"}\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.title, \"my new title\")", "def test_api_video_update_detail_token_user_id(self):\n video = factories.VideoFactory()\n original_id = video.id\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n 
jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n data = json.loads(response.content)\n data[\"id\"] = \"my new id\"\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.id, original_id)", "def test_api_video_update_detail_token_user_uploaded_on(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n data = json.loads(response.content)\n self.assertIsNone(data[\"active_stamp\"])\n data[\"active_stamp\"] = \"1533686400\"\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.uploaded_on, None)", "def test_api_video_update_detail_token_user_upload_state(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n data = json.loads(response.content)\n self.assertEqual(data[\"upload_state\"], \"pending\")\n data[\"upload_state\"] = \"ready\"\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.upload_state, \"ready\")", "def users_video_upload(self):\n email_token = auth.current_user()[0]\n content = request.form\n if not UPLOAD_VIDEO_MANDATORY_FIELDS.issubset(content.keys()) or not \"video\" in request.files:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (UPLOAD_VIDEO_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (UPLOAD_VIDEO_MANDATORY_FIELDS - set(content.keys()))), 400\n title = content[\"title\"]\n location = content[\"location\"]\n visible = True if content[\"visible\"] == \"true\" else False\n video = request.files['video'].stream\n description = content[\"description\"] if \"description\" in content else None\n try:\n file_location = self.media_server.upload_video(user_email=email_token,\n title=title, video=video)\n except InvalidVideoFormatError:\n self.logger.debug(messages.INVALID_VIDEO_FORMAT)\n return messages.ERROR_JSON % messages.INVALID_VIDEO_FORMAT, 400\n video_data = VideoData(title=title, location=location, creation_time=datetime.now(),\n file_location=file_location, visible=visible, description=description)\n self.video_database.add_video(user_email=email_token, video_data=video_data)\n response_dict = video_data._asdict()\n response_dict[\"creation_time\"] 
= response_dict[\"creation_time\"].isoformat()\n return json.dumps(response_dict), 200", "def test_api_video_put_by_organization_admin(self):\n user = factories.UserFactory()\n organization = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n role=models.ADMINISTRATOR, organization=organization, user=user\n )\n playlist = factories.PlaylistFactory(organization=organization)\n video = factories.VideoFactory(playlist=playlist, title=\"existing title\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n json.dumps({\"title\": \"updated title\"}),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.title, \"updated title\")", "def test_api_video_update_detail_token_user_description(self):\n video = factories.VideoFactory(description=\"my description\")\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n data = json.loads(response.content)\n data[\"description\"] = \"my new description\"\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.description, \"my new description\")", "def test_api_video_update_detail_anonymous(self):\n video = factories.VideoFactory(title=\"my title\")\n data = {\"title\": \"my new title\"}\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 401)\n video.refresh_from_db()\n self.assertEqual(video.title, \"my title\")", "def merge_video_data(self):\n if self.overwrite:\n if self.wipe:\n self.videos = self.youtube_videos\n elif self.add_new_files or self.overwrite_fields:\n old_videos = {\n video.filename: video\n for video in self.file_videos\n }\n old_videos_url = {\n video.metadata['videos'][0]['url']: video\n for video in self.file_videos\n }\n new_videos = {}\n for video in self.youtube_videos:\n new_video_url = video.metadata['videos'][0]['url']\n if new_video_url in old_videos_url:\n new_video_filename = old_videos_url[new_video_url].filename\n else:\n new_video_filename = video.filename\n new_videos[new_video_filename] = video\n\n if self.overwrite_fields:\n forgotten = set(old_videos) - set(new_videos)\n for name in forgotten:\n logger.warning('Missing video: {} {}',\n old_videos[name].filename,\n old_videos[name].metadata['videos'][0]['url'],\n )\n\n changes = set(new_videos).intersection(set(old_videos))\n for path in changes:\n merged_video = old_videos[path].merge(\n new_videos[path], self.overwrite_fields)\n self.videos.append(merged_video)\n else:\n self.videos = self.file_videos\n if self.add_new_files:\n adds = set(new_videos) - set(old_videos)\n self.videos.extend([new_videos[path] for path in adds])\n else: # not self.overwrite\n self.videos = self.youtube_videos", "def test_api_video_put_by_playlist_instructor(self):\n user = 
factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.INSTRUCTOR, playlist=playlist, user=user\n )\n video = factories.VideoFactory(playlist=playlist, title=\"existing title\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n json.dumps({\"title\": \"updated title\"}),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 403)\n video.refresh_from_db()\n self.assertEqual(video.title, \"existing title\")", "def update_video_and_gloss_by_new_upload(video, video_path, thumbnail_path):\n video.video_path = video_path\n video.thumbnail = thumbnail_path\n\n if video.user_id == config.SAMPLE_VIDEO_USER_ID:\n video.gloss.sample_video_id = video.id\n video.status = VideoStatus.SAMPLE\n else:\n video.review_summary = INITIAL_SUMMARY\n video.status = VideoStatus.PENDING_APPROVAL\n video.gloss.pending_approval_video_count += 1\n\n # hack for sample recording yinhuan\n if user_id == 36:\n video.status = VideoStatus.APPROVED\n video.gloss.pending_approval_video_count -= 1\n video.gloss.approved_video_count += 1\n\n video.gloss.save()\n video.save()", "def test_api_video_patch_by_playlist_admin(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n video = factories.VideoFactory(playlist=playlist, title=\"existing title\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n response = self.client.patch(\n f\"/api/videos/{video.id}/\",\n json.dumps({\"title\": \"updated title\"}),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.title, \"updated title\")", "def updateDB():\n #Loading data DB from txt file\n with open('dataVideo.txt','r') as f:\n videos_dict = json.load(f)\n #loading the dic into a DF\n DB = pd.DataFrame.from_dict(videos_dict)\n #extract the list of ID:\n id_list = DB['id'].tolist()\n #Using the ID of the video as DF index\n DB.set_index('id', inplace=True)\n videoData = []\n for videoId in id_list:\n print(videoId)\n dic = {}\n page = requests.get('https://www.tiktok.com/embed/v2/'+videoId+'?lang=en')\n tree = html.fromstring(page.content)\n buyers = tree.xpath('//*[@id=\"__NEXT_DATA__\"]/text()')\n jsonData = json.loads(buyers[0])\n if 'videoData' in jsonData['props']['pageProps']:\n dic['id'] = videoId\n dic['commentCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['commentCount']\n dic['likeCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['diggCount']\n dic['playCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['playCount']\n dic['shareCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['shareCount']\n print(dic)\n videoData.append(dic)\n else:\n print(\"video doesn't exist anymore and was deleted from the DB\")\n DB.drop(videoId, inplace=True)\n \n newDataDF = pd.DataFrame(videoData)\n #setting the index with the id\n newDataDF.set_index('id', inplace=True)\n DB.update(newDataDF)\n #putting back the index as a 
column to have it in the export\n DB['id'] = DB.index\n #saving DF as json into file\n DB.to_json(r'dataVideo.txt',orient=\"records\")", "def update_view_count(self, video_ID): #WORKS\n try:\n self.cur.execute(\"UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \\\"{}\\\"\".format(video_ID)) # Adds 1 to the existing value.\n self.db.commit()\n except:\n self.db.rollback()", "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def save_video_data(self):\n if self.overwrite:\n # Erase old event videos\n for path in self.video_dir.glob('*.json'):\n path.unlink()\n for video in self.videos:\n video.save()", "def upload_video(self, video_file):\r\n part = \"snippet,status\"\r\n metadata = self.get_metadata(video_file)\r\n body = {\r\n \"snippet\": {\r\n \"title\": metadata['title'],\r\n \"description\": metadata['description'],\r\n \"tags\": metadata['categoryId'],\r\n \"categoryId\": metadata['categoryId']\r\n },\r\n \"status\": {\r\n \"privacyStatus\": \"public\",\r\n \"license\": \"youtube\", # temporary, see gh#414\r\n \"embeddable\": True,\r\n \"publicStatsViewable\": True\r\n }\r\n }\r\n # This is to fix a bug, the API thinks our .ogg files are audio/ogg\r\n mimetype = \"video/{}\".format(video_file.split(\".\")[-1])\r\n media_body = MediaFileUpload(video_file, chunksize=-1, resumable=True, mimetype=mimetype)\r\n insert_request = self.service.videos().insert(part=part, body=body, media_body=media_body)\r\n response = None\r\n error = None\r\n retry = 0\r\n sleep_seconds = 5.0\r\n while response is None:\r\n try:\r\n log.info(\"Uploading %s\" % video_file)\r\n (status, response) = insert_request.next_chunk()\r\n if 'id' in response:\r\n return (Response.SUCCESS, response)\r\n else:\r\n return (Response.UNEXPECTED_FAILURE, response)\r\n except HttpError as e:\r\n if e.resp.status in self.RETRIABLE_STATUS_CODES:\r\n error = \"A retriable HTTP error {} occurred:\\n{}\".format(e.resp.status, e.content)\r\n else:\r\n return (Response.UNRETRIABLE_ERROR, {\"status\": e.resp.status, \"content\": e.content})\r\n except self.RETRIABLE_EXCEPTIONS as e:\r\n error = \"A retriable error occurred: {}\".format(e)\r\n except client.AccessTokenRefreshError:\r\n return (Response.ACCESS_TOKEN_ERROR, None)\r\n if error is not None:\r\n log.error(error)\r\n retry += 1\r\n if retry > self.MAX_RETRIES:\r\n return (Response.MAX_RETRIES_REACHED, None)\r\n log.info(\"Sleeping %s seconds and then retrying...\" % sleep_seconds)\r\n time.sleep(sleep_seconds)", "def videos(self, videos):\n self._videos = videos", "def update(self, video_id, uri=None):\n\n fields = []\n params = ()\n\n if uri is not None:\n fields.append('uri = ?')\n params += (uri,)\n\n if len(fields) == 0:\n return False\n\n params += (video_id,)\n\n query = f\"\"\"\n UPDATE videos\n SET {', '.join(fields)}\n WHERE id = ?\n \"\"\"\n\n result = Model.execute(query, params)\n\n return True if result.rowcount == 1 else False", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def update(self, media):\n update_query = \"\"\"UPDATE %s\n SET\n filename=?,\n uploader=?,\n width=?,\n height=?,\n size=?,\n quality_image=?,\n featured_picture=?,\n valued_image=?,\n timestamp=?\n WHERE 
pageid=?\"\"\" % MediaCollection.COLLECTIONS_TABLE\n fields = list(media.totuple())\n fields.append(fields[0])\n fields.remove(fields[0])\n fields = tuple(fields) # generate tuple with pageid as last element\n self.cursor.execute(update_query, fields)\n self.connection.commit()", "def set_display_name(self, request, video_id=None):\n serializer = serializers.LiveSessionDisplayUsernameSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(\n {\"detail\": \"Invalid request.\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n video_id = self.get_related_video_id()\n video = get_object_or_404(Video, pk=video_id)\n\n try:\n update_fields = {\n \"display_name\": serializer.validated_data[\"display_name\"],\n }\n if self.request.resource and is_lti_token(\n self.request.resource.token\n ): # LTI context\n token_user = self.request.resource.user\n consumer_site = get_object_or_404(\n ConsumerSite,\n pk=self.request.resource.token.payload[\"consumer_site\"],\n )\n # Update email only if it's defined in the token user\n if \"email\" in token_user:\n update_fields.update({\"email\": token_user[\"email\"]})\n\n # Update username only it's defined in the token user\n if \"username\" in token_user:\n update_fields.update({\"username\": token_user[\"username\"]})\n\n livesession, _created = LiveSession.objects.update_or_create(\n consumer_site=consumer_site,\n lti_id=self.request.resource.context_id,\n lti_user_id=token_user.get(\"id\"),\n video=video,\n defaults=update_fields,\n )\n elif self.request.resource: # Anonymous context\n if not serializer.validated_data.get(\"anonymous_id\"):\n return Response(\n {\"detail\": \"Invalid request.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n livesession, _created = LiveSession.objects.update_or_create(\n anonymous_id=serializer.validated_data[\"anonymous_id\"],\n video=video,\n defaults=update_fields,\n )\n else: # Standalone context\n livesession, _created = LiveSession.objects.update_or_create(\n video=video,\n user_id=self.request.user.id,\n defaults=update_fields,\n )\n return Response(self.get_serializer(livesession).data, status.HTTP_200_OK)\n\n except IntegrityError as error:\n if \"livesession_unique_video_display_name\" in error.args[0]:\n return Response(\n {\"display_name\": \"User with that display_name already exists!\"},\n status=status.HTTP_409_CONFLICT,\n )\n\n raise error", "def add_video(id):\n event = Event.query.get_or_404(id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n upload_video_form = UploadVideoForm()\n if upload_video_form.validate_on_submit():\n video = Video(\n url=UploadVideoForm.parse_url(upload_video_form.video_url.data), event=event\n )\n db.session.add(video)\n db.session.commit()\n flash(\"Your upload was successful.\", \"success\")\n return redirect(url_for(\"events.media\", id=id))\n else:\n session[\"upload_video_form_errors\"] = upload_video_form.video_url.errors\n session[\"video_url\"] = upload_video_form.video_url.data\n return redirect(url_for(\"events.media\", id=event.id))", "def get_fav_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\" ORDER BY CAST(count as decimal) DESC\".format(username))\n return self.cur.fetchone()[0]", "def get_video_id(self, obj):\n return obj.video.id", "def test_api_video_put_by_organization_instructor(self):\n user = factories.UserFactory()\n organization = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n 
role=models.INSTRUCTOR, organization=organization, user=user\n )\n playlist = factories.PlaylistFactory(organization=organization)\n video = factories.VideoFactory(playlist=playlist, title=\"existing title\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n json.dumps({\"title\": \"updated title\"}),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 403)\n video.refresh_from_db()\n self.assertEqual(video.title, \"existing title\")" ]
[ "0.6349348", "0.6306322", "0.61023533", "0.6064381", "0.596619", "0.58223826", "0.580957", "0.57428473", "0.5715386", "0.5588825", "0.558088", "0.55619055", "0.5524529", "0.5521689", "0.5521673", "0.5514653", "0.549211", "0.54767346", "0.540602", "0.53847206", "0.53525865", "0.53412944", "0.53179497", "0.5295905", "0.5266192", "0.5239266", "0.52351534", "0.51978546", "0.51762795", "0.5170719" ]
0.7231752
0
Returns the title of the video with the corresponding video ID from the VIDEOS table.
def get_video_title(self, video_ID): #WORKS try: self.cur.execute("SELECT video_title FROM videos WHERE video_ID = \"{}\"".format(video_ID)) title = self.cur.fetchone()[0] return title except: return "Error getting title"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def video_title(self):\n # type: () -> string_types\n return self._video_title", "def fetch_title(self, movie_id):\n movie = tmdbsimple.Movies(movie_id)\n request = movie.info()\n\n return movie.title", "def get_video_title(self, response):\n return response.css(\".watch-title::text\").extract_first(default='')", "def select(self, video_id):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n WHERE id = ?\n \"\"\"\n\n result = Model.execute(query, (video_id,))\n\n return result.fetchone()", "def episode_title_for_tvdb(self):\n return self.episode_title", "def __calculate_title(video_data):\n title = 'Unknown'\n if 'fulltitle' in video_data.keys():\n title = video_data['fulltitle']\n elif 'title' in video_data.keys():\n title = video_data['title']\n elif '_filename' in video_data.keys():\n title = video_data['_filename']\n return title", "def the_tvdb_dot_com_id(title):\n pass", "def video_id(self) -> str:\r\n return self._video_id", "def get_title_by_id_from_table(table, id):\n\n # your code", "def media_title(self) -> str:\n return self._device.movie.title", "def video_detail(request, video_id):\n\n video = get_object_or_404(Video, id=video_id)\n\n context = {\"video\": video}\n\n return render(request, 'video_detail.html', context)", "def media_title(self):\n if self._track_id is not None and self._playlist:\n for track in self._playlist:\n if track.get(\"id\") == self._track_id:\n return track.get(\"title\")\n return None", "def video(title, hash = None, season = None, episode = None):\n if not hash:\n video = list(mythVideo.searchVideos(title = title, season = season, episode = episode))[0]\n else:\n video = [video for video in mythVideo.searchVideos(title = title) if video.hash == hash][0]\n\n return render_template('recording.html', item = video)", "def get_video_id(self, obj):\n return obj.video.id", "def get_video(self, video_id):\n return self._videos.get(video_id, None)", "def get_title_by_id(id):\n\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def _get_movie_name(self, movie_id):\n return self.movies[self.movies['movie_id'] == movie_id]['name'].iloc[0]", "def getTitle(self, pageId):\n\t\ti = bisect_left(self.pageList, pageId)\n\t\tposition = None\n\n\t\tif i != len(self.pageList) and self.pageList[i] == pageId:\n\t\t\tposition = self.titlePos[i]\n\n\t\tif position is None:\n\t\t\treturn None\n\n\t\t# Go to that position in titles indexed file and take one line\n\t\tself.titlesIndexFile.seek(position)\n\t\tline = self.titlesIndexFile.readline()\n\t\ttitle = line.split(':', 1)[1]\n\t\treturn unicode(title, 'utf-8').strip()", "def get_title_by_id_from_table(table, id):\n\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def get_title_by_id(id):\n\n # your code", "def title_by_id(id_: int) -> Any:\n post = Posts.query.filter_by(id=id_).first()\n if post is None:\n return \"404\"\n return post.title", "def video_dict(self):\n self.cur.execute(\"SELECT video_ID, video_title FROM videos\")\n videos = {}\n video_titles = []\n for video in self.cur.fetchall():\n video_titles.append(video[1])\n videos.update({video[0] : video[1]})\n return videos, video_titles", "def route_video_details(id_title):\n\n result = video_dal_retriever.retrieve_details(id_title)\n return jsonify({'details' : result})", "def episode_title_for_tvdb(self):\n \n # strip out the year from the episode title:\n return \"Episode 
%d\"%self.episode_number[1]", "def _http_get_title_by_id(self, id) -> dict:\n if int(id) == -1:\n # there is no title\n return None\n playl = self._http_playlist()\n return [title for title in playl if int(title['id']) == int(id)][0]", "def video_id(self):\n # type: () -> string_types\n return self._video_id", "def fetch_pyvideo_pk(self):\n url = 'http://pyvideo.org/search?models=videos.video&q={0}'.format(self.full_name.replace(\" \", \"+\"))\n soup = BeautifulSoup(requests.get(url).content).findAll(\"a\")\n if soup:\n for link in soup:\n if link.string == self.full_name:\n self.pyvideo_pk = link.get('href').split('/')[2]\n self.save()\n return self.pyvideo_pk\n self.pyvideo_pk = None\n self.save()\n return None", "def get_movie_id(self) -> str:\n return self.movie.id", "def getTitle(self):\n cmdId = self.executeCommand(Command.GET_TITLE)\n return cmdId", "def media_title(self):\n return self._table.active_track.name if self._table.active_track else None" ]
[ "0.73725605", "0.73459566", "0.68171036", "0.6612213", "0.6597776", "0.6552867", "0.6513855", "0.6465385", "0.64621156", "0.6405156", "0.63849044", "0.6364947", "0.63489693", "0.63430095", "0.6326501", "0.63252646", "0.6307263", "0.6270719", "0.6268202", "0.62612736", "0.62375826", "0.62258726", "0.62195796", "0.6210881", "0.6193627", "0.6174004", "0.6160619", "0.61492926", "0.6138391", "0.6127009" ]
0.870934
0
Updates the view count for the corresponding video ID in the VIDEOS table.
def update_view_count(self, video_ID): #WORKS try: self.cur.execute("UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \"{}\"".format(video_ID)) # Adds 1 to the existing value. self.db.commit() except: self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_view(self):\n self.count_views += 1\n self.save(update_fields=['count_views'])", "def viewedVideo(videoId):\n\n if videoId in movieViewCounts:\n movieViewCounts['videoId'] += 1\n rearrangeMovieArray()\n else:\n movieViewCounts[videoId] = movieViewCounts.get(videoId, 0) + 1\n moviesRanked.append(videoId)", "def increase_view_count(self):\n try:\n self.view_counter += 1\n self.save(update_fields=['view_counter'])\n except:\n warnings.warn(\"Unable to increase view count for advert {}\".format(self.pk))", "def increment_views(self):\n self.views += 1\n self.save()", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def _increment_viewcount(model, model_id: int, request):\n object_key = model.__name__ + ':' + str(model_id)\n\n redis = get_redis_connection('traffic_stats')\n view_count = redis.get(object_key)\n if not view_count:\n # Cache miss. Get the view count from the database and cache it.\n try:\n view_count = int(model.objects.get(identifier=model_id).view_count)\n except ObjectDoesNotExist:\n # If the object doesn't even exist in the database, don't track it.\n return\n except FieldDoesNotExist:\n log.error(\n 'Cannot track model {} because it has no view_count field. '\n 'Views for this model will be lost.'.format(model.__name__)\n )\n return -1\n redis.set(object_key, view_count)\n else:\n view_count = int(view_count)\n\n # Only increment the view count if the user has not visited the resource in\n # the last few minutes. Prevents metrics gaming shenanigans.\n ip = _get_user_ip(request)\n if not _is_recent_visitor(ip, object_key):\n redis.incr(object_key)\n view_count += 1\n _mark_recent_visitor(ip, object_key)\n\n # Update the last access time of the model.\n # Store in a sorted set so we can easily find the oldest keys.\n timestamp = time.time()\n redis.execute_command(\n 'ZADD model-last-accessed {} {}'.format(timestamp, object_key)\n )\n return view_count", "def update_watched(self, username, video_ID): #WORKS\n try:\n done = self.cur.execute(\"SELECT * FROM watched WHERE username = \\\"{}\\\" AND video_ID = \\\"{}\\\"\".format(username, video_ID))\n if done == 1: # If the query was successful, one row exists\n self.cur.execute(\"UPDATE watched SET count = count + 1 WHERE username = \\\"{}\\\" AND video_ID = \\\"{}\\\"\".format(username, video_ID))\n try:\n self.cur.execute(\"CALL add_to_fav(\\\"{}\\\", \\\"{}\\\")\".format(video_ID, username))\n except:\n pass\n if done == 0: # If the query was unsuccessful, row does not exist.\n count = 1\n self.cur.execute(\"INSERT INTO watched VALUES(\\\"{}\\\", \\\"{}\\\", {})\".format(video_ID, username, count))\n self.db.commit()\n except:\n self.db.rollback()", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def update_count(self):\n pass", "def update_count(self, model, view):\r\n view.SetLabel(str(len(model)))", "def SaveVideoStatus(request):\n data = request.data\n task_status = False\n up = UserProfile.objects.get(emp_id=request.data['emp_id'])\n if data['course_id'] == '2':\n if (up.c2cm < 4) and (int(data['video_id']) == up.c2cm):\n up.c2cm+=1\n up.save()\n task_status = True\n return Response(data={'status': task_status}, status=status.HTTP_200_OK)", "def update_view_times(app):\n app.logger.info('Scheduler update_view_times running: %s' % post_view_times_counter)\n d = dict(post_view_times_counter)\n post_view_times_counter.clear()\n for k, 
v in d.items():\n p = Post.find_one({'_id': k})\n if p:\n try:\n p.viewTimes += v\n p.save()\n except:\n app.logger.exception('Failed when updating the viewTime for album %s' % p._id)", "def change_video(self, n_vid):\n self.video_model.change_video(self.video_model.dyad, n_vid)", "def update_count(self):\n pass # Do nothing", "def upload_video(self, video_ID, username, title): #WORKS\n try:\n view_count = 0\n self.cur.execute(\"INSERT INTO videos VALUES(\\\"{}\\\", \\\"{}\\\", \\\"{}\\\", {}, NULL)\".format(video_ID, title, username, view_count))\n self.db.commit()\n except:\n self.db.rollback()", "def update_comments(self):\n self.nb_comments = self.comments.count()\n self.save()", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def test_api_video_update_detail_token_user_id(self):\n video = factories.VideoFactory()\n original_id = video.id\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n data = json.loads(response.content)\n data[\"id\"] = \"my new id\"\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.id, original_id)", "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, \"postdetail.html\", {\"post\": post})", "def count_videos(self):\n return len(self.videos)", "def updateDB():\n #Loading data DB from txt file\n with open('dataVideo.txt','r') as f:\n videos_dict = json.load(f)\n #loading the dic into a DF\n DB = pd.DataFrame.from_dict(videos_dict)\n #extract the list of ID:\n id_list = DB['id'].tolist()\n #Using the ID of the video as DF index\n DB.set_index('id', inplace=True)\n videoData = []\n for videoId in id_list:\n print(videoId)\n dic = {}\n page = requests.get('https://www.tiktok.com/embed/v2/'+videoId+'?lang=en')\n tree = html.fromstring(page.content)\n buyers = tree.xpath('//*[@id=\"__NEXT_DATA__\"]/text()')\n jsonData = json.loads(buyers[0])\n if 'videoData' in jsonData['props']['pageProps']:\n dic['id'] = videoId\n dic['commentCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['commentCount']\n dic['likeCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['diggCount']\n dic['playCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['playCount']\n dic['shareCount'] = jsonData['props']['pageProps']['videoData']['itemInfos']['shareCount']\n print(dic)\n videoData.append(dic)\n else:\n print(\"video doesn't exist anymore and was deleted from the DB\")\n DB.drop(videoId, inplace=True)\n \n newDataDF = pd.DataFrame(videoData)\n #setting the index with the id\n newDataDF.set_index('id', inplace=True)\n DB.update(newDataDF)\n #putting back the index as a column to have it in the export\n DB['id'] = DB.index\n #saving DF as json into file\n DB.to_json(r'dataVideo.txt',orient=\"records\")", "def update_video(conn: sqlite3.Connection, cols_vals: dict, verbose=False):\n video_id = cols_vals.pop('id')\n query_string = 
generate_unconditional_update_query(list(cols_vals.keys()))\n values = list(cols_vals.values())\n values.append(video_id)\n if execute_query(conn, query_string, tuple(values)):\n if verbose:\n logger.info(f'Updated video {video_id!r}')\n return True", "def test_model_updating_works_properly(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n\r\n tm.count = 100\r\n tm.save()\r\n\r\n tm.count = 80\r\n tm.save()\r\n\r\n tm.count = 60\r\n tm.save()\r\n\r\n tm.count = 40\r\n tm.save()\r\n\r\n tm.count = 20\r\n tm.save()\r\n\r\n tm2 = TestModel.get(tm.vid)\r\n self.assertEquals(tm.count, tm2.count)", "def updateVisits(self):\n self.nVisits += 1", "def test_basic(self):\n with build_video(self.user, votes=0) as video:\n votes = video.votes\n add_vote(video)\n video = Video.objects.get(pk=video.pk)\n eq_(video.votes, votes + 1)", "def addVote(self, videoId):\n\t\tglobal tt_play_session\n\t\turi = \"{}/rooms/{}/playlist/votes\".format(tt_base_uri, self.room)\n\t\turi_args = tt_base_args\n\t\turi_args[\"mediaId\"] = videoId\n\t\tstatus_code = None\n\t\ttries = 0\n\t\twhile tries < 3 and (not status_code or status_code != 201):\n\t\t\tr = requests.post(uri, json=uri_args, cookies={\"PLAY_SESSION\":self.play_session, \"__uvt\":\"\"})\n\t\t\tprint(\"vote: status code:\", r.status_code)\n\t\t\tstatus_code = r.status_code\n\t\t\ttries += 1\n\t\t\tif status_code != 201 and tries < 3:\n\t\t\t\ttime.sleep(0.25 * tries)\n\t\t\t\tprint(\"retrying...\")", "def test_api_video_update_detail_token_user_other_video(self):\n video_token = factories.VideoFactory()\n video_update = factories.VideoFactory(title=\"my title\")\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video_token.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n data = {\"title\": \"my new title\"}\n response = self.client.put(\n f\"/api/videos/{video_update.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 403)\n video_update.refresh_from_db()\n self.assertEqual(video_update.title, \"my title\")", "def test_video_update(self):\n updated = timezone.now()\n v1 = make_video(media_id='1234', updated=updated)\n v2 = make_video(media_id='5678', updated=updated)\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n\n # Update video 1 but not 2\n v1['updated'] += 1\n v1['title'] += 'new title'\n set_resources_and_sync([v1, v2])\n\n new_i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n new_i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n\n # Only video 1 was updated\n self.assertGreater(new_i1.updated_at, i1.updated_at)\n self.assertEqual(new_i1.title, v1['title'])\n self.assertEqual(new_i2.updated_at, i2.updated_at)", "def get_video_views(self, response):\n return response.css(\".watch-view-count::text\")\\\n .extract_first(default='')", "def test_increment_view_count(self):\n shortUrl = 'increment_url'\n url = 'http://www.google.com'\n author = 'author'\n self.urlShortener.saveUrl(shortUrl, url, author)\n\n self.urlShortener.increment_visited_count(shortUrl)\n self.urlShortener.increment_visited_count(shortUrl)\n\n doc = self.urlShortener.get_doc_from_shorturl(shortUrl)\n self.assertEqual(int(doc['clicks']), 2)\n\n self.urlShortener.removeUrl(shortUrl)" ]
[ "0.7064346", "0.6721617", "0.6701861", "0.66842705", "0.6578619", "0.6062659", "0.60547465", "0.5957374", "0.570349", "0.55510974", "0.55167", "0.5509747", "0.55055314", "0.54816747", "0.54619527", "0.54047817", "0.53531873", "0.53466165", "0.5299417", "0.52944577", "0.523298", "0.5223692", "0.520196", "0.5180334", "0.51792544", "0.51738113", "0.5158344", "0.5135979", "0.5125408", "0.51185006" ]
0.85083866
0
Adds the username and video ID to the WATCHED table.
def update_watched(self, username, video_ID): #WORKS try: done = self.cur.execute("SELECT * FROM watched WHERE username = \"{}\" AND video_ID = \"{}\"".format(username, video_ID)) if done == 1: # If the query was successful, one row exists self.cur.execute("UPDATE watched SET count = count + 1 WHERE username = \"{}\" AND video_ID = \"{}\"".format(username, video_ID)) try: self.cur.execute("CALL add_to_fav(\"{}\", \"{}\")".format(video_ID, username)) except: pass if done == 0: # If the query was unsuccessful, row does not exist. count = 1 self.cur.execute("INSERT INTO watched VALUES(\"{}\", \"{}\", {})".format(video_ID, username, count)) self.db.commit() except: self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_video(self, video_ID, username, title): #WORKS\n try:\n view_count = 0\n self.cur.execute(\"INSERT INTO videos VALUES(\\\"{}\\\", \\\"{}\\\", \\\"{}\\\", {}, NULL)\".format(video_ID, title, username, view_count))\n self.db.commit()\n except:\n self.db.rollback()", "def flag_ID(self, username, video_ID):\n done = self.cur.execute(\"SELECT video_ID from flags WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n if done == 0: # Not yet flagged by any user.\n try:\n self.cur.execute(\"INSERT INTO flags VALUES(\\\"{}\\\", \\\"{}\\\")\".format(video_ID, username))\n self.db.commit()\n except:\n self.db.rollback()", "def new_watched_movie(username: str, movie_id: int) -> bool:\n with connection:\n all_movies = connection.execute(MOVIES_IDS, (movie_id,)).fetchone()\n all_usernames = connection.execute(USERS_IDS, (username,)).fetchone()\n if all_usernames is not None and all_movies is not None:\n connection.execute(ADD_WATCHED_MOVIE, (all_usernames[0], movie_id))\n return True\n return False", "def get_watched(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\"\".format(username))\n watched_video_IDs = []\n for ID in self.cur.fetchall():\n watched_video_IDs.append(ID[0])\n return watched_video_IDs", "def adds_yt_video_info_to_db(contains_yt_playlist_info):\n \n # parameter from creates_yt_video_playlist(yt_playlist_query):\n # in youtube.py\n\n print 'SEED.PY, adds_yt_video_info_to_db, contains_yt_playlist_info ', contains_yt_playlist_info\n\n for video_item in contains_yt_playlist_info:\n yt_video_id = video_item['yt_video_id']\n video_title = video_item['video_title']\n video_thumbnail = video_item['video_thumbnail']\n searched_artist = video_item['searched_artist']\n searched_song = video_item['searched_song']\n artist_id = video_item['artist_id']\n\n does_video_exist = db.session.query(exists().where(YouTubeVideo.yt_video_id == yt_video_id)).scalar()\n\n if does_video_exist:\n print \"Video in db\"\n else:\n print \"Video doesn't exist. 
Ading to db\"\n video_info = YouTubeVideo(yt_video_id=yt_video_id,\n video_title=video_title,\n video_thumbnail=video_thumbnail,\n searched_artist=searched_artist,\n searched_song=searched_song,\n artist_id=artist_id)\n\n db.session.add(video_info)\n db.session.flush()\n print \"youtube, adds_yt_song_results_to_db, Video and artist_id successfully flushed to database.\"", "def update_video_and_gloss_by_new_upload(video, video_path, thumbnail_path):\n video.video_path = video_path\n video.thumbnail = thumbnail_path\n\n if video.user_id == config.SAMPLE_VIDEO_USER_ID:\n video.gloss.sample_video_id = video.id\n video.status = VideoStatus.SAMPLE\n else:\n video.review_summary = INITIAL_SUMMARY\n video.status = VideoStatus.PENDING_APPROVAL\n video.gloss.pending_approval_video_count += 1\n\n # hack for sample recording yinhuan\n if user_id == 36:\n video.status = VideoStatus.APPROVED\n video.gloss.pending_approval_video_count -= 1\n video.gloss.approved_video_count += 1\n\n video.gloss.save()\n video.save()", "def put_user_in_table(user_dict):\n # Connect to database\n conn = psycopg2.connect(DATABASE_URL, sslmode='require')\n # Open a cursor to perform db operations\n cur = conn.cursor()\n # Insert the user if they do not exist\n if user_dict[\"is_new_user\"] == True:\n cur.execute(\"\"\"\n INSERT INTO test (user_id, username, id_last_message_sent, \n id_last_message_stickered, count_since_last_stickered)\n VALUES (%s, %s, %s, %s, %s)\n ;\n \"\"\",\n (\n user_dict[\"user_id\"],\n user_dict[\"username\"], \n user_dict[\"id_last_message_sent\"], \n user_dict[\"id_last_message_stickered\"],\n user_dict[\"count_since_last_stickered\"]\n )\n )\n # Update the user if they do exist\n else:\n cur.execute(\"\"\"\n UPDATE test \n SET username = %(username)s, \n id_last_message_sent = %(IdLMS)s, \n id_last_message_stickered = %(IdLMSt)s,\n count_since_last_stickered = %(CSLSt)s\n WHERE user_id = %(user_id)s\n ;\n \"\"\",\n {\n \"user_id\" : user_dict[\"user_id\"],\n \"username\" : user_dict[\"username\"], \n \"IdLMS\" : user_dict[\"id_last_message_sent\"], \n \"IdLMSt\" : user_dict[\"id_last_message_stickered\"],\n \"CSLSt\" : user_dict[\"count_since_last_stickered\"]\n }\n )\n # Commit and close connection\n conn.commit()\n cur.close()\n conn.close()", "def post_movie():\n id = request.params.get('id')\n mongo_mgr.add_film_to_user(id, aaa.current_user.id)", "def _insert_into_player_queue_spreadsheet(self, username, times_played, player_queue):\n spreadsheet_name, _ = self.spreadsheets['player_queue']\n gc = gspread.authorize(self.credentials)\n sheet = gc.open(spreadsheet_name)\n ws = sheet.worksheet('Player Queue')\n\n records = ws.get_all_records()\n records = records[1:] # We don't want the blank space\n for i, tup in enumerate(player_queue):\n try:\n if records[i]['User'] != tup[0]:\n ws.insert_row([username, times_played], index=i+3)\n break\n except IndexError:\n ws.insert_row([username, times_played], index=i+3)", "def update_video(conn: sqlite3.Connection, cols_vals: dict, verbose=False):\n video_id = cols_vals.pop('id')\n query_string = generate_unconditional_update_query(list(cols_vals.keys()))\n values = list(cols_vals.values())\n values.append(video_id)\n if execute_query(conn, query_string, tuple(values)):\n if verbose:\n logger.info(f'Updated video {video_id!r}')\n return True", "def _insert_user_title(cursor, user_id, title_id):\n # Add title id and user id to new table\n cursor.execute(dbq.INSERT_TITLE_USER, [title_id, user_id])\n logger.debug(\"Linked title_id '{}' and user_id 
'{}'\".format(title_id, user_id))", "def remote_addUsertoROSProxy(self, userID, key):\r\n # TODO: Should this be deferred to a separate thread due to flock,\r\n # which is a blocking call?\r\n with open(self._dbFile, \"a\") as bridgefile:\r\n fcntl.flock(bridgefile.fileno(), fcntl.LOCK_EX)\r\n bridgefile.write('{0}:{1}\\n'.format(userID, key))", "def add(self, username, title, body):\n epoch = int(time.time())\n q = \"INSERT INTO profiles (username, title, body, epoch) \" +\\\n \"VALUES (?, ?, ?, ?)\"\n try:\n self._query(q, (username, title, body, epoch), fetch='none')\n except Exception, e:\n raise e", "def create_watchlist(name, user_id):\n watchlist = Watchlist(\n name = name, \n user_id = user_id\n )\n \n db.session.add(watchlist)\n db.session.commit()\n\n return watchlist", "def add_to_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='add')", "def addPlayer(self, steamid, name):\r\n self.execute(\"INSERT INTO Player (steamid, popup, credits, name, lastconnected) VALUES (?,?,?,?,?)\", steamid, int(popupStatus), int(startCredits), name, int(time.time()))\r\n return self.cursor.lastrowid", "def set_display_name(self, request, video_id=None):\n serializer = serializers.LiveSessionDisplayUsernameSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(\n {\"detail\": \"Invalid request.\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n video_id = self.get_related_video_id()\n video = get_object_or_404(Video, pk=video_id)\n\n try:\n update_fields = {\n \"display_name\": serializer.validated_data[\"display_name\"],\n }\n if self.request.resource and is_lti_token(\n self.request.resource.token\n ): # LTI context\n token_user = self.request.resource.user\n consumer_site = get_object_or_404(\n ConsumerSite,\n pk=self.request.resource.token.payload[\"consumer_site\"],\n )\n # Update email only if it's defined in the token user\n if \"email\" in token_user:\n update_fields.update({\"email\": token_user[\"email\"]})\n\n # Update username only it's defined in the token user\n if \"username\" in token_user:\n update_fields.update({\"username\": token_user[\"username\"]})\n\n livesession, _created = LiveSession.objects.update_or_create(\n consumer_site=consumer_site,\n lti_id=self.request.resource.context_id,\n lti_user_id=token_user.get(\"id\"),\n video=video,\n defaults=update_fields,\n )\n elif self.request.resource: # Anonymous context\n if not serializer.validated_data.get(\"anonymous_id\"):\n return Response(\n {\"detail\": \"Invalid request.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n livesession, _created = LiveSession.objects.update_or_create(\n anonymous_id=serializer.validated_data[\"anonymous_id\"],\n video=video,\n defaults=update_fields,\n )\n else: # Standalone context\n livesession, _created = LiveSession.objects.update_or_create(\n video=video,\n user_id=self.request.user.id,\n defaults=update_fields,\n )\n return Response(self.get_serializer(livesession).data, status.HTTP_200_OK)\n\n except IntegrityError as error:\n if \"livesession_unique_video_display_name\" in error.args[0]:\n return Response(\n {\"display_name\": \"User with that display_name already exists!\"},\n status=status.HTTP_409_CONFLICT,\n )\n\n raise error", "def insert_username(song: str, username: str) -> str:\n return song.replace('XXXXX', username)", "def registerPlayer(name):\n print \"\\n\"\n print \"\\t\\t\\tRegistering....\\t\", name\n cur2 = conn.cursor()\n\n # Since ID column in players is auto-increment. 
Only 'Name' is specified.\n SQL = \"INSERT INTO players(NAME) VALUES ( %s );\"\n data = (name, )\n cur2.execute(SQL, data) # Note: no % operator\n cur2.execute(\"commit;\")\n cur2.execute(\"\\t\\t\\tSELECT * from players;\")\n\n print \"\\t\\t\\tRegistered!!\\n\"", "def registerPlayer(name):\n db, cursor = connect()\n cursor.execute(\"INSERT INTO players (name, wins, matches) VALUES (%s, 0, 0)\" , (name, ) ) \n db.commit() \n db.close()", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def add_user(uid):\n if \"drop tables\" in uid:\n raise DropTablesError(\"Drop Tables command detected in input commands - Print Error Message\")\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}{}'.format(DB_DIRECTORY, DB_NAME))\n user_table_name = uid #This might be changed later\n cursor = db.cursor()\n cursor.execute(\"INSERT INTO user_ids VALUES (NULL, ?,?)\",(uid, user_table_name))\n variable_table_command = '''CREATE TABLE {} (row_id INTEGER PRIMARY KEY AUTOINCREMENT, song_notes TEXT, author_name TEXT, creation_date TEXT, project_name TEXT)'''.format(user_table_name)\n cursor.execute(variable_table_command)\n db.commit()\n cursor.close()\n db.close()", "def addUserEntry(userName):\n connector = appEngine.connect()\n rows = connector.execute('SELECT count(*) FROM user').rowcount\n newUserId = 'u' + str(ceil(time.time()))\n connector.execute('INSERT INTO user(userID,userName) VALUES(?, ?)', (newUserId, userName))", "def add_in_secondary(self, login, app, id, lock):\r\n self.curs.execute(f\"\"\"INSERT INTO {login} VALUES (?, ?, ?)\"\"\", ( app, id, lock))\r\n self.conn.commit()", "def registerPlayer(name):\n # cn=name\n # title='playerName'\n # data=[title,cn]\n DB = connect()\n c = DB.cursor()\n #cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s)\",*/\n #c.execute(\"INSERT INTO tournament (playerName) values ('al pachino2') \")\n #c.execute(\"INSERT INTO tournament name values (%s)\", name)\n #cur.execute('INSERT INTO %s (day, elapsed_time, net_time, length, average_speed, geometry) VALUES (%s, %s, %s, %s, %s, %s)', (escaped_name, day, ))\n c.execute(\"INSERT INTO tournament VALUES (%s)\", (name,))\n DB.commit()\n DB.close()", "def add_to_db(table, user_list):\n\n client, db = open_db_connection()\n db[table].remove()\n for user in user_list:\n db[table].insert({\"net_id\": user.replace(\"\\r\\n\", \"\").encode(\"utf-8\")})\n close_db_connection(client)", "def register_player(name):\n\n \"\"\" use bleach to clean the name of the registered user \"\"\"\n clean_name = bleach.clean(name, strip=True)\n DB = connect()\n c = DB.cursor()\n c.execute(\"INSERT INTO players (player_name) VALUES (%s)\", (clean_name,))\n DB.commit()\n DB.close()", "def add_movie_to_db(self):\n MOVIE.insert_one({\n \"title\": self.title,\n \"year\": self.year\n })", "def add_player_v1(self, dbsession, player):\n assert player\n if self.player1:\n self.player2 = player\n table_game = TableGame(\n game=self.id,\n player_one=self.player1,\n player_two=self.player2,\n one_won=True,\n two_won=True)\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, self.board))),\n move_num=self._board.move_count,\n player=self.active_player(),\n game=self.id)\n table_board.game_link.append(table_game)\n dbsession.add(table_game)\n dbsession.add(table_board)\n self.poke_player(False)\n return {}\n self.player1 = player\n return {}", "def on_yt_video_queue_add(self, data):\n # 
video_time = data[6]\n # queue number? = data[7]\n print ('%s added %s (%s) to the video queue.' % (data[3], data[5], data[4]))" ]
[ "0.6086152", "0.57009065", "0.56358945", "0.53841454", "0.5195449", "0.5182404", "0.5076552", "0.50528973", "0.5050366", "0.50346893", "0.4994848", "0.4992545", "0.4992251", "0.49108446", "0.49001947", "0.48948443", "0.48926342", "0.4877786", "0.4862681", "0.48542246", "0.4852175", "0.48462528", "0.48399085", "0.4836209", "0.48290825", "0.48244134", "0.48230258", "0.48196763", "0.48183048", "0.48166522" ]
0.6312015
0
Returns the username of the user that uploaded the video with the corresponding video ID.
def get_video_uploader(self, video_ID): #WORKS try: done = self.cur.execute("SELECT uploader FROM videos WHERE video_ID = \"{}\"".format(video_ID)) uploader = self.cur.fetchone()[0] return uploader except: return "Error getting username"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_username(self):\n \n if self.livestream_user:\n return self.livestream_user\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path_term = p.path.split('/')\n \n if len(path_term) == 3:\n if path_term[2] == 'video':\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n return path_term[1]\n if path_term[1] == 'embed':\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n return path_term[2]\n \n return ''", "def get_username_by_id(self, id):\n return User.query.get(id).username", "def username(self, instance):\r\n return instance.user.username", "def get_username(self, tg_user_id):\n\n data = {\n 'user_id': tg_user_id\n }\n result = self._send_data('getUser', data)\n if result.update:\n return result.update.get('username','')", "def get_best_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\" ORDER BY view_count DESC\".format(username))\n return self.cur.fetchone()[0]", "def get_username(self, obj):\n return obj.user.username", "def get_username(self, obj):\n return obj.user.username", "def video_id(self) -> str:\r\n return self._video_id", "def get_fav_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\" ORDER BY CAST(count as decimal) DESC\".format(username))\n return self.cur.fetchone()[0]", "def username(user_id):\n return UserIndex.instance().name(user_id)", "def get_user_from_post(media_obj):\n if media_obj:\n user_id = media_obj.get('user', {}).get('pk')\n user_name = media_obj.get('user', {}).get('username')\n return user_id, user_name\n return", "def username(self):\n return self.user.username", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def get_video_channel_name(self, response):\n return response.css(\"div.yt-user-info\")\\\n .extract_first(default='')", "def username(self, login_failures):\n return login_failures.user.username", "def _get_username(user_id):\n username = select(u.username for u in UserInformationData if u.user_id == user_id).first()\n\n return username", "def get_uploaded(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n uploaded_video_IDs = []\n for ID in self.cur.fetchall():\n uploaded_video_IDs.append(ID[0])\n return uploaded_video_IDs", "def username(self):\n if self._username is not None:\n return self._username\n # Try to get a username from the userprofile\n try:\n self._username = self.userprofile.user.username\n except UserProfile.DoesNotExist:\n # User profile does not exist\n return None\n return self._username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_username(self):\n return self.username", "def get_video_id(self, obj):\n return obj.video.id", "def get_identifier(self, request):\n return request.user.username", "def get_identifier(self, request):\r\n return request.user.username", "def get_id(self):\r\n return self.username", "def get_user_id(self, details, response):\n return details['username']", "def get_username(self):\r\n return self.username", "def set_display_name(self, request, video_id=None):\n serializer = 
serializers.LiveSessionDisplayUsernameSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(\n {\"detail\": \"Invalid request.\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n video_id = self.get_related_video_id()\n video = get_object_or_404(Video, pk=video_id)\n\n try:\n update_fields = {\n \"display_name\": serializer.validated_data[\"display_name\"],\n }\n if self.request.resource and is_lti_token(\n self.request.resource.token\n ): # LTI context\n token_user = self.request.resource.user\n consumer_site = get_object_or_404(\n ConsumerSite,\n pk=self.request.resource.token.payload[\"consumer_site\"],\n )\n # Update email only if it's defined in the token user\n if \"email\" in token_user:\n update_fields.update({\"email\": token_user[\"email\"]})\n\n # Update username only it's defined in the token user\n if \"username\" in token_user:\n update_fields.update({\"username\": token_user[\"username\"]})\n\n livesession, _created = LiveSession.objects.update_or_create(\n consumer_site=consumer_site,\n lti_id=self.request.resource.context_id,\n lti_user_id=token_user.get(\"id\"),\n video=video,\n defaults=update_fields,\n )\n elif self.request.resource: # Anonymous context\n if not serializer.validated_data.get(\"anonymous_id\"):\n return Response(\n {\"detail\": \"Invalid request.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n livesession, _created = LiveSession.objects.update_or_create(\n anonymous_id=serializer.validated_data[\"anonymous_id\"],\n video=video,\n defaults=update_fields,\n )\n else: # Standalone context\n livesession, _created = LiveSession.objects.update_or_create(\n video=video,\n user_id=self.request.user.id,\n defaults=update_fields,\n )\n return Response(self.get_serializer(livesession).data, status.HTTP_200_OK)\n\n except IntegrityError as error:\n if \"livesession_unique_video_display_name\" in error.args[0]:\n return Response(\n {\"display_name\": \"User with that display_name already exists!\"},\n status=status.HTTP_409_CONFLICT,\n )\n\n raise error", "def get_username(self, master_id):\r\n return self._handler.get_username(master_id)" ]
[ "0.641162", "0.6297686", "0.62112385", "0.6183648", "0.61202574", "0.6118284", "0.6118284", "0.60755575", "0.60589427", "0.6056746", "0.6052009", "0.60495865", "0.60349935", "0.60280395", "0.60260934", "0.59834033", "0.59371436", "0.592973", "0.5911973", "0.5911973", "0.5911973", "0.5910871", "0.5907509", "0.5905671", "0.5900743", "0.5876974", "0.58686894", "0.5856723", "0.5843484", "0.583008" ]
0.7299416
0
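A minimal sketch of the uploader lookup in the record above rewritten with a bound parameter instead of str.format interpolation; the cursor attribute, table and column names are taken from the record's code, while the %s placeholder style is an assumption (MySQL-style DB-API drivers):

    def get_video_uploader(self, video_ID):
        # Same lookup as the record's document, but the ID is passed as a
        # query parameter so the driver handles quoting and escaping.
        self.cur.execute(
            "SELECT uploader FROM videos WHERE video_ID = %s", (video_ID,)
        )
        row = self.cur.fetchone()
        return row[0] if row else None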
Returns the date when the video was uploaded from VIDEOS table.
def get_upload_date(self, video_ID): self.cur.execute("SELECT upload_date FROM videos WHERE video_ID = \"{}\"".format(video_ID)) return self.cur.fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def creation_date_video(path_to_file):\n print(\"Last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n print(\"Created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n # return os.path.getctime(path_to_file)", "def get_video_publishing_date(self, response):\n return response.css(\".watch-time-text\").extract_first(default='')", "def upload_date(self):\n return self.proto.details.appDetails.uploadDate", "def get_uploaded_date_long(self):\n return self.uploaded_date_long", "def getMatchDate(self) -> str:\n return self.__getDataField(\"date\")", "def date(self):\n return self.status.created_at", "def last_video(self) -> str:\n return max(glob.glob(VIDEOS_DIR), key=os.path.getmtime)", "def created(self):\n\n c = self.db.cursor()\n\n c.execute(\"\"\"\n SELECT version, created FROM version\n \"\"\")\n\n rows = c.fetchall()\n c.close()\n\n if len(rows) == 0:\n raise RuntimeError(\n \"database '%s' does not contain any version information\" % (\n self.dbname))\n\n time = datetime.datetime.strptime(rows[0][1], '%Y-%m-%d %H:%M:%S.%f')\n return rows[0][0], time", "def last_videos_recorded(self) -> list:\n return sorted(glob.glob(VIDEOS_DIR), key=os.path.getmtime)[-20:]", "def version_date(self) -> str:\n\n return self._version_date", "def get_last_image_date(self) -> datetime.datetime:\n\n soup = self.load_page()\n header = soup.select('.posted-on')\n data = header[0].getText()\n return datetime.datetime.strptime(data, \" %A, %B %d, %Y at %I:%M%p\")", "def date_created(self) -> str:\n return pulumi.get(self, \"date_created\")", "def created_date_time(self) -> str:\n return pulumi.get(self, \"created_date_time\")", "def release_date(self):\n for item in self.proto.releaseInfo.item:\n if item.label == 'Released on':\n return item.container.value", "def founding_date(self) -> datetime:\n return self._founding_date", "def get_last_image_date(self) -> datetime.datetime:\n\n soup = self.load_page()\n header = soup.select('#comicwrap > div.comicnav.top > div')\n\n lst = header[0].text.split(\" \")[-3:]\n lst[0] = lst[0][lst[0].find('\\n') + 1:]\n\n return datetime.datetime.strptime(\" \".join(lst), '%B %d, %Y')", "def deletion_date(self) -> str:\n return pulumi.get(self, \"deletion_date\")", "def created_on(self):\n return self.get_time(\"created_on\")", "def date(self):\n return DateTime(self.created)", "def getFirmwareDate(self): \n return self.firmware_date", "def creationTime(self):\n \n if not self.logMessage is None :\n return self.logMessage[\"date\"]", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def media_position_updated_at(self) -> datetime | None:\n if self._device.movie.play_status in KALEIDESCAPE_PLAYING_STATES:\n return utcnow()\n return None", "def get_file_modification_date() -> str:\n file_modification_date = datetime.now().strftime(\"%d.%m.%Y\")\n print(file_modification_date)\n return file_modification_date", "def date(self) -> datetime.datetime:\n return self._data['Date'] - datetime.timedelta(0, float(self.exposuretime), 0)", "def revision_date(self) -> int:\n raise NotImplementedError", "def date_posted(tweet):\n return tweet.created_at.date()", "def date(self):\n return self._push.get('date', None)", "def date(self):\n return self._date" ]
[ "0.67232716", "0.664657", "0.6525809", "0.6451199", "0.60254365", "0.5908486", "0.5774346", "0.577208", "0.5711206", "0.56496966", "0.56353307", "0.56186765", "0.5566246", "0.5565173", "0.55505514", "0.55357367", "0.5500961", "0.5481637", "0.5464294", "0.545051", "0.5447081", "0.54363525", "0.54363525", "0.54292226", "0.54244936", "0.5420998", "0.54032797", "0.54010725", "0.53979206", "0.53906876" ]
0.8272224
0
Returns a random video ID from the VIDEOS table.
def get_random_ID(self): # WORKS self.cur.execute("SELECT video_ID FROM videos ORDER BY RAND() LIMIT 1") # Selects video_ID from 1 random row. return self.cur.fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_random_video(self):\n if self.video_id != None:\n print(f\"Stopping video: {self.video_id}\")\n video_info = self._video_library.get_all_videos()\n a_list = []\n for i in video_info:\n a_list.append(video_info.title)\n a = random.randint(0, len(a_list)-1)\n print(f\"Playing video: {a_list[a]}\")", "def play_random_video(self):\n num_videos = len(self._video_library.get_all_videos())\n videos = self._video_library.get_all_videos()\n random_index = randint(0, num_videos-1)\n self.play_video(videos[random_index].video_id)\n # print(\"play_random_video needs implementation\")", "def video_id(self):\n # type: () -> string_types\n return self._video_id", "def video_id(self) -> str:\r\n return self._video_id", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''", "def get_video_id(self, obj):\n return obj.video.id", "def test_random_uuid(self):\n movie = Movie.objects.get(title='The Two Towers')\n assert isinstance(movie.id, uuid.UUID), ( 'Expected UUID, got %s.' %\n movie.id.__class__.__name__ )\n tt_uuid = str(movie.id)\n self.assertEqual(tt_uuid[14], '4')\n assert tt_uuid[19] in ('8', '9', 'a', 'b'), 'Invalid random UUID.'", "def play_random_video(self):\n if self._current_video:\n self.stop_video()\n videos = [v for v in self._video_library.get_all_videos() if v.flag is None]\n if not videos:\n print(\"No videos available\")\n return\n self.play_video(choice(videos).video_id)", "def get_five_random_IDs(self):\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY RAND() LIMIT 5\")\n IDs = []\n for ID in self.cur.fetchall():\n IDs.append(ID[0])\n return IDs", "def play_random_video(self):\n video = random.choice(self._video_library.get_all_videos())\n self.play_video(video.video_id)", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if path_list[0] == 'v':\n # https://vine.co/v/bjHh0zHdgZT\n return path_list[1]\n \n return ''", "def get_video_id(self, obj):\n return obj.id", "def play_random_video(self):\n #[ expression for item in list if conditional ]\n unflagged_videos = [video for video in self._video_library.get_all_videos() if not video.flagged]\n #ran_num = randrange(len(self._video_library.get_all_videos()))\n if len(unflagged_videos) == 0:\n print(\"No videos available\")\n else:\n ran_num = randrange(len(unflagged_videos))\n #random_video_id = self._video_library.get_all_videos()[ran_num]._video_id\n random_video_id = unflagged_videos[ran_num]._video_id\n self.play_video(random_video_id)", "def fetch_pyvideo_pk(self):\n url = 'http://pyvideo.org/search?models=videos.video&q={0}'.format(self.full_name.replace(\" \", \"+\"))\n soup = 
BeautifulSoup(requests.get(url).content).findAll(\"a\")\n if soup:\n for link in soup:\n if link.string == self.full_name:\n self.pyvideo_pk = link.get('href').split('/')[2]\n self.save()\n return self.pyvideo_pk\n self.pyvideo_pk = None\n self.save()\n return None", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "def get_video_id(url):\n\n if not url:\n return \"\"\n\n # If URL is embedded\n if \"embed\" in url:\n return url.split(\"/\")[-1]\n\n parse_result = urlparse(url)\n query = parse_qs(parse_result.query)\n return query[\"v\"][0]", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n \n if p.path.startswith('/v/') or p.path.startswith('/broadcast/'):\n path = p.path.split('/')\n if len(path) == 3:\n return p.path.split('/')[-1].replace('.live', '')\n \n return ''", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def select(self, video_id):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n WHERE id = ?\n \"\"\"\n\n result = Model.execute(query, (video_id,))\n\n return result.fetchone()", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n if self.res.get('slideshow_id'):\n return self.res.get('slideshow_id')\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/slideshow/embed_code')):\n # http://www.slideshare.net/slideshow/embed_code/1293644\n return path_list[2]\n elif len(path_list) == 2 and p.path.startswith('/swf'):\n # return -1 when url is like : http://static.slideshare.net/swf/ssplayer2.swf?doc=working-dogs-1201800078341935-2\n # FixMe :slideshare oembed api doesnt support this kind of url\n return -1\n return ''", "def tubeid():\n return binascii.hexlify(os.urandom(12))", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.youtube_fix_url(self.original_url))\n if p.path == '/watch':\n # Url of type http://www.youtube.com/watch?v=KRaeHxwZvms&feature=g-u-u&context=G2b00124FUAAAAAAAAAA\n #logger.debug('is a watch')\n params = cgi.parse_qs(p.query)\n if 'v' in params:\n return params['v'][0]\n elif p.fragment.startswith('/watch?v='):\n # sample. 
http://m.youtube.com/#/watch?v=ZXkW1-HdRC8\n params = cgi.parse_qs(p.fragment)\n if '/watch?v' in params:\n return params['/watch?v'][0]\n elif p.path.startswith('/v/') or p.path.startswith('/embed/'):\n path = p.path.split('/')\n return path[-1]\n elif p.netloc == 'youtu.be':\n return p.path[1:]\n elif re.match('(.{1}/){3}([\\w+-_^/]+)', p.fragment):\n parts = p.fragment.split('/')\n return parts[-1]\n return ''", "def get_movie_id(self) -> str:\n return self.movie.id", "def get_video(self, video_id):\n uri = 'videos/' + video_id\n return self.make_request(uri)", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def get_video_id_from_link(link):\n query_string = urlparse.urlparse(link).query\n qs_params = urlparse.parse_qs(query_string)\n return qs_params['v'][0]", "def regex_video_id(param):\n miregex = '(.*)v=(.*)&?(.*)'\n vid = None\n #log.debug(\"get video id: \" + repr(param))\n try:\n rs = re.search(miregex, param)\n params = rs.group(2)\n #log.debug(\"params \" + params)\n vid = params\n #id = params.split(\"&\")[0] if params != None and len(params)>12 else params\n except Exception as e:\n #log.debug(\"HURU\")\n #log.exception(e)\n pass # yes, we pass\n return vid", "def vid(self):\n return self._id", "def get_video(self, video_id):\n return self._videos.get(video_id, None)" ]
[ "0.71237874", "0.6802794", "0.66620547", "0.6631764", "0.65763164", "0.652057", "0.6504283", "0.6471329", "0.6438538", "0.64067775", "0.6335452", "0.62914747", "0.6240138", "0.622437", "0.6203106", "0.6133006", "0.6125536", "0.61106116", "0.61035943", "0.6083071", "0.60749495", "0.60379136", "0.6032911", "0.5972331", "0.5942209", "0.59264284", "0.5922463", "0.58956534", "0.5853642", "0.58436596" ]
0.8383894
0
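The random-ID record above leans on MySQL's ORDER BY RAND(), which sorts the whole table before taking one row; as a hedged aside, a different sampling technique (random OFFSET over a counted table, same assumed cursor attribute, hypothetical helper name not taken from the dataset) looks like this:

    import random

    def get_random_ID_by_offset(self):
        # Count the rows, pick a random offset, and fetch exactly one ID.
        # The OFFSET is bound as an integer parameter (assumed MySQL-style
        # driver, which renders integers unquoted).
        self.cur.execute("SELECT COUNT(*) FROM videos")
        total = self.cur.fetchone()[0]
        if not total:
            return None
        offset = random.randrange(total)
        self.cur.execute(
            "SELECT video_ID FROM videos LIMIT 1 OFFSET %s", (offset,)
        )
        return self.cur.fetchone()[0]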
Returns a list of video IDs watched by the user from the WATCHED table.
def get_watched(self, username): # WORKS self.cur.execute("SELECT video_ID FROM watched WHERE username = \"{}\"".format(username)) watched_video_IDs = [] for ID in self.cur.fetchall(): watched_video_IDs.append(ID[0]) return watched_video_IDs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_watchlists(user_id):\n # user = User.query.get(user_id)\n\n watchlists = Watchlist.query.filter(Watchlist.user_id == user_id).all()\n\n return watchlists", "def view_watched_movies(username: str) -> list[tuple]:\n with connection:\n return connection.execute(VIEW_WATCHED_MOVIES, (username,)).fetchall()", "def get_users_who_watched(movie_ids: List[int],\n movie_users: MovieUserDict) -> List[int]:\n lst = []\n for m in movie_users:\n if m in movie_ids:\n for u in movie_users[m]:\n if u not in lst:\n lst.append(u)\n return lst", "def completed_video_ids(conn, workerId):\n query = \"SELECT video_id FROM completed_tasks WHERE worker_id = '{}'\".format(workerId)\n return [idx[0] for idx in conn.execute(query).fetchall()]", "def get_uploaded(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n uploaded_video_IDs = []\n for ID in self.cur.fetchall():\n uploaded_video_IDs.append(ID[0])\n return uploaded_video_IDs", "def video_ids(self):\n return self._sorted_ids", "def get_videos(self, user):\n raise NotImplementedError", "def fetch_video_list_ids(self, params):\n guid = self.netflix_session.user_data.get('guid')\n cached_list = self.video_list_cache.get(guid, None)\n if cached_list is not None:\n self.kodi_helper.log(msg='Serving cached list for user: ' + guid)\n return cached_list\n video_list_ids_raw = self.netflix_session.fetch_video_list_ids()\n\n if 'error' in video_list_ids_raw:\n return video_list_ids_raw\n video_list = self.netflix_session.parse_video_list_ids(\n response_data=video_list_ids_raw)\n return video_list", "def get_flagged(self):\n self.cur.execute(\"SELECT video_ID FROM flags\")\n flagged_IDs = []\n for ID in self.cur.fetchall():\n flagged_IDs.append(ID[0])\n return flagged_IDs", "def video_list(self) -> list:\n return self._video_list", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_youtube_video_ids(keyword: str, limit: int = 10) -> List[str]:\n video_search = VideosSearch(keyword, limit=limit)\n results = video_search.result()['result']\n return [r['id'] for r in results]", "def list_games(self, user_id: UUID) -> Iterable[UUID]:\n return (game_id for game_id, game in self.games.items() if user_id in game.players)", "def get_youtube_ids():\n global _id_list\n if _id_list is None:\n all_videos_in = urllib2.urlopen(\"http://www.khanacademy.org/api/internal/videos/localized/all\")\n try:\n all_videos = simplejson.load(all_videos_in)\n finally:\n all_videos_in.close()\n\n # Now get our CS videos that are not part of the content topic tree,\n # but are part of the scratchpad tutorials\n all_talkies_in = urllib2.urlopen(\n \"https://www.khanacademy.org/api/internal/talkthroughs/youtube_ids\")\n try:\n all_talkies = simplejson.load(all_talkies_in)\n finally:\n all_talkies_in.close()\n\n _id_list = all_talkies[:]\n for v in all_videos:\n _id_list += v[\"youtube_ids\"].values()\n\n return _id_list", "def get_video_ids(playlist_id):\n \n #search for all the videos given a playlist id\n search_response = youtube.playlistItems().list(part='contentDetails',maxResults=50,playlistId=playlist_id).execute()\n all_videos = search_response['items']\n video_ids = []\n for vid in all_videos:\n video_id = vid['contentDetails']['videoId']\n video_ids.append(video_id)\n\n return video_ids", "def user_ids(self):\n return list(self.get_users())", "def get_queryset(self):\n lookup_url_kwarg = 
self.lookup_url_kwarg or self.lookup_field\n filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}\n video = generics.get_object_or_404(Video, **filter_kwargs)\n video_content_type = ContentType.objects.get_for_model(video)\n \n return User.objects.filter(\n activities__verb='like', activities__object_id=video.id,\n activities__object_content_type=video_content_type)", "def _find_users_by_movies(self, movie_id):\n return self.ratings[self.ratings['movie_id'] == movie_id]['user_id'].tolist()", "def videos(self):\n return self._videos", "def user_ids(self):\n return list(self._user_ids)", "def search_videos(self):\n query = request.args.get('query')\n if not query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"query\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"query\"), 400\n videos_data = self.video_database.search_videos(query)\n user_videos = [data[1]._asdict() for data in videos_data]\n user_emails = [data[0] for data in videos_data]\n user_reactions = [{k.name: v for k, v in data[2].items()} for data in videos_data]\n\n email_token = auth.current_user()[0]\n filtered_videos = []\n filtered_users = []\n filtered_reactions = []\n for v, u, r in zip(user_videos, user_emails, user_reactions):\n if v[\"visible\"] or (u[\"email\"] == email_token or self.friend_database.are_friends(u[\"email\"], email_token)):\n filtered_videos.append(v)\n filtered_users.append(u)\n filtered_reactions.append(r)\n for i in range(len(user_videos)):\n filtered_videos[i][\"creation_time\"] = filtered_videos[i][\"creation_time\"].isoformat()\n return json.dumps([{\"user\": u, \"video\": v, \"reactions\": r}\n for v, u, r in zip(filtered_videos, filtered_users, filtered_reactions)]), 200", "def get_talk_ids(self):\r\n return QtSql.QSqlQuery('''SELECT Id FROM presentations''')", "async def watching(self, ctx: commands.Context):\n\n global _ # MyPy was complaining this was a unresolved reference until global was called\n watching_data = await self.get_players_per_activity(ctx=ctx, movie=True)\n embed_colour = await ctx.embed_colour()\n if watching_data:\n embed_list = []\n count = -1\n splitter = 1\n for key, value in sorted(watching_data.items()):\n count += 1\n if count % splitter == 0:\n embed = discord.Embed(title=_(\"Who's watching what?\"), colour=embed_colour)\n\n title = \"{key} ({value} {status})\".format(\n key=key, value=len(value), status=_(\"watching\")\n )\n content = \"\"\n for mention, display_name, black_hole, account in sorted(\n value, key=itemgetter(2, 1)\n ):\n content += f\"{display_name}\"\n if account:\n content += f\" | {account}\"\n content += \"\\n\"\n\n outputs = pagify(content, page_length=1000, priority=True)\n for enum_count, field in enumerate(outputs, 1):\n if enum_count > 1:\n title = \"{key} ({extra} {value})\".format(\n key=key, value=enum_count, extra=_(\"Part\")\n )\n embed.add_field(name=f\"{title}\", value=field)\n if count % splitter == 0:\n embed_list.append(copy(embed))\n\n await menu(\n ctx, pages=embed_list, controls=DEFAULT_CONTROLS, message=None, page=0, timeout=60\n )\n else:\n await ctx.maybe_send_embed(_(\"No one is watching anything.\"))", "def user_ids(self):\r\n raise NotImplementedError", "def get_user_session_ids(user_id: str) -> List[str]:\n listOfSessions = os.listdir('public_dataset/'+user_id)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions", "def users_list_videos(self):\n email_query = request.args.get('email')\n if not email_query:\n 
self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n email_token = auth.current_user()[0]\n user_videos = self.video_database.list_user_videos(email_query)\n user_videos = [(video_data._asdict(), reaction_data) for video_data, reaction_data in user_videos]\n if email_query != email_token and not self.friend_database.are_friends(email_query, email_token):\n user_videos = [data for data in user_videos if data[0][\"visible\"]]\n for i in range(len(user_videos)):\n user_videos[i][0][\"creation_time\"] = user_videos[i][0][\"creation_time\"].isoformat()\n user_videos[i] = (user_videos[i][0], {k.name: v for k, v in user_videos[i][1].items()})\n user_videos = [{\"video\": video_data, \"reactions\": reaction_data} for video_data, reaction_data in user_videos]\n return json.dumps(user_videos), 200", "def get_videos(self, user):\n youtube = self.get_service(user)\n # Gather a list of all IDs of the channels that the user is subscribed to.\n\n request = youtube.subscriptions().list(part='snippet', mine=True, maxResults=50)\n upload_playlists = []\n while request is not None:\n sub_ids = []\n subs = request.execute()\n for item in subs.get('items', []):\n sub_ids.append(item['snippet']['resourceId']['channelId'])\n # Get the ID of the uploads playlist for each subscription.\n channels_request = youtube.channels().list(part='contentDetails', id=','.join(sub_ids), maxResults=50)\n for item in channels_request.execute().get('items', []):\n upload_playlists.append(item['contentDetails']['relatedPlaylists']['uploads'])\n # Cycle to the next page of subs\n request = youtube.subscriptions().list_next(request, subs)\n all_upload_items = []\n playlist_items_batch = BatchHttpRequest()\n for playlist in upload_playlists:\n # TODO: Find a suitable max_results. 15 seems to miss videos, skips over recent ones for older ones\n # TODO: 50 max_results * 22 subscribers = 1100-ish videos, taking around 30 seconds. Batching might help.\n self.get_playlist_items(youtube,\n playlist,\n max_results=50,\n batch=None,\n callback=lambda vids: all_upload_items.extend(vids))\n playlist_items_batch.execute(http=youtube._http)\n videos = []\n vid_info_batch = BatchHttpRequest()\n for chunk in chunks(all_upload_items, 50):\n self.get_video_info(youtube, chunk, batch=None, callback=lambda info: videos.extend(info))\n vid_info_batch.execute(youtube._http)\n videos.sort(key=lambda v: v.publishedAt)\n return videos", "def get_videos(self):\n return list(self._videos.values())", "def getIds(self) -> List[int]:\n return list(self.users.keys())", "def problem_watchers(self, identifier):\n return self._get(\"problems/%d/watchers\" % identifier).json()" ]
[ "0.67549974", "0.65549356", "0.6315282", "0.6313528", "0.6200434", "0.59684265", "0.58552593", "0.582406", "0.5795294", "0.57791805", "0.5738125", "0.5682407", "0.5559121", "0.5552698", "0.5543253", "0.548902", "0.5432729", "0.53820366", "0.5381979", "0.5370005", "0.53598285", "0.534237", "0.5321028", "0.5301414", "0.52837163", "0.52809113", "0.5263157", "0.5242231", "0.5241525", "0.5236191" ]
0.7925406
0
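The record above, like get_uploaded and the favourites helper that appear among the negatives, collects one column into a Python list row by row; a minimal sketch of the same idea as a single comprehension over fetchall(), with the query parameterized (names assumed from the record):

    def get_watched(self, username):
        # Same WATCHED-table query as the record's document; fetchall()
        # yields one-element tuples, flattened here into a plain list.
        self.cur.execute(
            "SELECT video_ID FROM watched WHERE username = %s", (username,)
        )
        return [row[0] for row in self.cur.fetchall()]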
Returns the view count of the video with the corresponding video_ID.
def get_views(self, video_ID): # WORKS self.cur.execute("SELECT view_count FROM videos WHERE video_ID = \"{}\"".format(video_ID)) return self.cur.fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def count_videos(self):\n return len(self.videos)", "def update_view_count(self, video_ID): #WORKS\n try:\n self.cur.execute(\"UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \\\"{}\\\"\".format(video_ID)) # Adds 1 to the existing value.\n self.db.commit()\n except:\n self.db.rollback()", "def getNumberOfViews(self) -> int:\n ...", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def get_video_views(self, response):\n return response.css(\".watch-view-count::text\")\\\n .extract_first(default='')", "def viewedVideo(videoId):\n\n if videoId in movieViewCounts:\n movieViewCounts['videoId'] += 1\n rearrangeMovieArray()\n else:\n movieViewCounts[videoId] = movieViewCounts.get(videoId, 0) + 1\n moviesRanked.append(videoId)", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def count(self):\n return self.vcount", "def count_video_meta(self):\n metas = YoutubeVideoMeta.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n count = metas.count()\n return count", "def count_dash(self):\n dashs = VideoRepresentation.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == VideoRepresentation.video_id)).filter_by(youtube_query_id=self.id)\n count = dashs.count()\n return count", "def count_comments(self):\n comments = YoutubeComment.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeComment.video_id)).filter_by(youtube_query_id=self.id)\n count = comments.count()\n return count", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def get_recent_release_with_count(self):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\tcursor.execute('''select count(*) from movies where release_year = 2016;''')\n\t\tpage_count = cursor.fetchone()[0]\n\t\tconnection.close()\n\t\tpage_count = int(ceil(page_count))\n\t\treturn page_count", "def get(self):\n return {'status': 'success', 'count': Video.query.count()}, 200", "def get_most_viewed(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10\")\n most_viewed_video_IDs = []\n for ID in self.cur.fetchall():\n most_viewed_video_IDs.append(ID[0])\n return most_viewed_video_IDs", "def get_vertex_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetVertexCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetVertexCount(key1, result_val)\n 
return result_val.i", "def count_view(self):\n self.count_views += 1\n self.save(update_fields=['count_views'])", "def getNumViews(self):\n\n # Compute number of views of each 2D points\n self.num_views = np.sum( np.sum(self.pts2D, axis = 0) != 0, 1 )\n return self.num_views", "def get_toprated_with_count(self):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\tcursor.execute('''select count(*) from movies;''')\n\t\tpage_count = cursor.fetchone()[0]\n\t\tconnection.close()\n\t\tpage_count = int(ceil(page_count))\n\t\treturn page_count", "def vsvrcount(self) :\n\t\ttry :\n\t\t\treturn self._vsvrcount\n\t\texcept Exception as e:\n\t\t\traise e", "def get_flag_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM flags\")\n return done", "def get_video_statistics(self):\n return self._video_statistics", "def _increment_viewcount(model, model_id: int, request):\n object_key = model.__name__ + ':' + str(model_id)\n\n redis = get_redis_connection('traffic_stats')\n view_count = redis.get(object_key)\n if not view_count:\n # Cache miss. Get the view count from the database and cache it.\n try:\n view_count = int(model.objects.get(identifier=model_id).view_count)\n except ObjectDoesNotExist:\n # If the object doesn't even exist in the database, don't track it.\n return\n except FieldDoesNotExist:\n log.error(\n 'Cannot track model {} because it has no view_count field. '\n 'Views for this model will be lost.'.format(model.__name__)\n )\n return -1\n redis.set(object_key, view_count)\n else:\n view_count = int(view_count)\n\n # Only increment the view count if the user has not visited the resource in\n # the last few minutes. Prevents metrics gaming shenanigans.\n ip = _get_user_ip(request)\n if not _is_recent_visitor(ip, object_key):\n redis.incr(object_key)\n view_count += 1\n _mark_recent_visitor(ip, object_key)\n\n # Update the last access time of the model.\n # Store in a sorted set so we can easily find the oldest keys.\n timestamp = time.time()\n redis.execute_command(\n 'ZADD model-last-accessed {} {}'.format(timestamp, object_key)\n )\n return view_count", "def get_number_of_movies(self):\n raise NotImplementedError", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def get_vote_count(self, post):\n return post.vote_set.count()", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched" ]
[ "0.761071", "0.75590754", "0.69225156", "0.6922456", "0.6912246", "0.67060393", "0.66868937", "0.6662882", "0.66086495", "0.6460109", "0.6458782", "0.6238349", "0.61823505", "0.6116544", "0.6043676", "0.5959066", "0.59584385", "0.5930518", "0.5929406", "0.59067196", "0.58900255", "0.5853399", "0.58154315", "0.5810758", "0.57353216", "0.56966513", "0.56952775", "0.56706214", "0.5621658", "0.56119347" ]
0.86707634
0
Deletes the video from the database.
def delete_video(self, video_ID): # WORKS try: self.cur.execute("DELETE FROM videos WHERE video_ID = \"{}\"".format(video_ID)) self.db.commit() os.remove('static/videos/' + str(video_ID) + '.mp4') os.remove('static/images/' + str(video_ID) + '.jpg') except: self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, video_id):\n\n query = \"\"\"\n DELETE\n FROM videos\n WHERE id = ?\n \"\"\"\n\n result = Model.execute(query, (video_id,))\n\n return True if result.rowcount == 1 else False", "def delete_video(event_id, video_id):\n event = Event.query.get_or_404(event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n video = Video.query.get_or_404(video_id)\n db.session.delete(video)\n db.session.commit()\n flash(\"Your video has been deleted.\", \"success\")\n return redirect(url_for(\"events.media\", id=event_id))", "def delete(self, video_id, subvideo_name):\n\n video = Video.query.get(video_id)\n if not video:\n return {'message': 'video entry not exist'}, http.HTTPStatus.NOT_FOUND\n videofile = VideoFile.query.filter_by(name=subvideo_name).first()\n if videofile:\n videofile.delete()\n else:\n return {'message': 'no related video file'}, http.HTTPStatus.NOT_FOUND\n\n return {'message': 'delete success'}, http.HTTPStatus.NO_CONTENT", "def users_video_delete(self):\n user_email = request.args.get('email')\n video_title = request.args.get('video_title')\n email_token = auth.current_user()[0]\n if not video_title or not user_email:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"video_title or user_email\"))\n return messages.ERROR_JSON % \"video_title or user_email\", 400\n if user_email != email_token and not self.auth_server.profile_query(email_token)[\"admin\"]:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n try:\n self.media_server.delete_video(user_email, video_title)\n except UnexistentVideoError:\n self.logger.debug((messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token)))\n return messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token), 404\n self.video_database.delete_video(user_email, video_title)\n return messages.SUCCESS_JSON, 200", "def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()", "def delete_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"DELETE FROM movie_table;\")\n self._close_connection(conn)", "def test_video_removal(self):\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n self._assert_video_removal(self.url, edx_video_id, 1)", "def schedule_delete_video(video: Video):\n job = scheduler.scheduler.add_job(delete_video, args=[video])\n log.info('Scheduled delete video job video=(%s), job=%s', video, job.id)", "def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')", "def DeleteVideo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self, request, *args, **kwargs):\n clip = self.get_object()\n clips_count = clip.video.clips.all().count()\n if clips_count <= 1:\n return Response(\n {'detail': \"You can't delete this video's only clip.\"}, \n status=status.HTTP_403_FORBIDDEN)\n \n else:\n self.perform_destroy(clip)\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n 
finally:\n db.dispose()", "def delete_movie():\n id = request.params.get('id')\n mongo_mgr.remove_film_from_user(id, aaa.current_user.id)", "def delete(self):\n\n cursor = self._conn.cursor()\n cursor.execute(\"DELETE FROM saves\")\n self._conn.commit()", "def delete(movie_id):\n # Get the movie requested\n movie = Movie.query.filter(Movie.movie_id == movie_id).one_or_none()\n\n if movie is not None:\n db.session.delete(movie)\n db.session.commit()\n\n return \"\", 200", "def delete(self):\n DATABASE_CONNECTION.delete(self.__class__.__name__, self.id)", "def delete_user(self, username): #WORKS\n try:\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n videos_to_delete = []\n for row in self.cur.fetchall():\n videos_to_delete.append(row[0]) # Get video IDs of all videos uploaded by the user.\n for ID in videos_to_delete:\n os.remove('static/videos/' + str(ID) + '.mp4') # Deletes the video from the static/videos directory.\n os.remove('static/images/' + str(ID) + '.jpg') # Deletes the image from the static/images directory.\n self.cur.execute(\"DELETE FROM users WHERE username = \\\"{}\\\"\".format(username))\n self.db.commit()\n except:\n self.db.rollback()", "def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)", "def delete_from_db(self):\n self.db.newsdb.delete_one({'_id': ObjectId(self.id)})", "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()", "def delete():", "def delete_movie(jwt, id):\n movie = Movie.query.get(id)\n\n if movie is None:\n abort(404)\n try:\n movie.delete()\n return jsonify({\n 'success': True,\n 'movie': movie.format()\n })\n except Exception:\n db.session.rollback()\n abort(500)", "def test_delete_video(mocker):\n patched_delete_task = mocker.patch(\"search.search_index_helpers.deindex_document\")\n video = VideoFactory.create()\n deindex_video(video)\n assert patched_delete_task.called is True\n assert patched_delete_task.call_args[0] == (gen_video_id(video), VIDEO_TYPE)", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def deletePlayers():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM player\")\n dbConn.commit()\n dbConn.close()", "def delete(self, player_id):\n current_player = DBPlayer.query.get(player_id)\n if not current_player:\n return get_response(404, 'Not exists.')\n try:\n db.session.delete(current_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(200, 'done!')", "def del_mvcomment(request, pk):\n\n comment = get_object_or_404(MovieComment, pk=pk)\n comment.delete()\n movie = comment.movie\n url = '../../' + str(comment.movie.pk)\n return redirect(url)", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())" ]
[ "0.73853815", "0.7320137", "0.7140269", "0.7012943", "0.68155694", "0.68002707", "0.6758645", "0.673845", "0.66331697", "0.6509932", "0.64298964", "0.6426344", "0.64150596", "0.6369963", "0.63509244", "0.6335846", "0.6276657", "0.6246866", "0.62201995", "0.61679363", "0.6158402", "0.6139184", "0.6138905", "0.61382586", "0.6132556", "0.6132556", "0.6114986", "0.61146176", "0.6108351", "0.6107301" ]
0.80854136
0
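A sketch of the delete flow from the record above with the SQL parameterized and the media cleanup tolerant of files that are already gone; the static/videos and static/images paths come from the record itself, everything else is assumed, and unlike the record this variant re-raises after rollback instead of swallowing the error:

    import os

    def delete_video(self, video_ID):
        try:
            self.cur.execute(
                "DELETE FROM videos WHERE video_ID = %s", (video_ID,)
            )
            self.db.commit()
        except Exception:
            self.db.rollback()
            raise
        # Remove the stored media only after the row is gone; a missing
        # file is ignored rather than masking the successful delete.
        for path in (
            "static/videos/{}.mp4".format(video_ID),
            "static/images/{}.jpg".format(video_ID),
        ):
            try:
                os.remove(path)
            except FileNotFoundError:
                pass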
Returns a list of all videos uploaded by the user with the corresponding username.
def get_uploaded(self, username): # WORKS self.cur.execute("SELECT video_ID FROM videos WHERE uploader = \"{}\"".format(username)) uploaded_video_IDs = [] for ID in self.cur.fetchall(): uploaded_video_IDs.append(ID[0]) return uploaded_video_IDs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_videos(self, user):\n raise NotImplementedError", "def get_videos(self, user):\n youtube = self.get_service(user)\n # Gather a list of all IDs of the channels that the user is subscribed to.\n\n request = youtube.subscriptions().list(part='snippet', mine=True, maxResults=50)\n upload_playlists = []\n while request is not None:\n sub_ids = []\n subs = request.execute()\n for item in subs.get('items', []):\n sub_ids.append(item['snippet']['resourceId']['channelId'])\n # Get the ID of the uploads playlist for each subscription.\n channels_request = youtube.channels().list(part='contentDetails', id=','.join(sub_ids), maxResults=50)\n for item in channels_request.execute().get('items', []):\n upload_playlists.append(item['contentDetails']['relatedPlaylists']['uploads'])\n # Cycle to the next page of subs\n request = youtube.subscriptions().list_next(request, subs)\n all_upload_items = []\n playlist_items_batch = BatchHttpRequest()\n for playlist in upload_playlists:\n # TODO: Find a suitable max_results. 15 seems to miss videos, skips over recent ones for older ones\n # TODO: 50 max_results * 22 subscribers = 1100-ish videos, taking around 30 seconds. Batching might help.\n self.get_playlist_items(youtube,\n playlist,\n max_results=50,\n batch=None,\n callback=lambda vids: all_upload_items.extend(vids))\n playlist_items_batch.execute(http=youtube._http)\n videos = []\n vid_info_batch = BatchHttpRequest()\n for chunk in chunks(all_upload_items, 50):\n self.get_video_info(youtube, chunk, batch=None, callback=lambda info: videos.extend(info))\n vid_info_batch.execute(youtube._http)\n videos.sort(key=lambda v: v.publishedAt)\n return videos", "def get_watched(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\"\".format(username))\n watched_video_IDs = []\n for ID in self.cur.fetchall():\n watched_video_IDs.append(ID[0])\n return watched_video_IDs", "def users_list_videos(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n email_token = auth.current_user()[0]\n user_videos = self.video_database.list_user_videos(email_query)\n user_videos = [(video_data._asdict(), reaction_data) for video_data, reaction_data in user_videos]\n if email_query != email_token and not self.friend_database.are_friends(email_query, email_token):\n user_videos = [data for data in user_videos if data[0][\"visible\"]]\n for i in range(len(user_videos)):\n user_videos[i][0][\"creation_time\"] = user_videos[i][0][\"creation_time\"].isoformat()\n user_videos[i] = (user_videos[i][0], {k.name: v for k, v in user_videos[i][1].items()})\n user_videos = [{\"video\": video_data, \"reactions\": reaction_data} for video_data, reaction_data in user_videos]\n return json.dumps(user_videos), 200", "def user_videos(username):\n for page_index in count():\n entry_list = download_video_feed(\n create_feed_url(username, page_index)\n )\n\n for entry in entry_list:\n yield entry\n\n if len(entry_list) < MAX_RESULTS:\n break", "def get_user_videos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def fetch_videos():\n channels = 
get_channels_from_file()\n\n channels_request = service.channels().list(\n part='id, contentDetails',\n forUsername=channels[0]['channelUsername'] # first channel for now\n )\n\n video_list = []\n\n channels_response = channels_request.execute()\n for channel in channels_response['items']:\n uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page_token = ''\n while next_page_token is not None:\n playlistitems_response = service.playlistItems().list(\n playlistId=uploads_list_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token\n ).execute()\n\n for playlist_item in playlistitems_response['items']:\n title = playlist_item['snippet']['title']\n video_id = playlist_item['snippet']['resourceId']['videoId']\n print(f'{title}, {video_id}')\n video_list.append({'title': title, 'video_id': video_id})\n\n next_page_token = playlistitems_response.get('nextPageToken')\n\n return video_list", "def videos(self):\n return self._videos", "def list_users(self, stream_name:str, version:int=1)->List[str]:\n stream_path = self._get_storage_path(stream_name=stream_name, version=version)\n all_users = self._ls_dir(stream_name=stream_name, version=version)\n user_ids = []\n for usr in all_users:\n user_ids.append(usr.replace(stream_path,\"\").replace(\"user=\",\"\").replace(\"study=\"+self.study_name, \"\"))\n return user_ids", "def video_list(self) -> list:\n return self._video_list", "def get_playlists_for_user_by_name(self, request): \n user = Account.find_by_username(request.username)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)", "def get_videos(self):\n return list(self._videos.values())", "def get_queryset(self):\n queryset = MediaFile.objects.all()\n username = self.request.query_params.get('username', None)\n userqueryset = User.objects.all()\n users = userqueryset.filter(username=username)\n if len(users) and username is not None:\n queryset = queryset.filter(owner=users[0])\n return queryset", "def view_watched_movies(username: str) -> list[tuple]:\n with connection:\n return connection.execute(VIEW_WATCHED_MOVIES, (username,)).fetchall()", "async def get_videos(self) -> APIReturn:\n return await self._request(\"GET\", \"/getVideos\")", "def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites", "def list(self):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n \"\"\"\n\n result = Model.execute(query)\n\n return result.fetchall()", "def get_videos(channel_name, CLIENT_SECRETS_FILE):\r\n\r\n video_list = []\r\n\r\n MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\"\r\n\r\n YOUTUBE_READONLY_SCOPE = \"https://www.googleapis.com/auth/youtube.readonly\"\r\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\r\n YOUTUBE_API_VERSION = \"v3\"\r\n\r\n flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,\r\n message=MISSING_CLIENT_SECRETS_MESSAGE,\r\n scope=YOUTUBE_READONLY_SCOPE)\r\n\r\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\r\n credentials = storage.get()\r\n\r\n if credentials is None or credentials.invalid:\r\n flags = argparser.parse_args()\r\n credentials = run_flow(flow, storage, flags)\r\n\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n http=credentials.authorize(httplib2.Http()))\r\n\r\n # Retrieve the contentDetails part of the channel resource for 
the\r\n # authenticated user's channel.\r\n channels_response = youtube.channels().list(\r\n forUsername=channel_name,\r\n part=\"contentDetails\"\r\n ).execute()\r\n\r\n for channel in channels_response[\"items\"]:\r\n # From the API response, extract the playlist ID that identifies the list\r\n # of videos uploaded to the authenticated user's channel.\r\n uploads_list_id = channel[\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\r\n\r\n # Retrieve the list of videos uploaded to the authenticated user's channel.\r\n playlistitems_list_request = youtube.playlistItems().list(\r\n playlistId=uploads_list_id,\r\n part=\"snippet\",\r\n maxResults=50\r\n )\r\n\r\n while playlistitems_list_request:\r\n playlistitems_list_response = playlistitems_list_request.execute()\r\n\r\n # Print information about each video.\r\n for playlist_item in playlistitems_list_response[\"items\"]:\r\n title = playlist_item[\"snippet\"][\"title\"]\r\n video_id = playlist_item[\"snippet\"][\"resourceId\"][\"videoId\"]\r\n video_list.append((title, video_id, 'https://img.youtube.com/vi/' + video_id + '/0.jpg'))\r\n\r\n playlistitems_list_request = youtube.playlistItems().list_next(\r\n playlistitems_list_request, playlistitems_list_response)\r\n\r\n return(video_list)", "def list_users_in_pool():\n files = []\n USERS_DIR = os.path.join(UPLOAD_DIRECTORY, \"users\")\n for filename in os.listdir(USERS_DIR):\n path = os.path.join(USERS_DIR, filename)\n if os.path.isdir(path):\n files.append(filename)\n return jsonify(files)", "def search_videos(self):\n query = request.args.get('query')\n if not query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"query\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"query\"), 400\n videos_data = self.video_database.search_videos(query)\n user_videos = [data[1]._asdict() for data in videos_data]\n user_emails = [data[0] for data in videos_data]\n user_reactions = [{k.name: v for k, v in data[2].items()} for data in videos_data]\n\n email_token = auth.current_user()[0]\n filtered_videos = []\n filtered_users = []\n filtered_reactions = []\n for v, u, r in zip(user_videos, user_emails, user_reactions):\n if v[\"visible\"] or (u[\"email\"] == email_token or self.friend_database.are_friends(u[\"email\"], email_token)):\n filtered_videos.append(v)\n filtered_users.append(u)\n filtered_reactions.append(r)\n for i in range(len(user_videos)):\n filtered_videos[i][\"creation_time\"] = filtered_videos[i][\"creation_time\"].isoformat()\n return json.dumps([{\"user\": u, \"video\": v, \"reactions\": r}\n for v, u, r in zip(filtered_videos, filtered_users, filtered_reactions)]), 200", "def test_api_video_read_list_user_with_no_access(self):\n user = factories.UserFactory()\n # An organization with a playlist and one video\n organization = factories.OrganizationFactory()\n organization_playlist = factories.PlaylistFactory(organization=organization)\n factories.VideoFactory(playlist=organization_playlist)\n # A playlist with a video but no organization\n other_playlist = factories.PlaylistFactory()\n factories.VideoFactory(playlist=other_playlist)\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n response = self.client.get(\n \"/api/videos/\", HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json(), {\"count\": 0, \"next\": None, \"previous\": None, \"results\": []}\n 
)", "def get_videos(self):\n\n videos = []\n with open(self.filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in reader:\n for col in row:\n videos.append(col)\n videos = list(filter(None, list(set(videos))))\n return videos", "def get_videos(self, **kwargs):\n return self.get('videos', **kwargs)", "def get_playlists_for_user(self, request): \n user = Account.find_by_id(request.userid)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)", "def videos(self):\r\n return v3.Videos(self)", "def get_all_videos(self):\n\n return list(self._videos.values())", "def get_queryset(self):\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}\n video = generics.get_object_or_404(Video, **filter_kwargs)\n video_content_type = ContentType.objects.get_for_model(video)\n \n return User.objects.filter(\n activities__verb='like', activities__object_id=video.id,\n activities__object_content_type=video_content_type)", "def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def gather_videos(files):\r\n # Because we are using a set, no duplicates will be present\r\n videos = set()\r\n for item in files:\r\n # Crawl subfolders\r\n if os.path.isdir(item):\r\n for root, _, filenames in os.walk(item):\r\n for filename in filenames:\r\n filepath = os.path.join(root, filename)\r\n # Check if its a video\r\n if YoutubeService.valid_video_file(filepath):\r\n videos.add(filepath)\r\n # If it exists it is a single file, check if its a video\r\n elif os.path.exists(item) and YoutubeService.valid_video_file(item):\r\n videos.add(item)\r\n return videos" ]
[ "0.7457652", "0.70110625", "0.6914144", "0.67175204", "0.66885626", "0.66715485", "0.65637517", "0.650081", "0.63107467", "0.6185674", "0.6175587", "0.6173313", "0.6156263", "0.61157864", "0.61140674", "0.61111623", "0.610035", "0.6082998", "0.6081753", "0.6064207", "0.6061557", "0.6018898", "0.59913576", "0.59904575", "0.5964251", "0.5939627", "0.5905008", "0.58927333", "0.5887541", "0.58785754" ]
0.76592815
0
Returns True if the username is present in the USERS table.
def is_user_present(self, username): # WORKS done = self.cur.execute("SELECT username FROM users WHERE username = \"{}\"".format(username)) if done == 1: return True else: return False
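The lookup above interpolates the username into the SQL text with str.format, which breaks on quotes in the input and is open to SQL injection. A minimal sketch of the same check using driver-side parameter binding; the standalone cursor argument and the %s placeholder style (as in PyMySQL/MySQLdb) are assumptions for illustration, not part of the original class:

def user_exists(cursor, username):
    # Let the driver escape the value instead of formatting it into the SQL string.
    cursor.execute("SELECT 1 FROM users WHERE username = %s LIMIT 1", (username,))
    # fetchone() returns None when no row matched, so this truth test replaces the rowcount check.
    return cursor.fetchone() is not None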
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_user(self, username):\n return username in self.user_table", "def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0", "def userExists(self, username):\n data = db.session.query(User.id).filter_by(username = username).first()\n if data is None:\n return False\n else:\n return True", "def _user_exists(self, username):\n return self.db.query(User).filter_by(name=username).first() is not None", "def exists(username):\n if Users.query.filter_by(username=username).first():\n return True\n return False", "def username_exists(self, username):\n user = [user for user in ALL_USERS if user['username'] == username]\n if user:\n return True\n return False", "def has_user(self, username):\n\t\treturn username in self.users", "def user_exists(username):\n sql = \"SELECT username \" \\\n \"FROM users \" \\\n \"WHERE username=:username\"\n result = db.session.execute(sql, {\"username\": username})\n user = result.fetchone()\n if user is None:\n return False\n else:\n return True", "def username_present(username):\n if User.objects.filter(username=username).count():\n return True\n return False", "def exists_user(self, tenant_name, username):\n base = basedn.people_dn(username, tenant_name)\n return self.exists_entry(base)", "def user_exists(username):\n db, c = config.start_db()\n # Check whether there is a row in 'users' where the column 'username' has\n # the value of `username`\n c.execute(\n 'SELECT EXISTS(SELECT 1 FROM users WHERE username=? LIMIT 1)',\n (username,)\n )\n result = c.fetchone()[0] # 1 if user exists, else 0\n config.end_db(db)\n return result == 1", "def verify_user(self, username):\n try:\n self.c.execute('SELECT name FROM profiles WHERE name=(?)' (username,))\n user = self.c.fetchone()[0]\n return user == username\n\n except TypeError:\n return False", "def username_exists(username):\n\n hashed_username = base64.b64encode(Cryptography.hash(username).digest()).decode()\n\n if os.path.exists(getcwd() + Database.__DB_FILENAME):\n with open(getcwd() + Database.__DB_FILENAME, 'r') as f:\n for entry in f:\n parts = entry.split(':')\n if parts[0] == hashed_username:\n return True\n return False\n else:\n return False", "def checkIsUsernameAvailable(self, username):\n\n return User.objects.filter(username__iexact=username).exists()", "def is_user(username: str) -> bool:\n db = get_db()\n if username is None:\n return False\n return not db.get_user_by_name(username) is None", "def is_username_taken(username):\n if User.objects.filter(username=username).exists():\n return True\n return False", "def is_username_available(username):\n from corehq.apps.users.dbaccessors import user_exists\n\n local_username = username\n if '@' in local_username:\n # assume email format since '@' is an invalid character for usernames\n local_username = username.split('@')[0]\n reserved_usernames = ['admin', 'demo_user']\n if local_username in reserved_usernames:\n return False\n\n exists = user_exists(username)\n return not exists.exists", "def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1", "def _checkUserExists(username,self):\r\n \r\n exists = False\r\n \r\n if _findUser(username) is not None:\r\n exists = True\r\n \r\n return exists", "def user_exists(cls, name):\n\n for user in cls.user_list:\n if user.user_name == name:\n return True\n\n return False", "def check_user(self, username):\n 
self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def checkUserExists(self, email, username):\n query = \"SELECT * FROM User WHERE Email='\"+email+\"' OR UserName = '\"+username+\"';\"\n self.cur.execute(query)\n\n data = self.cur.fetchall()\n if len(data):\n return True\n else:\n return False", "def UserExist(self, username):\n return self.com.CheckUserexists(username)", "def user_exists(self, login):\n\t\tif login in self.users_by_name and isinstance(self.users_by_name[login], VDOM_user):\n\t\t\treturn True\n\t\treturn False", "def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False", "def has_username(self):\n return self.username is not None", "def has_user(self, user):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_all_users_query+\" WHERE $username_field$='$username$'\",{'username_field':self.sql_username_field,'password_field':self.sql_password_field,'username':user})\n self.log.debug(\"sqlflexibleauthstore: has_user: %s\" % (query,))\n cursor.execute(query)\n\n for row in cursor:\n return True\n return False", "def username_exist(username):\n return User.objects.filter(username=username).first()", "def check_user_exists(self):\n is_exists = False\n if auth.UserInfo.objects.filter(\n user_id__username=self.username,\n is_active=True).exists():\n is_exists = True\n return is_exists" ]
[ "0.87660056", "0.8282255", "0.8261474", "0.8222565", "0.8204488", "0.82031626", "0.80208254", "0.8006336", "0.7955199", "0.7936963", "0.79178756", "0.7800615", "0.7787375", "0.7707385", "0.7649575", "0.7648491", "0.76411563", "0.7584512", "0.7551436", "0.75165737", "0.7509721", "0.7509721", "0.74845684", "0.745041", "0.7390505", "0.7385719", "0.734056", "0.7262856", "0.72344625", "0.7208844" ]
0.84399956
1
Returns a maximum of 5 random video IDs from the VIDEOS table.
def get_five_random_IDs(self): self.cur.execute("SELECT video_ID FROM videos ORDER BY RAND() LIMIT 5") IDs = [] for ID in self.cur.fetchall(): IDs.append(ID[0]) return IDs
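The fetch-and-append loop above can be collapsed into a comprehension, and it is worth noting that ORDER BY RAND() sorts the entire table before applying LIMIT, which only stays cheap while the videos table is small. A sketch under the same schema, again assuming a cursor is passed in:

def five_random_video_ids(cursor):
    # ORDER BY RAND() shuffles every row before LIMIT 5, so cost grows with table size.
    cursor.execute("SELECT video_ID FROM videos ORDER BY RAND() LIMIT 5")
    # Each row is a 1-tuple; keep only the video_ID column.
    return [row[0] for row in cursor.fetchall()]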
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_ID(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY RAND() LIMIT 1\") # Selects video_ID from 1 random row.\n return self.cur.fetchone()[0]", "def play_random_video(self):\n #[ expression for item in list if conditional ]\n unflagged_videos = [video for video in self._video_library.get_all_videos() if not video.flagged]\n #ran_num = randrange(len(self._video_library.get_all_videos()))\n if len(unflagged_videos) == 0:\n print(\"No videos available\")\n else:\n ran_num = randrange(len(unflagged_videos))\n #random_video_id = self._video_library.get_all_videos()[ran_num]._video_id\n random_video_id = unflagged_videos[ran_num]._video_id\n self.play_video(random_video_id)", "def play_random_video(self):\n if self.video_id != None:\n print(f\"Stopping video: {self.video_id}\")\n video_info = self._video_library.get_all_videos()\n a_list = []\n for i in video_info:\n a_list.append(video_info.title)\n a = random.randint(0, len(a_list)-1)\n print(f\"Playing video: {a_list[a]}\")", "def get_most_viewed(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10\")\n most_viewed_video_IDs = []\n for ID in self.cur.fetchall():\n most_viewed_video_IDs.append(ID[0])\n return most_viewed_video_IDs", "def play_random_video(self):\n num_videos = len(self._video_library.get_all_videos())\n videos = self._video_library.get_all_videos()\n random_index = randint(0, num_videos-1)\n self.play_video(videos[random_index].video_id)\n # print(\"play_random_video needs implementation\")", "def get_five_random(self):\r\n if self.get_length() > 5:\r\n random_selection = []\r\n\r\n from random import randrange\r\n\r\n for i in range(0, 5):\r\n while True:\r\n rnd = randrange(0, self.get_length())\r\n if self.get_tweet(rnd) not in random_selection:\r\n random_selection.append(self.get_tweet(rnd))\r\n break\r\n return random_selection\r\n else:\r\n return self.tweets", "def web_videos_random_all(channel):\n\n try:\n choice = random.choice([\n video['snippet']['resourceId']['videoId']\n for video in yt_get_channel_videos(channel)\n ])\n except IndexError:\n return flask.redirect(flask.url_for('videos',\n channel = channel\n ))\n\n return flask.redirect(flask.url_for('videos',\n channel = channel, video = choice)\n )", "def play_random_video(self):\n if self._current_video:\n self.stop_video()\n videos = [v for v in self._video_library.get_all_videos() if v.flag is None]\n if not videos:\n print(\"No videos available\")\n return\n self.play_video(choice(videos).video_id)", "def get_user_ids():\n TOTAL_USERS = 50\n return list(numpy.random.choice(\n TOTAL_USERS, random.randint(1, TOTAL_USERS), replace=False\n ))", "def choose_random(N):\n db = pymongo.MongoClient('localhost',27020).chembldb\n # Get all CHEMBL IDs\n db.molecules.ensure_index('chembl_id')\n chembl_ids = [m['chembl_id'] for m in db.molecules.find().sort('chembl_id')]\n print len(chembl_ids)\n random.seed(201405291515)\n rands = random.sample(chembl_ids, N)\n return(rands)", "def shuffled_thumbnails(self):\n while True:\n video_id = random.choice(self.video_ids)\n metadata = self._id_to_meta[video_id]\n thumbs = [th for th in self.video_thumbnails(video_id)]\n if thumbs:\n yield random.choice(thumbs) + (metadata,)", "def play_random_video(self):\n video = random.choice(self._video_library.get_all_videos())\n self.play_video(video.video_id)", "def tournament_selection(pool):\n return max(random.sample(pool, len(pool) // 5))", "def Chose_rand():\r\n 
total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)", "def random_video(nb_keywords: int = 1, kv_ratio: int = 1, video_id: int = 0, channel_id: int = 0):\n\n # Type-checking\n if not isinstance(nb_keywords, int):\n raise TypeError\n if not isinstance(kv_ratio, int):\n raise TypeError\n # Assertion checking\n assert kv_ratio <= nb_keywords\n\n keywords_idx = np.random.choice(np.arange(nb_keywords), size=kv_ratio, replace=False)\n keywords = np.zeros(nb_keywords, dtype=np.float)\n keywords[keywords_idx] = 1. # non-zeros keywords for the video\n return Video(keywords, video_id, channel_id)", "def generate_number_of_events(max_number):\n\n return randint(1, max_number)", "def random_nodes(self):\n SAMPLE_BUFFER_SIZE = 1000\n samples = []\n log('log: ')\n log('log: random_nodes()')\n while True:\n # if there are some left, try returning those\n if len(samples) > 0:\n # make sure the video has neighbors\n log('log: ensuring node has neighbors')\n video_id = samples.pop()\n video_node = Node(video_id)\n if len(video_node.neighbors) == 0:\n continue\n \n log('log: END random_nodes()')\n yield video_node\n else:\n # refill the buffer\n log('log: filling up buffer for random_nodes')\n samples = DB.sample(\n SAMPLE_BUFFER_SIZE,\n {\n \"related_videos\": { \"$exists\": True },\n \"basic_info\": { \"$exists\": True },\n \"frames.0\": { \"$exists\": True },\n }\n )\n log('log: buffer filled')\n \n # sanity check\n if len(samples) == 0:\n print('log: len(samples) == 0 AFTER retriving from the database, something is broken')\n break", "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people", "def get_random_neighbor(movie_id):\n users = self._find_users_by_movies(movie_id)\n return users[random.randint(0, len(users) - 1)]", "def create_videos(user, gloss_ids):\n uuids = []\n for gloss_id in gloss_ids:\n video = Video(\n user_id=user.id,\n gloss_id=gloss_id,\n uuid=str(uuid.uuid4()),\n review_summary=INITIAL_SUMMARY,\n created_time=int(time.time()),\n status=VideoStatus.WAITING_UPLOAD\n )\n video.save()\n uuids.append(video.uuid)\n\n if len(uuids) == 1:\n uuids = uuids[0]\n\n return uuids", "def test_random_uuid(self):\n movie = Movie.objects.get(title='The Two Towers')\n assert isinstance(movie.id, uuid.UUID), ( 'Expected UUID, got %s.' 
%\n movie.id.__class__.__name__ )\n tt_uuid = str(movie.id)\n self.assertEqual(tt_uuid[14], '4')\n assert tt_uuid[19] in ('8', '9', 'a', 'b'), 'Invalid random UUID.'", "def popular():\r\n d = data_loader.vid_patient_tuples_dict\r\n most_popular_videos = []\r\n for k in sorted(d, key=lambda k: len(d[k]), reverse=True):\r\n most_popular_videos.append(k)\r\n return most_popular_videos", "def random_five(min_x, max_x):\n return random.sample(xrange(min_x, max_x), 5)", "def randomInt():\n wordList = Word.objects.all()\n max = Word.objects.all().order_by(\"-id\")[0].id - 1\n half = round((xAppeared(\">\") - xAppeared(\"<\")) / 2)\n while True:\n randID = random.randint(0,max)\n word = wordList[randID]\n if randomBoolean():\n return randID\n elif word.times_appeared < half:\n return randID", "def get_latest_videos(self, count = 30, page = 1):\n uri = 'videos/latest'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def select_random_meme(self):\n cursor = self.conn.cursor()\n cursor.execute(f\"select meme_id from memes where include_random and not blacklisted order by random() limit 1\")\n result = cursor.fetchone()\n cursor.close()\n return result[0]", "def random_sample(df, batch_size):\n sample = df.sample(n=batch_size)\n #print(sample)\n video_ids = list(sample.video_id.values.astype(str))\n labels = list(sample.label.values)\n\n return video_ids, labels", "def populate_game_questions():\n indices = random.sample(range(0, len(quizquestion.questions_all)), 5) # If user doesn't specify, choose 5 random questions\n return quizquestion.QuizQuestion.get_game_questions(indices)", "def gen_all_arms_ids(\n self,\n ) -> None:\n num_all_arms = (\n self.num_unique_batches * self.batch_size * self.num_arms_per_episode\n )\n all_arms_ids = torch.randperm(num_all_arms)\n self.all_unique_arm_ids = all_arms_ids.reshape(\n [self.num_unique_batches, self.batch_size, self.num_arms_per_episode]\n )\n assert self.all_unique_arm_ids.ndim == 3\n return" ]
[ "0.6715515", "0.64491177", "0.6400545", "0.62863255", "0.6253465", "0.6194111", "0.60011953", "0.5903442", "0.58871764", "0.5860959", "0.5722055", "0.56515723", "0.5600675", "0.55765074", "0.55703866", "0.5550376", "0.554172", "0.5523666", "0.55215514", "0.5498145", "0.5474059", "0.545877", "0.5457806", "0.5443098", "0.54227126", "0.53769815", "0.53708476", "0.535543", "0.5352715", "0.53462654" ]
0.8638253
0
Adds the video ID to the FLAGS table.
def flag_ID(self, username, video_ID): done = self.cur.execute("SELECT video_ID from flags WHERE video_ID = \"{}\"".format(video_ID)) if done == 0: # Not yet flagged by any user. try: self.cur.execute("INSERT INTO flags VALUES(\"{}\", \"{}\")".format(video_ID, username)) self.db.commit() except: self.db.rollback()
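The insert above formats both values into the SQL and hides every failure behind a bare except. A hedged sketch of the same flag-once behaviour with bound parameters; it assumes flags.video_ID carries a UNIQUE key so INSERT IGNORE can handle the already-flagged case in one statement, and the connection/cursor arguments are stand-ins for self.db and self.cur:

def flag_video(connection, cursor, video_id, username):
    try:
        # IGNORE turns the duplicate-key error into a no-op when the video is already flagged.
        cursor.execute(
            "INSERT IGNORE INTO flags (video_ID, username) VALUES (%s, %s)",
            (video_id, username),
        )
        connection.commit()
    except Exception:
        connection.rollback()
        raise  # surface the failure instead of silently swallowing it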
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"Not supplied\"):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot flag video: Video does not exist\")\n elif self._video_library.get_video(video_id).flagged:\n print(\"Cannot flag video: Video is already flagged\")\n else:\n if self.current_video is not None and self.current_video.video_id == video_id:\n self.stop_video() \n print(f\"Successfully flagged video: {self._video_library.get_video(video_id).title} (reason: {flag_reason})\")\n self._video_library.get_video(video_id).flagged = True\n self._video_library.get_video(video_id).flag_reason = flag_reason", "def flag_video(self, video_id, flag_reason=\"\"):\n videos = self._video_library.get_all_videos()\n matched = False\n for video_f in self.flagged:\n if video_id.lower() in video_f:\n print(\"Cannot flag video: Video is already flagged\")\n break\n else:\n for v in videos:\n if video_id.lower() in v.video_id:\n matched = True\n if flag_reason:\n self.flagged.append([v.video_id,flag_reason])\n print(f\"Successfully flagged video: {v.title} (reason: {flag_reason})\")\n else:\n self.flagged.append([v.video_id, \"Not supplied\"])\n print(f\"Successfully flagged video: {v.title} (reason: Not supplied)\")\n \n if matched == False:\n print(\"Cannot flag video: Video does not exist\")\n\n # print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"Not supplied\"):\n video = self._video_library.get_video(video_id)\n if not video:\n print(\"Cannot flag video: Video does not exist\")\n return\n if video.flag is not None:\n print(\"Cannot flag video: Video is already flagged\")\n return\n video.set_flag(flag_reason)\n if self._current_video and self._current_video.video_id == video.video_id:\n self.stop_video()\n print(f\"Successfully flagged video: {video._title} (reason: {flag_reason})\")", "def flag_video(self, video_id, flag_reason=\"\"):\n object=self._video_library.get_video(video_id)\n \n print(f\"{object}\")", "def allow_video(self, video_id):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif not self._video_library.get_video(video_id).flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n self._video_library.get_video(video_id).flagged = False\n self._video_library.get_video(video_id).flag_reason = \"Not supplied\"", "def addid(catalog,ide,video):\n ids=catalog['ids']\n existid=mp.contains(ids,ide)\n if existid:\n entry=mp.get(ids,ide)\n ID=me.getValue(entry)\n else:\n ID=newid(ide)\n mp.put(ids,ide,ID)\n \n lt.addLast(ID['videos'],video)", "def add_to_list (self, video_id):\n return self._update_my_list(video_id=video_id, operation='add')", "def delete_flag(self, video_ID):\n try:\n self.cur.execute(\"DELETE FROM flags WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n except:\n self.db.rollback()", "def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if not self._video_library.get_video(video_id):\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n if not video.flag:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n print(f\"Successfully 
removed flag from video: {video.title}\")\n video.set_flag(None)", "def test_addFlags(self):\n self._flagsTest('addFlags', b'+FLAGS')", "def get_flag_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM flags\")\n return done", "def add_video(self, video: Video):\n\n self._videos[video.video_id] = video", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def add_to_playlist(self, playlist_name, video_id):\n video = self._video_library.get_video(video_id)\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n return\n if not video:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n return\n if video.flag is not None:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {video.flag})\")\n return\n playlist = self._playlists[playlist_name.lower()]\n if video in playlist.videos:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n return\n playlist.videos.append(self._video_library.get_video(video_id))\n print(f\"Added video to {playlist_name}: {video.title}\")", "def add_parameter(self, param_id, value, flags=0):\n param = bytearray()\n param.extend(param_id)\n param.extend(flags)\n param.extend(binary.pack_le32(value))\n self.parameters.append(param)\n raise PyedbglibNotSupportedError(\"Parameters are not yet supported!\")", "def add_to_playlist(self, playlist_name, video_id):\n playlist_exists = False\n video_id_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n \n videos = self._video_library.get_all_videos()\n for v in videos:\n if v.video_id.lower() == video_id.lower():\n video_id_exists = True\n video_title = v.title\n break\n video_flagged = False\n if self.flagged:\n for videos_f in self.flagged:\n if video_id.lower() in videos_f:\n video_flagged = True\n reason = videos_f[1]\n break\n if video_flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason:{reason})\")\n elif playlist_exists == False:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n \n elif video_id_exists == False:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n\n elif video_id.lower() in self.playlists[real_playlist_name]:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n self.playlists[real_playlist_name].append(video_id.lower())\n print(f\"Added video to {playlist_name}: {video_title}\")\n\n # print(\"add_to_playlist needs implementation\")", "def add_video(id):\n event = Event.query.get_or_404(id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n upload_video_form = UploadVideoForm()\n if upload_video_form.validate_on_submit():\n video = Video(\n url=UploadVideoForm.parse_url(upload_video_form.video_url.data), event=event\n )\n db.session.add(video)\n db.session.commit()\n flash(\"Your upload was successful.\", \"success\")\n return redirect(url_for(\"events.media\", id=id))\n else:\n session[\"upload_video_form_errors\"] = upload_video_form.video_url.errors\n 
session[\"video_url\"] = upload_video_form.video_url.data\n return redirect(url_for(\"events.media\", id=event.id))", "def add_flag(self, flag):\n self.flags.append(flag)", "def add_flag(flags, flag, tensor):\n flags[flag][\"count\"] += 1\n if tensor not in flags[flag][\"tensors\"]:\n flags[flag][\"tensors\"].append(tensor)", "def add_to_playlist(self, playlist_name, video_id):\n if playlist_name.lower() not in self.playlists:\n print(\"Cannot add video to\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n elif self._video_library.get_video(video_id) is None:\n print(\"Cannot add video to\", playlist_name, end=\"\") \n print(\": Video does not exist\")\n elif self._video_library.get_video(video_id).flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {self._video_library.get_video(video_id).flag_reason})\")\n elif self._video_library.get_video(video_id) in self.playlists[playlist_name.lower()]:\n print(\"Cannot add video to\", playlist_name, end=\"\") \n print(\": Video already added\")\n else:\n print(\"Added video to\", playlist_name, end=\"\") \n print(\":\",self._video_library.get_video(video_id).title)\n self.playlists[playlist_name.lower()].append(self._video_library.get_video(video_id))", "def update_video(conn: sqlite3.Connection, cols_vals: dict, verbose=False):\n video_id = cols_vals.pop('id')\n query_string = generate_unconditional_update_query(list(cols_vals.keys()))\n values = list(cols_vals.values())\n values.append(video_id)\n if execute_query(conn, query_string, tuple(values)):\n if verbose:\n logger.info(f'Updated video {video_id!r}')\n return True", "def get_flagged(self):\n self.cur.execute(\"SELECT video_ID FROM flags\")\n flagged_IDs = []\n for ID in self.cur.fetchall():\n flagged_IDs.append(ID[0])\n return flagged_IDs", "def web_archive_insert_video(id):\n\n db = get_db()\n user_id = flask.session['user']['id']\n\n video_id = id\n video = yt_get_video(video_id)\n channel_id = video['snippet']['channelId']\n\n archive = None\n for playlist in db_get_archives():\n if playlist['contentDetails']['itemCount'] < 5000:\n archive = playlist\n break\n\n if archive is None:\n archive = yt_create_playlist()\n\n if yt_insert_to_playlist(video_id, archive['id']):\n if channel_id not in db[user_id]:\n db[user_id][channel_id] = {\n 'played': {}, 'archived': {}\n }\n db[user_id][channel_id]['archived'][video_id] = archive['id']\n update_db(db)", "def video_id(self, video_id):\n # type: (string_types) -> None\n\n if video_id is not None:\n if not isinstance(video_id, string_types):\n raise TypeError(\"Invalid type for `video_id`, type has to be `string_types`\")\n\n self._video_id = video_id", "def activate_video(video: dict):\n\tif video.get('state')=='INACTIVE':\n\t\tvideo_id = video.get('id')\n\t\tjson = { 'state': 'ACTIVE' }\n\t\tprint(f'Activating video ID {video_id}: {get_cms().UpdateVideo(video_id=video_id, json_body=json).status_code}')" ]
[ "0.61697054", "0.61697054", "0.59331304", "0.5890974", "0.5873597", "0.5819396", "0.58043575", "0.56625754", "0.56442875", "0.5636927", "0.5559719", "0.5406017", "0.5353216", "0.5332353", "0.53097856", "0.53097856", "0.53097856", "0.53097856", "0.52876383", "0.5258258", "0.52437603", "0.52134454", "0.5183439", "0.51545733", "0.51028985", "0.51027286", "0.5048784", "0.5005427", "0.4937641", "0.49253362" ]
0.7335359
0
Returns the username of the user who flagged the given video ID.
def get_flagger(self, video_ID): self.cur.execute("SELECT username FROM flags WHERE video_ID = \"{}\"".format(video_ID)) return self.cur.fetchone()[0]
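fetchone()[0] above raises TypeError when nobody has flagged the video, because fetchone() returns None in that case. A small sketch that degrades to None instead, with the same bound-parameter caveat as the other sketches in this section:

def flagged_by(cursor, video_id):
    cursor.execute("SELECT username FROM flags WHERE video_ID = %s", (video_id,))
    row = cursor.fetchone()
    # row is None when the video has never been flagged.
    return row[0] if row is not None else None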
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def username(self) -> str:", "def username(self) -> str:", "def get_username(self):\n \n if self.livestream_user:\n return self.livestream_user\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path_term = p.path.split('/')\n \n if len(path_term) == 3:\n if path_term[2] == 'video':\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n return path_term[1]\n if path_term[1] == 'embed':\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n return path_term[2]\n \n return ''", "def get_id(self):\r\n return self.username", "def get_identifier(self, request):\r\n return request.user.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def video_id(self) -> str:\r\n return self._video_id", "def get_identifier(self, request):\n return request.user.username", "def username(self, instance):\r\n return instance.user.username", "def username(user_id):\n return UserIndex.instance().name(user_id)", "def get_video_channel_name(self, response):\n return response.css(\"div.yt-user-info\")\\\n .extract_first(default='')", "def getName(self):\n return self.__username", "def get_player_name(self):\n return self._player_name", "def get_video_uploader(self, video_ID): #WORKS\n try:\n done = self.cur.execute(\"SELECT uploader FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n uploader = self.cur.fetchone()[0]\n return uploader\n except:\n return \"Error getting username\"", "def username(self):\n return self.idnumber\n # if self._grade in ['6', '7', '8', '9', '10', '11', '12']:\n # return self.idnumber\n # mapping = ssis_synctree_settings[STUDENT_PSIDUSERNAME_MAPPINGS].get(self.idnumber)\n # return (self.name + self._year_of_graduation).lower().replace(' ', '').replace('-', '') if not mapping else mapping", "def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name", "def get_name(self):\n return self._player_name", "def get_identifier(self, request):\r\n username, api_key = self.extract_credentials(request)\r\n return username or 'nouser'", "def username(self):\n return self._username()", "def username(self, inst):\r\n return inst.user.username", "def get_bceid_user_name(self):\n token = self.get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n return unverified_claims['bceid_username']", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def get_username(self):\r\n return self.username", "def get_username(self) -> str:\n return self._username", "def get_username(self):\n return self.username", "def GetUsername(self):\n pass" ]
[ "0.66205794", "0.66205794", "0.654406", "0.6508635", "0.64972645", "0.649588", "0.649588", "0.649588", "0.64898527", "0.6474817", "0.6413524", "0.64029515", "0.6389556", "0.6362839", "0.63601893", "0.633342", "0.63329804", "0.6328341", "0.6322044", "0.6317198", "0.6309156", "0.630491", "0.6301781", "0.62777734", "0.62777734", "0.62777734", "0.6273612", "0.6266703", "0.6266124", "0.6263277" ]
0.7351368
0
Returns a list of flagged videos from FLAGS table.
def get_flagged(self): self.cur.execute("SELECT video_ID FROM flags") flagged_IDs = [] for ID in self.cur.fetchall(): flagged_IDs.append(ID[0]) return flagged_IDs
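The accumulation loop above repeats the pattern from the random-ID helper; most DB-API drivers, PyMySQL included, let you iterate the cursor directly, which makes the unpacking explicit. A sketch, again with the cursor passed in:

def flagged_video_ids(cursor):
    cursor.execute("SELECT video_ID FROM flags")
    # Iterating the cursor yields 1-tuples; unpack the single column as we go.
    return [video_id for (video_id,) in cursor]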
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flag_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM flags\")\n return done", "def video_list(self) -> list:\n return self._video_list", "def flags(self):\n flags = self.Flags\n return [x for x in self.FLAGS_VALUES if flags & x]", "def flag_video(self, video_id, flag_reason=\"\"):\n object=self._video_library.get_video(video_id)\n \n print(f\"{object}\")", "def flag_video(self, video_id, flag_reason=\"\"):\n videos = self._video_library.get_all_videos()\n matched = False\n for video_f in self.flagged:\n if video_id.lower() in video_f:\n print(\"Cannot flag video: Video is already flagged\")\n break\n else:\n for v in videos:\n if video_id.lower() in v.video_id:\n matched = True\n if flag_reason:\n self.flagged.append([v.video_id,flag_reason])\n print(f\"Successfully flagged video: {v.title} (reason: {flag_reason})\")\n else:\n self.flagged.append([v.video_id, \"Not supplied\"])\n print(f\"Successfully flagged video: {v.title} (reason: Not supplied)\")\n \n if matched == False:\n print(\"Cannot flag video: Video does not exist\")\n\n # print(\"flag_video needs implementation\")", "def flags(self):\n return list(self._flags_generator())", "def get_all_videos_in_directory(directory: str):\n\n all_files_and_folders = os.listdir(directory)\n\n only_videos = []\n for file in all_files_and_folders:\n if is_video(file):\n only_videos.append(file)\n ...\n\n return only_videos", "def flags(session, flags):\n\tfl = []\n\tfor f in flags:\n\t\ttry: \n\t\t\tflag = Flag(**f)\n\t\t\tsession.add(flag)\n\t\t\tsession.commit()\n\t\texcept Exception: \n\t\t\tsession.rollback()\n\t\t\tflag = session.query(Flag).filter_by(**f).one()\n\t\tfl.append(flag)\n\treturn fl", "def video_to_features(vid):\n ext = Extractor()\n return [ext.extract(frame) for frame in vid]", "def test_getlist(self):\n flags = flag_lists(appversions={\"code\": \"fx1.0\"})\n eq_(flags, {(\"fx\", \"pl\"): [0],\n (\"fx\", \"de\"): [1],\n (\"fx\", \"fr\"): [2],\n (\"fx\", \"da\"): [1, 0]})", "def get_flagged_num(self, username):\n done = self.cur.execute(\"SELECT flags.video_ID FROM videos,flags WHERE videos.video_ID = flags.video_ID AND videos.uploader = \\\"{}\\\"\".format(username))\n return done", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def box_flags(self, box):\n return [flag for flag in self.flags if flag.box == box]", "def video_ids(self):\n return self._sorted_ids", "def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags", "def get_videos(self):\n\n videos = []\n with open(self.filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in reader:\n for col in row:\n videos.append(col)\n videos = list(filter(None, list(set(videos))))\n return videos", "def get_all_videos(dir, extension='mp4'):\n\n list_video_fn = []\n for dirpath, dirnames, filenames in os.walk(dir):\n for filename in [f for f in filenames if f.endswith(extension)]:\n fn = os.path.join(dirpath, filename)\n list_video_fn.append(fn)\n\n return list_video_fn", "def get_videos(self):\n return list(self._videos.values())", "def filter_videos(\n files: list\n):\n#cSpell:words webm vchd rmvb gifv xvid vidx\n video_extensions = [\n \"WEBM\",\n \"MPG\",\"MP2\", 
\"MPEG\", \"MPE\", \"MPV\",\n \"OGV\",\"OGG\",\n \"MP4\", \"M4P\", \"M4V\",\n \"AVI\",\n \"WMV\",\n \"MOV\",\"QT\",\n \"FLV\",\"SWF\",\n \"F4V\",\"F4P\",\"F4A\",\"F4B\",\n \"VCHD\",\n \"RMVB\",\"RM\",\n \"VOB\",\n \"MKV\",\n \"MTS\", \"M2TS\", \"TS\",\n \"MNG\",\n \"GIFV\",\n \"GIF\",\n \"DRC\",\n \"XVID\",\n \"VIDX\",\n \"ASF\",\n \"AMV\",\n \"M2V\",\n \"SVI\",\n \"3GP\",\n \"MXF\",\n \"ROQ\",\n \"NSV\",\n \"3G2\",\n ]\n return filter_files_by_extension(files, video_extensions)\n ...", "def flags(self) -> list[\"ProjectCommandFlag\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"flags\", _args)\n _ctx = ProjectCommandFlag(_ctx)._select_multiple(\n _description=\"description\",\n _name=\"name\",\n )\n return _ctx.execute_sync(list[ProjectCommandFlag])", "def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def get_all_videos(self):\n\n return list(self._videos.values())", "def videos(self):\n return self._videos", "def listFlag(flaglist):\n flag = 0\n for index, item in enumerate(flaglist):\n flag = setFlag(flag, index, item)\n return flag", "def get_video_frames(self):\r\n\r\n vid_dir = self._video_dir\r\n vid_frames = [str(img_path) for img_path in\r\n Path(vid_dir).glob('*.jpg')]\r\n if len(vid_frames) == 0:\r\n vid_frames = [str(img_path) for img_path in\r\n Path(vid_dir).glob('*.png')]\r\n list_of_frames = sorted(vid_frames)\r\n\r\n self._vid_frames = [list_of_frames]\r\n\r\n return self._vid_frames", "def getFlaggedSeqList(df, pre, post):\n maskidx = df.index[df.flag] # series of (DT) indexes with true flags\n result = []\n for idx in maskidx:\n maskseq = getIndexSequence(df, idx, pre, post)\n result.append((maskseq[0], maskseq[-1]))\n\n return result", "def get_villager_ids(g):\n return [id for id in g.keys()\n if g[id] in ('v', 'b', 's', 'c')]", "def get_frames(self):\n if not self.video:\n return []\n # We cannot validate shape on construction as that happens inside graph\n # mode as we construct from a tf.data.Dataset, so we validate here.\n self.video[0].validate_shape_and_dtype()\n return self.video", "def find_all(v):\n screen = G.DEVICE.snapshot(quality=ST.SNAPSHOT_QUALITY)\n return v.match_all_in(screen)" ]
[ "0.59645563", "0.5873178", "0.568918", "0.56486547", "0.5638592", "0.5637153", "0.5600878", "0.5555618", "0.5444726", "0.54408675", "0.5410056", "0.54091847", "0.54091847", "0.53483254", "0.53305584", "0.5321235", "0.53046703", "0.5302004", "0.5291554", "0.52836734", "0.52633953", "0.5261544", "0.52035224", "0.5198038", "0.51850253", "0.518199", "0.5128771", "0.50699383", "0.50691426", "0.5063503" ]
0.7468905
0
Returns the number of the user's videos that have been flagged by other users.
def get_flagged_num(self, username): done = self.cur.execute("SELECT flags.video_ID FROM videos,flags WHERE videos.video_ID = flags.video_ID AND videos.uploader = \"{}\"".format(username)) return done
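The count here relies on the return value of execute(), which PyMySQL defines as the number of affected rows but the DB-API spec leaves undefined. A sketch that asks the database to count instead, with an explicit JOIN and a bound parameter; table and column names are taken from the queries above:

def flagged_upload_count(cursor, username):
    cursor.execute(
        "SELECT COUNT(*) "
        "FROM videos JOIN flags ON videos.video_ID = flags.video_ID "
        "WHERE videos.uploader = %s",
        (username,),
    )
    # COUNT(*) always produces exactly one row with one column.
    return cursor.fetchone()[0]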
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def count_videos(self):\n return len(self.videos)", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def get_flag_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM flags\")\n return done", "def get_fav_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\" ORDER BY CAST(count as decimal) DESC\".format(username))\n return self.cur.fetchone()[0]", "def get_number_un_watched(self):\n movies_un_watched = 0\n for movie in self.movies:\n if not movie.is_watched:\n movies_un_watched += 1\n return movies_un_watched", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def get_videos(self, user):\n raise NotImplementedError", "def annotate_with_number_of_private_imageannotationcomments_from_user(self, user):\n from devilry.devilry_group.models import ImageAnnotationComment\n return self.annotate(\n number_of_private_imageannotationcomments_from_user=models.Count(\n models.Case(\n models.When(feedbackset__imageannotationcomment__visibility=ImageAnnotationComment.VISIBILITY_PRIVATE,\n feedbackset__imageannotationcomment__user=user,\n then=1)\n )\n )\n )", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def get_watched(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\"\".format(username))\n watched_video_IDs = []\n for ID in self.cur.fetchall():\n watched_video_IDs.append(ID[0])\n return watched_video_IDs", "def count_comments(self):\n comments = YoutubeComment.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeComment.video_id)).filter_by(youtube_query_id=self.id)\n count = comments.count()\n return count", "def count_video_meta(self):\n metas = YoutubeVideoMeta.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n count = metas.count()\n return count", "def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)", "def total_rated(users: List[int], movie: int, ratings: UserRatingDict) -> int:\n result = 0\n for i 
in users:\n if movie in list(ratings[i].keys()):\n result += 1\n return result", "def get_user_videos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def count_revisions_by_user(self):\n return self.run_query(f\"count({self.r}/contributor[id = 5558])\")", "def get_counts(self, obj: User):\n uploader = obj.id\n public_count = Image.objects.filter(uploader=uploader, is_private=False, is_profile_image=False).count()\n private_count = Image.objects.filter(uploader=uploader, is_private=True, is_profile_image=False).count()\n liked_count = Image.objects.filter(likes__id=uploader).count()\n \n return {\n \"public\": public_count,\n \"private\": private_count,\n \"liked\": liked_count,\n }", "def get_best_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\" ORDER BY view_count DESC\".format(username))\n return self.cur.fetchone()[0]", "def get_flagged(self):\n self.cur.execute(\"SELECT video_ID FROM flags\")\n flagged_IDs = []\n for ID in self.cur.fetchall():\n flagged_IDs.append(ID[0])\n return flagged_IDs", "def count_friends(users):\n all_friends=[]\n for u_dict in users:\n for items in u_dict['friends']:\n all_friends.append(items)\n count = Counter()\n for frnd in all_friends:\n count[frnd]+=1\n return count", "def playedCount(player1, player2):\n if player1 == player2:\n # Early return when trying to match against themselves\n return 1\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"\"\"SELECT Count(id) AS CountOfid FROM results\n WHERE id = %s and opponent = %s\"\"\", (player1, player2))\n numberPlayed = cursor.fetchone()[0]\n conn.close()\n return numberPlayed", "def getPlayerCount(self):\n return self.sandboxplayergroupplayer_set.filter(quit=False).count()", "def flag_ID(self, username, video_ID):\n done = self.cur.execute(\"SELECT video_ID from flags WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n if done == 0: # Not yet flagged by any user.\n try:\n self.cur.execute(\"INSERT INTO flags VALUES(\\\"{}\\\", \\\"{}\\\")\".format(video_ID, username))\n self.db.commit()\n except:\n self.db.rollback()", "def get_counts(self, obj: User):\n uploader = obj.id\n public_count = Image.objects.filter(uploader=uploader, is_private=False, is_profile_image=False).count()\n \n return {\n \"public\": public_count,\n }" ]
[ "0.7523925", "0.6733229", "0.63746536", "0.6260079", "0.6234799", "0.6202073", "0.6138974", "0.60228616", "0.59842056", "0.59239006", "0.58508843", "0.57385385", "0.56081533", "0.5598427", "0.55598706", "0.55529207", "0.5511205", "0.5501292", "0.5490401", "0.5456478", "0.54491866", "0.54486126", "0.54162097", "0.5352136", "0.53236985", "0.53166246", "0.5279979", "0.5270777", "0.52635056", "0.5262623" ]
0.7559999
0
Returns number of videos in the VIDEOS table.
def get_video_count(self): done = self.cur.execute("SELECT video_ID FROM videos") return done
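As in the other counting helpers in this table, the total comes from execute()'s return value. Two more explicit options are sketched below: COUNT(*) in SQL, or the DB-API rowcount attribute, which with PyMySQL's default buffered cursor is accurate after a SELECT. Both assume a cursor argument rather than self.cur:

def video_count(cursor):
    # Option 1: let SQL do the counting; no id values are transferred to Python.
    cursor.execute("SELECT COUNT(*) FROM videos")
    return cursor.fetchone()[0]

def video_count_via_rowcount(cursor):
    # Option 2: rowcount reports how many rows the last execute() produced.
    cursor.execute("SELECT video_ID FROM videos")
    return cursor.rowcount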
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_videos(self):\n return len(self.videos)", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def count_video_meta(self):\n metas = YoutubeVideoMeta.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n count = metas.count()\n return count", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def __len__(self):\n return len(self.all_videos)", "def num_frames(self):\n return len(self.video)", "def get(self):\n return {'status': 'success', 'count': Video.query.count()}, 200", "def get_tv_episodes(self) -> int:\n return len(glob.glob(os.path.join(\n os.path.dirname(self.file),\n f\"*{os.path.splitext(self.file)[-1]}\"\n )))", "def count_dash(self):\n dashs = VideoRepresentation.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == VideoRepresentation.video_id)).filter_by(youtube_query_id=self.id)\n count = dashs.count()\n return count", "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def get_toprated_with_count(self):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\tcursor.execute('''select count(*) from movies;''')\n\t\tpage_count = cursor.fetchone()[0]\n\t\tconnection.close()\n\t\tpage_count = int(ceil(page_count))\n\t\treturn page_count", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def get_number_of_movies(self):\n raise NotImplementedError", "def count_comments(self):\n comments = YoutubeComment.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeComment.video_id)).filter_by(youtube_query_id=self.id)\n count = comments.count()\n return count", "def count(self):\n return self.vcount", "def countPlayers():\n conn = connect()\n c = conn.cursor()\n # Counts the number of entries in the \"players\" table.\n c.execute(\"select count(*) as num from players;\")\n num = c.fetchone()[0]\n conn.commit()\n conn.close()\n return num", "def countPlayers():\n conn, cur = connect()\n query = \"SELECT count(*) AS player_count FROM players;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered when selecting player count from players table\")\n num_players = cur.fetchone()\n conn.close()\n return num_players['player_count']", "def get_nb_frames_for_video(video_parts):\n train_or_test, classname, filename_no_ext, _ = video_parts\n generated_files = glob.glob(os.path.join(\"/data/niteshku001/Ravdess/data\", train_or_test, classname,\n filename_no_ext + '*.jpg'))\n return 
len(generated_files)", "def countPlayers():\n\n conn = psycopg2.connect(\"dbname=tournament\")\n c = conn.cursor()\n c.execute(\"SELECT count(*) FROM player\")\n count = c.fetchall()[0][0]\n conn.close\n\n return count", "def countPlayers():\n db, cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)", "def countPlayers():\n conn, c = connect()\n\n q = \"select count(id) FROM PLAYERS;\"\n c.execute(q)\n res = c.fetchone()\n\n c.close()\n conn.commit()\n conn.close()\n return int(res[0])", "def count_rows(self):\n with self.connection:\n result = self.cursor.execute(\"SELECT * FROM music\").fetchall()\n return len(result)", "def countPlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT count(*) FROM players\")\n playerCount = cursor.fetchone()[0]\n conn.close()\n return playerCount", "def countPlayers():\n\n conn, c = main.connect()\n c.execute(\"SELECT count(*) FROM player\")\n\n return c.fetchone()[0]", "def countPlayers():\n\n count = 0\n query = (\"SELECT COUNT(id) FROM players;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'find'})\n for row in results:\n count = row[0]\n return count", "def countPlayers():\n conn = connect()\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM players\")\n players = int(cur.fetchone()[0])\n conn.close()\n return players", "def video_width(self):\n return self._el._parent.execute_script(\"return arguments[0].videoWidth\", self._el)" ]
[ "0.82683337", "0.7324633", "0.7128083", "0.705069", "0.69343895", "0.6885626", "0.67733824", "0.6640411", "0.6491014", "0.6469322", "0.646399", "0.6452515", "0.6443329", "0.6419106", "0.6393201", "0.6381259", "0.63574064", "0.6304938", "0.6295545", "0.6275196", "0.6236569", "0.61987513", "0.6181855", "0.6173832", "0.61493814", "0.61159503", "0.61039704", "0.60950726", "0.60751295", "0.60663134" ]
0.8443735
0
Returns number of flagged videos in the VIDEOS table.
def get_flag_count(self): done = self.cur.execute("SELECT video_ID FROM flags") return done
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def count_videos(self):\n return len(self.videos)", "def get_flagged_num(self, username):\n done = self.cur.execute(\"SELECT flags.video_ID FROM videos,flags WHERE videos.video_ID = flags.video_ID AND videos.uploader = \\\"{}\\\"\".format(username))\n return done", "def get_flagged(self):\n self.cur.execute(\"SELECT video_ID FROM flags\")\n flagged_IDs = []\n for ID in self.cur.fetchall():\n flagged_IDs.append(ID[0])\n return flagged_IDs", "def count_video_meta(self):\n metas = YoutubeVideoMeta.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n count = metas.count()\n return count", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def __len__(self):\n return len(self.all_videos)", "def get_tv_episodes(self) -> int:\n return len(glob.glob(os.path.join(\n os.path.dirname(self.file),\n f\"*{os.path.splitext(self.file)[-1]}\"\n )))", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def num_frames(self):\n return len(self.video)", "def get_number_un_watched(self):\n movies_un_watched = 0\n for movie in self.movies:\n if not movie.is_watched:\n movies_un_watched += 1\n return movies_un_watched", "def count_dash(self):\n dashs = VideoRepresentation.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == VideoRepresentation.video_id)).filter_by(youtube_query_id=self.id)\n count = dashs.count()\n return count", "def count(self):\n return self.vcount", "def count_comments(self):\n comments = YoutubeComment.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeComment.video_id)).filter_by(youtube_query_id=self.id)\n count = comments.count()\n return count", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def get(self):\n return {'status': 'success', 'count': Video.query.count()}, 200", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def n_featured():\r\n sql = text('''select count(*) from featured;''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count", "def get_likes_count():\n return Flag.objects.filter(flag=Flag.LIKE_FLAG).count()", "def get_no_of_annotations(database, label, train_vids_all):\n count = 0\n for vid in train_vids_all:\n for ann in database[vid]['annotations']:\n if ann['label'] == label:\n count += 1\n return count", "def get_nb_frames_for_video(video_parts):\n train_or_test, classname, filename_no_ext, _ = video_parts\n generated_files = glob.glob(os.path.join(\"/data/niteshku001/Ravdess/data\", train_or_test, classname,\n filename_no_ext + '*.jpg'))\n return len(generated_files)", 
"def numOfLiveNeighbors(self):\n return len(list(filter(lambda x: x.isAlive(), self._neighbors)))", "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def get_most_viewed(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10\")\n most_viewed_video_IDs = []\n for ID in self.cur.fetchall():\n most_viewed_video_IDs.append(ID[0])\n return most_viewed_video_IDs", "def get_nb_frames_for_video(video_parts):\n train_or_test, filename_no_ext, _ = video_parts\n generated_files = glob.glob(os.path.join('demo_frames',\n filename_no_ext + '*.jpg'))\n return len(generated_files)", "def get_video_views(self, response):\n return response.css(\".watch-view-count::text\")\\\n .extract_first(default='')", "def vectors_count(self) -> Optional[str]:\n return pulumi.get(self, \"vectors_count\")" ]
[ "0.77415645", "0.7383914", "0.7020163", "0.6678284", "0.665664", "0.65775", "0.6527008", "0.6088112", "0.6027986", "0.5998238", "0.59302866", "0.5903157", "0.5892386", "0.5889689", "0.5801771", "0.57928395", "0.57669586", "0.5756323", "0.57086295", "0.57069635", "0.55990773", "0.5578125", "0.55573684", "0.5557269", "0.55478555", "0.5501734", "0.55012685", "0.5447957", "0.5422594", "0.5394731" ]
0.78880155
0
Returns number of videos uploaded by the user from VIDEOS table.
def get_user_video_count(self, username):
    done = self.cur.execute("SELECT video_ID FROM videos WHERE uploader = \"{}\"".format(username))
    return done
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def count_videos(self):\n return len(self.videos)", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def get_uploaded(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n uploaded_video_IDs = []\n for ID in self.cur.fetchall():\n uploaded_video_IDs.append(ID[0])\n return uploaded_video_IDs", "def get_fav_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\" ORDER BY CAST(count as decimal) DESC\".format(username))\n return self.cur.fetchone()[0]", "def count_video_meta(self):\n metas = YoutubeVideoMeta.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n count = metas.count()\n return count", "def get_flagged_num(self, username):\n done = self.cur.execute(\"SELECT flags.video_ID FROM videos,flags WHERE videos.video_ID = flags.video_ID AND videos.uploader = \\\"{}\\\"\".format(username))\n return done", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def get(self):\n return {'status': 'success', 'count': Video.query.count()}, 200", "def get_best_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\" ORDER BY view_count DESC\".format(username))\n return self.cur.fetchone()[0]", "def __len__(self):\n return len(self.all_videos)", "def num_frames(self):\n return len(self.video)", "def countPlayers():\n\n conn = psycopg2.connect(\"dbname=tournament\")\n c = conn.cursor()\n c.execute(\"SELECT count(*) FROM player\")\n count = c.fetchall()[0][0]\n conn.close\n\n return count", "def countPlayers():\n\n db = connect()\n c = db.cursor()\n query = (\"SELECT count(players.id) AS count_player FROM players;\")\n c.execute(query)\n count_player = c.fetchone()[0]\n db.close()\n return count_player", "def count_players():\n DB = connect()\n c = DB.cursor()\n c.execute(\"SELECT * FROM count_players\")\n DB.commit()\n player_count = c.fetchall()[0][0]\n DB.close()\n return player_count", "def countPlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT count(*) FROM players\")\n playerCount = cursor.fetchone()[0]\n conn.close()\n return playerCount", "def countPlayers():\n db, 
cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)", "def countPlayers():\n conn = connect()\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM players\")\n players = int(cur.fetchone()[0])\n conn.close()\n return players", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def countPlayers():\n conn, cur = connect()\n query = \"SELECT count(*) AS player_count FROM players;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered when selecting player count from players table\")\n num_players = cur.fetchone()\n conn.close()\n return num_players['player_count']", "def countPlayers():\n conn = connect()\n c = conn.cursor()\n # Counts the number of entries in the \"players\" table.\n c.execute(\"select count(*) as num from players;\")\n num = c.fetchone()[0]\n conn.commit()\n conn.close()\n return num", "def countPlayers():\n conn, c = connect()\n\n q = \"select count(id) FROM PLAYERS;\"\n c.execute(q)\n res = c.fetchone()\n\n c.close()\n conn.commit()\n conn.close()\n return int(res[0])", "def get_user_videos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def countPlayers():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"SELECT COUNT(*) from players;\"\"\")\n ret = int(cur.fetchone()[0])\n return ret", "def countPlayers():\n\n conn, c = main.connect()\n c.execute(\"SELECT count(*) FROM player\")\n\n return c.fetchone()[0]", "def update_view_count(self, video_ID): #WORKS\n try:\n self.cur.execute(\"UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \\\"{}\\\"\".format(video_ID)) # Adds 1 to the existing value.\n self.db.commit()\n except:\n self.db.rollback()" ]
[ "0.78139544", "0.75539935", "0.7544837", "0.7432486", "0.7323483", "0.7125673", "0.67732424", "0.65567315", "0.6395767", "0.6310928", "0.617212", "0.6141789", "0.61221516", "0.6081953", "0.6072957", "0.6031323", "0.6020064", "0.59904104", "0.59509546", "0.5947412", "0.5942394", "0.5936862", "0.59340984", "0.59319395", "0.5927489", "0.59190303", "0.58918715", "0.5876923", "0.58519524", "0.58169514" ]
0.8299071
0
Returns number of views on all videos uploaded by the user from VIDEOS table.
def get_user_view_count(self, username):
    self.cur.execute("SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \"{}\"".format(username))
    return self.cur.fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def count_videos(self):\n return len(self.videos)", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def getNumberOfViews(self) -> int:\n ...", "def get_video_views(self, response):\n return response.css(\".watch-view-count::text\")\\\n .extract_first(default='')", "def get_most_viewed(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10\")\n most_viewed_video_IDs = []\n for ID in self.cur.fetchall():\n most_viewed_video_IDs.append(ID[0])\n return most_viewed_video_IDs", "def getNumViews(self):\n\n # Compute number of views of each 2D points\n self.num_views = np.sum( np.sum(self.pts2D, axis = 0) != 0, 1 )\n return self.num_views", "def count_video_meta(self):\n metas = YoutubeVideoMeta.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeVideoMeta.id)).filter_by(youtube_query_id=self.id)\n count = metas.count()\n return count", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def present_voter_cnt(self):\n\n return len(self._present_voters())", "def update_view_count(self, video_ID): #WORKS\n try:\n self.cur.execute(\"UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \\\"{}\\\"\".format(video_ID)) # Adds 1 to the existing value.\n self.db.commit()\n except:\n self.db.rollback()", "def __len__(self):\n return len(self.all_videos)", "def count_view(self):\n self.count_views += 1\n self.save(update_fields=['count_views'])", "def count(self):\n return self.vcount", "def get_tv_episodes(self) -> int:\n return len(glob.glob(os.path.join(\n os.path.dirname(self.file),\n f\"*{os.path.splitext(self.file)[-1]}\"\n )))", "def get_user_videos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_videos(self, user):\n raise NotImplementedError", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def get(self):\n return {'status': 'success', 'count': Video.query.count()}, 200", "def viewedVideo(videoId):\n\n if videoId in movieViewCounts:\n movieViewCounts['videoId'] += 1\n rearrangeMovieArray()\n else:\n movieViewCounts[videoId] = movieViewCounts.get(videoId, 0) + 1\n moviesRanked.append(videoId)", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched", "def nay_voter_cnt(self):\n\n return 
len(self._nay_voters())", "def get_visits_count(visit_container):\r\n return visit_container.visits.all().count()", "def count_revisions_by_user(self):\n return self.run_query(f\"count({self.r}/contributor[id = 5558])\")", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def video_list(request):\n mongodb = get_db() \n data = video_list_query()\n videos = video_info_query()\n # from edinsights.core.render import render\n return render(request, \"list-view.html\", {\n 'data': data, 'videos': videos\n })", "def count_comments(self):\n comments = YoutubeComment.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeComment.video_id)).filter_by(youtube_query_id=self.id)\n count = comments.count()\n return count" ]
[ "0.78535324", "0.73414063", "0.7268572", "0.7242948", "0.7227357", "0.7038107", "0.6769094", "0.6495603", "0.61842996", "0.6127641", "0.6106806", "0.6050462", "0.5961984", "0.5937592", "0.5896913", "0.58555007", "0.58042014", "0.5800134", "0.57887244", "0.57736903", "0.57569325", "0.57495517", "0.572176", "0.5720808", "0.5696537", "0.56798565", "0.5669487", "0.5661208", "0.55986774", "0.557798" ]
0.77311456
1
Returns the video ID of the video uploaded by the user with most views.
def get_best_video_ID(self, username):
    self.cur.execute("SELECT video_ID FROM videos WHERE uploader = \"{}\" ORDER BY view_count DESC".format(username))
    return self.cur.fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_most_viewed(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10\")\n most_viewed_video_IDs = []\n for ID in self.cur.fetchall():\n most_viewed_video_IDs.append(ID[0])\n return most_viewed_video_IDs", "def get_fav_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\" ORDER BY CAST(count as decimal) DESC\".format(username))\n return self.cur.fetchone()[0]", "def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def viewedVideo(videoId):\n\n if videoId in movieViewCounts:\n movieViewCounts['videoId'] += 1\n rearrangeMovieArray()\n else:\n movieViewCounts[videoId] = movieViewCounts.get(videoId, 0) + 1\n moviesRanked.append(videoId)", "def get_video_id(self, obj):\n return obj.video.id", "def get_most_popular_talks_by_views(videos):\r\n return sorted(videos, key=lambda x: int(x.metrics['viewCount']), reverse=True)", "def get_most_watched_movie_index(self, user_id, count):\r\n # find most watched movie by 0 rating count on each movie,\r\n # then sorted out increasing order which indicates which movie has least 0 ratings\r\n # meaning most watched movie.\r\n zero_ratings = self.df_ratmat.apply(pd.Series.value_counts).iloc[0, 1:]\r\n user_rated_movies = self.get_user_ratings(user_id)[\"item_id\"]\r\n most_watched_movies_not_rated = zero_ratings[\r\n zero_ratings.index.isin(user_rated_movies.astype(int).astype(str)) != True].sort_values().head(count)\r\n return most_watched_movies_not_rated.index", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def get_video_id(self, obj):\n return obj.id", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def last_video(self) -> str:\n return max(glob.glob(VIDEOS_DIR), key=os.path.getmtime)", "def get_most_popular_talks_by_views(videos: list) -> List[Video]:\n return sorted(videos,\n key=lambda vid: get_vid_stat(vid, 'viewCount'),\n reverse=True)", "def video_id(self) -> str:\r\n return self._video_id", "def get_view_max(self):\n return self._view_max", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if path_list[0] == 'v':\n # https://vine.co/v/bjHh0zHdgZT\n return path_list[1]\n \n return ''", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = 
urlparse.urlparse(self.original_url)\n \n if p.path.startswith('/v/') or p.path.startswith('/broadcast/'):\n path = p.path.split('/')\n if len(path) == 3:\n return p.path.split('/')[-1].replace('.live', '')\n \n return ''", "def get_max_id(self):\r\n max_id = None\r\n for pid in self.players:\r\n if max_id is None or pid > max_id:\r\n max_id = pid\r\n return max_id", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n if self.res.get('slideshow_id'):\n return self.res.get('slideshow_id')\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/slideshow/embed_code')):\n # http://www.slideshare.net/slideshow/embed_code/1293644\n return path_list[2]\n elif len(path_list) == 2 and p.path.startswith('/swf'):\n # return -1 when url is like : http://static.slideshare.net/swf/ssplayer2.swf?doc=working-dogs-1201800078341935-2\n # FixMe :slideshare oembed api doesnt support this kind of url\n return -1\n return ''", "def video_id(self):\n # type: () -> string_types\n return self._video_id", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def videoview(request):\n obj=videos.objects.last()\n print(\"------------>\",obj)\n return render(request,\"myapp/videoview.html\",{'obj':obj})", "def popular():\r\n d = data_loader.vid_patient_tuples_dict\r\n most_popular_videos = []\r\n for k in sorted(d, key=lambda k: len(d[k]), reverse=True):\r\n most_popular_videos.append(k)\r\n return most_popular_videos", "def get_flagged_num(self, username):\n done = self.cur.execute(\"SELECT flags.video_ID FROM videos,flags WHERE videos.video_ID = flags.video_ID AND videos.uploader = \\\"{}\\\"\".format(username))\n return done", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''", "def vid(self):\n return self._id", "def media_videos_count_max(self, media_videos_count_max):\n\n self._media_videos_count_max = media_videos_count_max" ]
[ "0.73119", "0.6968702", "0.6827411", "0.6747587", "0.6502863", "0.63282526", "0.61964494", "0.6152308", "0.60951626", "0.60849375", "0.60268444", "0.5864287", "0.5831303", "0.57951087", "0.5778345", "0.57708985", "0.5739955", "0.5739642", "0.5708817", "0.5672575", "0.5642984", "0.557079", "0.55654526", "0.55448556", "0.5515626", "0.5495516", "0.549539", "0.5458386", "0.5442935", "0.5426413" ]
0.79390967
0
Returns the video ID of the user's favourite video.
def get_fav_video_ID(self, username):
    self.cur.execute("SELECT video_ID FROM watched WHERE username = \"{}\" ORDER BY CAST(count as decimal) DESC".format(username))
    return self.cur.fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''", "def get_video_id(self, obj):\n return obj.video.id", "def get_best_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\" ORDER BY view_count DESC\".format(username))\n return self.cur.fetchone()[0]", "def content_favorite_id(self):\n return self._content_favorite_id", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if path_list[0] == 'v':\n # https://vine.co/v/bjHh0zHdgZT\n return path_list[1]\n \n return ''", "def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done", "def video_id(self) -> str:\r\n return self._video_id", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n \n if p.path.startswith('/v/') or p.path.startswith('/broadcast/'):\n path = p.path.split('/')\n if len(path) == 3:\n return p.path.split('/')[-1].replace('.live', '')\n \n return ''", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n if p.netloc.endswith('vimeo.com') and 'hubnut/album/' in p.path:\n return ''\n \n if p.netloc.endswith('vimeo.com') and p.path.split('/')[-1:][0].isdigit():\n # Url of type http://vimeo.com/21347521\n # mobile type http://vimeo.com/m/21347521\n return p.path.split('/')[-1:][0]\n elif p.netloc.endswith('vimeo.com') and p.path == '/moogaloop.swf' and 'clip_id' in p.query:\n # Old embed code style url\n #params = dict([part.split('=') for part in p.query.split('&')])\n params = cgi.parse_qs(p.query)\n if 'clip_id' in params:\n return params['clip_id'][0]\n elif p.netloc == 'player.vimeo.com' and p.path.startswith('/video/'):\n # Url of type http://player.vimeo.com/video/21347521?title=0&amp;byline=0&amp;portrait=0\n path = p.path.split('/')\n return path[-1]\n \n return ''", "def video_id(self):\n # type: () -> string_types\n return self._video_id", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.youtube_fix_url(self.original_url))\n if p.path == '/watch':\n # Url of type http://www.youtube.com/watch?v=KRaeHxwZvms&feature=g-u-u&context=G2b00124FUAAAAAAAAAA\n 
#logger.debug('is a watch')\n params = cgi.parse_qs(p.query)\n if 'v' in params:\n return params['v'][0]\n elif p.fragment.startswith('/watch?v='):\n # sample. http://m.youtube.com/#/watch?v=ZXkW1-HdRC8\n params = cgi.parse_qs(p.fragment)\n if '/watch?v' in params:\n return params['/watch?v'][0]\n elif p.path.startswith('/v/') or p.path.startswith('/embed/'):\n path = p.path.split('/')\n return path[-1]\n elif p.netloc == 'youtu.be':\n return p.path[1:]\n elif re.match('(.{1}/){3}([\\w+-_^/]+)', p.fragment):\n parts = p.fragment.split('/')\n return parts[-1]\n return ''", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def get_video_id(self, obj):\n return obj.id", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n if self.res.get('slideshow_id'):\n return self.res.get('slideshow_id')\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/slideshow/embed_code')):\n # http://www.slideshare.net/slideshow/embed_code/1293644\n return path_list[2]\n elif len(path_list) == 2 and p.path.startswith('/swf'):\n # return -1 when url is like : http://static.slideshare.net/swf/ssplayer2.swf?doc=working-dogs-1201800078341935-2\n # FixMe :slideshare oembed api doesnt support this kind of url\n return -1\n return ''", "def vid(self):\n return self._id", "def get_movie_id(self):\n\n return self.id_movie", "def get_video_uploader(self, video_ID): #WORKS\n try:\n done = self.cur.execute(\"SELECT uploader FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n uploader = self.cur.fetchone()[0]\n return uploader\n except:\n return \"Error getting username\"", "def get_favorite(self):\n\n\t\treturn self.__favorite", "def get_video_id(url):\n\n if not url:\n return \"\"\n\n # If URL is embedded\n if \"embed\" in url:\n return url.split(\"/\")[-1]\n\n parse_result = urlparse(url)\n query = parse_qs(parse_result.query)\n return query[\"v\"][0]", "def get_video_id_from_link(link):\n query_string = urlparse.urlparse(link).query\n qs_params = urlparse.parse_qs(query_string)\n return qs_params['v'][0]", "def video_id_from_url(url):\n\n parsed_url = urlparse(url)\n url_params = dict(parse_qsl(parsed_url.query))\n return url_params.get(\"v\", parsed_url.path.split(\"/\")[-1])", "def get_movie_id(self) -> str:\n return self.movie.id", "def get_video_id(vid_folder_string):\n parts = vid_folder_string.split(\"_\")\n return parts[0] + \"_\" + parts[1]", "def get_video(self, video_id):\n return self._videos.get(video_id, None)", "def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done", "def get_user_favorite_activity(self, user_id: int) -> 
Optional[str]:\n activity_counts = Activity.query.filter(Activity.user_id == user_id).with_entities(Activity.type, func.count(\n Activity.type)).group_by(Activity.type).all()\n\n if activity_counts:\n sorted(activity_counts, key=lambda x: x[1])\n local_max = max(([x[1] for x in activity_counts]))\n\n return \", \".join([str(act[0]) for act in activity_counts if act[1] == local_max])\n return None", "def get_resource_id(self, obj):\n return obj.video.id", "def fetch_pyvideo_pk(self):\n url = 'http://pyvideo.org/search?models=videos.video&q={0}'.format(self.full_name.replace(\" \", \"+\"))\n soup = BeautifulSoup(requests.get(url).content).findAll(\"a\")\n if soup:\n for link in soup:\n if link.string == self.full_name:\n self.pyvideo_pk = link.get('href').split('/')[2]\n self.save()\n return self.pyvideo_pk\n self.pyvideo_pk = None\n self.save()\n return None" ]
[ "0.65228593", "0.63321614", "0.62774026", "0.6269768", "0.6204911", "0.6177124", "0.61610436", "0.6155687", "0.6141831", "0.6064822", "0.60217404", "0.59948564", "0.5968167", "0.59483504", "0.59404963", "0.5832371", "0.57427824", "0.55508655", "0.55254495", "0.5523709", "0.55054796", "0.54723626", "0.547067", "0.54328865", "0.5385014", "0.53247386", "0.5323151", "0.5306893", "0.5272987", "0.526629" ]
0.7914319
0
Returns list of videos favourited by the user from FAVOURITES table.
def get_favourites(self, username):
    self.cur.execute("SELECT video_ID FROM favourites WHERE username = \"{}\"".format(username))
    favourites = []
    for ID in self.cur.fetchall():
        favourites.append(ID[0])
    return favourites
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)", "def get_favorites(self, user_id=None):\n if not user_id:\n user_id = self.user_id\n\n favorite_decks = self.data_source.get_favorites(user_id)\n\n return favorite_decks", "def users_list_videos(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n email_token = auth.current_user()[0]\n user_videos = self.video_database.list_user_videos(email_query)\n user_videos = [(video_data._asdict(), reaction_data) for video_data, reaction_data in user_videos]\n if email_query != email_token and not self.friend_database.are_friends(email_query, email_token):\n user_videos = [data for data in user_videos if data[0][\"visible\"]]\n for i in range(len(user_videos)):\n user_videos[i][0][\"creation_time\"] = user_videos[i][0][\"creation_time\"].isoformat()\n user_videos[i] = (user_videos[i][0], {k.name: v for k, v in user_videos[i][1].items()})\n user_videos = [{\"video\": video_data, \"reactions\": reaction_data} for video_data, reaction_data in user_videos]\n return json.dumps(user_videos), 200", "def get_videos(self, user):\n raise NotImplementedError", "def get_users():\n table_response = USER_FAVORITES_TABLE.scan()\n return table_response['Items']", "def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)", "def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Favorites.objects.filter(user=user, is_used=True)\n\n return queryset", "def get_fav_video_ID(self, username):\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\" ORDER BY CAST(count as decimal) DESC\".format(username))\n return self.cur.fetchone()[0]", "def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... 
url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out", "def search_videos(self):\n query = request.args.get('query')\n if not query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"query\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"query\"), 400\n videos_data = self.video_database.search_videos(query)\n user_videos = [data[1]._asdict() for data in videos_data]\n user_emails = [data[0] for data in videos_data]\n user_reactions = [{k.name: v for k, v in data[2].items()} for data in videos_data]\n\n email_token = auth.current_user()[0]\n filtered_videos = []\n filtered_users = []\n filtered_reactions = []\n for v, u, r in zip(user_videos, user_emails, user_reactions):\n if v[\"visible\"] or (u[\"email\"] == email_token or self.friend_database.are_friends(u[\"email\"], email_token)):\n filtered_videos.append(v)\n filtered_users.append(u)\n filtered_reactions.append(r)\n for i in range(len(user_videos)):\n filtered_videos[i][\"creation_time\"] = filtered_videos[i][\"creation_time\"].isoformat()\n return json.dumps([{\"user\": u, \"video\": v, \"reactions\": r}\n for v, u, r in zip(filtered_videos, filtered_users, filtered_reactions)]), 200", "def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)", "def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]", "def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)", "def cmd_account_favorites(client, args):\n account_favorites = client.get_account_favorites(args.username)\n data = [item.__dict__ for item in account_favorites]\n generate_output({'account_favorites': data}, args.output_file)", "def get_user_videos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_favorites_questions(user_id, api_site_parameter, page = 1, body = False, comments = False, pagesize = 100, sort = 'added'):\n path = \"users/%d/favorites\" % user_id\n \n query_filter = ')(Ybxw_gbz'\n \n if body:\n query_filter = '9F)u(CSWCtKt'\n if comments:\n query_filter = ')(YbxuzQQ.'\n if body and comments:\n query_filter = ')(YbxuzQTp'\n \n results = __fetch_results(path, api_site_parameter, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results", "def get_watched(self, username): # WORKS\n self.cur.execute(\"SELECT video_ID FROM watched WHERE username = \\\"{}\\\"\".format(username))\n 
watched_video_IDs = []\n for ID in self.cur.fetchall():\n watched_video_IDs.append(ID[0])\n return watched_video_IDs", "def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)", "def user_videos(username):\n for page_index in count():\n entry_list = download_video_feed(\n create_feed_url(username, page_index)\n )\n\n for entry in entry_list:\n yield entry\n\n if len(entry_list) < MAX_RESULTS:\n break", "def list_favor(self):\n if \"all\" in self.switches:\n favors = Reputation.objects.exclude(favor=0).order_by(\"-date_gossip_set\")\n self.msg(\"Characters with favor: %s\" % \", \".join(str(ob) for ob in favors))\n return\n org = self.get_organization(check_perm=False)\n favors = org.reputations.filter(Q(favor__gt=0) | Q(favor__lt=0)).order_by(\n \"-favor\"\n )\n msg = \"{wThose Favored/Disfavored by %s{n\\n\" % org\n msg += \"\\n\\n\".join(\n \"{c%s{w (%s):{n %s\" % (ob.player, ob.favor, ob.npc_gossip) for ob in favors\n )\n self.msg(msg)", "def search_in_fav(request):\n query = request.GET.get('user_search')\n\n if query:\n # Returns the query in lower case and without accents\n query = unidecode(query).lower()\n result = True\n\n cur_user = request.user\n # Returns all favorites\n favorites = Favorite.objects.all()\n\n # Returns current user filtered favorites\n fav_filtered = favorites.filter(\n users_id=cur_user\n ).filter(products__name__icontains=query).order_by('id')\n\n if not fav_filtered.exists():\n result = False\n fav_filtered = favorites.filter(\n users_id=cur_user).order_by('id')\n\n # Init pagination with 6 products\n paginator = Paginator(fav_filtered, 6)\n page = request.GET.get('page')\n\n try:\n fav_filtered = paginator.page(page)\n except PageNotAnInteger:\n fav_filtered = paginator.page(1)\n except EmptyPage:\n fav_filtered = paginator.page(paginator.num_pages)\n\n if result:\n title = \"Résultats de la recherche : {}\".format(query)\n else:\n title = \"Aucun résultat pour la recherche : {}\".format(query)\n\n context = {\n 'is_result': result,\n 'fav_filtered': fav_filtered,\n 'title': title,\n 'paginate': True,\n }\n\n return render(request, 'favorites/search_in_fav.html', context)", "def fetch_videos():\n channels = get_channels_from_file()\n\n channels_request = service.channels().list(\n part='id, contentDetails',\n forUsername=channels[0]['channelUsername'] # first channel for now\n )\n\n video_list = []\n\n channels_response = channels_request.execute()\n for channel in channels_response['items']:\n uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page_token = ''\n while next_page_token is not None:\n playlistitems_response = service.playlistItems().list(\n playlistId=uploads_list_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token\n ).execute()\n\n for playlist_item in playlistitems_response['items']:\n title = playlist_item['snippet']['title']\n video_id = playlist_item['snippet']['resourceId']['videoId']\n print(f'{title}, {video_id}')\n video_list.append({'title': title, 'video_id': video_id})\n\n next_page_token = playlistitems_response.get('nextPageToken')\n\n return video_list", "def see_favorits(request):\n user_name = request.user\n print(user_name)\n # product = UserFavorite.objects.filter(user_name=user_name)\n list_favorits = UserFavorite.objects.all().filter(user_name=user_name)\n favorits_query = list_favorits\n favorits_list = []\n for 
favorite in favorits_query:\n favorits_list.append(Product.objects.get(pk=favorite.product.id))\n print(favorits_list)\n context = {\n # 'product' : product,\n 'user_name' : user_name,\n 'product' : favorits_list\n }\n\n\n return render(request,\"favorits.html\",context)", "def list(self):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n \"\"\"\n\n result = Model.execute(query)\n\n return result.fetchall()", "def get_queryset(self):\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}\n video = generics.get_object_or_404(Video, **filter_kwargs)\n video_content_type = ContentType.objects.get_for_model(video)\n \n return User.objects.filter(\n activities__verb='like', activities__object_id=video.id,\n activities__object_content_type=video_content_type)", "def fetch_video_list_ids(self, params):\n guid = self.netflix_session.user_data.get('guid')\n cached_list = self.video_list_cache.get(guid, None)\n if cached_list is not None:\n self.kodi_helper.log(msg='Serving cached list for user: ' + guid)\n return cached_list\n video_list_ids_raw = self.netflix_session.fetch_video_list_ids()\n\n if 'error' in video_list_ids_raw:\n return video_list_ids_raw\n video_list = self.netflix_session.parse_video_list_ids(\n response_data=video_list_ids_raw)\n return video_list", "def view_watched_movies(username: str) -> list[tuple]:\n with connection:\n return connection.execute(VIEW_WATCHED_MOVIES, (username,)).fetchall()", "def display_search_page():\n favorite_players = []\n favorites = Favorite.query.filter_by(id = current_user.id).all()\n\n if len(favorites) > 0:\n for favorite in favorites:\n player = get_favorites(favorite.favorited_item)\n player_info = player[0]\n favorite_players.append(player_info)\n else:\n favorite_players = []\n\n\n return render_template('searchpage.html',\n favorite_players = favorite_players)", "def show_fav_recipes():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n \n data = search_recipes(request) \n favorite_list = [l.id for l in g.user.recipes]\n favorites = [f['id'] for f in data['results'] if f['id'] in favorite_list]\n \n\n return render_template(\"favs/show.html\", favorites=favorites)", "def get_featured_videos(self, count = 30, page = 1):\n uri = 'videos/featured'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)" ]
[ "0.6658978", "0.65604013", "0.63298076", "0.6320051", "0.6193292", "0.61421627", "0.6124463", "0.6117669", "0.61098194", "0.60715187", "0.6000728", "0.59898365", "0.5935978", "0.59210503", "0.5835391", "0.5789033", "0.57097423", "0.5699663", "0.5696433", "0.56601214", "0.5636552", "0.56333053", "0.5628574", "0.5609894", "0.5605105", "0.55658674", "0.55589354", "0.5521791", "0.5503857", "0.54712397" ]
0.77204025
0
Deletes the video from FLAGS table.
def delete_flag(self, video_ID):
    try:
        self.cur.execute("DELETE FROM flags WHERE video_ID = \"{}\"".format(video_ID))
        self.db.commit()
    except:
        self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def DeleteVideo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if not self._video_library.get_video(video_id):\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n if not video.flag:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n print(f\"Successfully removed flag from video: {video.title}\")\n video.set_flag(None)", "def remove_video(video_path):\n print()\n message : str = \"removing folder\"\n pretty_print_value(video_path,message,Fore.YELLOW)\n print()\n\n\n command = \"rm \\\"\" + video_path + \"\\\"\"\n print(command)\n os.system(command)\n ...", "def delete_video(event_id, video_id):\n event = Event.query.get_or_404(event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n video = Video.query.get_or_404(video_id)\n db.session.delete(video)\n db.session.commit()\n flash(\"Your video has been deleted.\", \"success\")\n return redirect(url_for(\"events.media\", id=event_id))", "def test_video_removal(self):\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n self._assert_video_removal(self.url, edx_video_id, 1)", "def allow_video(self, video_id):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif not self._video_library.get_video(video_id).flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n self._video_library.get_video(video_id).flagged = False\n self._video_library.get_video(video_id).flag_reason = \"Not supplied\"", "def schedule_delete_video(video: Video):\n job = scheduler.scheduler.add_job(delete_video, args=[video])\n log.info('Scheduled delete video job video=(%s), job=%s', video, job.id)", "def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)", "def delete():", "def delete(self, video_id):\n\n query = \"\"\"\n DELETE\n FROM videos\n WHERE id = ?\n \"\"\"\n\n result = Model.execute(query, (video_id,))\n\n return True if result.rowcount == 1 else False", "def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()", "def delete(parameters, session):\n from Modules.Classes.ExperimentalScenario import ExperimentalScenario\n # Received --> [id_exeriment]\n # Retrieve all scenarios associated with target experiment\n exp_sc = session.query(ExperimentalScenario).filter(ExperimentalScenario.experiment_id == parameters[0]).all()\n for item in exp_sc:\n # Retrieve all ExperimentalScenarioPattern association for current experimental scenario\n exp_scenarios_pat = session.query(ExperimentalScenarioPattern).filter(and_(\n ExperimentalScenarioPattern.experimental_scenario_id == 
item.id,\n ExperimentalScenarioPattern.pattern_type == 2)).all()\n for item2 in exp_scenarios_pat:\n session.delete(item2)\n session.commit()\n session.close()\n msg_rspt = Message(action=2, comment='Register deleted successfully')\n return msg_rspt", "def deleteMatches():\n DB = dbc()\n DB.cursor().execute('DELETE FROM matches')\n DB.commit()\n DB.close()", "def users_video_delete(self):\n user_email = request.args.get('email')\n video_title = request.args.get('video_title')\n email_token = auth.current_user()[0]\n if not video_title or not user_email:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"video_title or user_email\"))\n return messages.ERROR_JSON % \"video_title or user_email\", 400\n if user_email != email_token and not self.auth_server.profile_query(email_token)[\"admin\"]:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n try:\n self.media_server.delete_video(user_email, video_title)\n except UnexistentVideoError:\n self.logger.debug((messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token)))\n return messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token), 404\n self.video_database.delete_video(user_email, video_title)\n return messages.SUCCESS_JSON, 200", "def DelVid(self):\n delvid=input(\"Enter title to remove \")\n \n #Avoid termination on key error if value not in dictionary\n try:\n self.videos.pop(delvid)\n except KeyError:\n print(\"Item not in the inventory\")", "def test_removeFlags(self):\n self._flagsTest('removeFlags', b'-FLAGS')", "def deleteMatches():\n db = connect()\n db_cursor = db.cursor()\n query = \"DELETE FROM matches\"\n db_cursor.execute(query)\n db.commit()\n db.close()", "def deleteMatches():\n cursor.execute(\"\"\"delete from matches\"\"\")", "def delete(self, request, *args, **kwargs):\n clip = self.get_object()\n clips_count = clip.video.clips.all().count()\n if clips_count <= 1:\n return Response(\n {'detail': \"You can't delete this video's only clip.\"}, \n status=status.HTTP_403_FORBIDDEN)\n \n else:\n self.perform_destroy(clip)\n return Response(status=status.HTTP_204_NO_CONTENT)", "def deleteMatches():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM matches\")\n DB.commit()\n DB.close()", "def deleteMatches():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM matches\")\n db.commit()\n db.close()", "def deleteMatches():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from matches;\")\n db_conn.commit()\n db_conn.close()", "def deleteMatches():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM match\")\n dbConn.commit()\n dbConn.close()", "def delete(self, video_id, subvideo_name):\n\n video = Video.query.get(video_id)\n if not video:\n return {'message': 'video entry not exist'}, http.HTTPStatus.NOT_FOUND\n videofile = VideoFile.query.filter_by(name=subvideo_name).first()\n if videofile:\n videofile.delete()\n else:\n return {'message': 'no related video file'}, http.HTTPStatus.NOT_FOUND\n\n return {'message': 'delete success'}, http.HTTPStatus.NO_CONTENT", "def deleteMatches():\n c.execute(\"DELETE FROM matchup\");\n print \"All matches have been successfully deleted\"\n return", "def deleteMatches():\n\n query = (\"DELETE FROM matches;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'delete'})", "def delete_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"DELETE FROM movie_table;\")\n self._close_connection(conn)", 
"def deleteMatches():\n conn, c = connect()\n c.execute(\"DELETE FROM matches;\")\n conn.commit()\n conn.close()", "def deleteMatches():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM matches\")\n dbconnection.commit()\n dbconnection.close()" ]
[ "0.63553846", "0.62756777", "0.59260094", "0.5923239", "0.56933457", "0.5685907", "0.5635672", "0.5605303", "0.55901444", "0.5575058", "0.5559596", "0.555788", "0.55328196", "0.5488664", "0.54750097", "0.5451727", "0.5449797", "0.5439663", "0.543637", "0.5435362", "0.5417179", "0.5408386", "0.53952485", "0.53792393", "0.5360081", "0.5348156", "0.5340292", "0.5327103", "0.5320322", "0.53009135" ]
0.7183327
0
Returns a dictionary of video IDs, title and a list of video titles. For Fuzzy Search.
def video_dict(self):
    self.cur.execute("SELECT video_ID, video_title FROM videos")
    videos = {}
    video_titles = []
    for video in self.cur.fetchall():
        video_titles.append(video[1])
        videos.update({video[0] : video[1]})
    return videos, video_titles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_for_title(session, search_term):\n try:\n s_result = session.search_movie(search_term)\n shows = {}\n\n # made the keys of the namedtuple a digit for ease of selecting the correct one later\n for count, result in enumerate(s_result):\n show_id = count\n movie_id = result.movieID\n title = result['long imdb canonical title']\n url = f'http://www.imdb.com/title/tt{movie_id}/parentalguide'\n shows[count] = Show(show_id, movie_id, title, url)\n return shows\n except imdb._exceptions.IMDbDataAccessError:\n display_error()", "def search_videos(self, search_term):\n recommendations = []\n for video in self.videos_dict:\n if not video.flagged and search_term in self.videos_dict[video]:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n\n if n == 0:\n print(f\"No search results for {search_term}\")\n else:\n print(f\"Here are the results for {search_term}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass", "def filter_videos(videos: dict, pattern: str):\n\n result = {}\n\n for video, contents in videos.items():\n if pattern in contents['title']:\n result[video] = contents\n\n # This whole function is unnecessary because python:\n return {k: v for k, v in videos.items() if pattern in v['title']}", "def get_title_similarity(self):\n title_words = []\n ignore_words = ['the', 'and', 'or', 'to', 'at', 'on', 'of']\n for w in self.target_movie.title.split(' '):\n w = w.strip('- ,:(){}[]')\n if w.lower() not in ignore_words:\n title_words.append(w)\n\n # if last word is a number then it's an year and should be ignored.\n if len(title_words) > 1 and title_words[-1].isdigit():\n title_words = title_words[:-1]\n\n print(title_words)\n res = self.db.query(Movie).filter(\n Movie.movie_id != self.target_movie.movie_id).filter(or_(\n Movie.title.ilike(r'%' + tw + r'%') for tw in title_words\n )).all()\n\n target_clean_title = string_cleanup(self.target_movie.title)\n\n print(\"%i records from partial title match\" % len(res))\n TSW = self.TITLE_SIMILARITY_WEIGHT\n for rec in res:\n mc_title = string_cleanup(rec.title)\n smid = rec.movie_id\n if smid not in self.recommendation_pool:\n self.recommendation_pool[smid] = {\n 'movie_obj': rec,\n 'title_similarity': jaccard_index(\n target_clean_title, mc_title, ' ') * TSW\n }\n\n else:\n self.recommendation_pool[smid]['title_similarity'] = \\\n jaccard_index(\n target_clean_title, mc_title, ' ') * TSW", "def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n\n temp_list = []\n for vid in videos:\n\n # Convoluted way to display tags in required format\n tags = \"[\"\n for tag in vid.tags:\n tags = tags + tag + \" \"\n tags = tags + \"]\"\n print(f\"{vid.title}\")\n if tags != \"[]\":\n tags = tags[0:len(tags) - 2] + \"]\"\n if str(search_term.lower()) in str(vid.title):\n temp_list += [f\"{vid.title} ({vid.video_id}) {tags}\"]\n\n # Sort the list and display\n sorted_list = sorted(temp_list)\n print(f\"Here are the results for {search_term}:\")\n for x in sorted_list:\n print(\" 
\" + f\"{sorted_list.index(x) + 1}) \" + x)", "def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n videos.sort(key=lambda x: x.title, reverse=False)\n matched_id = []\n for v in videos:\n if search_term.lower() in v.title.lower():\n matched_id.append(v.video_id)\n \n if matched_id:\n i = 1\n print(f\"Here are the results for {search_term}:\")\n for id in matched_id:\n video = self._video_library.get_video(id)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\" {i}) {video.title} ({video.video_id}) [{tags}]\")\n\n i = i+1\n \n print(\"Would you like to play any of the above? If yes, \"\n \"specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n option = input()\n # option = input(\"Would you like to play any of the above? If yes, \"\n # \"specify the number of the video. \\n If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n value = int(option)\n if value > 1 and value < len(matched_id)+1 :\n self.play_video(matched_id[value-1])\n except ValueError:\n pass\n\n else:\n print(f\"No search results for {search_term}\")\n \n \n # print(\"search_videos needs implementation\")", "def search(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n results = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n results.append(Colors.YELLOW + video[\"file\"] + Colors.END + \" - \" + video[\"source\"] + \" - \" +\n video[\"title\"])\n if results:\n for result in results:\n safeprint(result)\n else:\n safeprint(\"No video matching the given query was found.\")", "def search_movies(title: str) -> list[tuple]:\n with connection:\n search = '%' + title + '%'\n return list(connection.execute(SEARCH_MOVIE, (search,)))", "def search_videos(self, search_term):\n results = []\n for video in self._video_library.get_all_videos():\n if search_term.lower() in video.title.lower() and video.flag is None:\n results.append(video)\n self.output_search_results(results, search_term)", "def get_individual_video_link(self):\r\n self.filter_url_portion = '' # ignore the filter option.\r\n\r\n target_search_results_obj = []\r\n # in case we want to search more pages just change this and make a loop\r\n self.page_url_portion = '&page=1'\r\n\r\n # start with forming the search\r\n self.form_search_url()\r\n\r\n # Get the dom object from the search page\r\n search_result_dom = self.get_dom_object(self.target_yt_search_url_str)\r\n\r\n # Get the search results\r\n target_search_results_obj.extend(self.tag_element_results(search_result_dom,\r\n 'div[class=\"yt-lockup-content\"] h3[class=\"yt-lockup-title\"] a'))\r\n\r\n #print 'results len: ', len(target_search_results_obj)\r\n\r\n each_video_link_title_dict = {}\r\n for n in target_search_results_obj:\r\n video_link = n.attributes['href']\r\n ## modified video link\r\n # video_link = re.sub('watch\\?v=',r'v/',video_link)\r\n\r\n video_title = n.attributes['title'] #\"Mix\" in video_title[:4] or \"mix\" i(n video_title[:4] or\r\n ile = video_title.lower()\r\n if \"cover\" in ile or \"live\" in ile or \"acustic\" in ile or \"acoustic\" in ile or \"lesson\" in ile:\r\n print \"found blacklisted term, bypassing song: \" + ile\r\n pass #dont want these\r\n else:\r\n each_video_link_title_dict[video_title] = 'https://www.youtube.com' + video_link\r\n\r\n 
self.video_link_title_dict.update(each_video_link_title_dict)", "def extract_video_info_in_search_result_page(page_request):\n try:\n src_html = requests.get(page_request).text\n soup = BeautifulSoup(src_html, \"lxml\")\n video_li_list = soup.select(\"ul.video-list.clearfix > li\")\n video_dict = {}\n for video_li in video_li_list:\n video_headline = video_li.find(\"div\", attrs={\"class\": \"headline clearfix\"})\n video_url = \"https:\" + video_headline.a[\"href\"]\n bv = re.findall(r\"^https://www.bilibili.com/video/BV([0-9a-zA-Z]*).*$\", video_url)[0] \n video_info = video_li.find(\"div\", attrs={\"class\": \"tags\"})\n raw_view_count = video_info.find(\"span\", attrs={\"title\": \"观看\"}).text.strip()\n view_count = normalize_view_count(raw_view_count)\n raw_upload_time = video_info.find(\"span\", attrs={\"title\": \"上传时间\"}).text.strip()\n upload_time = normalize_datetime(raw_upload_time)\n up_span = video_info.find(\"span\", attrs={\"title\": \"up主\"})\n up_info_wrapper = up_span.find(\"a\")\n up_name = up_info_wrapper.text.strip()\n up_url = BASE_ROUTE + up_info_wrapper[\"href\"][2:]\n video_dict[bv] = {\n \"view_count\": view_count, \"upload_date\": upload_time,\n \"author\": {up_name : up_url}, \"video_url\": video_url\n }\n return video_dict\n except:\n return {}", "def search_videos_tag(self, video_tag):\n recommendations = []\n\n if not video_tag.startswith(\"#\"):\n print(f\"No search results for {video_tag}\")\n else:\n for video in self.videos_dict:\n #s = self.videos_dict[video]\n #result = re.search(r\"\\[([A-Za-z0-9_]+)\\]\", s)\n #print(result.group(1))\n #tag_string = str(result.group(1))\n #if video_tag in tag_string:\n # recommendations.append(self.videos_dict[video])\n if video_tag in video._tags and not video.flagged:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n if n == 0:\n print(f\"No search results for {video_tag}\")\n else:\n print(f\"Here are the results for {video_tag}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass", "def get_video():\n video = {}\n for k, v in DB.VIDEOS.iteritems():\n video[k] = v.__dict__\n return video", "def get_keywords_for_movie(url):\n pass", "def route_video_titles():\n\n result = _retrieve_titles(request.args)\n return jsonify({'titles' : result})", "def get_matched_game_ranking_info_for_movie(movie_id: str) -> Dict[str, int]:\n query_res = Movie.query.filter_by(link_id=movie_id).all()\n if query_res:\n return eval(query_res[0].games_title_match)\n return dict()", "def extract_movie_titles(dictionary):\n results = dictionary['Similar']['Results']\n lstmt = [d['Name'] for d in results]\n return lstmt", "def find_movie(imdb_file, title_regex):\n\n process = subprocess.run([\n \"grep\",\n \"-i\", # Case insensitive\n f'movie\\t[^\\t]*{title_regex}', # Only match movies\n imdb_file\n ], stdout=subprocess.PIPE)\n hits = [dict(zip(COLUMNS, hit.decode(\"utf-8\").split(\"\\t\")))\n for hit in process.stdout.split(b\"\\n\")[:-1]]\n # Try to filter out irrelevant hits, e.g. 
that don't yet exist or are porn\n legitimate_hits = [hit for hit in hits\n if hit[\"startYear\"] != \"\\\\N\" and\n hit[\"isAdult\"] == \"0\"]\n return legitimate_hits", "def parse_search_results (self, response_data):\n search_results = {}\n raw_search_results = response_data['value']['videos']\n for entry_id in raw_search_results:\n if self._is_size_key(key=entry_id) == False:\n # fetch information about each show & build up a proper search results dictionary\n show = self.parse_show_list_entry(id=entry_id, entry=raw_search_results[entry_id])\n show[entry_id].update(self.parse_show_information(id=entry_id, response_data=self.fetch_show_information(id=entry_id, type=show[entry_id]['type'])))\n search_results.update(show)\n return search_results", "def search_videos(self, search_term):\n print(\"search_videos needs implementation\")", "def _match_movie_titles(self, cinema_schedule_data):\n matcher = MovieIDMatcher()\n invalid_titles = []\n for title, content in cinema_schedule_data.items():\n\n logging.warning(\"Matching movie: \" + title)\n\n imdb_id = matcher.match_imdb_id_from_title_recent(title)\n if imdb_id is None:\n logging.error(\"IMDb ID matched is invalid!\")\n invalid_titles.append(title)\n continue\n\n cinema_schedule_data[title] = {\n 'imdb_id': imdb_id,\n 'content': content\n }\n self.update_single_movie_data(imdb_id)\n logging.warning(\"matching successful!\")\n\n for invalid_title in invalid_titles:\n cinema_schedule_data.pop(invalid_title)", "def search_youtube_music_video(self, artist, name, duration_ms):\n\t\t# return val : false until proven wrong\n\t\tsuccess = False # could not find matching youtube video\n\t\terror_des = \"none\"\n\n\t\tself.authorize()\n\n\n\n\t\t# build search params aka q\n\t\t#finders = ['vevo','lyrics']\t\t\t# words that we want to see in our search\n\t\ttry:\n\t\t\tsearch_response_j = self.youtube.search().list(\n\t\t\t\tvideoCategoryId = 10,\n\t\t\t\ttype = 'video',\n\t\t\t\torder = 'relevance',\n\t\t\t\t#q = '{} {} lyrics'.format(artist, name),\n\t\t\t\tq = artist + ' ' + name + ' lyrics',\n\t\t\t\tpart = \"snippet\",\n\t\t\t\tmaxResults = 5\n\t\t\t).execute()\n\n\n\t\t\tyoutube_videos_j = search_response_j['items']\n\n\t\t\tif len(youtube_videos_j) == 0:\t# NO results\n\t\t\t\terror_des = 'Sorry! 
Could not find track to download...'\n\t\t\t\tsuccess = False\n\t\t\telse:\t\t\t\t\n\n\t\t\t\t######################################################################################################\n\t\t\t\t# set default best video to the first relevant video\n\t\t\t\t# will be overwritten in next block of code if a better option is found\n\t\t\t\t# if self.check_video_pruning(artist, name, youtube_videos_j[0]['snippet']['title']) is False:\n\t\t\t\t######################################################################################################\n\t\t\t\t# youtube_video_best = {\n\t\t\t\t# \t'video_id': youtube_videos_j[0]['id']['videoId'],\n\t\t\t\t# \t'title': youtube_videos_j[0]['snippet']['title']\n\t\t\t\t# }\n\n\n\t\t\t\t# Let's see if we can find a better video then the default: 0\n\t\t\t\tfor index,video in enumerate(youtube_videos_j):\n\t\t\t\t\tsnippet = video['snippet']\n\t\t\t\t\tchannel_title = snippet['channelTitle']\n\t\t\t\t\ttitle = snippet['title']\n\n\t\t\t\t\t# weed out covers,vevo, live videos\n\t\t\t\t\t# not yet implemented : weed out videos that are not of the same duration as the spotify track\n\t\t\t\t\tif self.check_video_pruning(artist, name, title):\t# ensure that the artist or track name does not actually include the weeders Ex. live hous\n\t\t\t\t\t\tprint '==========\\nTESTING!!!!\\n=========='\n\t\t\t\t\t\tprint 'weeding out video: '\n\t\t\t\t\t\tprint 'name: ', name\n\t\t\t\t\t\tprint 'artist: ', artist\n\t\t\t\t\t\tprint 'title: ', title\n\n\t\t\t\t\t\tcontinue\t# skip video because it contains a weed word\n\n\t\t\t\t\t# select first video that is not pruned\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tyoutube_video_best = {\n\t\t\t\t\t\t \t'video_id': youtube_videos_j[index]['id']['videoId'],\n\t\t\t\t\t\t \t'title': youtube_videos_j[index]['snippet']['title']\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tbreak\n\n\n\t\t\t\t\t#####################################################################################\n\t\t\t\t\t# check if vevo channel\n\t\t\t\t\t# check if channel title owned by artist\n\t\t\t\t\t#####################################################################################\n\t\t\t\t\t# if 'vevo' in channel_title.lower() or artist in channel_title.lower():\n\t\t\t\t\t# \tprint '==========\\nVEVO Found!!!!\\n=========='\n\n\t\t\t\t\t# \tyoutube_video_best['video_id'] = video['id']['videoId']\n\t\t\t\t\t# \tyoutube_video_best['title'] = title\n\n\t\t\t\t\t# \tbreak\t# stop looking\n\n\n\t\t\t\tsuccess = True\n\n\t\texcept HttpError, e:\n\t\t\terror_des = \"An HTTP error %d occurred:\\n%s\" % (e.resp.status, e.content)\n\t\t\tsuccess = False\n\t\t\tprint error_des\n\n\n\t\treturn {\n\t\t\t\t\t'success' : success,\n\t\t\t\t\t'error_des' : error_des,\n\t\t\t\t\t'youtube_video' : youtube_video_best\n\t\t\t\t}", "def video(title, hash = None, season = None, episode = None):\n if not hash:\n video = list(mythVideo.searchVideos(title = title, season = season, episode = episode))[0]\n else:\n video = [video for video in mythVideo.searchVideos(title = title) if video.hash == hash][0]\n\n return render_template('recording.html', item = video)", "def get_data(inp):\n movies = __get_movies(inp)\n series = __get_series(inp)\n\n exist_title(movies, series)\n is_response_larger_than_max_results(movies, series)\n\n search_dict = {}\n\n if movies['Response'] != 'False':\n for movie in movies['Search']:\n search_dict.update({'movie': __get_title_info(movie['imdbID'])})\n\n if series['Response'] != 'False':\n for show in series['Search']:\n search_dict.update({'series': 
__get_title_info(show['imdbID'])})\n\n return search_dict", "def get_metadata(video_file):\r\n metadata = {\r\n \"title\": os.path.basename(video_file).split(\".\")[0],\r\n \"description\": \"A video recorded with Freeseer\",\r\n \"tags\": ['Freeseer', 'FOSSLC', 'Open Source'],\r\n \"categoryId\": 27 # temporary, see gh#415\r\n }\r\n if video_file.lower().endswith('.ogg'):\r\n tags = oggvorbis.Open(video_file)\r\n if \"title\" in tags:\r\n metadata['title'] = tags['title'][0]\r\n if \"album\" in tags and \"artist\" in tags and \"date\" in tags:\r\n metadata['description'] = \"At {} by {} recorded on {}\".format(tags['album'][0], tags['artist'][0], tags['date'][0])\r\n return metadata", "def scrape(search_title, search_artist, get_top_result=False):\n search_artist = search_artist.replace(\" \", \"+\").replace(\"&\", \"and\")\n search_title = search_title.replace(\" \", \"+\").replace(\"&\", \"and\")\n\n search_query = search_title + \"+\" + search_artist + \"+\\\"auto-generated+by+youtube\\\"\"\n # youtube_url = \"https://www.youtube.com/results?sp=EgIQAQ%253D%253D&search_query=\" + search_query\n youtube_url = \"https://www.youtube.com/results?search_query=\" + search_query\n header = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}\n\n try:\n response = requests.get(youtube_url, headers=header)\n except requests.exceptions.ConnectionError:\n return None, None\n\n content = response.content\n soup = BeautifulSoup(content, \"html.parser\")\n title = []\n uploader = []\n ref = []\n all_title_tags = soup.find_all(\"h3\", attrs={\"class\": \"yt-lockup-title\"})\n all_uploader_tags = soup.find_all(\"div\", attrs={\"class\": \"yt-lockup-byline\"})\n\n for h3 in all_title_tags:\n try:\n title.append(h3.find('a').text)\n ref.append(h3.find('a')['href'])\n except TypeError:\n return None, None\n\n for div in all_uploader_tags:\n try:\n uploader.append(div.text)\n except TypeError:\n pass\n\n if get_top_result:\n # Return best matching link and its duration\n best_title = rank_results(title, search_title, search_artist, uploader)#, search_artist)\n # print(\"Best result is: '\"+str(title[best_title])+\"' at index \"+str(best_title))\n final_url = 'https://www.youtube.com'+ref[best_title]\n\n # video_length = get_video_time(final_url)\n # print(\"Video length is \"+str(video_length)+' ms long')\n return str(title[best_title]), ref[best_title]\n\n # if get_top_result:\n # return [title[0], ref[0]]\n return title, ref", "def recommended_shows(title, shows_df, tfidf_vect):\n\n try:\n\n title_iloc = shows_df.index[shows_df[\"title\"] == title][0]\n\n except:\n\n return \"Movie/TV Show title not found. 
Please make sure it is one of the titles in this dataset: https://www.kaggle.com/shivamb/netflix-shows\"\n\n show_cos_sim = cosine_similarity(tfidf_vect[title_iloc], tfidf_vect).flatten()\n\n sim_titles_vects = sorted(\n list(enumerate(show_cos_sim)), key=lambda x: x[1], reverse=True\n )[1:6]\n\n response = {\n \"result\": [\n {\"title\": shows_df.iloc[t_vect[0]][0], \"confidence\": round(t_vect[1], 1)}\n for t_vect in sim_titles_vects\n ]\n }\n\n return response", "def search_videos(self, search_term):\n all_videos = self._video_library.get_all_videos()\n all_videos.sort(key=lambda x: x.title)\n matching_videos = []\n for video in all_videos:\n if search_term.lower() in video.title.lower():\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {search_term}\")\n return\n\n print(\"Here are the results for cat:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return", "def similar_search(tmdb_obj, media_id):\n res = tmdb_obj.get_recommendations(media_id)\n if not res:\n return []\n return res", "async def movievotes(self, ctx: discord.ext.commands.Context, *args):\n print(\"-------------------------\")\n message_channel: discord.abc.Messageable = ctx.message.channel\n if len(args) == 1:\n print(\"Starting search\")\n api_key = self.botVariables.get_mashape_metacritic_key()\n search_term = (re.sub(r'([^\\s\\w]|_)+', '', args[0])).lower()\n request_search_link = \"https://api-marcalencc-metacritic-v1.p.mashape.com/search/\" + str(\n urllib.parse.quote(search_term)) + \"/movie?limit=20&offset=1\"\n # search all the films with the term given\n async with aiohttp.ClientSession() as session:\n # the website use get\n async with session.get(request_search_link, headers={'X-Mashape-Key': str(api_key),\n 'Accept': 'application/json'}) as resp:\n request_result = await resp.json()\n if len(request_result[0]['SearchItems']) > 0: # there is at least one film\n films_found = []\n max_prob = 0.0\n # decide the best film using string similarity\n for entry in request_result[0]['SearchItems']:\n entry_string = (re.sub(r'([^\\s\\w]|_)+', '', entry['Title'])).lower()\n film_similarity = BotMethods.similar(search_term, entry_string, None)\n if film_similarity > 0.7: # consider it only if it's > 0.7 (range is 0.0-1.0)\n films_found.append(\n self.FilmInfo(entry['Id'], entry['Title'], film_similarity)) # store that film in the array\n if film_similarity > max_prob: # search for max prob\n max_prob = film_similarity\n if film_similarity >= 1.0: # i have found the perfect string\n print(\"Perfect name found, search cycle stopped\")\n break # stop the for\n if len(films_found) > 0: # i have found at least one possible film\n film_web_id = \"\"\n for film in films_found: # search the film with the max name similarity in the array\n if film.similar == max_prob:\n print(\"Film Chosen: \" + film.film_name + \" - Film web id:\" + str(\n film.film_id) + \" - Sim.:\" + str(\n max_prob))\n film_web_id = film.film_id # get the film web id\n 
break\n # make request to get all necessary film informations\n request_search_link = \"https://api-marcalencc-metacritic-v1.p.mashape.com\" + str(film_web_id)\n async with aiohttp.ClientSession() as session:\n async with session.get(request_search_link, headers={'X-Mashape-Key': str(api_key),\n 'Accept': 'application/json'}) as resp: # the website use get\n request_result = await resp.json()\n if 'message' in request_result:\n await message_channel.send(\"*An error occurred downloading the data...*\")\n return\n # prepare the embed message\n embed = discord.Embed(title=str(request_result[0]['Title']),\n colour=discord.Colour(0xffcc00),\n url=\"http://www.metacritic.com\" + str(film_web_id),\n description=\"Metacritic votes about \" + str(\n request_result[0]['Title']) + \" by \" + str(\n request_result[0]['Director']) + \", released on \" + str(\n request_result[0]['ReleaseDate']),\n timestamp=datetime.utcfromtimestamp(time.time())\n )\n if 'ImageUrl' in request_result[0]:\n embed.set_thumbnail(url=str(request_result[0]['ImageUrl']))\n else:\n print(\"No ImageUrl found, no thumbnail set\")\n embed.set_author(name=ctx.message.author.name, url=\"\", icon_url=ctx.message.author.avatar_url)\n embed.set_footer(text=self.botVariables.get_description(),\n icon_url=self.botVariables.get_bot_icon())\n if len(request_result[0]['Rating']) > 0:\n # --- read users votes ---\n user_votes = \"Rating: \" + str(request_result[0]['Rating']['UserRating']) + \" (\" + str(\n request_result[0]['Rating']['UserReviewCount']) + \" votes)\"\n # --- read critic votes ---\n critic_votes = \"Rating: \" + str(request_result[0]['Rating']['CriticRating']) + \" (\" + str(\n request_result[0]['Rating']['CriticReviewCount']) + \" votes)\"\n # --- create fields ---\n embed.add_field(name=\"Critic Rating\", value=critic_votes)\n embed.add_field(name=\"User Rating\", value=user_votes)\n else:\n embed.add_field(name=\"No votes found...\",\n value=\"Looks like there are no votes for this film...\")\n # --- sending the message ---\n print(\"Sending film embed message\")\n await message_channel.send(embed=embed)\n else:\n print(\"No films found\")\n await message_channel.send(\"*No films found, check the name...*\")\n else:\n print(\"No films found\")\n await message_channel.send(\"*No films found, check the name...*\")\n else:\n await message_channel.send(\n \"**Usage:** \" + self.command_prefix + \"movievotes <film Title>, for more see \" + self.command_prefix + \"help movievotes\")\n print(\"-------------------------\")" ]
[ "0.63464946", "0.63003737", "0.62933", "0.6200125", "0.6154921", "0.5969483", "0.59387124", "0.58837193", "0.58710206", "0.58641505", "0.58581513", "0.58392906", "0.58041734", "0.57863104", "0.5770734", "0.57627165", "0.5736344", "0.5699794", "0.56986773", "0.5690014", "0.5685134", "0.56392336", "0.56323224", "0.56199527", "0.5583261", "0.55420804", "0.55366355", "0.55319023", "0.5492023", "0.5482167" ]
0.7131889
0
Drop any existing tables and create the SBML classes schema. URI is a string interpreted as an rfc1738 compatible database URI.
def init(db_uri, drop):
    engine = create_engine(db_uri)
    if drop.lower().startswith("y"):
        Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def teardown_db():\n engine = config['tg.app_globals'].sa_engine\n connection = engine.connect()\n\n # INFO - D.A. - 2014-12-04\n # Recipe taken from bitbucket:\n # https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/DropEverything\n\n inspector = reflection.Inspector.from_engine(engine)\n metadata = MetaData()\n\n tbs = []\n all_fks = []\n views = []\n\n # INFO - D.A. - 2014-12-04\n # Sequences are hard defined here because SQLA does not allow to reflect them from existing schema\n seqs = [\n Sequence('seq__groups__group_id'),\n Sequence('seq__contents__content_id'),\n Sequence('seq__content_revisions__revision_id'),\n Sequence('seq__permissions__permission_id'),\n Sequence('seq__users__user_id'),\n Sequence('seq__workspaces__workspace_id')\n ]\n\n for view_name in inspector.get_view_names():\n v = Table(view_name,metadata)\n views.append(v)\n\n for table_name in inspector.get_table_names():\n\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk['name']:\n continue\n fks.append(\n ForeignKeyConstraint((),(),name=fk['name'])\n )\n t = Table(table_name,metadata,*fks)\n tbs.append(t)\n all_fks.extend(fks)\n\n if not config['sqlalchemy.url'].startswith('sqlite'):\n for fkc in all_fks:\n connection.execute(DropConstraint(fkc))\n\n for view in views:\n drop_statement = 'DROP VIEW {}'.format(view.name)\n # engine.execute(drop_statement)\n connection.execute(drop_statement)\n\n for table in tbs:\n connection.execute(DropTable(table))\n\n\n for sequence in seqs:\n try:\n connection.execute(DropSequence(sequence))\n except Exception as e:\n logger.debug(teardown_db, 'Exception while trying to remove sequence {}'.format(sequence.name))\n\n transaction.commit()\n connection.close()\n engine.dispose()", "def reset_tables(database_url, _metadata):\n\n # use reflected MetaData to avoid errors due to ORM classes\n # being inconsistent with existing tables\n with isolated_nullpool_engine(database_url) as engine:\n seperate_metadata = MetaData()\n seperate_metadata.reflect(bind=engine)\n seperate_metadata.drop_all(bind=engine)\n ENUM(name='dpds_operation_types').drop(engine)\n\n # use ORM clases to define tables to create\n init_tables(database_url, _metadata)", "def clean_up():\n drop_all_tables()\n create_all()", "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def drop_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"DROP TABLE tweets\")\n conn.execute(\"DROP TABLE tweet_peaks\")", "def tearDown(self):\n\n InitializeDb('TEST_DATABASE_URI').drop_tables()", "def clean_db():\n yield\n 
logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def reset_db():\n\n metadata = sa.MetaData()\n metadata.reflect(engine)\n for tbl in reversed(metadata.sorted_tables):\n tbl.drop(engine)\n create_tables()", "def teardown_schema(self):\n models.Base.metadata.drop_all(self.session.bind)", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def db_dropall():\n # db_dropall doesn't work if the models aren't imported\n import_string('models', silent=True)\n for blueprint_name, blueprint in app.blueprints.items():\n import_string('%s.models' % blueprint.import_name, silent=True)\n db.drop_all()", "def drop_db() -> None:\n \n if os.environ.get('DATABASE_URL').startswith('sqlite:///'):\n sqlite_s, sqlite_f = os.environ.get('DATABASE_URL').split(\"sqlite:///\") \n os.unlink(sqlite_f)\n else: \n Base.metadata.drop_all(bind=engine)", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def drop_db():\n database.db.reflect()\n database.db.drop_all()\n print('Dropped the database')", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def drop_all(self):\n self._engine.execute(\n DDL(f\"drop schema if exists {_schema.CUBEDASH_SCHEMA} cascade\")\n )", "def recreate_db():\n db.session.execute('SET FOREIGN_KEY_CHECKS=0;')\n db.session.execute('DROP TABLE IF EXISTS logs;')\n db.session.execute('DROP TABLE IF EXISTS employees;')\n db.session.execute('DROP TABLE IF EXISTS sales;')\n db.session.execute('DROP TABLE IF EXISTS plants;')\n db.session.execute('DROP TABLE IF EXISTS products;')\n db.session.execute('DROP TABLE IF EXISTS suppliers;')\n db.session.execute('DROP TABLE IF EXISTS orders;')\n db.session.execute('DROP TABLE IF EXISTS contacts;')\n db.session.execute('DROP TABLE IF EXISTS varieties;')\n db.session.execute('DROP TABLE IF EXISTS species;')\n db.session.execute('DROP TABLE IF EXISTS genera;')\n db.session.execute('DROP TABLE IF EXISTS families;')\n db.drop_all()\n db.create_all()\n db.session.commit()\n fakePlant = Plant(living = True)\n db.session.add(fakePlant)\n db.session.commit()\n db.session.delete(fakePlant)\n db.session.execute('SET FOREIGN_KEY_CHECKS=1;')\n db.session.commit()", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)", "def drop(self):\n c = self.cursor()\n for table in ['experiment','fact']:\n c.execute(\"drop table if exists {}\".format(table))\n self.commit()", "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def tearDown(self):\n drop_all_tables()\n create_all()", "def clear_db():\n from flask_monitoringdashboard.database import get_tables, engine\n\n for table in get_tables():\n table.__table__.drop(engine)\n table.__table__.create(engine)", "def db_drop_and_create_all():\n db.drop_all()\n db.create_all()", "def initdb():\n 
db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def clearDatabase():\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)", "def reset_db():\n\n webapp.dbsql.drop_all()\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()" ]
[ "0.634737", "0.6157549", "0.61463475", "0.6144049", "0.6128537", "0.61086684", "0.60474813", "0.60012245", "0.59740967", "0.5970951", "0.59620595", "0.5883204", "0.58639604", "0.5856733", "0.5855191", "0.5840111", "0.5809529", "0.5783441", "0.57696843", "0.5762156", "0.5747124", "0.5746917", "0.5734684", "0.5727968", "0.57227236", "0.5703507", "0.5699341", "0.5650309", "0.5649749", "0.56381214" ]
0.65057254
0
switch from hostview to services view and viceversa
def _controller_switch(self, widget, test, newpage):
    if newpage == 0:
        # switch to host view
        self.on_services_view = False
        try:
            self.main.workspace.remove(self.services_view.notebook)
        except:
            # empty workspace
            self.main.workspace.remove(self.main.welcome_note)
        try:
            self.main.workspace.add(self.work.notebook)
        except:
            # empty workspace
            self.main.workspace.add(self.main.welcome_note)
    elif newpage == 1:
        # switch to servies view
        self.on_services_view = True
        try:
            self.main.workspace.remove(self.work.notebook)
        except:
            # empty workspace
            self.main.workspace.remove(self.main.welcome_note)
        try:
            self.main.workspace.add(self.services_view.notebook)
        except:
            # empty workspace
            self.main.workspace.add(self.main.welcome_note)
    # clear the mouse_click menu
    try:
        self.rightclickmenu.destroy()
    except:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __handle_view_adversary(self, gamestate_component):", "def _handler_control_view(self,event):\n self._mgr.LoadPerspective(\n\t\t\tself._perspectives['control_view'])", "def on_action_set_view(self, content):\n self._view = content['view']\n self.refresh_traits_widget()", "def view_system():\n\n pass", "def hostsplit_service(self):\n self.which_owner()\n self.which_security()\n\n for service, value in self.service_discovery.items():\n self.details[\"services\"][service] = self.which_service(service, **value)", "def update_views():\n # replace Supervisor main entry\n here = path.abspath(path.dirname(__file__))\n # set main page\n VIEWS['index.html'] = {'template': path.join(here, 'ui/index.html'), 'view': SupvisorsView}\n # set address /processpage\n VIEWS['procaddress.html'] = {'template': path.join(here, 'ui/procaddress.html'), 'view': ProcAddressView}\n # set address/host page\n VIEWS['hostaddress.html'] = {'template': path.join(here, 'ui/hostaddress.html'), 'view': HostAddressView}\n # set application page\n VIEWS['application.html'] = {'template': path.join(here, 'ui/application.html'), 'view': ApplicationView}\n # set fake page to export images\n VIEWS['process_cpu.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': ProcessCpuImageView}\n VIEWS['process_mem.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': ProcessMemoryImageView}\n VIEWS['address_cpu.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressCpuImageView}\n VIEWS['address_mem.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressMemoryImageView}\n VIEWS['address_io.png'] = {'template': path.join(here, 'ui/empty.html'), 'view': AddressNetworkImageView}", "def servicesChanged(self) -> None:\n ...", "def update_views():\n # replace Supervisor main entry\n here = os.path.abspath(os.path.dirname(__file__))\n # set main page\n VIEWS['index.html'] = {'template': os.path.join(here, 'ui/index.html'),\n 'view': SupvisorsView}\n # set address /processpage\n VIEWS['procaddress.html'] = {'template': os.path.join(\n here, 'ui/procaddress.html'),\n 'view': ProcAddressView}\n # set address/host page\n VIEWS['hostaddress.html'] = {'template': os.path.join(\n here, 'ui/hostaddress.html'),\n 'view': HostAddressView}\n # set application page\n VIEWS['application.html'] = {'template': os.path.join(\n here, 'ui/application.html'),\n 'view': ApplicationView}\n # set fake page to export images\n VIEWS['process_cpu.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': ProcessCpuImageView}\n VIEWS['process_mem.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': ProcessMemoryImageView}\n VIEWS['address_cpu.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressCpuImageView}\n VIEWS['address_mem.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressMemoryImageView}\n VIEWS['address_io.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressNetworkImageView}", "def services_row(self, listbox, cell, listboxrow):\n\n\t\tself._clear_workspace()\n\n\t\tif str(cell) in self.scenes[\"services_view\"]:\n\n\t\t\t# check if the scene was already loaded\n\t\t\tself.services_view = self.scenes[\"services_view\"][str(cell)]\n\n\t\telse:\n\n\t\t\t# get selected port\n\t\t\t(model, pathlist) = self.services_list.servicestree.get_selection().get_selected_rows()\n\t\t\tfor path in pathlist :\n\n\t\t\t\ttree_iter = model.get_iter(path)\n\t\t\t\t#selected_host = model.get_value(tree_iter,1) 
\n\t\t\t\tselected_service = model.get_value(tree_iter,0) # selected service\n\n\t\t\t\t# YO\n\t\t\t\tself._selected_opt[\"service\"] = selected_service \n\n\t\t\t# generate the scene\n\t\t\tself.services_view = Serviceview(selected_service, self.engine.database, view_out_scope=self.main.out_of_scope.get_active())\n\t\t\tself.scenes[\"services_view\"][str(cell)] = self.services_view\n\n\t\t# add the scene\n\t\tself._selected_opt[\"service\"] = self.services_view.service\n\t\tself.main.workspace.add(self.services_view.notebook)\n\n\t\tself.services_view.treeview.connect(\"button_press_event\", self.mouse_click)", "def refresh_view():\n pass", "def _handler_default_view(self, event):\n self._mgr.LoadPerspective(\n self._perspectives['default'])", "def SetupViewStates(viewSvc, rootViewLayer):\n logger.debug('Configuring view states')\n logger.debug('Initializing view state service')\n viewSvc.Initialize(rootViewLayer)\n logger.debug('Adding primary views')\n viewSvc.AddView(ViewState.Login, LoginView())\n viewSvc.AddView(ViewState.Intro, IntroView())\n viewSvc.AddView(ViewState.CharacterSelector, CharacterSelectorView())\n viewSvc.AddView(ViewState.Space, SpaceView())\n viewSvc.AddView(ViewState.Station, CQView())\n viewSvc.AddView(ViewState.Hangar, HangarView())\n viewSvc.AddView(ViewState.WorldSpace, WorldspaceView())\n logger.debug('Adding secondary views')\n viewSvc.AddView(ViewState.DockPanel, DockPanelView(), viewType=ViewType.Secondary)\n viewSvc.AddView(ViewState.StarMap, StarMapView(), viewType=ViewType.Secondary)\n viewSvc.AddView(ViewState.SystemMap, SystemMapView(), viewType=ViewType.Secondary)\n viewSvc.AddView(ViewState.Planet, PlanetView(), viewType=ViewType.Secondary)\n viewSvc.AddView(ViewState.ShipTree, ShipTreeView(), viewType=ViewType.Secondary)\n viewSvc.AddView(ViewState.VirtualGoodsStore, AurumStoreView(), viewType=ViewType.Secondary)\n logger.debug('Adding dynamic views')\n viewSvc.AddView(ViewState.CharacterCreation, CharacterCustomizationView(), viewType=ViewType.Dynamic)\n logger.debug('Adding state transitions')\n viewSvc.AddTransition(None, ViewState.Login)\n viewSvc.AddTransitions((ViewState.Login, None), (ViewState.Intro, ViewState.CharacterSelector, ViewState.CharacterCreation), FadeToBlackTransition(fadeTimeMS=250))\n viewSvc.AddTransitions((ViewState.Intro,), (ViewState.CharacterSelector, ViewState.CharacterCreation), FadeToBlackLiteTransition(fadeTimeMS=500))\n viewSvc.AddTransitions((ViewState.CharacterSelector,), (ViewState.Space,\n ViewState.CharacterCreation,\n ViewState.Hangar,\n ViewState.VirtualGoodsStore), FadeToBlackTransition(fadeTimeMS=250))\n viewSvc.AddTransitions((ViewState.CharacterCreation,), (ViewState.Hangar, ViewState.CharacterSelector, ViewState.Space), FadeToBlackTransition(fadeTimeMS=250, allowReopen=False))\n viewSvc.AddTransitions((ViewState.Space,\n ViewState.Hangar,\n ViewState.StarMap,\n ViewState.SystemMap,\n ViewState.DockPanel,\n ViewState.Planet,\n ViewState.ShipTree), (ViewState.Space,\n ViewState.Hangar,\n ViewState.StarMap,\n ViewState.SystemMap,\n ViewState.DockPanel,\n ViewState.Planet,\n ViewState.ShipTree), FadeToBlackLiteTransition(fadeTimeMS=100))\n viewSvc.AddTransition(ViewState.Space, ViewState.Space, SpaceToSpaceTransition())\n viewSvc.AddTransition(ViewState.StarMap, ViewState.StarMap)\n viewSvc.AddTransition(ViewState.Hangar, ViewState.Hangar, FadeToBlackLiteTransition(fadeTimeMS=250, allowReopen=False))\n viewSvc.AddTransitions((ViewState.Station, ViewState.WorldSpace), (ViewState.Hangar,\n ViewState.StarMap,\n 
ViewState.SystemMap,\n ViewState.DockPanel,\n ViewState.Planet,\n ViewState.WorldSpace,\n ViewState.ShipTree), FadeToBlackLiteTransition(fadeTimeMS=100))\n viewSvc.AddTransitions((ViewState.Space,\n ViewState.CharacterSelector,\n ViewState.Hangar,\n ViewState.CharacterCreation,\n ViewState.Station,\n ViewState.WorldSpace,\n ViewState.ShipTree), (ViewState.Station, ViewState.WorldSpace), FadeToCQTransition(fadeTimeMS=200, fallbackView=ViewState.Hangar, allowReopen=False))\n viewSvc.AddTransition(ViewState.Space, ViewState.Hangar, SpaceToStationTransition())\n viewSvc.AddTransition(ViewState.Hangar, ViewState.Space, FadeToBlackTransition(fadeTimeMS=500))\n viewSvc.AddTransition(ViewState.Space, ViewState.Station, SpaceToStationTransition())\n viewSvc.AddTransition(ViewState.Hangar, ViewState.ShipTree, SpaceToStationTransition())\n viewSvc.AddTransitions((ViewState.StarMap,\n ViewState.Planet,\n ViewState.SystemMap,\n ViewState.DockPanel,\n ViewState.ShipTree), (ViewState.Station,), FadeToCQTransition(fadeTimeMS=200, fallbackView=ViewState.Hangar, allowReopen=True))\n viewSvc.AddTransitions((ViewState.Station,\n ViewState.Hangar,\n ViewState.StarMap,\n ViewState.SystemMap,\n ViewState.DockPanel,\n ViewState.WorldSpace), (ViewState.CharacterCreation,), FadeToBlackTransition(fadeTimeMS=200))\n viewSvc.AddTransition(ViewState.CharacterCreation, (ViewState.Station, ViewState.WorldSpace), FadeFromCharRecustomToCQTransition(fadeTimeMS=250))\n viewSvc.AddTransition(ViewState.Station, ViewState.Space, FadeFromCQToSpaceTransition(fadeTimeMS=250))\n viewSvc.AddTransitions((ViewState.VirtualGoodsStore,), VIEWS_TO_AND_FROM_AURUM_STORE, FadeToBlackTransition(fadeTimeMS=250))\n viewSvc.AddTransitions(VIEWS_TO_AND_FROM_AURUM_STORE, (ViewState.VirtualGoodsStore,), FadeToBlackTransition(fadeTimeMS=250))\n logger.debug('Adding view state controlled overlays')\n viewSvc.AddOverlay(ViewOverlay.Target, None)\n viewSvc.AddOverlay(ViewOverlay.SidePanels, SidePanels)\n viewSvc.AddOverlay(ViewOverlay.ShipUI, ShipUI)\n viewSvc.AddOverlay(ViewOverlay.StationEntityBrackets, None)\n logger.debug('Done configuring view states')", "def mouse_click(self, tv, event, alltargets=False):\n\t\t\n\t\tif event.button == 3:\n\n\t\t\t# create the menu and submenu objects\n\t\t\trightclickmenu = Gtk.Menu()\n\t\t\t\n\t\t\ttargets = []\n\t\t\tgeneric = []\n\n\t\t\t# check\n\t\t\tif self.on_services_view:\n\t\t\t\tif alltargets:\n\t\t\t\t\t(model, pathlist) = self.services_list.servicestree.get_selection().get_selected_rows()\n\t\t\t\telse:\n\t\t\t\t\t(model, pathlist) = self.services_view.treeview.get_selection().get_selected_rows()\n\t\t\telse:\n\t\t\t\t(model, pathlist) = self.work.treeview.get_selection().get_selected_rows()\n\n\t\t\tif len(pathlist) < 1:\n\t\t\t\t# right click on nothing\n\t\t\t\treturn False \n\n\t\t\t# get selected port\n\t\t\ttry:\n\t\t\t\tfor path in pathlist :\n\t\t\t\t\ttree_iter = model.get_iter(path)\n\n\t\t\t\t\tif self.on_services_view:\n\t\t\t\t\t\tif alltargets:\n\t\t\t\t\t\t\tservice = self._filter_service(model.get_value(tree_iter,0)) # selected service\n\t\t\t\t\t\t\t# set shell conf section from user selection\n\t\t\t\t\t\t\tself._selected_opt[\"service\"] = service\n\n\t\t\t\t\t\t\tfor port in self.engine.database.get_ports_by_service(service):\n\t\t\t\t\t\t\t\ttargets.append(port)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# set selected port\n\t\t\t\t\t\t\tselected_port = model.get_value(tree_iter,1) \n\t\t\t\t\t\t\tself._selected_opt[\"port\"] = selected_port \n\n\t\t\t\t\t\t\t# set selected host if on 
service view\n\t\t\t\t\t\t\tself._selected_opt[\"host\"] = model.get_value(tree_iter,4) \n\t\t\t\t\t\t\ttargets.append(self.engine.database.get_port(model.get_value(tree_iter,7) ))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# set selected port\n\t\t\t\t\t\tselected_port = model.get_value(tree_iter,1) \n\t\t\t\t\t\tself._selected_opt[\"port\"] = selected_port \n\n\t\t\t\t\t\t# set selected service if not on service view\n\t\t\t\t\t\tselected_service = model.get_value(tree_iter,4) # selected service\n\t\t\t\t\t\ttargets.append(self.engine.database.get_port(model.get_value(tree_iter,7)))\n\t\t\t\t\t\tself._selected_opt[\"service\"] = selected_service \n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tpass\n\t\t\t\n\t\t\t#print('si')\n\t\t\t# fix some multiple names\n\t\t\tself._selected_opt[\"service\"] = self._filter_service(self._selected_opt[\"service\"])\n\n\t\t\t# get extra extensions\n\t\t\textra = self.engine.get_menu(self._selected_opt[\"service\"])\n\n\t\t\tfor extension in extra:\n\t\t\t\tif extension == \"shell\":\n\t\t\t\t\t# little trick for shell ext\n\t\t\t\t\tiE = Gtk.MenuItem(self._selected_opt[\"service\"])\n\t\t\t\telse:\n\t\t\t\t\tiE = Gtk.MenuItem(extension)\n\n\t\t\t\tiE.show()\n\t\t\t\trightclickmenu.append(iE)\n\n\t\t\t\t# check if there is a submenu for the current extension\n\t\t\t\ttry:\n\t\t\t\t\ttabs = {}\n\t\t\t\t\textension_ext_menu = Gtk.Menu()\n\t\t\t\t\tsubmenu = extra[extension].submenu(self._selected_opt[\"service\"])\n\n\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t#print(sub_item)\n\t\t\t\t\t\tif len(sub_item.split(\"/\")) > 1:\n\t\t\t\t\t\t\tprev = \"\"\n\t\t\t\t\t\t\tprevst = \"\"\n\n\t\t\t\t\t\t\tfor sub in sub_item.split(\"/\"):\n\t\t\t\t\t\t\t\tif sub != sub_item.split(\"/\")[-1]:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# new category\n\t\t\t\t\t\t\t\t\tt_menu = Gtk.Menu()\n\t\t\t\t\t\t\t\t\tt = Gtk.MenuItem(sub)\n\t\t\t\t\t\t\t\t\tt.show()\n\t\t\t\t\t\t\t\t\tt.set_submenu(t_menu)\n\n\t\t\t\t\t\t\t\t\tif not sub in tabs:\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\ttabs[sub] = t_menu\n\n\t\t\t\t\t\t\t\t\t\tif prevst != \"\":\n\t\t\t\t\t\t\t\t\t\t\tprev.append(t)\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\textension_ext_menu.append(t)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprev = tabs[sub]\n\t\t\t\t\t\t\t\t\tprevst = sub\n\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t#print(sub)\n\t\t\t\t\t\t\t\t\titem = Gtk.MenuItem( sub ) \n\t\t\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], sub_item)\n\n\t\t\t\t\t\t\t\t\tprev.append(item)\n\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# extension in any sub-categories\n\t\t\t\t\t\t\titem = Gtk.MenuItem(sub_item)\n\t\t\t\t\t\t\textension_ext_menu.append(item)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# show and connect the extension\n\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], sub_item)\n\n\t\t\t\t\tif len(tabs) == 0:\n\t\t\t\t\t\tnot_found = Gtk.MenuItem(\"nothing\")\n\t\t\t\t\t\tnot_found.show()\n\t\t\t\t\t\textension_ext_menu.append(not_found)\n\t\t\t\t\t\n\t\t\t\t\tiE.set_submenu(extension_ext_menu)\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t#print(e)\n\t\t\t\t\tiE.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], extra[extension].menu[\"label\"]) #.menu[\"label\"])\n\n\t\t\t\ttry:\n\t\t\t\t\t# try if there is generic for the current extension\n\t\t\t\t\tsubmenu = 
extra[extension].submenu(\"generic\")\n\n\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t# remove _ and show spaces\n\t\t\t\t\t\tgeneric.append(sub_item.replace(\"_\",\" \"))\n\t\t\t\texcept: pass\n\n\t\t\tseparator = Gtk.SeparatorMenuItem()\n\t\t\tseparator.show()\n\t\t\trightclickmenu.append(separator)\n\n\t\t\tgen_x = self.engine.get_menu(\"generic\")\n\n\t\t\tfor gen in generic:\n\n\t\t\t\ti2 = Gtk.MenuItem(gen)\n\t\t\t\ti2.show()\n\t\t\t\trightclickmenu.append(i2)\n\n\t\t\t\ti2.connect(\"activate\", self.run_multi_extra, targets, extra[\"shell\"], \"generic\", gen)\n\n\t\t\trightclickmenu.popup(None, None, None, None, 0, Gtk.get_current_event_time())\n\n\t\t\treturn True", "def _sync(self, reset=False, history=False):\n\n\t\t# Check history refresh\n\t\tif history:\n\t\t\t# this sync only the hosts history in the hostviews loaded\n\t\t\t# this avoid to refresh everything and lose the hostlist/servicelist selection\n\t\t\t# then return True\n\n\t\t\tfor host in self.scenes[\"hosts_view\"]:\n\t\t\t\tself.scenes[\"hosts_view\"][host].refresh(self.engine.database, history=True)\n\n\t\t\treturn True \n\t\t\n\t\t# refresh everithing\n\t\tself.host_list.refresh(self.engine.database)\n\t\tself.services_list.refresh(self.engine.database)\n\n\t\tif reset:\n\t\t\t# called at project switch\n\t\t\t# otherwise will break current running task's log\n\t\t\tself.logger.refresh(self.engine.database)\n\n\t\t\t# add the welcome message\n\t\t\tself.main.workspace.add(self.main.welcome_note)\n\t\t\t\n\t\t\t# reset the scenes\n\t\t\tself.scenes[\"hosts_view\"]\t = {}\n\t\t\tself.scenes[\"services_view\"] = {}\n\n\t\t\ttry:\n\t\t\t\tself.services_view.notebook.destroy()\n\t\t\texcept: pass\n\t\t\ttry:\n\t\t\t\tself.work.notebook.destroy()\n\t\t\texcept: pass\n\n\t\t# set the db location as headerbar subtitle\n\t\tself.main.headerbar.set_subtitle(self.engine.database.db_loc)\t\n\n\t\t# refresh the hostviews and servicesview\n\t\tfor host in self.scenes[\"hosts_view\"]:\n\t\t\tself.scenes[\"hosts_view\"][host].refresh(self.engine.database)\n\n\t\tfor service in self.scenes[\"services_view\"]:\n\t\t\tself.scenes[\"services_view\"][service].refresh(self.engine.database, view_out_scope = self.main.out_of_scope.get_active())", "def init_view(self):\n self.view_map = self.ctx.clientmap", "def service(request):\n\treturn render(request,'service.html',None)", "def tempest_ceph_services_tun(self):\n self.helper_ceph_services('tun')", "def view_service(options, service_name, client):\n if options.show_events:\n return display_events(client.service_events(service_name))\n\n service_content = client.service(service_name)\n return display.DisplayServices().format_details(service_content)", "def _spawn_vapv(self, hostnames, lb):\n identifier = self.openstack_connector.get_identifier(lb)\n # Initialize lists of items to clean up if operation fails\n port_ids = []\n security_groups = []\n vms = []\n try: # For rolling back objects if failure occurs...\n # Create ports...\n ports = {}\n if cfg.CONF.lbaas_settings.management_mode == \"FLOATING_IP\":\n # Primary data port (floating IP)\n (port, sec_grp, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[0], create_floating_ip=True, cluster=True,\n identifier=identifier\n )\n ports[hostnames[0]] = {\n \"ports\": {\n \"data\": port,\n \"mgmt\": None\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": port['fixed_ips'][0]['ip_address']\n }\n port_ids.append(port['id'])\n security_groups = [sec_grp]\n # Secondary data port (floating IP)\n (port, junk, mgmt_ip) = 
self.openstack_connector.create_port(\n lb, hostnames[1], security_group=sec_grp,\n create_floating_ip=True, cluster=True\n )\n ports[hostnames[1]] = {\n \"ports\": {\n \"data\": port,\n \"mgmt\": None\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": port['fixed_ips'][0]['ip_address']\n }\n port_ids.append(port['id'])\n elif cfg.CONF.lbaas_settings.management_mode == \"MGMT_NET\":\n # Primary data port (management network)\n (data_port, data_sec_grp, junk) = self.openstack_connector.create_port(\n lb, hostnames[0], cluster=True, identifier=identifier\n )\n # Primary mgmt port (management network)\n (mgmt_port, mgmt_sec_grp, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[0], mgmt_port=True, cluster=True, identifier=identifier\n )\n ports[hostnames[0]] = {\n \"ports\": {\n \"data\": data_port,\n \"mgmt\": mgmt_port\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": mgmt_ip\n }\n security_groups = [data_sec_grp, mgmt_sec_grp]\n port_ids.append(data_port['id'])\n port_ids.append(mgmt_port['id'])\n # Secondary data port (management network)\n (data_port, sec_grp, junk) = self.openstack_connector.create_port(\n lb, hostnames[1], security_group=data_sec_grp, cluster=True\n )\n # Secondary mgmt port (management network)\n (mgmt_port, junk, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[1], mgmt_port=True, security_group=mgmt_sec_grp,\n cluster=True\n )\n ports[hostnames[1]] = {\n \"ports\": {\n \"data\": data_port,\n \"mgmt\": mgmt_port\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": mgmt_ip\n }\n port_ids.append(data_port['id'])\n port_ids.append(mgmt_port['id'])\n\n # Create instances...\n try:\n bandwidth = lb.bandwidth\n if bandwidth == 0:\n raise AttributeError()\n except AttributeError:\n bandwidth = self._get_setting(\n lb.tenant_id, \"services_director_settings\", \"bandwidth\"\n )\n avoid = None\n for host in hostnames:\n # Launch vAPV...\n vm = self.openstack_connector.create_vapv(\n host, lb, ports[host]['ports'], avoid\n )\n vms.append(vm['id'])\n # Set params for next iteration...\n if cfg.CONF.lbaas_settings.allow_different_host_hint is True:\n avoid = vm['id']\n\n except Exception as e:\n if cfg.CONF.lbaas_settings.roll_back_on_error is True:\n self.openstack_connector.clean_up(\n instances=vms,\n security_groups=security_groups,\n ports=port_ids\n )\n raise e", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def views(self, views):\n\n self._views = views", "def switch(self, context):\n return", "def __handle_view_door(self, gamestate_component):", "def RemoteRouter(services):\n return PublicController(services)", "def local_views():\n\tpass", "def __handle_view_item(self, gamestate_component):", "def tempest_ceph_services_vlan(self):\n self.helper_ceph_services('vlan')", "def network_views():\n return 'networkview?'" ]
[ "0.5940524", "0.5927022", "0.5788933", "0.57363355", "0.56955576", "0.56856203", "0.5660334", "0.56177056", "0.5548212", "0.5532452", "0.5528236", "0.54758364", "0.54498", "0.54244065", "0.5414866", "0.5408288", "0.53973764", "0.53850013", "0.5373715", "0.53456396", "0.53456396", "0.53456396", "0.5342106", "0.531921", "0.53144354", "0.52689165", "0.52566195", "0.52278596", "0.521597", "0.5207166" ]
0.6726968
0
add / remove host scope
def _scope(self, widget, add, targets):
    for host_obj in targets:
        self.engine.database.switch_scope(add, host_obj)
    self._sync()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_or_remove_host(self, event):\n try:\n host = self.caller.search(self.lhs).Dominion\n except AttributeError:\n return\n if event:\n if host == event.main_host:\n raise self.CalCmdError(\"The main host cannot be removed.\")\n if host in event.hosts:\n event.change_host_to_guest(host)\n msg = \"Changed host to a regular guest. Use /uninvite to remove them completely.\"\n else:\n event.add_host(host)\n msg = \"%s added to hosts.\" % host\n else:\n hosts = self.project[\"hosts\"]\n if host.id in hosts:\n hosts.remove(host.id)\n if host.id not in self.project[\"invites\"]:\n self.project[\"invites\"].append(host.id)\n msg = \"Changed host to a regular guest. Use /uninvite to remove them completely.\"\n else:\n hosts.append(host.id)\n if host.id in self.project[\"invites\"]:\n self.project[\"invites\"].remove(host.id)\n msg = \"%s added to hosts.\" % host\n self.msg(msg)", "def scope_delete(client, args):\n client.set_scope([])", "def createHost(self):\n self.createUser()\n self.user.host_for = [self.program.scope.key()]\n self.user.put()", "def scope(self, name):\r\n raise NotImplementedError", "def resourcescope(self, **content):\n self.resource_scopes.update(content)\n self.resource_scopes.update(self.overwrite_resource_scopes)", "def add_scope(self, scope_name):\r\n scp = '{}/{}'.format(self._model, scope_name)\r\n self._scopes.append(scp)", "def enterScope(self, name):", "def require_host(host_list):\n def add_attribute(func):\n if not hasattr(func, \"host\"):\n func.host = []\n func.host.append(host_list)\n return func\n return add_attribute", "def add(self, host, plistener):\n\n host_entry = findACL(host)\n if (host_entry.plisteners != None):\n host_entry.plisteners.append(str(plistener))\n else:\n host_entry.plisteners = [str(plistener)]\n host_entry.put()\n\n plistener_entry = findACL(plistener)\n if (plistener_entry.psessions != None):\n plistener_entry.psessions.append(str(host))\n else:\n plistener_entry.psessions = [str(host)]\n plistener_entry.put()", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def host_click(self, tv, event):\n\t\t\n\t\t# grab the right click\n\t\tif event.button == 3:\n\n\t\t\trightclickmenu = Gtk.Menu()\n\n\t\t\t# get selected host\n\t\t\ttry:\n\t\t\t\ttargets = []\n\n\t\t\t\t(model, pathlist) = self.host_list.hosttree.get_selection().get_selected_rows()\n\t\t\t\t\n\t\t\t\tif len(pathlist) < 1:\n\t\t\t\t\t# right click on nothing\n\t\t\t\t\treturn False \n\n\t\t\t\tfor path in pathlist :\n\t\t\t\t\t# Fill target's array\n\n\t\t\t\t\ttree_iter = model.get_iter(path)\n\n\t\t\t\t\taddress = model.get_value(tree_iter,1) # selected host address\n\t\t\t\t\tdomain = model.get_value(tree_iter,2) # selected host address\n\t\t\t\t\thost_id = model.get_value(tree_iter,5)\n\n\t\t\t\t\thost_obj = self.engine.database.get_host(host_id)\n\n\t\t\t\t\ttargets.append(host_obj)\n\n\t\t\t\t\tself._selected_opt[\"host\"] = address\n\t\t\t\t\tself._selected_opt[\"domain\"] = domain\n\t\t\t\t\tself._selected_opt[\"port\"] = 0\n\n\t\t\t\t\t# set hosts generic shell conf section\n\t\t\t\t\textra_name = \"hostlist\"\n\n\t\t\t\t\t# Delete host option\n\t\t\t\t\ti4 = Gtk.MenuItem(\"Delete\")\n\t\t\t\t\ti4.show()\n\n\t\t\t\t\trightclickmenu.append(i4)\n\t\t\t\t\ti4.connect(\"activate\", self._delete_host, targets)\n\n\t\t\t\tif len(targets) > 1:\n\t\t\t\t\t# multiple hosts selected\n\t\t\t\t\t# we will add both remove and add to scope options\n\t\t\t\t\ti5 = Gtk.MenuItem(\"Remove Scope\")\n\t\t\t\t\ti6 = Gtk.MenuItem(\"Add 
Scope\")\n\n\t\t\t\t\ti5.show()\n\t\t\t\t\ti6.show()\n\n\t\t\t\t\trightclickmenu.append(i5)\n\t\t\t\t\trightclickmenu.append(i6)\n\n\t\t\t\t\ti5.connect(\"activate\", self._scope, False, targets) # True means Add\n\t\t\t\t\ti6.connect(\"activate\", self._scope, True, targets) # False means remove\n\n\t\t\t\telse:\n\t\t\t\t\t# single host selected\n\t\t\t\t\t# check if the host in scope and add only one option\n\t\t\t\t\tif host_obj.scope:\n\t\t\t\t\t\t# in scope\n\t\t\t\t\t\ti5 = Gtk.MenuItem(\"Remove Scope\")\n\t\t\t\t\t\ti5.connect(\"activate\", self._scope, False, targets) # False means remove\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Out of scope item\n\t\t\t\t\t\ti5 = Gtk.MenuItem(\"Add Scope\")\n\t\t\t\t\t\ti5.connect(\"activate\", self._scope, True, targets) # True means Add\n\t\t\t\t\t\t\n\t\t\t\t\ti5.show()\n\t\t\t\t\trightclickmenu.append(i5)\n\n\t\t\t\textra = self.engine.get_menu(extra_name)\n\n\t\t\t\tfor c_ext in extra:\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttabs = {}\n\t\t\t\t\t\t#extension_ext_menu = Gtk.Menu()\n\t\t\t\t\t\tsubmenu = extra[c_ext].submenu(extra_name)\n\n\t\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t\t#print(sub_item)\n\t\t\t\t\t\t\tif len(sub_item.split(\"/\")) > 1:\n\t\t\t\t\t\t\t\tprev = \"\"\n\t\t\t\t\t\t\t\tprevst = \"\"\n\n\t\t\t\t\t\t\t\tfor sub in sub_item.split(\"/\"):\n\t\t\t\t\t\t\t\t\tif sub != sub_item.split(\"/\")[-1]:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t# new category\n\t\t\t\t\t\t\t\t\t\tt_menu = Gtk.Menu()\n\t\t\t\t\t\t\t\t\t\tt = Gtk.MenuItem(sub)\n\t\t\t\t\t\t\t\t\t\tt.show()\n\t\t\t\t\t\t\t\t\t\tt.set_submenu(t_menu)\n\n\t\t\t\t\t\t\t\t\t\tif not sub in tabs:\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\ttabs[sub] = t_menu\n\n\t\t\t\t\t\t\t\t\t\t\tif prevst != \"\":\n\t\t\t\t\t\t\t\t\t\t\t\tprev.append(t)\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\trightclickmenu.append(t)\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tprev = tabs[sub]\n\t\t\t\t\t\t\t\t\t\tprevst = sub\n\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t#print(sub)\n\t\t\t\t\t\t\t\t\t\titem = Gtk.MenuItem( sub ) \n\t\t\t\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[c_ext], extra_name, sub_item)\n\n\t\t\t\t\t\t\t\t\t\tprev.append(item)\n\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# extension in any sub-categories\n\t\t\t\t\t\t\t\titem = Gtk.MenuItem(sub_item)\n\t\t\t\t\t\t\t\trightclickmenu.append(item)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# show and connect the extension\n\t\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[c_ext], extra_name, sub_item)\n\n\t\t\t\t\texcept Exception as e : \n\t\t\t\t\t\tif self.on_services_view:\n\t\t\t\t\t\t\titem = Gtk.MenuItem(c_ext)\n\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\trightclickmenu.append(item)\n\n\t\t\t\t\t\t\titem.connect(\"activate\", self.run_multi_extra, targets, extra[c_ext], service, c_ext)\n\n\n\t\t\t\trightclickmenu.popup(None, None, None, None, 0, Gtk.get_current_event_time())\n\t\t\t\trightclickmenu.show_all()\n\n\t\t\t\treturn True\n\n\t\t\texcept Exception as e: print(e)", "def fusion_api_edit_scope(self, uri, body=None, api=None, headers=None, eTag=None):\n\n return self.scope.put(uri=uri, body=body, api=api, headers=headers, eTag=eTag)", "def addScope(self, scope):\n assert isinstance(scope, ScopeInterface);", "def add_host(self, group, host):\n if group not in self.inventory:\n self.add_inventory_group(group)\n\n if host not in self.inventory[group]['hosts']:\n 
self.inventory[group]['hosts'].append(host)\n return", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def scope(self): # noqa: ANN201", "def remove_host(self, host):\n\t\tfrom host import Host\n\n\t\tif isinstance(host, Host):\n\t\t\tself.hosts.pop(self.hosts.index(host))\n\t\t\tprint('Removing host %s... from environment %s' % (host.id[0:5], self.id[0:5]))\n\t\telif type(host) == int:\n\t\t\tself.hosts.pop(host)\n\t\telse:\n\t\t\traise TypeError('A Host object or an integer must be specified!')", "def remove(self, host, plistener):\n\n host_entry = findACL(host)\n if (host_entry.plisteners != None):\n while (plistener in host_entry.plisteners):\n host_entry.plisteners.remove(plistener.user_id())\n host_entry.put()\n\n plistener_entry = findACL(plistener)\n if (plistener_entry.psessions != None):\n while (host in plistener_entry.psessions):\n plistener_entry.psessions.append(host)\n plistener_entry.put()", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def scope(self, scope):\n self._scope = scope", "def append_allowed_hosts(self, hostname):\r\n settings.ALLOWED_HOSTS.append(hostname)\r\n self.addCleanup(settings.ALLOWED_HOSTS.pop)", "def fusion_api_create_scope(self, body, api=None, headers=None):\n return self.scope.post(body, api, headers)", "def add_hosts(self, hosts):\n for host in hosts:\n if host not in self.__hosts__:\n self.__hosts__.append(KnownHostsHost(host))", "def horizonhost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['horizon']\n env.exists = exists", "def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.add(args)", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def set_scope(self, scope):\n self.vis.set_scope(scope)", "def set_host_aliases():\n with open('/tmp/hosts', 'w') as f:\n uname = os.uname()\n f.write(f'{uname.nodename} localhost\\n')\n os.environ['HOSTALIASES'] = '/tmp/hosts'", "def addScope(name=None):\n try:\n global scopeSeq\n global currScope\n scopeSeq += 1\n lastScope = currScope\n currScope = scopeSeq\n scopeStack.append(currScope)\n scopeDict[currScope] = symbolTable(currScope)\n scopeDict[currScope].setParent(lastScope)\n if name is not None:\n if type(name) is list:\n scopeDict[lastScope].insert(name[1], 'func')\n scopeDict[lastScope].updateArgList(name[1], 'child', scopeDict[currScope])\n else:\n temp = currScope\n currScope = lastScope\n if checkId(name, '*'):\n pos = p.lexer.lexpos\n line = checkLineNo(pos,0)\n print(\"Name \" + name + \" already defined....\",line)\n return\n currScope = temp\n scopeDict[lastScope].insert(name, 'type'+name)\n scopeDict[lastScope].updateArgList(name, 'child', scopeDict[currScope])\n pass\n except Exception as e:\n print(\"WARNING:1:\"+str(e))\n return", "def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts" ]
[ "0.5854914", "0.5846827", "0.5810849", "0.5483697", "0.54318076", "0.5403112", "0.52607644", "0.52530056", "0.5238579", "0.517643", "0.51754916", "0.5110414", "0.50496083", "0.50493914", "0.50262", "0.5015864", "0.49888277", "0.4976647", "0.49699435", "0.49688184", "0.49462023", "0.49456862", "0.49343634", "0.49272865", "0.49247217", "0.49116683", "0.49083292", "0.49060434", "0.4890477", "0.48643875" ]
0.6768299
0
show / hide outofscope targets
def _showhide_scope(self, widget):
    self.host_list.toggle_scope()
    self.services_list.toggle_scope()
    self._sync() #reset=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_visible(self, target: bool) -> None:\n hidden = not target\n for ent in self.child_ents():\n ent.vis_shown = target\n ent.hidden = hidden\n for solid in ent.solids:\n solid.vis_shown = target\n solid.hidden = hidden\n\n for solid in self.child_solids():\n solid.vis_shown = solid.hidden = target\n solid.hidden = hidden", "def ShowDockingGuides(guides, show):\r\n\r\n for target in guides:\r\n \r\n if show and not target.host.IsShown():\r\n target.host.Show()\r\n target.host.Update()\r\n \r\n elif not show and target.host.IsShown(): \r\n target.host.Hide()", "def show_targets(self):\n\n c = self.c\n c.p.contract()\n nd = c.p.insertAfter()\n nd.h = \"Global QuickMove targets\"\n nd.b = \"\"\"\n There are the current global QuickMove targets. Use the tree context menu\n Move -> Read targets command to replace the stored targets with the content\n of this node, after editing.\n\n Targets are a pair of lines, starting with \"NAME:\" and \"UNL:\", with the whole\n UNL on one line.\\n\\n\"\"\"\n\n for target in g.app.db['_quickmove']['global_targets']:\n nd.b += \"NAME: %s\\nUNL: %s\\n\\n\" % (target['name'], target['unl'])\n\n c.selectPosition(nd)\n c.redraw()", "def visible(self, show):", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def ToggleVisible(self, event):\n pass", "def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()", "def is_visible(self):", "def showHidden(*args, above: bool=True, allObjects: bool=True, below: bool=True, lastHidden:\n bool=True, **kwargs)->None:\n pass", "def toggle_visibility(self):\n if self.is_visible():\n self.hide()\n else:\n self.show()", "def __show_target(self):\n pcd = o3d.io.read_point_cloud(\n self.target_cloud\n )\n if np.asarray(pcd.points).shape[0] != 0:\n pcd.paint_uniform_color([0, 0, 1])\n pcd.estimate_normals()\n self.target_point_cloud_view.load_cloud(pcd)\n try:\n self.target_point_cloud_view.show_window()\n except RuntimeError:\n pass\n else:\n QtWidgets.QMessageBox.warning(self, \"Error\",\n f\"Target point cloud is no longer available\"\n )\n self.target_cloud = \"\"\n self.__update_clickability()\n self.__save_context()", "def show_hide_project_hierarchy(self, event=None):\n # self.project_btn['bg'] = 'gray'\n if self.i == 1:\n self.paned_win.remove(self.left_frame)\n self.i = 0\n else:\n self.paned_win.add(self.left_frame, before=self.right_frame, width=230)\n self.i = 1", "def do_hf_hide(self, arg):\n self.show_hidden_frames = False\n self.refresh_stack()", "def do_hf_unhide(self, arg):\n self.show_hidden_frames = True\n self.refresh_stack()", "def show_hide_toolbar(self):\n if self.showing: # hiding\n self.toolbar_frame.pack_forget()\n self.Toolbars.entryconfigure(1, label=\" Show toolbar \", command=self.show_hide_toolbar)\n self.showing = False\n else: # displaying\n self.paned_win.pack_forget()\n self.on_off_project_hierarchy.pack_forget()\n self.statusbar_frame.pack_forget()\n\n self.statusbar_frame.pack(fill='x', side='bottom')\n self.toolbar_frame.pack(fill='x', side='top')\n 
self.on_off_project_hierarchy.pack(fill='y', side='left', ipadx=3)\n self.paned_win.pack(fill='both', expand=1)\n\n self.Toolbars.entryconfigure(1, label=\" Hide toolbar \")\n self.showing = True", "def is_hidden(self):\n return self.has_label(HIDDEN_LABEL)", "def targets_placeholder(self):", "def setIsolateHidden( self, state ):\n self._isolatedHidden = state\n \n super(XNode, self).setVisible(self.isVisible())" ]
[ "0.6622147", "0.6241229", "0.61932164", "0.60345656", "0.56592095", "0.56346786", "0.56018", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.5549058", "0.55428123", "0.54548293", "0.54544175", "0.5411023", "0.54019326", "0.5366378", "0.53504425", "0.53254473", "0.528646", "0.5284362", "0.52733374", "0.5241761" ]
0.6492323
1
show / hide logs notebook
def _showhide_logs(self, widget): if self.main.view_logs.get_active(): self.logger.notebook.show() else: self.logger.notebook.hide()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_messages(self):\n self.masterlog.revealme()", "def viewLog(self, event):\n logcontent = \"\"\n if Config.GetOption(\"ActLog\") == True:\n\n logFrame = wx.Frame(None, -1, \"View Log\", size=(500, 500))\n panel5 = wx.Panel(logFrame)\n data = wx.richtext.RichTextCtrl(panel5, pos=(0, 0), size=(500,\n 500))\n data.AppendText(Log.ReadLog())\n logFrame.Centre()\n logFrame.Show()\n else:\n\n inform = wx.MessageDialog(None,\n \"The Log is disabled!\\\n \\nEnable it to view.\",\n \"Log Status\", wx.OK)\n inform.ShowModal()", "def show_logs():\n nodes=hl.getAllNodes();\n\n return render_template('logs.html',nodes = nodes)", "def hide(self, event: Event = None) -> None:\n if self.handler:\n self.c.frame.log.selectTab('Log')\n self.c.bodyWantsFocus()", "def output_notebook(self):\n self._notebook = True", "def set_log_hide_secrets(value: bool = True) -> None:\n logs.HideFormatter.HIDE_ENABLED = tools.coerce_bool(value)", "def show_log(work_log):\n f = open(work_log, 'r')\n contents = f.readlines()\n print '\\n'\n for line in contents:\n print line\n print '\\n'\n f.close()", "def showAllLogs():\n\t#Add sections to log screen\n\tallLogs=findFiles(getWorkingDirectory(),\".log\")\n\tcounter=-1\n\tfor l in allLogs:\n\t\tcounter+=1\n\t\tbase=getRootName(l)\n\t\tif base in logDict:\n\t\t\tbase=logDict[base]\n\t\t#Add to selection bar\n\t\tlogSelectionBar.addTab(base,command=lambda n=l: displayLog(n))\n\t\t#Store\n\t\tloadedLogs[counter]=l", "def subnotebook_show(self):\n if not self.subnotebook:\n logger.debug(\"Showing subnotebook\")\n self.subnotebook = self.add_subnotebook()", "def show(self):\n self._logger.debug(\"show\")", "def hide_messages():\n\n print(\"Keep uncertainty data?\")\n print(\"NewDatabase(..., keep_uncertainty_data=True)\")\n print(\"\")\n print(\"Hide these messages?\")\n print(\"NewDatabase(..., quiet=True)\")", "def DisplayLog(ssh, log_file, no_prompts=False):\n warning_msg = (\"It will stream log to show on screen. If you want to stop \"\n \"streaming, please press CTRL-C to exit.\\nPress 'y' to show \"\n \"log or read log by myself[y/N]:\")\n if no_prompts or utils.GetUserAnswerYes(warning_msg):\n ssh.Run(\"tail -f -n +1 %s\" % log_file, show_output=True)", "def setup_log_panel(window, src_window=None):\n view = window.create_output_panel(\"YouTubeEditor Log\")\n view.set_read_only(True)\n view.settings().set(\"gutter\", False)\n view.settings().set(\"rulers\", [])\n view.settings().set(\"word_wrap\", False)\n view.settings().set(\"context_menu\", \"YouTubeLog.sublime-menu\")\n\n if src_window:\n src_view = src_window.find_output_panel(\"YouTubeEditor Log\")\n if src_view:\n text = src_view.substr(sublime.Region(0, len(src_view)))\n view.run_command(\"append\", {\n \"characters\": text,\n \"force\": True,\n \"scroll_to_end\": True\n })", "def display_output_panel():\n window = sublime.active_window()\n if window.active_panel() == 'output.YouTubeEditor Log':\n return\n\n # True for always, False for Never, number for Always (but autoclose);\n # thus if this is a boolean and it's False, we should leave. 
Otherwise,\n # we're good.\n show_panel = yte_setting('auto_show_panel')\n if isinstance(show_panel, bool) and show_panel == False:\n return\n\n # Show the panel, and if desired autoclose it.\n window.run_command(\"show_panel\", {\"panel\": \"output.YouTubeEditor Log\"})\n if isinstance(show_panel, bool) == False and isinstance(show_panel, int):\n close_panel_after_delay(window, show_panel * 1000)", "def notebook():\n pass", "def notebook():\n pass", "async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return", "def view_log():\n g.title = \"View Log\"\n log = ShotLog().get_text() #log is a generator\n \n return render_template('log_viewer.html',log=log)", "def OnShowLog(self, event):\n dlg = LogViewer(self)\n dlg.OnLogRefresh(event)\n dlg.ShowModal()\n dlg.Destroy()", "def loggraph(self, pad_columns=True):\n return self.show(loggraph=True, html=True, pad_columns=pad_columns)", "def hide_access_logs():\n access_log = cherrypy.log.access_log\n for handler in tuple(access_log.handlers):\n access_log.removeHandler(handler)", "def setNoHiddenLines():\n dislin.nohide()", "def cmd_logs(args):\n\n remote.show_log(_get_current_project_name(), num=args.num, tail=args.tail)", "def event_beforehide(self):\n logging.warning('beforehide undefined')", "def subnotebook_hide(self):\n if self.subnotebook and self.subnotebook.winfo_ismapped():\n logger.debug(\"Hiding subnotebook\")\n self.subnotebook.pack_forget()\n self.subnotebook.destroy()\n self.subnotebook = None", "def show(self) -> None:\n thr_is_alive = self._spin_thread and self._spin_thread.is_alive()\n if self._hide_spin is None:\n raise RuntimeError(\"hide_spin is None\")\n\n if thr_is_alive and self._hide_spin.is_set():\n with self._stdout_lock:\n # clear the hidden spinner flag\n self._hide_spin.clear()\n # clear the current line so the spinner is not appended to it\n self._clear_line()", "def hideMl(self, *args):\n\t\tself.callMethod(('ManialinkManager', 'hideManialinkToLogin'), *args)", "def __configure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"logViewerPage\")", "def I2C_log_thread(write_directives, filename, gui): \n # Change the role of the logging button so that it stops logging\n gui.logging_button_state('stop')\n \n # start logging\n pySCPI_aardvark.log_aardvark(write_directives, filename, gui)\n \n # clear the flag that stopped logging\n gui.terminator.kill_event.clear()\n \n # re-allow access to hte command text box\n gui.Command_text.config(state = 'normal') \n \n # determine if there is still a gui to update\n if not gui.terminator.root_destroyed:\n # there is so unlock all GUI buttons\n gui.action_lock('Unlock')\n \n # Reset the logging button back to it's initial state\n gui.logging_button_state('start')\n # end if", "def event_afterhide(self):\n logging.warning('afterhide undefined')" ]
[ "0.6446135", "0.62465423", "0.6045043", "0.57627416", "0.5757946", "0.5732056", "0.5711249", "0.5697132", "0.56432843", "0.56414294", "0.5610665", "0.56100166", "0.56040424", "0.5602165", "0.55740595", "0.55740595", "0.553568", "0.5534145", "0.5527125", "0.543908", "0.5409417", "0.53985107", "0.5371028", "0.53706676", "0.5362998", "0.53415906", "0.53262484", "0.53072554", "0.5297165", "0.5271852" ]
0.8210805
0
serviceslist service click event this will generate the scene in the scenes dictionary
def services_row(self, listbox, cell, listboxrow): self._clear_workspace() if str(cell) in self.scenes["services_view"]: # check if the scene was already loaded self.services_view = self.scenes["services_view"][str(cell)] else: # get selected port (model, pathlist) = self.services_list.servicestree.get_selection().get_selected_rows() for path in pathlist : tree_iter = model.get_iter(path) #selected_host = model.get_value(tree_iter,1) selected_service = model.get_value(tree_iter,0) # selected service # YO self._selected_opt["service"] = selected_service # generate the scene self.services_view = Serviceview(selected_service, self.engine.database, view_out_scope=self.main.out_of_scope.get_active()) self.scenes["services_view"][str(cell)] = self.services_view # add the scene self._selected_opt["service"] = self.services_view.service self.main.workspace.add(self.services_view.notebook) self.services_view.treeview.connect("button_press_event", self.mouse_click)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_select_scene(self, scene):\n pass", "def mouse_click(self, tv, event, alltargets=False):\n\t\t\n\t\tif event.button == 3:\n\n\t\t\t# create the menu and submenu objects\n\t\t\trightclickmenu = Gtk.Menu()\n\t\t\t\n\t\t\ttargets = []\n\t\t\tgeneric = []\n\n\t\t\t# check\n\t\t\tif self.on_services_view:\n\t\t\t\tif alltargets:\n\t\t\t\t\t(model, pathlist) = self.services_list.servicestree.get_selection().get_selected_rows()\n\t\t\t\telse:\n\t\t\t\t\t(model, pathlist) = self.services_view.treeview.get_selection().get_selected_rows()\n\t\t\telse:\n\t\t\t\t(model, pathlist) = self.work.treeview.get_selection().get_selected_rows()\n\n\t\t\tif len(pathlist) < 1:\n\t\t\t\t# right click on nothing\n\t\t\t\treturn False \n\n\t\t\t# get selected port\n\t\t\ttry:\n\t\t\t\tfor path in pathlist :\n\t\t\t\t\ttree_iter = model.get_iter(path)\n\n\t\t\t\t\tif self.on_services_view:\n\t\t\t\t\t\tif alltargets:\n\t\t\t\t\t\t\tservice = self._filter_service(model.get_value(tree_iter,0)) # selected service\n\t\t\t\t\t\t\t# set shell conf section from user selection\n\t\t\t\t\t\t\tself._selected_opt[\"service\"] = service\n\n\t\t\t\t\t\t\tfor port in self.engine.database.get_ports_by_service(service):\n\t\t\t\t\t\t\t\ttargets.append(port)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# set selected port\n\t\t\t\t\t\t\tselected_port = model.get_value(tree_iter,1) \n\t\t\t\t\t\t\tself._selected_opt[\"port\"] = selected_port \n\n\t\t\t\t\t\t\t# set selected host if on service view\n\t\t\t\t\t\t\tself._selected_opt[\"host\"] = model.get_value(tree_iter,4) \n\t\t\t\t\t\t\ttargets.append(self.engine.database.get_port(model.get_value(tree_iter,7) ))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# set selected port\n\t\t\t\t\t\tselected_port = model.get_value(tree_iter,1) \n\t\t\t\t\t\tself._selected_opt[\"port\"] = selected_port \n\n\t\t\t\t\t\t# set selected service if not on service view\n\t\t\t\t\t\tselected_service = model.get_value(tree_iter,4) # selected service\n\t\t\t\t\t\ttargets.append(self.engine.database.get_port(model.get_value(tree_iter,7)))\n\t\t\t\t\t\tself._selected_opt[\"service\"] = selected_service \n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tpass\n\t\t\t\n\t\t\t#print('si')\n\t\t\t# fix some multiple names\n\t\t\tself._selected_opt[\"service\"] = self._filter_service(self._selected_opt[\"service\"])\n\n\t\t\t# get extra extensions\n\t\t\textra = self.engine.get_menu(self._selected_opt[\"service\"])\n\n\t\t\tfor extension in extra:\n\t\t\t\tif extension == \"shell\":\n\t\t\t\t\t# little trick for shell ext\n\t\t\t\t\tiE = Gtk.MenuItem(self._selected_opt[\"service\"])\n\t\t\t\telse:\n\t\t\t\t\tiE = Gtk.MenuItem(extension)\n\n\t\t\t\tiE.show()\n\t\t\t\trightclickmenu.append(iE)\n\n\t\t\t\t# check if there is a submenu for the current extension\n\t\t\t\ttry:\n\t\t\t\t\ttabs = {}\n\t\t\t\t\textension_ext_menu = Gtk.Menu()\n\t\t\t\t\tsubmenu = extra[extension].submenu(self._selected_opt[\"service\"])\n\n\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t#print(sub_item)\n\t\t\t\t\t\tif len(sub_item.split(\"/\")) > 1:\n\t\t\t\t\t\t\tprev = \"\"\n\t\t\t\t\t\t\tprevst = \"\"\n\n\t\t\t\t\t\t\tfor sub in sub_item.split(\"/\"):\n\t\t\t\t\t\t\t\tif sub != sub_item.split(\"/\")[-1]:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# new category\n\t\t\t\t\t\t\t\t\tt_menu = Gtk.Menu()\n\t\t\t\t\t\t\t\t\tt = Gtk.MenuItem(sub)\n\t\t\t\t\t\t\t\t\tt.show()\n\t\t\t\t\t\t\t\t\tt.set_submenu(t_menu)\n\n\t\t\t\t\t\t\t\t\tif not sub in tabs:\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\ttabs[sub] = t_menu\n\n\t\t\t\t\t\t\t\t\t\tif prevst != 
\"\":\n\t\t\t\t\t\t\t\t\t\t\tprev.append(t)\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\textension_ext_menu.append(t)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprev = tabs[sub]\n\t\t\t\t\t\t\t\t\tprevst = sub\n\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t#print(sub)\n\t\t\t\t\t\t\t\t\titem = Gtk.MenuItem( sub ) \n\t\t\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], sub_item)\n\n\t\t\t\t\t\t\t\t\tprev.append(item)\n\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# extension in any sub-categories\n\t\t\t\t\t\t\titem = Gtk.MenuItem(sub_item)\n\t\t\t\t\t\t\textension_ext_menu.append(item)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# show and connect the extension\n\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], sub_item)\n\n\t\t\t\t\tif len(tabs) == 0:\n\t\t\t\t\t\tnot_found = Gtk.MenuItem(\"nothing\")\n\t\t\t\t\t\tnot_found.show()\n\t\t\t\t\t\textension_ext_menu.append(not_found)\n\t\t\t\t\t\n\t\t\t\t\tiE.set_submenu(extension_ext_menu)\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t#print(e)\n\t\t\t\t\tiE.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], extra[extension].menu[\"label\"]) #.menu[\"label\"])\n\n\t\t\t\ttry:\n\t\t\t\t\t# try if there is generic for the current extension\n\t\t\t\t\tsubmenu = extra[extension].submenu(\"generic\")\n\n\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t# remove _ and show spaces\n\t\t\t\t\t\tgeneric.append(sub_item.replace(\"_\",\" \"))\n\t\t\t\texcept: pass\n\n\t\t\tseparator = Gtk.SeparatorMenuItem()\n\t\t\tseparator.show()\n\t\t\trightclickmenu.append(separator)\n\n\t\t\tgen_x = self.engine.get_menu(\"generic\")\n\n\t\t\tfor gen in generic:\n\n\t\t\t\ti2 = Gtk.MenuItem(gen)\n\t\t\t\ti2.show()\n\t\t\t\trightclickmenu.append(i2)\n\n\t\t\t\ti2.connect(\"activate\", self.run_multi_extra, targets, extra[\"shell\"], \"generic\", gen)\n\n\t\t\trightclickmenu.popup(None, None, None, None, 0, Gtk.get_current_event_time())\n\n\t\t\treturn True", "def scene_on(service):\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_on(group)", "def output(self):\n return {\n \"action\": \"RunScene\",\n \"arguments\": [\n {\n \"name\": \"SceneNum\", \n \"value\": self.id\n }\n ], \n \"service\": \"urn:micasaverde-com:serviceId:HomeAutomationGateway1\"\n }", "def on_stage_clicked(e: Event, options: Dict[str, Any]) -> None:\r\n trace('Stage is clicked!')", "def getScene():\n #print \"servers direct scenes are \",soya.IDLER.scenes[:]\n \n return soya.IDLER.scenes[0]", "def list_scene(command):\n namespace = app.main(command)\n assert namespace.command == 'ls' or namespace.command == \"listscenes\"", "def services(**kwargs):\n pass", "def do_scenes(self, line):\n\n print 'List of Scenes \\n'\n print 'ID\\tName'\n\n for index, scene in enumerate(self.huuey.scenes):\n print u\"{index}\\t{unique}\".format(index=index+1, unique=scene)", "def list_services(ctx):\n pass", "def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))", "def start_scene():\n from Menus.select_team import SelectTeam # placing the import in a func makes python import that module only when needed\n from Menus.button import Button\n from Display.display import Display\n select_team = SelectTeam()\n display = Display()\n # start scene audio\n pygame.mixer.music.load('assets/audio/menu/main_menu.wav')\n 
pygame.mixer.music.set_volume(0.3)\n pygame.mixer.music.play(-1)\n\n # loading main menu background\n main_menu_img = pygame.image.load(\"assets/sprites/Backgrounds/main-menu.png\").convert()\n\n # creating start button\n start_btn = Button(pygame.image.load(\"assets/sprites/Buttons/start-game.png\").convert(), (410, 380), (200, 80))\n\n while True:\n display.display_background(main_menu_img)\n display.display_text(\"Soccer Match Simulator 2020\", display.font_title, (250, 300))\n start_btn.draw()\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n # if start game is clicked move to SelectTeam class\n start_btn.event_handler(event, select_team.select_home)\n \n pygame.display.update()", "def create_menu():", "def run_app(self):\r\n ## Tell the artist to be patient... eg not genY\r\n inprogressBar = pbui.ProgressBarUI(title = 'Rebuilding Surfacing Scene From Publish:')\r\n inprogressBar.show()\r\n inprogressBar.updateProgress(percent = 1, doingWhat = 'Processing scene info...')\r\n ## Instantiate the API\r\n tk = sgtk.sgtk_from_path(\"T:/software/bubblebathbay\")\r\n debug(app = self, method = 'run_app', message = 'API instanced...\\n%s' % tk, verbose = False)\r\n debug(app = self, method = 'run_app', message = 'Fetch Surface Shaders launched...', verbose = False)\r\n \r\n context = self.context ## To get the step\r\n debug(app = self, method = 'run_app', message = 'Context Step...\\n%s' % context.step['name'], verbose = False)\r\n if context.step['name'] != 'Surface':\r\n cmds.warning(\"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n QtGui.QMessageBox.information(None, \"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n raise tank.TankError(\"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n \r\n scene_path = '%s' % os.path.abspath(cmds.file(query=True, sn= True))\r\n debug(app = self, method = 'run_app', message = 'scene_path... %s' % scene_path, verbose = False)\r\n \r\n ## Build an entity type to get some values from.\r\n entity = self.context.entity ## returns {'type': 'Shot', 'name': 'ep100_sh010', 'id': 1166}\r\n debug(app = self, method = 'run_app', message = 'entity... %s' % entity, verbose = False)\r\n \r\n ## Filter for the matching ID for the shot\r\n sg_filters = [[\"id\", \"is\", entity[\"id\"]]]\r\n debug(app = self, method = 'run_app', message = 'sg_filters... %s' % sg_filters, verbose = False)\r\n \r\n ## Build an entity type to get some values from.\r\n sg_entity_type = self.context.entity[\"type\"] ## returns Shot\r\n debug(app = self, method = 'run_app', message = 'sg_entity_type...\\n%s' % sg_entity_type, verbose = False)\r\n \r\n ## DATA\r\n ## NOTES SO HERE WE DON'T NEED TO CALL THE ASSETS FIELD FROM SHOTGUN\r\n ## WE CAN JUST GRAB THE LATEST PUBLISH FILE FROM EACH OF THE TEMPLATE STEPS\r\n inprogressBar.updateProgress(percent = 3, doingWhat = 'Processing scene info...')\r\n shadersTemplate = tk.templates[self.get_setting('maya_asset_SHD_XML_template')]\r\n debug(app = self, method = 'run_app', message = 'shadersTemplate...\\n%s' % shadersTemplate, verbose = False)\r\n\r\n ## PROCESS TEMPLATE NOW\r\n inprogressBar.updateProgress(percent = 5, doingWhat = 'Processing shaders xml...') \r\n debug(app = self, method = 'run_app', message = 'Processing template... 
%s' % shadersTemplate, verbose = False)\r\n ## SHADERS\r\n self.processTemplates(tk = tk, templateFile = shadersTemplate, id = entity[\"id\"], shotNum = entity[\"name\"], inprogressBar = inprogressBar, lighting = False)\r\n \r\n ############################################\r\n ## CORE ACHIVES \r\n ## Now process the assembly References\r\n debug(app = self, method = 'run_app', message = 'Processing mentalCore assemblies..', verbose = False)\r\n inprogressBar.updateProgress(percent = 50, doingWhat = 'Processing core archives...')\r\n if cmds.objExists('CORE_ARCHIVES_hrc') or cmds.objExists('CORE_ARCHIVES_hrc'):\r\n inprogressBar.updateProgress(percent = 100, doingWhat = 'Complete...')\r\n inprogressBar.close()\r\n inprogressBar = None\r\n else:\r\n ## Get the assembly paths from the transforms in the scene with the correct tags to load now..\r\n self.getAssemblyPaths = coreLib.getCorePaths()\r\n debug(app = self, method = 'run_app', message = 'self.getAssemblyPaths.. %s' % self.getAssemblyPaths, verbose = False)\r\n \r\n ## Now load the assemblies from the paths\r\n coreLib.loadCoreArchives(paths = self.getAssemblyPaths)\r\n debug(app = self, method = 'run_app', message = 'self.loadCoreArchives Successful all assemblies loaded moving on to reconnect now...', verbose = False)\r\n inprogressBar.updateProgress(percent = 70, doingWhat = 'Core archives loaded...')\r\n \r\n ## Now connect the assemblies.\r\n inprogressBar.updateProgress(percent = 80, doingWhat = 'Reconnecting core archives...')\r\n coreLib.doReconnect(postPublish = False)\r\n debug(app = self, method = 'run_app', message = 'Ahh core archive assemblies reconnected successfully!!...', verbose = False)\r\n \r\n ## Now cleanup\r\n inprogressBar.updateProgress(percent = 90, doingWhat = 'Cleaning...')\r\n ## Group the placements\r\n cleanup.shotCleanupPlacements() \r\n ## Group the lights\r\n cleanup.shotCleanupLights()\r\n ## Put all the coreRebuild under Lighting_hrc group\r\n coreLib._cleanupCoreArchiveRebuildGrps('LIGHTING_hrc')\r\n \r\n \r\n inprogressBar.updateProgress(percent = 100, doingWhat = 'COMPLETE...')\r\n inprogressBar.close()\r\n inprogressBar = None", "def on_mapbutton_clicked(self, button, selectedmap):\n mapfile = self.selectedmap.get_text()\n\n numberofcars = self.numberofcars.get_value_as_int()\n\n practicefile = open(basepath + '/launch/teststarter.launch', \"w\")\n vehicle_id = 1\n practicefile.write('<launch>\\n')\n practicefile.write(' <node pkg=\"sml_world\" name=\"road_network\" type=\"road_network.py\" args=\"/resources/scenarios/%s' % mapfile)\n practicefile.write(' True\" />\\n')\n practicefile.write(' <node pkg=\"sml_world\" name=\"visualization\" type=\"visualization.py\" />\\n')\n practicefile.write(' <node pkg=\"sml_world\" name=\"sml_world_central\" type=\"sml_world_central.py\" />\\n')\n for vehicle_id in range(1, numberofcars):\n practicefile.write(' <node pkg=\"rosservice\" name=\"spawn_vehicle%d' % vehicle_id)\n practicefile.write('\" type=\"rosservice\" args=\"call --wait /spawn_vehicle ')\n practicefile.write('\\'{vehicle_id: %d' % vehicle_id)\n practicefile.write(', class_name: \\'DummyVehicle\\', x: 0.0, y: 0.0, yaw: 0.8, v: 10.0, node_id: -400, toggle_sim: true}\\'\" />\\n')\n practicefile.write('</launch>\\n')\n\n practicefile.close()\n start_sml()", "def setup_scenes(self, scene_dict, start_scene):\n self._scene_dict = scene_dict\n self._scene_name = start_scene\n self._scene = self._scene_dict[self._scene_name]", "def servicesChanged(self) -> None:\n ...", "def events(self):", "def 
refresh(self):\n selected = []\n if not self.__new_service:\n selected = [str(t.text()) for t in\n self.__service_list.selectedItems()]\n\n self.__service_list.clear()\n if not self.__show:\n self.__services = opencue.api.getDefaultServices()\n else:\n self.__services = self.__show.getServiceOverrides()\n\n for service in self.__services:\n item = QtWidgets.QListWidgetItem(service.name())\n self.__service_list.addItem(item)\n\n if service.name() in selected:\n item.setSelected(True)\n\n self.__service_list.sortItems()", "def service_list():\n data = list_services()\n table = present(lambda: data,\n renderer='table',\n headers=['Service Name', 'URLS', 'Service Type', \"Memory Usages\", 'Replicas', 'Started at',\n 'Updated at',\n 'State', 'Restarts'],\n columns=['name', 'urls', 'service_type', 'memory', 'replicas', 'start_date', 'last_update',\n 'state',\n 'service_restarts'])\n if table:\n click.echo(table)\n else:\n click.echo('\\nYou have no running services right now, why don\\'t you try deploying one? \\n'\n 'have fun and follow the link below:\\n')\n click.echo('https://docs.fandogh.cloud/docs/services.html\\n')", "def add_services(self):\n # first get the names\n names = str(self.client.console_execute('services -c name {0}\\n'.format(self.ip))[b'data'])\n while not 'name' in names:\n sleep(10)\n names = self.client.console_read()\n names = names.split('\\n')\n for row in names:\n if self.ip in row:\n row = strip_whitespaces(row)\n self.services.append({'name': row.split(' ')[1]})\n\n # get the ports by service name\n ports = str(self.client.console_execute('services -c port {0}\\n'.format(self.ip))[b'data'])\n while not 'port' in ports:\n sleep(10)\n ports = self.client.console_read()\n ports = ports.split('\\n')\n for row in ports:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['port'] = row.split(' ')[1]\n\n # get some information by service name (only useful if a report shall be generated)\n info = str(self.client.console_execute('services -c info {0}\\n'.format(self.ip))[b'data'])\n while not 'info' in info:\n sleep(10)\n info = self.client.console_read()\n info = info.split('\\n')\n for row in info:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['info'] = row.split(' ')[1]", "def saved(self, service):\n if not self.__show:\n msg = QtWidgets.QMessageBox()\n msg.setText(\"You are about to modify a facility wide service configuration. 
\"\n \"Are you in PSR-Resources?\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n msg.setDefaultButton(QtWidgets.QMessageBox.No)\n if msg.exec_() == QtWidgets.QMessageBox.No:\n return\n\n if self.__new_service:\n if self.__show:\n self.__show.createServiceOverride(service.data)\n else:\n opencue.api.createService(service.data)\n else:\n service.update()\n\n self.refresh()\n self.__new_service = False\n\n for i in range(0, self.__service_list.count()):\n item = self.__service_list.item(i)\n if item:\n if str(item.text()) == service.name():\n self.__service_list.setCurrentRow(i, QtCore.QItemSelectionModel.Select)\n break", "def scene_name():\n\n pass", "def start(self,process=0): \n #productive\n profprint()\n logic = self.logic\n logic.changeCursor(1)\n self.removeObservers()\n # get new slice nodes\n layoutManager = slicer.app.layoutManager()\n sliceNodeCount = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceNode')\n for nodeIndex in xrange(sliceNodeCount):\n # find the widget for each node in scene\n sliceNode = slicer.mrmlScene.GetNthNodeByClass(nodeIndex, 'vtkMRMLSliceNode')\n sliceWidget = layoutManager.sliceWidget(sliceNode.GetLayoutName()) \n if sliceWidget: \n # add obserservers and keep track of tags\n style = sliceWidget.sliceView().interactorStyle()\n self.sliceWidgetsPerStyle[style] = sliceWidget\n events = (\"LeftButtonPressEvent\",\"RightButtonPressEvent\", \"EnterEvent\", \"LeaveEvent\",\"KeyPressEvent\",\"KeyReleaseEvent\")\n for event in events:\n if process==self.needleValidationClicks:\n tag = style.AddObserver(event, self.processEventNeedleValidation)\n elif process==self.addManualTipClicks:\n tag = style.AddObserver(event, self.processEventAddManualTips)\n elif process==self.obturatorNeedleTipClicks:\n tag = style.AddObserver(event, self.processEventAddObturatorNeedleTips)\n dn = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode().GetDisplayNode()\n w = dn.GetWindow()\n l = dn.GetLevel()\n dn.AddObserver(vtk.vtkCommand.ModifiedEvent, lambda c,e : logic.setWL(dn,w,l))\n else:\n tag = style.AddObserver(event, self.processEvent) \n self.styleObserverTags.append([style,tag])", "def show_all_service(pass_list):\r\n service_list = []\r\n\r\n for pass_num in range(len(pass_list)):\r\n\r\n service_list.append(pass_list[pass_num][1])\r\n print(f\"{pass_num + 1}. {pass_list[pass_num][1]}\")\r\n\r\n return service_list", "def service_handler(service):\n entity_id = ENTITY_ID_FORMAT.format(service.service)\n script = component.entities.get(entity_id)\n if script:\n script.turn_on()", "def __handle_view_item(self, gamestate_component):", "def get_service(self):", "def view_service(options, service_name, client):\n if options.show_events:\n return display_events(client.service_events(service_name))\n\n service_content = client.service(service_name)\n return display.DisplayServices().format_details(service_content)", "def newSDDCService(**kwargs):\n # Test for interactive flag - if False, check to ensure additional arguments were give for service entry\n if kwargs['interactive'] is False and (kwargs['l4_protocol'] is None or kwargs['dest_ports'] is None):\n print(\"Error - if not using interactive mode, at least protocol and destination port(s) must be configured. 
Source port(s) optional, based on your application.\")\n sys.exit(1)\n elif kwargs['interactive'] is True and (kwargs['l4_protocol'] is not None or kwargs['dest_ports'] is not None or kwargs['source_ports'] is not None):\n print(\"Error - if using interactive mode, please only specify the name of the desired service. All other parameters will be obtained interactively.\")\n sys.exit(1)\n else:\n pass\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n service_id = kwargs['objectname']\n interactive = kwargs['interactive']\n\n if interactive == True:\n service_entry_list = []\n # Start a loop that will run until the user enters 'quit'.\n # Ask the user for a name.\n destination_port = \"\"\n while destination_port != 'done':\n destination_port_list = []\n source_port_list = []\n service_entry_id = input(\"Please enter the Service Entry ID:\")\n l4_protocol = input(\"Please enter the L4 Protocol:\")\n source_port = \"\"\n destination_port = \"\"\n while source_port != 'done':\n source_port = input(\"Plese enter the Source Ports or type 'done' when your list is finished:\")\n if source_port != \"done\":\n source_port_list.append(source_port)\n while (destination_port != 'next') and (destination_port != \"done\"):\n source_port = \"\"\n destination_port = input(\"Plese enter the Destination Ports, type 'next' when you want to define another service entry or 'done' if you have finished:\")\n if (destination_port != 'next') and (destination_port != \"done\"):\n destination_port_list.append(destination_port)\n service_entry = {\n \"l4_protocol\": l4_protocol,\n \"source_ports\": source_port_list,\n \"destination_ports\" : destination_port_list,\n \"resource_type\" : \"L4PortSetServiceEntry\",\n \"id\" : service_entry_id,\n \"display_name\" : service_entry_id }\n service_entry_list.append(service_entry)\n else:\n source_port_list = kwargs['source_ports']\n destination_port_list = kwargs['dest_ports']\n l4_protocol = kwargs['l4_protocol']\n service_entry_list = [\n {\n \"l4_protocol\": l4_protocol,\n \"source_ports\": source_port_list,\n \"destination_ports\": destination_port_list,\n \"resource_type\": \"L4PortSetServiceEntry\",\n \"display_name\": f'{service_id}_svc_entry'\n }\n ]\n json_data = {\n \"service_entries\":service_entry_list,\n \"id\" : service_id,\n \"display_name\" : service_id,\n }\n response = new_sddc_service_json(proxy,sessiontoken,service_id,json_data)\n if response == 200:\n print(f'Service {service_id} successfully updated.')\n params = {'proxy':proxy, 'sessiontoken':sessiontoken, 'objectname':service_id}\n getSDDCService(**params)\n else:\n print(\"Issues creating the service - please check your syntax and try again.\")\n sys.exit(1)" ]
[ "0.5816325", "0.56552047", "0.5570884", "0.54380053", "0.5428393", "0.5369113", "0.5291489", "0.5280148", "0.52580625", "0.5250973", "0.52037925", "0.51483935", "0.5143417", "0.51365715", "0.5135276", "0.511065", "0.50677156", "0.50518566", "0.5048181", "0.5044973", "0.5041532", "0.5026883", "0.5006413", "0.50021446", "0.49816352", "0.498029", "0.49665785", "0.49656516", "0.4953247", "0.49317616" ]
0.64644957
0
Specifies the type of threshold criteria Expected value is 'DynamicThresholdCriterion'.
def criterion_type(self) -> str: return pulumi.get(self, "criterion_type")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__, *,\n criterion_type: str,\n metric_name: str,\n name: str,\n operator: str,\n threshold: float,\n time_aggregation: str,\n dimensions: Optional[Sequence['outputs.MetricDimensionResponse']] = None,\n metric_namespace: Optional[str] = None,\n skip_metric_validation: Optional[bool] = None):\n pulumi.set(__self__, \"criterion_type\", 'StaticThresholdCriterion')\n pulumi.set(__self__, \"metric_name\", metric_name)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"operator\", operator)\n pulumi.set(__self__, \"threshold\", threshold)\n pulumi.set(__self__, \"time_aggregation\", time_aggregation)\n if dimensions is not None:\n pulumi.set(__self__, \"dimensions\", dimensions)\n if metric_namespace is not None:\n pulumi.set(__self__, \"metric_namespace\", metric_namespace)\n if skip_metric_validation is not None:\n pulumi.set(__self__, \"skip_metric_validation\", skip_metric_validation)", "def __init__(__self__, *,\n alert_sensitivity: str,\n criterion_type: str,\n failing_periods: 'outputs.DynamicThresholdFailingPeriodsResponse',\n metric_name: str,\n name: str,\n operator: str,\n time_aggregation: str,\n dimensions: Optional[Sequence['outputs.MetricDimensionResponse']] = None,\n ignore_data_before: Optional[str] = None,\n metric_namespace: Optional[str] = None,\n skip_metric_validation: Optional[bool] = None):\n pulumi.set(__self__, \"alert_sensitivity\", alert_sensitivity)\n pulumi.set(__self__, \"criterion_type\", 'DynamicThresholdCriterion')\n pulumi.set(__self__, \"failing_periods\", failing_periods)\n pulumi.set(__self__, \"metric_name\", metric_name)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"operator\", operator)\n pulumi.set(__self__, \"time_aggregation\", time_aggregation)\n if dimensions is not None:\n pulumi.set(__self__, \"dimensions\", dimensions)\n if ignore_data_before is not None:\n pulumi.set(__self__, \"ignore_data_before\", ignore_data_before)\n if metric_namespace is not None:\n pulumi.set(__self__, \"metric_namespace\", metric_namespace)\n if skip_metric_validation is not None:\n pulumi.set(__self__, \"skip_metric_validation\", skip_metric_validation)", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def _estimate_threshold(self, x=None, threshold_type='numerical', **kwargs):\n\n if threshold_type == \"data\":\n return super()._estimate_threshold(x)\n \n # if threshold is in table, then use it.\n current_setting = (self.beta, self._chi2dist.kwds['df'])\n threshold = None\n res = False\n # Whether to recompute thresholds\n recompute_threshold = kwargs.pop('recompute_threshold', False)\n if not recompute_threshold and current_setting in _gaussian_cusum_thresholds.keys():\n for e in _gaussian_cusum_thresholds[current_setting]:\n if e[0] == self.arl:\n threshold = np.array([e[1]])\n res = True\n\n # if threshold not is in table, estimate it\n if threshold is None:\n len_simulation = kwargs.pop('len_simulation', None)\n if len_simulation is None:\n len_simulation = 10 * self.arl\n self.log.info(\"estimating threshold...\")\n plain_cusum = Cusum(arl=self.arl, beta=self.beta)\n plain_cusum.gamma = self.gamma\n d2_training = self._chi2dist.rvs(size=(int(len_simulation), 1))\n kwargs.pop('x', None)\n res = plain_cusum._estimate_threshold(x=d2_training, dof=self._chi2dist.kwds['df'],\n **kwargs)\n threshold = plain_cusum.threshold\n \n # set threshold\n self.threshold = threshold\n return res", "def categorical_threshold_config(self) -> 
Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig']:\n return pulumi.get(self, \"categorical_threshold_config\")", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def type(self) -> pulumi.Input[Union[str, 'ValidationThresholdType']]:\n return pulumi.get(self, \"type\")", "def threshold_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BudgetThresholdRuleArgs']]]]:\n return pulumi.get(self, \"threshold_rules\")", "def threshold_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BudgetThresholdRuleArgs']]]]:\n return pulumi.get(self, \"threshold_rules\")", "def set_threshold(self, cat, t):\n self.con.execute(\"update ct set threshold=%f where category='%s'\" \n % (t, cat))", "def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def threshold(self) -> Union[float, TensorType]:\n return self._threshold", "def set_camera_thresholds(self,thresholds):\n self.send_packet('\\x93'+struct.pack('<'+'B'*8,*thresholds))", "def add_constraint(self, constraint_type, **kwargs):\n if constraint_type == \"custom\":\n self.constraints += tuple(kwargs.values())[0]\n elif constraint_type == \"weight\":\n bound, leverage = self.const_creator.create_constraint(constraint_type, **kwargs)\n self.bounds = bound\n self.leverage = kwargs['leverage']\n self.constraints[0] = leverage[0] # Total Leverage is always the first constraint\n else:\n self.constraints += self.const_creator.create_constraint(constraint_type, **kwargs)", "def _estimate_threshold(self, **kwargs):\n recompute_threshold = kwargs.pop('recompute_threshold', False)\n # if threshold is in table, then use it.\n current_setting = (self.beta, self.chi2dist.kwds['df'])\n threshold = None\n res = False\n if not recompute_threshold and current_setting in _gaussian_cusum_thresholds.keys():\n for e in _gaussian_cusum_thresholds[current_setting]:\n if e[0] == self.arl:\n threshold = np.array([e[1]])\n res = True\n # if threshold not is in table, estimate it\n if threshold is None:\n len_simulation = kwargs.pop('len_simulation', None)\n if len_simulation is None:\n len_simulation = 10 * self.arl\n self.log.info(\"estimating threshold...\")\n plain_cusum = Cusum(arl=self.arl, beta=self.beta)\n plain_cusum.gamma = self.gamma\n d2_training = self.chi2dist.rvs(size=(int(len_simulation), 1))\n kwargs.pop('x', None)\n res = plain_cusum._estimate_threshold(x=d2_training, dof=self.chi2dist.kwds['df'],\n **kwargs)\n threshold = plain_cusum.threshold\n self.threshold = threshold\n return res", "def get_threshold(self):\n rgs = self.dynamics.regimes\n for r in rgs:\n if(r.initial==True): main_regime = r\n elif(r.initial==False): refractory_regime = r\n roc = main_regime.event_handlers\n threshcond = \"\"\n for oc in roc:\n if(type(oc) is lems.OnCondition):\n threshcond = self.replace_operators(oc.test)\n else: threshcond=None\n return threshcond", "def get_threshold(self):\n\n if self.threshold.startswith('+'):\n if self.threshold[1:].isdigit():\n self._threshold = 
int(self.threshold[1:])\n self._upper = True\n elif self.threshold.startswith('-'):\n if self.threshold[1:].isdigit():\n self._threshold = int(self.threshold[1:])\n self._upper = False\n else:\n if self.threshold.isdigit():\n self._threshold = int(self.threshold)\n self._upper = True\n if not hasattr(self, '_threshold'):\n raise ValueError('Invalid threshold')", "def getThreshold(self): # real signature unknown; restored from __doc__\n pass", "def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def numerical_threshold_config(self) -> Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig']:\n return pulumi.get(self, \"numerical_threshold_config\")", "def _init_is_better(self, mode: str, threshold: float, threshold_mode: str) -> None:\n if mode not in {\"min\", \"max\"}:\n raise ValueError(\"mode \" + mode + \" is unknown!\")\n if threshold_mode not in {\"rel\", \"abs\"}:\n raise ValueError(\"threshold mode \" + threshold_mode + \" is unknown!\")\n\n if mode == \"min\":\n self.mode_worse = math.inf\n else: # mode == 'max':\n self.mode_worse = -math.inf\n\n self.mode = mode\n self.threshold = threshold\n self.threshold_mode = threshold_mode", "def evaluate(self, threshold=0.5):\n pass", "def add_condition(self):\n m = self.get_current_measurement()\n result = PriorityDialog()\n if result.exec_():\n # Update Survey.priority based on the input\n key, val1, val2, weight = result.key, result.val1, result.val2, result.weight\n \n # If the condition is x == val1, determine whether val1 is str or int\n if result.type == 'value':\n val1 = get_type(val1)(val1)\n\n # Add the condition to Survey.priority\n arr = np.array([[val1, val2, weight]])\n if key not in m.priority:\n m.priority[key] = np.zeros(shape=(0, 3))\n m.priority[key] = np.append(m.priority[key], arr, axis=0)\n \n self.mgr.changed = True\n \n self.load_conditions()" ]
[ "0.64736396", "0.6007544", "0.5649433", "0.560247", "0.549276", "0.5436013", "0.53366566", "0.5287433", "0.5287433", "0.5266749", "0.5145749", "0.5077199", "0.5077199", "0.5077199", "0.5077199", "0.5077199", "0.5073182", "0.49863455", "0.49803627", "0.4960917", "0.4954998", "0.49512622", "0.49187928", "0.49147123", "0.4886583", "0.486399", "0.48394212", "0.48277092", "0.4818978", "0.48157865" ]
0.62579083
1
Name of the metric.
def metric_name(self) -> str: return pulumi.get(self, "metric_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metric_name(self) -> str:\n return self._metric_name", "def metric_name(self) -> str:\n return self._values.get('metric_name')", "def metric_name(self) -> str:\n return self._values.get('metric_name')", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> Optional[str]:\n return pulumi.get(self, \"metric_name\")", "def getMetricName(self):\n return self.getOrDefault(self.metricName)", "def getMetricName(self):\n return self.getOrDefault(self.metricName)", "def name(self) -> str:\n if self._name is None:\n return 'AutoML Metric'\n else:\n return self._name", "def metric(self) -> str:\r\n return self._metric", "def _metric_name(self, suffix):\r\n return '{}.{}'.format(self.METRIC_NAME, suffix)", "def metric_identifier(self) -> str:\n return self._metric_identifier", "def name(self):\n return self.measurement_profile.name", "def __str__(self):\n return '{self.metric}' \\\n .format(self=self)", "def name(self):\n return self.data[\"attributes\"][\"stats\"][\"name\"]", "def metric(self):\n return self.__metric", "def name(self):\n return f\"{self._name}_{self._sensor}\"", "def measured_property_name(self) -> str:\n return self._prop_name", "def name(self):\n return f\"{self._name} {self._sensor_name}\"", "def get_metric_name(metric, model_id):\n name = metric.value if isinstance(metric, Metric) else str(metric)\n return normalize_name('{}.metrics.{}'.format(model_id, name))", "def name(self):\n return f\"{self._name} {SENSOR_TYPES[self.sensor][0]}\"", "def name(self):\n return f\"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}\"", "def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)" ]
[ "0.9080742", "0.89493376", "0.89493376", "0.85680395", "0.85680395", "0.85680395", "0.85680395", "0.85680395", "0.85680395", "0.85680395", "0.85680395", "0.85680395", "0.8507669", "0.8280574", "0.8280574", "0.8276878", "0.8003284", "0.7678508", "0.7518795", "0.74013215", "0.7283626", "0.7264966", "0.71947575", "0.7178294", "0.71052885", "0.70876956", "0.708659", "0.70665133", "0.70513386", "0.7013602" ]
0.9079402
1
the criteria time aggregation types.
def time_aggregation(self) -> str: return pulumi.get(self, "time_aggregation")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __timeRestriction():\n restriction = {\"M\": [\"7:00\", \"9:30\"],\n \"A\": [\"16:00\", \"19:30\"]}\n return restriction", "def get_group_types(self):\r\n pass", "def durations_per_type(self):\n pass", "def get_report_event_types():\n post_data = request.get_json()\n where_clause = \"\"\n if post_data:\n where_clause = \"WHERE 1 = 1 \"\n if post_data.get('caregiver_ids'):\n where_clause += \"AND e.caregiver_id = ANY(:caregiver_ids) \"\n if post_data.get('min_time'):\n where_clause += \"AND e.start_time > :min_time \"\n if post_data.get('max_time'):\n where_clause += \"AND e.start_time < :max_time \"\n\n sql = text(\"\"\"\n SELECT extract(year from e.start_time) as yyyy,\n to_char(e.start_time, 'Mon') as mon,\n e.caregiver_id,\n e.reimbursed,\n sum(t.amount_in_cents)/100\n FROM event e INNER JOIN type t\n ON e.type_id = t.id\n {}\n GROUP BY yyyy, mon, e.caregiver_id, e.reimbursed\n \"\"\".format(where_clause))\n try:\n result = db.session.execute(sql, post_data).fetchall()\n except Exception:\n response_object = {\n 'status': 'fail',\n 'message': 'Invalid payload.'\n }\n return make_response(jsonify(response_object)), 400\n\n report = rec_dd()\n for r in result:\n prefix = \"\" if r[3] else \"non-\"\n report[int(r[0])][r[1]][r[2]][prefix + \"reimbursed SEK\"] = r[4]\n\n response_object = {\n 'status': 'success',\n 'data': {\n 'report': report,\n 'generated_at': datetime.datetime.now()\n }\n }\n return make_response(jsonify(response_object)), 200", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def _types(cls):\n return {}", "def filter_metrics_choices(self): \n cols = pd.Series(tdr.get_catalog().tubidw.all_metric_hourly.columns)\n filter_metrics = ['no filters'] + cols[cols.str.endswith(tuple(['_count', '_sec']))].tolist()\n return filter_metrics", "def get_aggregations(self):\n return []", "def get_exposure_times(self):\n exposure_time = self.meta.exposure.exposure_time\n duration = self.meta.exposure.duration\n start_time = self.meta.exposure.start_time\n mid_time = self.meta.exposure.mid_time\n end_time = self.meta.exposure.end_time\n return (exposure_time, duration, start_time, mid_time, end_time)", "def type_fields(self, res, op_item):\n result = []\n cast_func = {}\n header = res[0]\n for heading in header:\n cast_func[heading] = DataType.str\n\n if \"field_type\" in op_item:\n for f, p in findall(FIELD_TYPE_RE, op_item[\"field_type\"]):\n cast_func[p] = self.dt.get_func(f)\n first = True\n for row in res[1:]:\n new_row = []\n for idx in range(len(header)):\n\n heading = header[idx]\n cur_value = row[idx]\n if type(cur_value) is tuple:\n cur_value = cur_value[1]\n if heading == \"timespan\" and first:\n first = False\n new_row.append((cast_func[heading](cur_value), cur_value))\n\n 
result.append(new_row)\n\n return [header] + result", "def class_time(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), t in self.apply_time.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += t\n return rval", "def get_types(self):\n return self.column_type", "def class_time(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, t in self.apply_time.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += t\r\n return rval", "def test_time_type_state_types(day):\n\n assert day_time_info(day.hours_0).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_1).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_2).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_3).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_4).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_5).types == {TimeType.MORNING}\n assert day_time_info(day.hours_6).types == {TimeType.MORNING}\n assert day_time_info(day.hours_7).types == {TimeType.MORNING}\n assert day_time_info(day.hours_8).types == {TimeType.MORNING}\n assert day_time_info(day.hours_9).types == {TimeType.MORNING}\n assert day_time_info(day.hours_10).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_11).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_12).types == {TimeType.NOON}\n assert day_time_info(day.hours_13).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_14).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_15).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_16).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_17).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_18).types == {TimeType.EVENING}\n assert day_time_info(day.hours_19).types == {TimeType.EVENING}\n assert day_time_info(day.hours_20).types == {TimeType.EVENING}\n assert day_time_info(day.hours_21).types == {TimeType.EVENING}\n assert day_time_info(day.hours_22).types == {TimeType.EVENING}\n assert day_time_info(day.hours_23).types == {TimeType.NIGHT}", "def get_measurement_types():\n\n all_measures = ['temperature', 'humidity', 'pressure']\n\n ####################\n return all_measures\n ####################", "def get_query_and_evaluation_analysis_types(self, parameters):\n queries = parameters[\"clustering\"][\"evaluation\"][\"query_types\"]\n queries.extend(AnalysisPopulator.get_evaluation_analysis_types(parameters))\n return list(set(queries))", "def __getTimeRestriction(time_day):\n time_restriction = PicoPlaca.__timeRestriction()\n time_ini = time_restriction[time_day][0]\n time_fin = time_restriction[time_day][1]\n time_ini = datetime.strptime(time_ini, \"%H:%M\").time()\n time_fin = datetime.strptime(time_fin, \"%H:%M\").time()\n return time_ini, time_fin", "def times(self):\n ret = {}\n for tag in self.TIMETAGLIST:\n if self.has_tag(tag):\n try:\n ret[tag] = safeInt(self.tag(tag))\n except TypeError:\n pass\n return ret", "def get_data_types(self):\n data_types = set()\n for er in self.exercise_recordings:\n for data_type in er.data_types:\n if data_type not in data_types:\n data_types.add(data_type)\n return list(data_types)", "def get_evaluation_analysis_types(self, parameters):\n eval_types =[]\n for evaluation_criteria_id in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"]:\n# for subcriteria in 
parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id]:\n# eval_types.append(subcriteria)\n eval_types.extend(parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id].keys())\n return list(set(eval_types))", "def typeParameters():\n\t\td = Algorithm.typeParameters()\n\t\td.update({\n\t\t\t'epsilon': lambda x: isinstance(x, (float, int)) and x > 0,\n\t\t\t'alpha': lambda x: isinstance(x, (float, int)) and x > 0,\n\t\t\t'r': lambda x: isinstance(x, (float, int)) and x > 0,\n\t\t\t'Qmin': lambda x: isinstance(x, (float, int)),\n\t\t\t'Qmax': lambda x: isinstance(x, (float, int))\n\t\t})\n\t\treturn d", "def data_types(self):", "def data_types(self):\n return self['data_types']", "def aggregation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregation_type\")", "def aggregation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregation_type\")", "def test_durations_per_type(self):\n sim = ss.Simulation()\n assert type(sim.durations_per_type()) == dict", "def etypes(self): # -> list[str]:\n ...", "def get_product_time_bounds(self, product: str):\n\n # Get the offsets from dataset doc\n product = self.types.get_by_name(product)\n dataset_section = product.metadata_type.definition['dataset']\n min_offset = dataset_section['search_fields']['time']['min_offset']\n max_offset = dataset_section['search_fields']['time']['max_offset']\n\n time_min = DateDocField('aquisition_time_min',\n 'Min of time when dataset was acquired',\n DATASET.c.metadata,\n False, # is it indexed\n offset=min_offset,\n selection='least')\n\n time_max = DateDocField('aquisition_time_max',\n 'Max of time when dataset was acquired',\n DATASET.c.metadata,\n False, # is it indexed\n offset=max_offset,\n selection='greatest')\n\n with self._db_connection() as connection:\n result = connection.execute(\n select(\n [func.min(time_min.alchemy_expression), func.max(time_max.alchemy_expression)]\n ).where(\n DATASET.c.dataset_type_ref == product.id\n )\n ).first()\n\n return result", "def CRITs_mappings(self):\n self.crits_type_mapping = {}\n self.crits_type_mapping[\"DOMAIN\"] = \"URI - Domain Name\"\n self.crits_type_mapping[\"URI - Domain Name\"] = \"DOMAIN\"\n self.crits_type_mapping[\"IP\"] = \"Address - ipv4-addr\"\n self.crits_type_mapping[\"Address - ipv4-addr\"] = \"IP\"", "def _init_prepare_types(self):\n # len(db)-1 wouldn't work here because there could be missing\n # index due to generic filtering\n self.types = {\n key: fit_integer_type(np.max(db.index.values), is_signed=False)\n for key, db in iteritems(self.by_dbs)}" ]
[ "0.57325155", "0.56621933", "0.5633908", "0.5444428", "0.5374539", "0.5365551", "0.5315507", "0.53074354", "0.5227702", "0.521846", "0.5201579", "0.5184422", "0.5163588", "0.51626277", "0.5159124", "0.5154171", "0.5151809", "0.51419383", "0.51266676", "0.5125911", "0.5125398", "0.51119715", "0.5108495", "0.50918245", "0.50918245", "0.50915587", "0.5075939", "0.50699586", "0.50634664", "0.5060226" ]
0.5837583
0
Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped.
def skip_metric_validation(self) -> Optional[bool]: return pulumi.get(self, "skip_metric_validation")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()", "def test_error_on_invalid_metric(self):\n self.ocp_data.get(\"rates\", [])[0][\"metric\"][\"name\"] = \"invalid_metric\"\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def submit_errors_metric(lambda_context):\n if not are_enhanced_metrics_enabled():\n return\n\n lambda_metric(\n \"{}.errors\".format(ENHANCED_METRICS_NAMESPACE_PREFIX),\n 1,\n tags=get_enhanced_metrics_tags(lambda_context),\n )", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def create_metric(self) -> EvalMetric:\n pass", "def test_metric_from_name_nonexistent(self):\n with pytest.raises(ValueError):\n _metric_from_name(\"nonexistent_metric\")", "def test_value_missing_metric(self, missing_mock):\n missing_mock.return_value = True\n self._MetricSourceAgeMetricTest__metric_source.datetime = MagicMock()\n\n result = self.__metric.value()\n\n self.assertTrue(missing_mock.called)\n self.assertTrue(self._MetricSourceAgeMetricTest__metric_source.datetime.assert_not_called)\n self.assertEqual(-1, result)", "def __init__(self):\n super().__init__()\n self.metric = 'GCOERR'", "def __init__(self):\n super().__init__()\n self.metric = 'FALLOUT'", "def check_metric(self, metric):\r\n\r\n if metric in metric_functions or metric == '':\r\n return metric\r\n else:\r\n raise InvalidNeuralNetwork()", "def test_negative(self):\n self.assertFalse(validate_measure_input('-1', self.measures))", "def test_fail_on_init(self):\n\n with self.assertRaises(IcypawException):\n class Node:\n my_metric = Metric(Int64, read_only=True)\n\n @my_metric.net_hook\n def my_metric(self, value):\n pass", "def validation_event(self, message):", "def test_prometheus_rule_failures():\n prometheus = ocs_ci.utility.prometheus.PrometheusAPI()\n alerts_response = prometheus.get(\n \"alerts\", payload={\"silenced\": False, \"inhibited\": False}\n )\n assert alerts_response.ok is True\n alerts = alerts_response.json()[\"data\"][\"alerts\"]\n log.info(f\"Prometheus Alerts: {alerts}\")\n assert constants.ALERT_PROMETHEUSRULEFAILURES not in [\n alert[\"labels\"][\"alertname\"] for alert in alerts\n ]", "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def test_error(self):\n metric = self.metric()\n measurement = self.measurement(metric, sources=[self.source(metric, parse_error=\"error\")])\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def validate(metric_class):\n if not hasattr(metric_class, 'label'):\n raise ImproperlyConfigured(\"No 'label' attribute found for metric %s.\" % metric_class.__name__)\n \n if not hasattr(metric_class, 'widget'):\n raise ImproperlyConfigured(\"No 'widget' attribute found for metric %s.\" % metric_class.__name__)", "def test_create_derived_metric(self):\n pass", "def aggregator_unavailable_apiservice(self, metric, scraper_config):\n for sample in metric.samples:\n sample[self.SAMPLE_LABELS][\"apiservice_name\"] = sample[self.SAMPLE_LABELS].pop(\"name\")\n self.submit_metric('.aggregator_unavailable_apiservice', metric, scraper_config, monotonic_count=False)", "def missing_test_message(msg):\r\n action = config.compute_test_value\r\n if action == 'raise':\r\n raise 
AttributeError(msg)\r\n elif action == 'warn':\r\n warnings.warn(msg, stacklevel=2)\r\n else:\r\n assert action in ['ignore', 'off']", "def test_copy_without_name(self):\n self.metric[\"name\"] = \"\"\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Security warnings (copy)\", metric_copy[\"name\"])", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def rule_invalid(self, idx: int, line: Statement) -> None:\n #TODO: this is a print while not all rules are handled yet\n print(NotARule(f'No such rule: {line.rule.value!r}'))", "def notEnabledDummy(self, ev):\n pass", "def metric_loss(self, output, sample, *args, **kwargs):\n loss = self.loss(output, sample, *args, **kwargs)\n return loss.item()", "def _get_metrics(self, *args, **kwargs):\n logger.warning(\"Could not get metric. No function registered.\")", "def common_alarm_func_add(asg_name, metricname, namespace, arn_scalein, arn_scaleout, alarmname, desc, Unit):\n d1=desc+ \" High\"\n a1=alarmname + '-high'\n try:\n cloudwatch.put_metric_alarm(AlarmName=a1, AlarmDescription=d1,\n AlarmActions=[arn_scaleout],\n ActionsEnabled=True, MetricName=metricname, EvaluationPeriods=1,\n Threshold=float(ScaleUpThreshold), Statistic=\"Average\", Namespace=namespace,\n ComparisonOperator=\"GreaterThanThreshold\", Period=ScalingPeriod, Unit=Unit)\n except Exception as e:\n logger.error('Failed to add High Alarm: ' + desc + ' for ASG: ' + asg_name)\n logger.error(\"[Alarm High Add]: {}\".format(e))\n return False\n\n a1=alarmname + '-low'\n d1=desc+ \" Low\"\n try:\n cloudwatch.put_metric_alarm(AlarmName=a1, AlarmDescription=d1,\n AlarmActions=[arn_scalein],\n ActionsEnabled=True, MetricName=metricname, EvaluationPeriods=1,\n Threshold=float(ScaleDownThreshold), Statistic=\"Average\", Namespace=namespace,\n ComparisonOperator=\"LessThanThreshold\", Period=ScalingPeriod,\n Unit=Unit)\n except Exception as e:\n logger.error('Failed to add Low Alarm: ' + desc + ' for ASG: ' + asg_name)\n logger.error(\"[Alarm Low Add]: {}\".format(e))\n return False\n\n return True", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def rule(func: Callable) -> Callable:\n\n def inner1(operation: dict, **kwargs) -> dict:\n if len(func(operation, **kwargs)):\n if \"violations\" not in operation.keys():\n operation[\"violations\"] = []\n operation[\"violations\"].append(func(operation, **kwargs))\n\n return operation\n\n return inner1" ]
[ "0.53630054", "0.53394985", "0.5327985", "0.53265405", "0.5294704", "0.52564716", "0.5245898", "0.52369255", "0.51835185", "0.5125754", "0.51243144", "0.5101995", "0.5097013", "0.50950426", "0.5072278", "0.50347114", "0.50080895", "0.49950373", "0.49888673", "0.49649054", "0.4950176", "0.49480435", "0.49422026", "0.49403253", "0.49353", "0.49296924", "0.4921963", "0.49034834", "0.49034834", "0.48943445" ]
0.5530519
0
the id of the action group to use.
def action_group_id(self) -> Optional[str]: return pulumi.get(self, "action_group_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def id(self):\n return self._group", "def group_id(self):\n return self._id", "def getId(self):\n return _libsbml.Group_getId(self)", "def group_id(self) -> str:\n return pulumi.get(self, \"group_id\")", "def group_id(self) -> str:\n return pulumi.get(self, \"group_id\")", "def group_id(self):\n return self._group_id", "def group_id(self):\n return self._group_id", "def group_id(self):\n return self._group_id", "def identifier(self):\n return self._group.identifier", "def group_id(self) -> int:\n return self._group_id", "def GroupId(self):\n\t\treturn self._get_attribute('groupId')", "def scenario_group_id(self) -> str:\n return self.__scenario_group_id", "def test_user_group_controller_get_id(self):\n pass", "def setId(self, *args):\n return _libsbml.Group_setId(self, *args)", "def group_id(self):\n # type: () -> string_types\n return self._group_id", "def target_group_identifier(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group_identifier\")", "def group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_id\")", "def group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_id\")", "def group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_id\")", "def group_identifier(self):\n return self._group_identifier", "def group_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"group_id\")", "def actionGroup(self, QDesignerFormWindowManagerInterface_ActionGroup): # real signature unknown; restored from __doc__\n pass", "def target_group_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group_identifier\")", "def _get_new_group_id():\n new_group = data_types.TestcaseGroup()\n new_group.put()\n return new_group.key.id()", "def action_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"action_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")" ]
[ "0.77482456", "0.7099057", "0.70216775", "0.69734263", "0.69734263", "0.6817101", "0.6817101", "0.6817101", "0.6800052", "0.6768275", "0.6752699", "0.6614591", "0.66105044", "0.6609539", "0.66095287", "0.65748924", "0.6553389", "0.6553389", "0.6553389", "0.6470345", "0.6448293", "0.63695854", "0.6348024", "0.6335962", "0.62983483", "0.6286914", "0.6286914", "0.6286914", "0.6286914", "0.6286914" ]
0.84645164
0
Specifies the metric alert criteria for multiple resource that has multiple metric criteria.
def __init__(__self__, *, odata_type: str, all_of: Optional[Sequence[Any]] = None): pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria') if all_of is not None: pulumi.set(__self__, "all_of", all_of)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__, *,\n odata_type: str,\n all_of: Optional[Sequence['outputs.MetricCriteriaResponse']] = None):\n pulumi.set(__self__, \"odata_type\", 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria')\n if all_of is not None:\n pulumi.set(__self__, \"all_of\", all_of)", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def conditions(self) -> pulumi.Input[Sequence[pulumi.Input['AlertMutingRuleConditionConditionArgs']]]:\n return pulumi.get(self, \"conditions\")", "def metric_tests(self, metric_tests: Dict[str, FAIRResultEvaluationCriterium]):\n\n self._metric_tests = metric_tests", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def __init__(__self__, *,\n alerts_for_all_job_failures: Optional[str] = None):\n if alerts_for_all_job_failures is not None:\n pulumi.set(__self__, \"alerts_for_all_job_failures\", alerts_for_all_job_failures)", "def all_of(self) -> Optional[Sequence['outputs.MetricCriteriaResponse']]:\n return pulumi.get(self, \"all_of\")", "def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()", "def _create_conservation(self, m, resources):\n for res, resource in enumerate(resources):\n rule = partial(self._conservation_rule, resource) #lambda m, c, t: abs(np.sum(m.Production[c, res, t])) <=1e-14 # TODO zero tolerance value?\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{r}_conservation'.format(r=resource), constr)", "def __init__(__self__, *,\n alerts_for_all_job_failures: Optional[pulumi.Input[Union[str, 'AlertsState']]] = None):\n if alerts_for_all_job_failures is not None:\n pulumi.set(__self__, \"alerts_for_all_job_failures\", alerts_for_all_job_failures)", "def check_criteria(self, criteria, case_control=False):\n\n if case_control:\n pts_meeting_criteria = {key : [] for key in ['case', 'control']}\n else:\n pts_meeting_criteria = []\n\n if len(criteria) == 0: # mostly for exclusion criteria.\n return np.array([])\n\n for name, criterion in criteria.items():\n print(name, criterion)\n feature_inds = self.find_feature(name)\n pts_meeting_criterion = self.search_by_chunk(self.dataset, feature_inds, criterion, case_control)\n \n if case_control:\n pts_meeting_criteria['case'].append(pts_meeting_criterion['case'])\n pts_meeting_criteria['control'].append(pts_meeting_criterion['control'])\n else:\n pts_meeting_criteria.append(pts_meeting_criterion)\n\n if case_control:\n return reduce(np.intersect1d, pts_meeting_criteria['case']), 
\\\n reduce(np.intersect1d, pts_meeting_criteria['control'])\n else:\n return reduce(np.intersect1d, pts_meeting_criteria)", "def optimize_metrics(self,\n metrics: list = None,\n verbose: bool = True):\n\n if metrics is None:\n metrics = self._supported_metrics\n else:\n metrics = [metric.lower() for metric in metrics]\n assert all(metric in self._supported_metrics for metric in metrics)\n for i in metrics:\n super(ThresholdOptimizer, self).__getattribute__(f'get_best_{i}_metrics')(verbose=verbose)", "def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c", "def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests", "def applies_to(self, target_criteria):\n if not self.applies_to_available:\n raise NotImplementedError(\n \"applies_to() method not implemented for \" + self.__class__.__name__\n )\n if not isinstance(target_criteria, list):\n self.target_criteria = [target_criteria]\n else:\n self.target_criteria = target_criteria", "def solve_filter_metrics(self):\n\n if 'metrics' in self.filter_request:\n filter_metrics = self.filter_request['metrics']\n metrics_request = {}\n\n temp = filter_metrics.split(',')\n for i in temp:\n metrics_request[i.strip()] = None\n\n for i in range(len(self.list_pack)):\n self.apply_filter_metrics(i, metrics_request.copy())", "def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")", "def defineMetricSpecs(self):\n metricSpecs = (\n MetricSpec(field=self.fieldToPredict, metric='multiStep',\n inferenceElement='multiStepBestPredictions',\n params={'errorMetric': 'aae', 'window': 1000, 'steps': self.steps}),\n MetricSpec(field=self.fieldToPredict, metric='trivial',\n inferenceElement='prediction',\n params={'errorMetric': 'aae', 'window': 1000, 'steps': self.steps}),\n MetricSpec(field=self.fieldToPredict, metric='multiStep',\n inferenceElement='multiStepBestPredictions',\n params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': self.steps}),\n MetricSpec(field=self.fieldToPredict, metric='trivial',\n inferenceElement='prediction',\n params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': self.steps})\n )\n return metricSpecs", "def check_min_and_max_alert_widgets(attribute_range_entry: db.Model):\n\n if attribute_range_entry.maximum:\n max_alerts = AlertWidgetModel.get_max_alerts(attribute_range_entry)\n for alert in max_alerts:\n user_details = Users.find_by_id(alert[\"user_id\"])\n if user_details:\n attr = Attributes.get_by_id(attribute_range_entry.attribute_id)\n if attr:\n if not send_alert_email(\n user_details.email, user_details.fullname,\n attr.name, attribute_range_entry.maximum,\n attribute_range_entry.maximum_recorded_date,\n attribute_range_entry.maximum_sensor_id,\n alert[\"max_threshold\"], \"exceeded\"):\n logger.error(\"Server error prevented the \"\n \"sending of a max alert email \"\n \"to {} regarding attribute with \"\n \"id {}\".format(\n user_details.email,\n attribute_range_entry.attribute_id))\n else:\n logger.error(\n \"Could not send max alert email to \"\n \"user with id {} as the attribute with \"\n \"id {} does not exist \".format(\n alert[\"user_id\"],\n attribute_range_entry.attribute_id))\n else:\n logger.error(\"Could not send max alert email to \"\n \"user with id {} as the user does \"\n \"not exist \".format(alert[\"user_id\"]))\n\n if attribute_range_entry.minimum:\n min_alerts = 
AlertWidgetModel.get_min_alerts(attribute_range_entry)\n for alert in min_alerts:\n user_details = Users.find_by_id(alert[\"user_id\"])\n if user_details:\n attr = Attributes.get_by_id(attribute_range_entry.attribute_id)\n if attr:\n if not send_alert_email(\n user_details.email, user_details.fullname,\n attr.name, attribute_range_entry.minimum,\n attribute_range_entry.minimum_recorded_date,\n attribute_range_entry.minimum_sensor_id,\n alert[\"min_threshold\"], \"fell short of\"):\n logger.error(\"Server error prevented the sending of \"\n \"a min alert email to {} regarding \"\n \"attribute with id {}\".format(\n user_details.email,\n attribute_range_entry.attribute_id))\n else:\n logger.error(\n \"Could not send min alert email to \"\n \"user with id {} as the attribute with \"\n \"id {} does not exist \".format(\n alert[\"user_id\"],\n attribute_range_entry.attribute_id))\n else:\n logger.error(\"Could not send min alert email to \"\n \"user with id {} as the user does \"\n \"not exist \".format(alert[\"user_id\"]))", "def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def set_metrics(metric_dict, cd_loss, cd_corrects, cd_report):\n metric_dict['cd_losses'].append(cd_loss.item())\n metric_dict['cd_corrects'].append(cd_corrects.item())\n metric_dict['cd_precisions'].append(cd_report[0])\n metric_dict['cd_recalls'].append(cd_report[1])\n metric_dict['cd_f1scores'].append(cd_report[2])\n\n return metric_dict", "def add_condition(self):\n m = self.get_current_measurement()\n result = PriorityDialog()\n if result.exec_():\n # Update Survey.priority based on the input\n key, val1, val2, weight = result.key, result.val1, result.val2, result.weight\n \n # If the condition is x == val1, determine whether val1 is str or int\n if result.type == 'value':\n 
val1 = get_type(val1)(val1)\n\n # Add the condition to Survey.priority\n arr = np.array([[val1, val2, weight]])\n if key not in m.priority:\n m.priority[key] = np.zeros(shape=(0, 3))\n m.priority[key] = np.append(m.priority[key], arr, axis=0)\n \n self.mgr.changed = True\n \n self.load_conditions()", "def _criteria(self, record):\n\n process = True\n switch = False\n if record.aaf[0] > 0.5:\n switch = True\n return process, switch", "def multi_metric_scorer():\n\n scoring = {'AUC': 'roc_auc',\n 'Accuracy': 'accuracy',\n\n 'Balanced_accuracy': make_scorer(\n recall_score,\n pos_label=None,\n average='macro',\n sample_weight=None\n ),\n 'Sensitivity': make_scorer(\n recall_score,\n pos_label=1,\n average='binary',\n sample_weight=None\n ),\n 'Specificity': make_scorer(\n recall_score,\n pos_label=0,\n average='binary',\n sample_weight=None\n ),\n 'F1': make_scorer(\n f1_score, average='weighted'\n ),\n 'PPV': make_scorer(\n precision_score,\n pos_label=1,\n average='binary'\n ),\n 'NPV': make_scorer(\n precision_score, \n pos_label=0, \n average='binary'\n ),\n 'Brier_score': 'brier_score_loss'}\n\n return scoring", "def rules(self):\n return self._alert_rules_client", "def custom_severities(self):\n # TODO(ww): There's probably a better place to put this.\n url = \"/threathunter/watchlistmgr/v3/orgs/{}/reports/severity\".format(\n self.credentials.org_key\n )\n resp = self.get_object(url)\n items = resp.get(\"results\", [])\n return [self.create(ReportSeverity, item) for item in items]", "def constraint_inputs(self):\n features = {}\n for metric in self.metrics.all():\n if metric.type == GoalMetric.METRIC_TYPE_RISK_SCORE:\n risk = metric.configured_val\n else:\n features[metric.feature] = (metric.comparison, metric.feature, metric.configured_val)\n return risk, features", "def evaluate_across_productivity(self, metrics, spec_3):\n M = len(metrics)\n P = len(spec_3)\n data = np.zeros([M, P])\n for i in range(P):\n self.load_spec_3(spec_3[i])\n self.reactor._summary()\n data[:, i] = [j() for j in metrics]\n return data", "def write_acceptance_criteria_to_boxes(self):\n\n self.ignore_parameters, value = {}, ''\n for crit_short_name in self.preferences['show_statistics_on_gui']:\n crit = \"specimen_\" + crit_short_name\n if self.acceptance_criteria[crit]['value'] == -999:\n self.threshold_windows[crit_short_name].SetValue(\"\")\n self.threshold_windows[crit_short_name].SetBackgroundColour(\n wx.Colour(128, 128, 128))\n self.ignore_parameters[crit] = True\n continue\n elif crit == \"specimen_scat\":\n if self.acceptance_criteria[crit]['value'] in ['g', 1, '1', True, \"True\", \"t\"]:\n #value = \"True\"\n value = \"t\"\n #self.scat_threshold_window.SetBackgroundColour(wx.SetBackgroundColour(128, 128, 128))\n else:\n value = \"f\"\n #value = \"False\"\n self.threshold_windows['scat'].SetBackgroundColour(\n (128, 128, 128))\n #self.scat_threshold_window.SetBackgroundColour((128, 128, 128))\n\n elif type(self.acceptance_criteria[crit]['value']) == int:\n value = \"%i\" % self.acceptance_criteria[crit]['value']\n elif type(self.acceptance_criteria[crit]['value']) == float:\n if self.acceptance_criteria[crit]['decimal_points'] == -999:\n value = \"%.3e\" % self.acceptance_criteria[crit]['value']\n else:\n value = \"{:.{}f}\".format(self.acceptance_criteria[crit]['value'],\n self.acceptance_criteria[crit]['decimal_points'])\n else:\n continue\n\n self.threshold_windows[crit_short_name].SetValue(value)\n self.threshold_windows[crit_short_name].SetBackgroundColour(\n wx.WHITE)" ]
[ "0.6026406", "0.51808137", "0.50900877", "0.48364025", "0.48048016", "0.47682977", "0.47614202", "0.47585002", "0.47174513", "0.4685663", "0.46267575", "0.45938715", "0.45914057", "0.45757943", "0.4562624", "0.45517758", "0.4543452", "0.44907033", "0.4478374", "0.44422543", "0.44344044", "0.44158718", "0.4414755", "0.44135737", "0.4394033", "0.43763483", "0.43610278", "0.43557185", "0.43504673", "0.4348717" ]
0.6174168
0
Specifies the metric alert criteria for a single resource that has multiple metric criteria.
def __init__(__self__, *, odata_type: str, all_of: Optional[Sequence['outputs.MetricCriteriaResponse']] = None): pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria') if all_of is not None: pulumi.set(__self__, "all_of", all_of)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__, *,\n odata_type: str,\n all_of: Optional[Sequence[Any]] = None):\n pulumi.set(__self__, \"odata_type\", 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria')\n if all_of is not None:\n pulumi.set(__self__, \"all_of\", all_of)", "def metric_tests(self, metric_tests: Dict[str, FAIRResultEvaluationCriterium]):\n\n self._metric_tests = metric_tests", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def conditions(self) -> pulumi.Input[Sequence[pulumi.Input['AlertMutingRuleConditionConditionArgs']]]:\n return pulumi.get(self, \"conditions\")", "def add_condition(self):\n m = self.get_current_measurement()\n result = PriorityDialog()\n if result.exec_():\n # Update Survey.priority based on the input\n key, val1, val2, weight = result.key, result.val1, result.val2, result.weight\n \n # If the condition is x == val1, determine whether val1 is str or int\n if result.type == 'value':\n val1 = get_type(val1)(val1)\n\n # Add the condition to Survey.priority\n arr = np.array([[val1, val2, weight]])\n if key not in m.priority:\n m.priority[key] = np.zeros(shape=(0, 3))\n m.priority[key] = np.append(m.priority[key], arr, axis=0)\n \n self.mgr.changed = True\n \n self.load_conditions()", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def optimize_metrics(self,\n metrics: list = None,\n verbose: bool = True):\n\n if metrics is None:\n metrics = self._supported_metrics\n else:\n metrics = [metric.lower() for metric in metrics]\n assert all(metric in self._supported_metrics for metric in metrics)\n for i in metrics:\n super(ThresholdOptimizer, self).__getattribute__(f'get_best_{i}_metrics')(verbose=verbose)", "def __init__(__self__, *,\n alerts_for_all_job_failures: Optional[str] = None):\n if alerts_for_all_job_failures is not None:\n pulumi.set(__self__, \"alerts_for_all_job_failures\", alerts_for_all_job_failures)", "def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests", "def applies_to(self, target_criteria):\n if not self.applies_to_available:\n raise NotImplementedError(\n \"applies_to() method not implemented for \" + self.__class__.__name__\n )\n if not isinstance(target_criteria, list):\n self.target_criteria = [target_criteria]\n else:\n self.target_criteria = target_criteria", "def _create_conservation(self, m, resources):\n for res, resource in enumerate(resources):\n rule = partial(self._conservation_rule, resource) #lambda m, c, t: abs(np.sum(m.Production[c, res, t])) <=1e-14 # TODO zero tolerance value?\n constr = pyo.Constraint(m.T, rule=rule)\n 
setattr(m, '{r}_conservation'.format(r=resource), constr)", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def all_of(self) -> Optional[Sequence['outputs.MetricCriteriaResponse']]:\n return pulumi.get(self, \"all_of\")", "def __init__(__self__, *,\n alerts_for_all_job_failures: Optional[pulumi.Input[Union[str, 'AlertsState']]] = None):\n if alerts_for_all_job_failures is not None:\n pulumi.set(__self__, \"alerts_for_all_job_failures\", alerts_for_all_job_failures)", "def winner_criteria(self, winner_criteria):\n\n self._winner_criteria = winner_criteria", "def __init__(__self__, *,\n alert_sensitivity: str,\n criterion_type: str,\n failing_periods: 'outputs.DynamicThresholdFailingPeriodsResponse',\n metric_name: str,\n name: str,\n operator: str,\n time_aggregation: str,\n dimensions: Optional[Sequence['outputs.MetricDimensionResponse']] = None,\n ignore_data_before: Optional[str] = None,\n metric_namespace: Optional[str] = None,\n skip_metric_validation: Optional[bool] = None):\n pulumi.set(__self__, \"alert_sensitivity\", alert_sensitivity)\n pulumi.set(__self__, \"criterion_type\", 'DynamicThresholdCriterion')\n pulumi.set(__self__, \"failing_periods\", failing_periods)\n pulumi.set(__self__, \"metric_name\", metric_name)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"operator\", operator)\n pulumi.set(__self__, \"time_aggregation\", time_aggregation)\n if dimensions is not None:\n pulumi.set(__self__, \"dimensions\", dimensions)\n if ignore_data_before is not None:\n pulumi.set(__self__, \"ignore_data_before\", ignore_data_before)\n if metric_namespace is not None:\n pulumi.set(__self__, \"metric_namespace\", metric_namespace)\n if skip_metric_validation is not None:\n pulumi.set(__self__, \"skip_metric_validation\", skip_metric_validation)", "def set_metrics(metric_dict, cd_loss, cd_corrects, cd_report):\n metric_dict['cd_losses'].append(cd_loss.item())\n metric_dict['cd_corrects'].append(cd_corrects.item())\n metric_dict['cd_precisions'].append(cd_report[0])\n metric_dict['cd_recalls'].append(cd_report[1])\n metric_dict['cd_f1scores'].append(cd_report[2])\n\n return metric_dict", "def metric_strategies(self, metric_strategies):\n\n self._metric_strategies = metric_strategies", "def write_acceptance_criteria_to_boxes(self):\n\n self.ignore_parameters, value = {}, ''\n for crit_short_name in self.preferences['show_statistics_on_gui']:\n crit = \"specimen_\" + crit_short_name\n if self.acceptance_criteria[crit]['value'] == -999:\n 
self.threshold_windows[crit_short_name].SetValue(\"\")\n self.threshold_windows[crit_short_name].SetBackgroundColour(\n wx.Colour(128, 128, 128))\n self.ignore_parameters[crit] = True\n continue\n elif crit == \"specimen_scat\":\n if self.acceptance_criteria[crit]['value'] in ['g', 1, '1', True, \"True\", \"t\"]:\n #value = \"True\"\n value = \"t\"\n #self.scat_threshold_window.SetBackgroundColour(wx.SetBackgroundColour(128, 128, 128))\n else:\n value = \"f\"\n #value = \"False\"\n self.threshold_windows['scat'].SetBackgroundColour(\n (128, 128, 128))\n #self.scat_threshold_window.SetBackgroundColour((128, 128, 128))\n\n elif type(self.acceptance_criteria[crit]['value']) == int:\n value = \"%i\" % self.acceptance_criteria[crit]['value']\n elif type(self.acceptance_criteria[crit]['value']) == float:\n if self.acceptance_criteria[crit]['decimal_points'] == -999:\n value = \"%.3e\" % self.acceptance_criteria[crit]['value']\n else:\n value = \"{:.{}f}\".format(self.acceptance_criteria[crit]['value'],\n self.acceptance_criteria[crit]['decimal_points'])\n else:\n continue\n\n self.threshold_windows[crit_short_name].SetValue(value)\n self.threshold_windows[crit_short_name].SetBackgroundColour(\n wx.WHITE)", "def __str_healthrule_critical_conditions(self,healthrule):\n def str_custom_condition_expression(condition,expression):\n # In custom conditions the expression is given, only need to replace shortNames by metric name\n if 'metricExpression' in condition:\n return expression.replace( condition['shortName'],\n condition['metricExpression']['metricDefinition']['logicalMetricName'].lower() + \" \" + \\\n condition['operator'].lower() + \" \" + \\\n str(condition['value']) )\n else:\n return str_custom_condition_expression(condition['condition1'],\n str_custom_condition_expression(condition['condition2'],expression) )\n def str_condition_expression(condition,operator):\n # In the rest of conditions, no expression is given, need to create it from scratch\n if 'metricExpression' in condition and 'metricDefinition' in condition['metricExpression']:\n metricExp = condition['metricExpression']['metricDefinition']['logicalMetricName'].lower() + \" \" + \\\n condition['operator'].lower() + \" \" + str(condition['value'])\n return metricExp\n elif 'metricExpression' in condition and condition['conditionExpression'] is not None:\n return condition['conditionExpression']\n else:\n return str_condition_expression(condition['condition1'],operator) + \" \" + operator + \" \" + \\\n str_condition_expression(condition['condition2'],operator)\n\n if 'evalCriterias' not in healthrule:\n if 'DEBUG' in locals(): sys.stderr.write(\"Unrecognized evaluation criteria for healthrule \"+healthrule['name'])\n elif healthrule['evalCriterias']['criticalCriteria'] is not None: ## Legacy XML format\n if healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']['evalDetailType'] == \"METRIC_EXPRESSION\":\n return healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']['metricExpression']\n elif healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']['evalDetailType'] == \"SINGLE_METRIC\":\n evalDetail = healthrule['evalCriterias']['criticalCriteria']['conditions'][0]['evalDetail']\n if evalDetail['metricEvalDetail']['metricEvalDetailType']==\"BASELINE_TYPE\":\n return evalDetail['metricPath']+\" is \"+ \\\n evalDetail['metricEvalDetail']['baselineCondition']+\" \"+ \\\n evalDetail['metricEvalDetail']['baselineName']+\" by \"+ \\\n 
str(evalDetail['metricEvalDetail']['compareValue'])+\" \"+ \\\n evalDetail['metricEvalDetail']['baselineUnit']\n elif evalDetail['metricEvalDetail']['metricEvalDetailType']==\"SPECIFIC_TYPE\":\n return evalDetail['metricPath']+\" is \"+ \\\n evalDetail['metricEvalDetail']['baselineCondition']+\" \"+ \\\n str(evalDetail['metricEvalDetail']['compareValue'])\n return \"\"", "def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c", "def criteria_met(self, criteria_met):\n\n self._criteria_met = criteria_met", "def set_metrics(self):", "def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")", "def test_verify_metric_kwargs(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm - pm.mean(\"time\").mean(\"init\")\n assert pm.verify(\n metric=\"threshold_brier_score\",\n comparison=\"m2c\",\n dim=[\"init\", \"member\"],\n threshold=0.5,\n )", "def __str_healthrule_critical_conditions(self,healthrule):\n if 'critical' in healthrule and healthrule['critical'] is not None:\n condition = healthrule['critical']['condition']\n if healthrule['critical']['conditionAggregationType'] == \"CUSTOM\":\n conditionExpression = self.__format_condition_expression(healthrule['critical']['conditionExpression'])\n return self.__str_condition_expression(condition=condition,expression=conditionExpression)\n else: # conditionAggregationType is \"ANY\", \"ALL\" or null\n operator = \"OR\" if healthrule['critical']['conditionAggregationType'] == \"ANY\" else \"AND\"\n return self.__str_condition_expression(condition=condition,aggregationType=operator)\n elif 'critical' in healthrule: # and healthrule['critical'] is None:\n return \"\"\n elif 'evalCriterias' in healthrule:\n sys.stderr.write(\"Format not supported.\")\n return \"\"\n else:\n sys.stderr.write(\"Unrecognized evaluation criteria for healthrule \"+healthrule['name'])\n return \"\"", "def _criteria(self, record):\n\n process = True\n switch = False\n if record.aaf[0] > 0.5:\n switch = True\n return process, switch", "def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect", "def set_criteria(self, criteria):\n\n\t\ttry:\n\t\t\tfrom zcrmsdk.src.com.zoho.crm.api.custom_views.criteria import Criteria\n\t\texcept Exception:\n\t\t\tfrom .criteria import Criteria\n\n\t\tif criteria is not None and not isinstance(criteria, Criteria):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: criteria EXPECTED TYPE: Criteria', None, None)\n\t\t\n\t\tself.__criteria = criteria\n\t\tself.__key_modified['criteria'] = 1", "def _get_criteria(self):\n for molecule 
in self.values():\n molecule.get_criterion()" ]
[ "0.58963656", "0.5050068", "0.50446075", "0.5041433", "0.4687035", "0.46119368", "0.46008843", "0.45889822", "0.4584769", "0.45769805", "0.45517832", "0.45421895", "0.453245", "0.45204303", "0.45196065", "0.4497745", "0.44730958", "0.44707868", "0.44672188", "0.4449448", "0.44333002", "0.44293755", "0.44205827", "0.44034123", "0.43910486", "0.4379181", "0.43784532", "0.43780133", "0.43729976", "0.43697223" ]
0.58667475
1
The list of metric criteria for this 'all of' operation.
def all_of(self) -> Optional[Sequence['outputs.MetricCriteriaResponse']]:
    return pulumi.get(self, "all_of")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c", "def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def get_criteria(self):\n\n\t\treturn self.__criteria", "def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests", "def filter_metrics_choices(self): \n cols = pd.Series(tdr.get_catalog().tubidw.all_metric_hourly.columns)\n filter_metrics = ['no filters'] + cols[cols.str.endswith(tuple(['_count', '_sec']))].tolist()\n return filter_metrics", "def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def all_of(self) -> Optional[Sequence[Any]]:\n return pulumi.get(self, \"all_of\")", "def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def filter_criteria(self):\n return self.filter_nodes('//Validation/Criteria')", "def AllFoundCriteria(ga_engine):\n return AllFound.ALL_FOUND", "def metric_options(self):\n return Optimizer.list_method_options(self.metric_creator.method_dict)", "def get_all(self):\n return self._name_to_operator.values()", "def list_metrics(self):\n pass", "def list_definition(self):\n return self._get(path='metrics')", "def all(self):\n return self._summarize(lambda c: c.all)", "def filter_criteria(self) -> pulumi.Output[Optional['outputs.EventSourceMappingFilterCriteria']]:\n return pulumi.get(self, \"filter_criteria\")", "def visitCriteria(self, ctx: ApiQLParser.CriteriaContext):\n return lmap(lambda c: c.accept(self), ctx.getChildren(self.filter_ignored))", "def items(self):\n return self._rules_by_lhs.items()", "def all(cls):\n return cls.where()", "def offres(self) -> list[int]:\n filters: list[dict[str, int]] = [{}, {\"valide\": True}, {\"paye\": True}]\n return [self.offre_set.filter(**f).count() for f in filters]", "def get_conditions(self):\n if not hasattr(self, 'conditions'):\n raise NotImplementedError('\"conditions\" attribute must be overridden')\n\n for c in self.conditions:\n value = getattr(self, c)\n if callable(value):\n self._condits[c] = value()\n else:\n if value:\n #if property is not empty\n self._condits[c] = value\n\n return self._condits", "def conditions(self):\n return ConditionCollection(client=self)", "def rule_conditions(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRuleConditionArgs']]]:\n return pulumi.get(self, \"rule_conditions\")", "def criteria(self) -> Optional[Sequence['outputs.MetadataDependenciesResponse']]:\n return pulumi.get(self, \"criteria\")", "def supported_metrics(cls) -> List[str]:\n ...", "def get_data_reqs(self):\n reqs = {\n \"requires_partial_lc\": True,\n \"metric\": self.metric,\n \"requires_hyperparameters\": False,\n \"hyperparams\": None,\n \"unlabeled\": False,\n \"unlabeled_factor\": 0,\n }\n 
return reqs", "def _get_conditions(self):\n return self.__conditions", "def _get_conditions(self):\n return self.__conditions" ]
[ "0.67560923", "0.6633179", "0.6580229", "0.63893026", "0.63158923", "0.622334", "0.6153101", "0.6063113", "0.6005794", "0.6004159", "0.5937065", "0.5884718", "0.58846277", "0.58748585", "0.58309233", "0.57818455", "0.5748338", "0.5677755", "0.564924", "0.563005", "0.56039816", "0.55954355", "0.5547361", "0.5518141", "0.5509863", "0.5507643", "0.550558", "0.549369", "0.5492144", "0.5492144" ]
0.76627076
0
the criteria threshold value that activates the alert.
def threshold(self) -> float:
    return pulumi.get(self, "threshold")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def threshold(self,thresholdValue):\n # TO DO\n pass", "def threshold(self):\n return self._threshold", "def threshold(self):\n return self._threshold", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def evaluate(self, threshold=0.5):\n pass", "def getThreshold(self): # real signature unknown; restored from __doc__\n pass", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def actualthreshold(self):\n return self._actualthreshold", "def action_threshold(self) -> pulumi.Input['BudgetActionActionThresholdArgs']:\n return pulumi.get(self, \"action_threshold\")", "def action_threshold(self) -> pulumi.Output['outputs.BudgetActionActionThreshold']:\n return pulumi.get(self, \"action_threshold\")", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def get_threshold(self):\n rgs = self.dynamics.regimes\n for r in rgs:\n if(r.initial==True): main_regime = r\n elif(r.initial==False): refractory_regime = r\n roc = main_regime.event_handlers\n threshcond = \"\"\n for oc in roc:\n if(type(oc) is lems.OnCondition):\n threshcond = self.replace_operators(oc.test)\n else: threshcond=None\n return threshcond", "def thresholdfactor(self):\n return self.__thresholdfactor", "def action_threshold(self) -> Optional[pulumi.Input['BudgetActionActionThresholdArgs']]:\n return pulumi.get(self, \"action_threshold\")", "def value_threshold(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"value_threshold\")", "def value_threshold(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"value_threshold\")", "def value_threshold(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"value_threshold\")", "def aboveThresholdAlarm(self, data):\n\n if(self.calculateAverage(data) > self.threshold and self.aboveThreshold):\n message = \"Average above acceptable amount for \" + self.subjectName + \".\"\n if(self.log):\n logging.info(message)\n\n self.sendToAllSubscribers(message, \"Alert: Average performance above threshold.\")", "def _thresholding(qc_value, thresholds=None):\n MAX_BOUND, MIN_BOUND = (1, 0)\n if not thresholds:\n thresholds = TaskQC.criteria['default'].copy()\n if qc_value is None or np.isnan(qc_value):\n return int(-1)\n elif (qc_value > MAX_BOUND) or (qc_value < MIN_BOUND):\n raise ValueError(\"Values out of bound\")\n if 'PASS' in thresholds.keys() and qc_value >= thresholds['PASS']:\n return 0\n if 'WARNING' in thresholds.keys() and qc_value >= thresholds['WARNING']:\n return 1\n if 'FAIL' in thresholds and qc_value >= thresholds['FAIL']:\n return 2\n if 'NOT_SET' in thresholds and qc_value >= thresholds['NOT_SET']:\n return -1\n # if None of this applies, return 'NOT_SET'\n return -1", "def reward_threshold(self) -> Optional[float]:", "def belowThresholdAlarm(self, data):\n\n if(self.belowThreshold and self.calculateAverage(data) < self.threshold):\n message = \"Average below 
acceptable amount for \" + self.subjectName + \".\"\n if(self.log):\n logging.info(message)\n\n self.sendToAllSubscribers(message, \"Alert: Average performance below threshold.\")", "def current_threshold_hit(self):\n\n\t\tnew_current = self.robot.pdp.getCurrent(const.CARGO_PDP_ID)\n\n\t\tself._current_samples.append(new_current)\n\n\t\tif len(self._current_samples) > 10:\n\t\t\tself._current_samples.pop(0)\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._current_samples) / len(self._current_samples)\n\n\t\treturn new_avg > const.CARGO_INTAKE_THRESHOLD", "def get_threshold(self):\n\n if self.threshold.startswith('+'):\n if self.threshold[1:].isdigit():\n self._threshold = int(self.threshold[1:])\n self._upper = True\n elif self.threshold.startswith('-'):\n if self.threshold[1:].isdigit():\n self._threshold = int(self.threshold[1:])\n self._upper = False\n else:\n if self.threshold.isdigit():\n self._threshold = int(self.threshold)\n self._upper = True\n if not hasattr(self, '_threshold'):\n raise ValueError('Invalid threshold')", "def alert_sensitivity(self) -> str:\n return pulumi.get(self, \"alert_sensitivity\")" ]
[ "0.73743016", "0.70379454", "0.70379454", "0.68876785", "0.68876785", "0.68876785", "0.68876785", "0.68876785", "0.68192774", "0.680575", "0.6726792", "0.6726792", "0.6726792", "0.6714316", "0.6673342", "0.6624437", "0.66214246", "0.6594564", "0.6541047", "0.6528297", "0.6473134", "0.64578176", "0.64578176", "0.64349866", "0.64303684", "0.6368085", "0.6336614", "0.6222334", "0.6211253", "0.6189639" ]
0.720796
1
the criteria time aggregation types.
def time_aggregation(self) -> str:
    return pulumi.get(self, "time_aggregation")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __timeRestriction():\n restriction = {\"M\": [\"7:00\", \"9:30\"],\n \"A\": [\"16:00\", \"19:30\"]}\n return restriction", "def get_group_types(self):\r\n pass", "def durations_per_type(self):\n pass", "def get_report_event_types():\n post_data = request.get_json()\n where_clause = \"\"\n if post_data:\n where_clause = \"WHERE 1 = 1 \"\n if post_data.get('caregiver_ids'):\n where_clause += \"AND e.caregiver_id = ANY(:caregiver_ids) \"\n if post_data.get('min_time'):\n where_clause += \"AND e.start_time > :min_time \"\n if post_data.get('max_time'):\n where_clause += \"AND e.start_time < :max_time \"\n\n sql = text(\"\"\"\n SELECT extract(year from e.start_time) as yyyy,\n to_char(e.start_time, 'Mon') as mon,\n e.caregiver_id,\n e.reimbursed,\n sum(t.amount_in_cents)/100\n FROM event e INNER JOIN type t\n ON e.type_id = t.id\n {}\n GROUP BY yyyy, mon, e.caregiver_id, e.reimbursed\n \"\"\".format(where_clause))\n try:\n result = db.session.execute(sql, post_data).fetchall()\n except Exception:\n response_object = {\n 'status': 'fail',\n 'message': 'Invalid payload.'\n }\n return make_response(jsonify(response_object)), 400\n\n report = rec_dd()\n for r in result:\n prefix = \"\" if r[3] else \"non-\"\n report[int(r[0])][r[1]][r[2]][prefix + \"reimbursed SEK\"] = r[4]\n\n response_object = {\n 'status': 'success',\n 'data': {\n 'report': report,\n 'generated_at': datetime.datetime.now()\n }\n }\n return make_response(jsonify(response_object)), 200", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def _types(cls):\n return {}", "def filter_metrics_choices(self): \n cols = pd.Series(tdr.get_catalog().tubidw.all_metric_hourly.columns)\n filter_metrics = ['no filters'] + cols[cols.str.endswith(tuple(['_count', '_sec']))].tolist()\n return filter_metrics", "def get_aggregations(self):\n return []", "def get_exposure_times(self):\n exposure_time = self.meta.exposure.exposure_time\n duration = self.meta.exposure.duration\n start_time = self.meta.exposure.start_time\n mid_time = self.meta.exposure.mid_time\n end_time = self.meta.exposure.end_time\n return (exposure_time, duration, start_time, mid_time, end_time)", "def type_fields(self, res, op_item):\n result = []\n cast_func = {}\n header = res[0]\n for heading in header:\n cast_func[heading] = DataType.str\n\n if \"field_type\" in op_item:\n for f, p in findall(FIELD_TYPE_RE, op_item[\"field_type\"]):\n cast_func[p] = self.dt.get_func(f)\n first = True\n for row in res[1:]:\n new_row = []\n for idx in range(len(header)):\n\n heading = header[idx]\n cur_value = row[idx]\n if type(cur_value) is tuple:\n cur_value = cur_value[1]\n if heading == \"timespan\" and first:\n first = False\n new_row.append((cast_func[heading](cur_value), cur_value))\n\n 
result.append(new_row)\n\n return [header] + result", "def class_time(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), t in self.apply_time.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += t\n return rval", "def get_types(self):\n return self.column_type", "def class_time(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, t in self.apply_time.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += t\r\n return rval", "def test_time_type_state_types(day):\n\n assert day_time_info(day.hours_0).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_1).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_2).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_3).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_4).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_5).types == {TimeType.MORNING}\n assert day_time_info(day.hours_6).types == {TimeType.MORNING}\n assert day_time_info(day.hours_7).types == {TimeType.MORNING}\n assert day_time_info(day.hours_8).types == {TimeType.MORNING}\n assert day_time_info(day.hours_9).types == {TimeType.MORNING}\n assert day_time_info(day.hours_10).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_11).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_12).types == {TimeType.NOON}\n assert day_time_info(day.hours_13).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_14).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_15).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_16).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_17).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_18).types == {TimeType.EVENING}\n assert day_time_info(day.hours_19).types == {TimeType.EVENING}\n assert day_time_info(day.hours_20).types == {TimeType.EVENING}\n assert day_time_info(day.hours_21).types == {TimeType.EVENING}\n assert day_time_info(day.hours_22).types == {TimeType.EVENING}\n assert day_time_info(day.hours_23).types == {TimeType.NIGHT}", "def get_measurement_types():\n\n all_measures = ['temperature', 'humidity', 'pressure']\n\n ####################\n return all_measures\n ####################", "def get_query_and_evaluation_analysis_types(self, parameters):\n queries = parameters[\"clustering\"][\"evaluation\"][\"query_types\"]\n queries.extend(AnalysisPopulator.get_evaluation_analysis_types(parameters))\n return list(set(queries))", "def __getTimeRestriction(time_day):\n time_restriction = PicoPlaca.__timeRestriction()\n time_ini = time_restriction[time_day][0]\n time_fin = time_restriction[time_day][1]\n time_ini = datetime.strptime(time_ini, \"%H:%M\").time()\n time_fin = datetime.strptime(time_fin, \"%H:%M\").time()\n return time_ini, time_fin", "def times(self):\n ret = {}\n for tag in self.TIMETAGLIST:\n if self.has_tag(tag):\n try:\n ret[tag] = safeInt(self.tag(tag))\n except TypeError:\n pass\n return ret", "def get_evaluation_analysis_types(self, parameters):\n eval_types =[]\n for evaluation_criteria_id in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"]:\n# for subcriteria in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id]:\n# eval_types.append(subcriteria)\n eval_types.extend(parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id].keys())\n return list(set(eval_types))", "def 
typeParameters():\n\t\td = Algorithm.typeParameters()\n\t\td.update({\n\t\t\t'epsilon': lambda x: isinstance(x, (float, int)) and x > 0,\n\t\t\t'alpha': lambda x: isinstance(x, (float, int)) and x > 0,\n\t\t\t'r': lambda x: isinstance(x, (float, int)) and x > 0,\n\t\t\t'Qmin': lambda x: isinstance(x, (float, int)),\n\t\t\t'Qmax': lambda x: isinstance(x, (float, int))\n\t\t})\n\t\treturn d", "def get_data_types(self):\n data_types = set()\n for er in self.exercise_recordings:\n for data_type in er.data_types:\n if data_type not in data_types:\n data_types.add(data_type)\n return list(data_types)", "def data_types(self):", "def data_types(self):\n return self['data_types']", "def test_durations_per_type(self):\n sim = ss.Simulation()\n assert type(sim.durations_per_type()) == dict", "def aggregation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregation_type\")", "def aggregation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregation_type\")", "def etypes(self): # -> list[str]:\n ...", "def get_product_time_bounds(self, product: str):\n\n # Get the offsets from dataset doc\n product = self.types.get_by_name(product)\n dataset_section = product.metadata_type.definition['dataset']\n min_offset = dataset_section['search_fields']['time']['min_offset']\n max_offset = dataset_section['search_fields']['time']['max_offset']\n\n time_min = DateDocField('aquisition_time_min',\n 'Min of time when dataset was acquired',\n DATASET.c.metadata,\n False, # is it indexed\n offset=min_offset,\n selection='least')\n\n time_max = DateDocField('aquisition_time_max',\n 'Max of time when dataset was acquired',\n DATASET.c.metadata,\n False, # is it indexed\n offset=max_offset,\n selection='greatest')\n\n with self._db_connection() as connection:\n result = connection.execute(\n select(\n [func.min(time_min.alchemy_expression), func.max(time_max.alchemy_expression)]\n ).where(\n DATASET.c.dataset_type_ref == product.id\n )\n ).first()\n\n return result", "def CRITs_mappings(self):\n self.crits_type_mapping = {}\n self.crits_type_mapping[\"DOMAIN\"] = \"URI - Domain Name\"\n self.crits_type_mapping[\"URI - Domain Name\"] = \"DOMAIN\"\n self.crits_type_mapping[\"IP\"] = \"Address - ipv4-addr\"\n self.crits_type_mapping[\"Address - ipv4-addr\"] = \"IP\"", "def _init_prepare_types(self):\n # len(db)-1 wouldn't work here because there could be missing\n # index due to generic filtering\n self.types = {\n key: fit_integer_type(np.max(db.index.values), is_signed=False)\n for key, db in iteritems(self.by_dbs)}" ]
[ "0.57344794", "0.56620973", "0.56332326", "0.54430324", "0.537375", "0.53651655", "0.5316518", "0.5306705", "0.522799", "0.52185", "0.5201779", "0.51849824", "0.5163754", "0.5163674", "0.51582074", "0.5154635", "0.5152362", "0.51422703", "0.5126015", "0.5125766", "0.51251775", "0.51104724", "0.5106991", "0.5091363", "0.5090927", "0.5090927", "0.507597", "0.50706536", "0.5062613", "0.50598514" ]
0.5836969
1
Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped.
def skip_metric_validation(self) -> Optional[bool]:
    return pulumi.get(self, "skip_metric_validation")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()", "def test_error_on_invalid_metric(self):\n self.ocp_data.get(\"rates\", [])[0][\"metric\"][\"name\"] = \"invalid_metric\"\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def submit_errors_metric(lambda_context):\n if not are_enhanced_metrics_enabled():\n return\n\n lambda_metric(\n \"{}.errors\".format(ENHANCED_METRICS_NAMESPACE_PREFIX),\n 1,\n tags=get_enhanced_metrics_tags(lambda_context),\n )", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def create_metric(self) -> EvalMetric:\n pass", "def test_metric_from_name_nonexistent(self):\n with pytest.raises(ValueError):\n _metric_from_name(\"nonexistent_metric\")", "def test_value_missing_metric(self, missing_mock):\n missing_mock.return_value = True\n self._MetricSourceAgeMetricTest__metric_source.datetime = MagicMock()\n\n result = self.__metric.value()\n\n self.assertTrue(missing_mock.called)\n self.assertTrue(self._MetricSourceAgeMetricTest__metric_source.datetime.assert_not_called)\n self.assertEqual(-1, result)", "def __init__(self):\n super().__init__()\n self.metric = 'GCOERR'", "def __init__(self):\n super().__init__()\n self.metric = 'FALLOUT'", "def check_metric(self, metric):\r\n\r\n if metric in metric_functions or metric == '':\r\n return metric\r\n else:\r\n raise InvalidNeuralNetwork()", "def test_negative(self):\n self.assertFalse(validate_measure_input('-1', self.measures))", "def test_fail_on_init(self):\n\n with self.assertRaises(IcypawException):\n class Node:\n my_metric = Metric(Int64, read_only=True)\n\n @my_metric.net_hook\n def my_metric(self, value):\n pass", "def validation_event(self, message):", "def test_prometheus_rule_failures():\n prometheus = ocs_ci.utility.prometheus.PrometheusAPI()\n alerts_response = prometheus.get(\n \"alerts\", payload={\"silenced\": False, \"inhibited\": False}\n )\n assert alerts_response.ok is True\n alerts = alerts_response.json()[\"data\"][\"alerts\"]\n log.info(f\"Prometheus Alerts: {alerts}\")\n assert constants.ALERT_PROMETHEUSRULEFAILURES not in [\n alert[\"labels\"][\"alertname\"] for alert in alerts\n ]", "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def test_error(self):\n metric = self.metric()\n measurement = self.measurement(metric, sources=[self.source(metric, parse_error=\"error\")])\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def validate(metric_class):\n if not hasattr(metric_class, 'label'):\n raise ImproperlyConfigured(\"No 'label' attribute found for metric %s.\" % metric_class.__name__)\n \n if not hasattr(metric_class, 'widget'):\n raise ImproperlyConfigured(\"No 'widget' attribute found for metric %s.\" % metric_class.__name__)", "def test_create_derived_metric(self):\n pass", "def aggregator_unavailable_apiservice(self, metric, scraper_config):\n for sample in metric.samples:\n sample[self.SAMPLE_LABELS][\"apiservice_name\"] = sample[self.SAMPLE_LABELS].pop(\"name\")\n self.submit_metric('.aggregator_unavailable_apiservice', metric, scraper_config, monotonic_count=False)", "def missing_test_message(msg):\r\n action = config.compute_test_value\r\n if action == 'raise':\r\n raise 
AttributeError(msg)\r\n elif action == 'warn':\r\n warnings.warn(msg, stacklevel=2)\r\n else:\r\n assert action in ['ignore', 'off']", "def test_copy_without_name(self):\n self.metric[\"name\"] = \"\"\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Security warnings (copy)\", metric_copy[\"name\"])", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def rule_invalid(self, idx: int, line: Statement) -> None:\n #TODO: this is a print while not all rules are handled yet\n print(NotARule(f'No such rule: {line.rule.value!r}'))", "def notEnabledDummy(self, ev):\n pass", "def metric_loss(self, output, sample, *args, **kwargs):\n loss = self.loss(output, sample, *args, **kwargs)\n return loss.item()", "def _get_metrics(self, *args, **kwargs):\n logger.warning(\"Could not get metric. No function registered.\")", "def common_alarm_func_add(asg_name, metricname, namespace, arn_scalein, arn_scaleout, alarmname, desc, Unit):\n d1=desc+ \" High\"\n a1=alarmname + '-high'\n try:\n cloudwatch.put_metric_alarm(AlarmName=a1, AlarmDescription=d1,\n AlarmActions=[arn_scaleout],\n ActionsEnabled=True, MetricName=metricname, EvaluationPeriods=1,\n Threshold=float(ScaleUpThreshold), Statistic=\"Average\", Namespace=namespace,\n ComparisonOperator=\"GreaterThanThreshold\", Period=ScalingPeriod, Unit=Unit)\n except Exception as e:\n logger.error('Failed to add High Alarm: ' + desc + ' for ASG: ' + asg_name)\n logger.error(\"[Alarm High Add]: {}\".format(e))\n return False\n\n a1=alarmname + '-low'\n d1=desc+ \" Low\"\n try:\n cloudwatch.put_metric_alarm(AlarmName=a1, AlarmDescription=d1,\n AlarmActions=[arn_scalein],\n ActionsEnabled=True, MetricName=metricname, EvaluationPeriods=1,\n Threshold=float(ScaleDownThreshold), Statistic=\"Average\", Namespace=namespace,\n ComparisonOperator=\"LessThanThreshold\", Period=ScalingPeriod,\n Unit=Unit)\n except Exception as e:\n logger.error('Failed to add Low Alarm: ' + desc + ' for ASG: ' + asg_name)\n logger.error(\"[Alarm Low Add]: {}\".format(e))\n return False\n\n return True", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def rule(func: Callable) -> Callable:\n\n def inner1(operation: dict, **kwargs) -> dict:\n if len(func(operation, **kwargs)):\n if \"violations\" not in operation.keys():\n operation[\"violations\"] = []\n operation[\"violations\"].append(func(operation, **kwargs))\n\n return operation\n\n return inner1" ]
[ "0.53630054", "0.53394985", "0.5327985", "0.53265405", "0.5294704", "0.52564716", "0.5245898", "0.52369255", "0.51835185", "0.5125754", "0.51243144", "0.5101995", "0.5097013", "0.50950426", "0.5072278", "0.50347114", "0.50080895", "0.49950373", "0.49888673", "0.49649054", "0.4950176", "0.49480435", "0.49422026", "0.49403253", "0.49353", "0.49296924", "0.4921963", "0.49034834", "0.49034834", "0.48943445" ]
0.5530519
1
The number of failed locations.
def failed_location_count(self) -> float:
    return pulumi.get(self, "failed_location_count")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_locations(self):\n return len(self.locations)", "def error_count(self):\n return len(self.errors)", "def get_location_count(self):\n return len(self.matrix)", "def getNumFailures(self):\n return _libsbml.SBMLValidator_getNumFailures(self)", "def counter(self) -> int:\n return self._fail_counter", "def numReportableFailures(self):\r\n count = 0\r\n for failure in self.failures:\r\n if not failure.platform.isBroken():\r\n count += 1\r\n pass\r\n pass\r\n return count", "def num_failed(self):\n return sum(cmd.failed for id, cmd in self.commands)", "def get_fail_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.FAIL)", "def num_failures(self):\n min_time = time.time() - self.window\n\n while self.failures and self.failures[0] < min_time:\n self.failures.popleft()\n\n return len(self.failures)", "def get_error_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.ERROR)", "def locations_n(self):\n return self.locations[1]", "def fail_counter(self) -> int:\n return self._state_storage.counter", "def count(self):\n # TODO not implemented yet\n return 0", "def _get_count(results):\n return len(results)", "def location_length(self, location):\n return self.num_cells", "def _failed_tests(self, metric_source_id: str) -> int:\n return self.__test_count(metric_source_id, 'failed')", "def lines_errored(self) -> int:\n with self.lock:\n return self._lines_errored", "def number_of_loc_changes(self) -> int:\n raise NotImplementedError('not implemented')", "def _loc(self) -> int:\n return len(self.lines)", "def get_tries(self):\n return self._tries", "def getNumErrors(self):\n return _libsbml.XMLErrorLog_getNumErrors(self)", "def count(self):\n return len(self._runs)", "def _error_count(cls, samples: Samples) -> int:\n return cls.__sample_count(samples, \"false\")", "def hits(self):\n return len(self.successes) + len(self.failures)", "def get_num_locations(self, project):\n locations = Location.objects.filter(\n Q(private=False) |\n Q(private_for_project=project)).count()\n return locations", "def length(self):\n total_length = 0\n for location_a, location_b in zip(\n self.locations[:-1], self.locations[1:]):\n total_length += Line(location_a, location_b).length\n return total_length", "def countSites(self):\n self.ni = len(self.sites)\n return self.ni", "def calc_errors(test_data, loc_by_img):\n one_km_count = 0\n five_km_count = 0\n ten_km_count = 0\n hundred_km_count = 0\n thousand_km_count = 0\n other_count = 0\n for test_img in test_data:\n img_id = test_img['watchlink']\n img_result_loc = loc_by_img[img_id]\n img_actual_loc = Location(float(test_img['latitude']), float(test_img['longitude']))\n error = Location.dist(img_result_loc, img_actual_loc)\n if error < 1:\n one_km_count += 1\n elif error < 5:\n five_km_count += 1\n elif error < 10:\n ten_km_count += 1\n elif error < 100:\n hundred_km_count += 1\n elif error < 1000:\n thousand_km_count += 1\n else:\n other_count += 1\n return [one_km_count, five_km_count, ten_km_count, hundred_km_count, thousand_km_count, other_count]", "def get_retry_count(self):\r\n return self.retried_nomax + self.retried_withmax", "def location_length(self, location):\n if location == 'cells':\n return self.num_cells\n return self.num_nodes" ]
[ "0.7635072", "0.731395", "0.7201066", "0.7142528", "0.7035365", "0.7015711", "0.7007765", "0.69258225", "0.68146944", "0.67784536", "0.6741547", "0.6715071", "0.6687685", "0.66636074", "0.6612101", "0.66006345", "0.6583765", "0.657363", "0.65356445", "0.64715993", "0.64708614", "0.64582217", "0.6457043", "0.644023", "0.64348996", "0.64255726", "0.6317111", "0.63137263", "0.63031435", "0.62737674" ]
0.88088346
0
The Application Insights web test Id.
def web_test_id(self) -> str:
    return pulumi.get(self, "web_test_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def application_insights_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_insights_id\")", "def web_id(self):\n return self._web_id", "def application_insights_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_insights_id\")", "def unique_id(self) -> str:\n return self.tahoma_device.url", "def run_id() -> int:\n return sg_covid_impact.config[\"flows\"][\"glass\"][\"run_id\"]", "def get_test_id(response):\n response_text = response.text\n\n regexpr = re.compile(TEST_NAME_REGEX)\n\n return regexpr.findall(response_text)[0]", "def tracking_id(self) -> str:\n return pulumi.get(self, \"tracking_id\")", "def app_id(self) -> str:\n return self._app_id", "def unique_id(self):\n return self.config_entry.entry_id + \"stg\"", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def application_insights_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_insights_id\")", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"10423098\"", "def developer_app_insights_application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"developer_app_insights_application_id\")", "def get_id(self):\n return self.get_api_endpoint()", "def trace_id(self) -> str:\n return self._trace_id", "def run_id(self) -> str:\n return self._step_execution_context.run_id", "def test_id(self):\n result = self.test_client.id\n\n assert result == \"86576599\"", "def get_id(self):\n return self.get_sitename()", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def scenario_id(self) -> str:\n return self.__scenario_id", "def test_event_id(self):\n result = self.test_client.event_id\n\n assert result == \"2130389\"", "def ApplicationId(self) -> _n_0_t_0:", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def id(self) -> str:\n return self._event.get('id')", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"sensor_status\", \"frigate\"\n )", "def get_identifier(self, request):\n return '%s-%s'.format((request.session.session_key, self.flowIdentifier))[:200]", "def appid(self):\n return self._item[\"appid\"]", "def id(self): # type: () -> str\n return self.inspection['Id']", "def unique_id(self) -> str:\n return f\"{self._mac}_tracker\"", "def app_insights_instrumentation_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_insights_instrumentation_key\")" ]
[ "0.66186976", "0.65677965", "0.63514495", "0.6283445", "0.62499297", "0.62100387", "0.620081", "0.6128248", "0.61215425", "0.61211663", "0.61199594", "0.6118256", "0.60975343", "0.608105", "0.6077543", "0.6069551", "0.60655457", "0.6046929", "0.6040767", "0.6037821", "0.5960957", "0.59179604", "0.5888644", "0.58727044", "0.5856144", "0.5853519", "0.5849275", "0.58404803", "0.58339125", "0.5823553" ]
0.8475513
0
Pass in an index_name to be deleted. Raises an IndexNotFound exception if the index is missing on the node.
def delete_index(index_name):
    try:
        ES.indices.delete(index=[index_name])
    except NotFoundError:
        raise IndexNotFound(index_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_index(self, index_name):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"DELETE\", \"/1/indexes/%s\" % quote(index_name.encode('utf8'), safe=''), self.timeout)", "def delete_index(client, index_name):\n\n client.conn.indices.delete(index=index_name)", "def delete_index(index_name):\n connection = es.get_es()\n\n return connection.indices.delete(index_name)", "def drop_index(self, index_name):\n self.execute(self.commands.drop_index(self.name, index_name))", "def _delete_index( env, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n adapter.delete( queries=[\"*:*\"] )\n adapter.commit()\n logger.info(u\"Deleted index\")", "def delete_index(self, index: str):\n self.__client__.indices.delete(index, ignore=[400, 404])", "def delete_index(index_name):\n resp = es.indices.delete(index=index_name)\n print(resp)\n\n\n import tutorial\n tutorial.create_index('novels')\n tutorial.document_add('novels', 'authors', {'name':'Sidney Sheldon'}, 1)\n tutorial.document_view(index_name='novels', doc_type='authors', doc_id=1)", "def delete_index(self):\n es = self.get_es()\n if es.head(self.es_index):\n es.delete(self.es_index)", "def delete_index_field(DomainName=None, IndexFieldName=None):\n pass", "def delete(client, name, index):\n r = client.indices.delete_alias(name=name, index=index)\n ok = r.get(\"acknowledged\")\n LOG.info(json.dumps(r))\n if not ok:\n sys.exit(UNKNOWN_ERROR)", "def delete_all_in_index(index_name):\n doc_index = search.Index(name=index_name)\n\n # looping because get_range by default returns up to 100 documents at a time\n while True:\n # Get a list of documents populating only the doc_id field and extract the ids.\n document_ids = [document.doc_id\n for document in doc_index.get_range(ids_only=True)]\n if not document_ids:\n break\n # Delete the documents for the given ids from the Index.\n doc_index.delete(document_ids)", "def _Dynamic_DeleteIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"DeleteIndex\", request_id)\n return void", "def delete(get_index, document_id): \n client, index_name = connection_es()\n resp = client.delete(index = get_index, doc_type=\"nvisnx\", id = document_id)\n return resp", "def zero_downtime_index(index_name, index_config):\n client = indices_client()\n temporary_name = index_name + '_' + str(uuid.uuid4())\n logging.info('creating index with config %s', index_config)\n create_index(temporary_name, index_config, client)\n try:\n yield temporary_name\n atomic_swap(index_name, temporary_name, client)\n except Exception:\n logging.error(\n 'deleting temporary index %s due to error:',\n temporary_name,\n exc_info=True\n )\n client.delete(index=temporary_name)", "def remove_document_from_index(self, doc_name):\n\t\tif not doc_name:\n\t\t\treturn\n\n\t\tix = self.get_index()\n\t\twith ix.searcher():\n\t\t\twriter = AsyncWriter(ix)\n\t\t\twriter.delete_by_term(self.id, doc_name)\n\t\t\twriter.commit(optimize=True)", "def delete_at_index(self, index: int) -> T:\n pass", "def delete_at_index(self, index: int) -> T:\n pass", "def delete_index(self, index):\n delete = f\"(//*[name()='svg'][@class='chakra-icon css-onkibi'])[{index}]\"\n delete_sitem = self.locator_finder_by_xpath(delete)\n delete_sitem.click()\n time.sleep(1)\n delete_confirmation = \"//*[text()='Delete']\"\n delete_confirmation_stiem = 
self.locator_finder_by_xpath(delete_confirmation)\n delete_confirmation_stiem.click()\n time.sleep(1)", "def wipe_index(self, index):\n url = f'{self.host}{index}/_delete_by_query?conflicts=proceed'\n data = {'query': {'match_all': {}}}\n resp = requests.post(url, json=data)\n self.flush(index)\n return resp.json()", "def delete_index(\n self,\n ) -> Callable[\n [datastore_admin.DeleteIndexRequest], Awaitable[operations_pb2.Operation]\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_index\" not in self._stubs:\n self._stubs[\"delete_index\"] = self.grpc_channel.unary_unary(\n \"/google.datastore.admin.v1.DatastoreAdmin/DeleteIndex\",\n request_serializer=datastore_admin.DeleteIndexRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"delete_index\"]", "def test_index_delete(self):\n a = self.test_index()\n a.delete()\n es = self.es\n es.refresh()\n r = es.search(query=StringQuery('zool'))\n eq_(r['hits']['total'], 0, \"We shouldn't get any hits.\")", "def delete(self, index):\n del self.data[index]", "def remove(self, attributeIndexOrName) -> None:\n ...", "def delete_index(self, request):\n return request.param", "def __delitem__(self, index: int) -> None:\n error = self._coreIndex.removeDescriptor(index)\n assertError(error)", "def delete_document(self, index: str, doc_id: str):\n self.__client__.delete(index=index, id=doc_id, refresh=True)", "def drop_index(self, index):\n if isinstance(index, list):\n for column in index:\n self.table.remove_index(f\"{self.table.name}_{column}_index\")\n\n return self\n\n self.table.remove_index(index)\n\n return self", "def removeNode(self, index):\n del self.nodes[index]", "def DeleteIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_DeleteIndex(self, arg0)", "def document_delete(index_name, doc_type, doc_id):\n resp = es.delete(index=index_name, doc_type=doc_type, id=doc_id)\n print(resp)" ]
[ "0.8237238", "0.7881001", "0.7858925", "0.7437211", "0.7330589", "0.7315321", "0.7273739", "0.71558714", "0.6878204", "0.678057", "0.6732007", "0.6623749", "0.65878093", "0.641981", "0.6412163", "0.63936096", "0.63936096", "0.63688356", "0.6324927", "0.63195837", "0.62910414", "0.6282386", "0.6248987", "0.62466246", "0.62356216", "0.62254703", "0.6216733", "0.61746955", "0.61701244", "0.61700493" ]
0.7987658
1
Create a document based on an instance of a model. Returns None if successful and an error string if it's not.
def create_document(obj):
    index = obj.get_index_name()
    doc_type = obj.get_document_type()
    body = obj.get_document_body()
    exists = ES.exists(index=index, doc_type=doc_type, id=obj.pk)
    if not exists:
        ES.create(index=index, doc_type=doc_type, body=body, id=obj.pk)
        return None
    return "Conflict: document already exists for {0} with id {1}.".format(
        obj.__class__.__name__, obj.pk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass", "def _create(self, model_obj: Any):\n conn = self.provider.get_connection()\n\n try:\n model_obj.save(\n refresh=True,\n index=self.model_cls._index._name,\n using=conn,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def create_document(document: DocumentIn, db: Session = Depends(get_db)):\n return add_document(db, document)", "async def create_doc(self, *args, **kwargs):\n pass", "def create(cls, **kwargs):\n try:\n instance = cls(**kwargs)\n return instance.save()\n except Exception as e:\n return e", "def _create_document(result_dict):\n document = Document(\n name=result_dict['docname'],\n original_id=result_dict['itemid'],\n doctype=result_dict['doctype'],\n language=result_dict['languageisocode'],\n conclusion=result_dict['conclusion'],\n originatingbody=result_dict['originatingbody'],\n application=result_dict['application'],\n )\n return document", "async def create(self, alias=None, **kwargs):\n document = self.__klass__(**kwargs)\n return await self.save(document=document, alias=alias)", "def new_document(klass, name=None, author=None):\n doc = Factory.new_document(klass, author)\n doc.name = name\n doc._osl.id = uuid.uuid4()\n return doc", "def create_document(self):\n # set single values\n if len(self.field_values) > 0:\n self._set_field_values()\n\n # set multi values\n if len(self.block_field_values) > 0:\n self._set_multi_field_values()\n\n self.field_values = {}\n self.block_field_values = {}\n\n self.client.service.CreateDocument()", "def _doc_create(type, data):\n doc = dict(data)\n doc.update({'model_type': type})\n return doc", "def save( self, request, idx ) :\n\n if idx != 'None' :\n obj = models.Document.objects.get( id = idx )\n obj.element = self.cleaned_data['element']\n obj.type = self.cleaned_data['type']\n obj.name = self.cleaned_data['name']\n\n else :\n obj = models.Document.objects.get_or_create(element = self.cleaned_data['element'],\n type = self.cleaned_data['type'],\n name = self.cleaned_data['name'],\n author = request.user )[0]\n\n obj.link = self.cleaned_data['link']\n obj.save()\n\n return obj", "def create_document(content: Union[str, bytes]) -> Document:\n r = requests.post(\"https://pastecord.com/documents\", data=content)\n r.raise_for_status()\n \n return Document(r.json()['key'])", "def _create(self, model_obj):\n conn = self._get_session()\n\n try:\n conn.add(model_obj)\n except DatabaseError as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return model_obj", "def get_document(obj):\n try:\n return ES.get(\n index=obj.get_index_name(), doc_type=obj.get_document_type(), id=obj.pk)\n except NotFoundError:\n raise DocumentNotFound(obj.get_index_name(), obj.pk)", "def perform_create(self, serializer):\n try:\n\n # Create the document object\n long_url = serializer.validated_data['long_url']\n document, _ = Document.objects.get_or_create(long_url=long_url)\n serializer.instance = document\n\n # Create the annotation object\n annotate, _ = Annotation.objects.get_or_create(\n user = self.request.user, document = document\n )\n\n except CorpusException as e:\n raise ValidationError(str(e))", "def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()", "async def insert_one(self, 
model: Model) -> Model:\n\n if not isinstance(model, Model):\n raise ValueError('insert_one method expects Model instance.')\n\n model_as_dict = model.as_dict\n\n if not model_as_dict.get('_id'):\n model_as_dict.pop('_id')\n\n result = await self.collection.insert_one(model_as_dict)\n return await self.get_one(where={'_id': result.inserted_id})", "def create(cls, **validated_data):\n instance = cls(**validated_data)\n if isinstance(instance, cls):\n db.session.add(instance)\n try:\n db.session.commit()\n return instance\n except Exception as error:\n db.session.rollback()\n print(error.args)\n return None", "def _get_document(self, doc_uid, doc_type, row, mappings):\n # Create document.\n doc = pyesdoc.create(doc_type,\n project=DOC_PROJECT,\n source=DOC_SOURCE,\n version=1,\n uid=doc_uid)\n\n # Assign document dates.\n try:\n doc.meta\n except AttributeError:\n pass\n else:\n doc.meta.create_date = DOC_CREATE_DATE\n doc.meta.update_date = DOC_UPDATE_DATE\n\n # Assign document author.\n try:\n doc.meta.author = DOC_AUTHOR_REFERENCE\n except AttributeError:\n pass\n\n # Set document attributes from mapped worksheet cells.\n for mapping in mappings:\n self._set_document_attribute(doc, row, mapping)\n\n return doc", "def document_new():\n\n t = request.form['type']\n if t == 'book':\n doc = Book(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n edition=request.form['edition'],\n publisher=request.form['publisher'],\n publishment_year=request.form['publishment_year'],\n bestseller='bestseller' in request.form,\n reference='reference' in request.form\n )\n elif t == 'av':\n doc = AVMaterial(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors'])\n )\n elif t == 'article':\n doc = JournalArticle(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n issue_editor=request.form['issue_editor'],\n issue_publication_date=request.form['issue_publication_date'],\n journal=request.form['journal']\n )\n\n for i in range(int(request.form['copies'])):\n dc = DocumentCopy(document=doc)\n\n db.session.add(doc)\n db.session.commit()\n\n log(session['login'], 'created', 'document {}'.format(doc.id))\n\n # TODO\n return redirect('/admin/documents')", "def create(self, **kwargs):\n obj = self.model(**kwargs)\n self._for_write = True\n obj.save(force_insert=True, using=self.db, skip_moderation=True)\n return obj", "async def _save(self, document, alias=None):\n doc = document.to_son()\n\n if document._id is not None:\n try:\n await self.coll(alias).update({\"_id\": document._id}, doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n else:\n try:\n doc_id = await self.coll(alias).insert(doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n document._id = doc_id\n\n return document", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "def get_or_create(model, **kwargs):\n session = db.session()\n instance = session.query(model).filter_by(**kwargs).first()\n if instance:\n return 
instance\n else:\n instance = model(**kwargs)\n session.add(instance)\n session.commit()\n return instance", "def create(self, request, *args, **kwargs):\n # Deserialize and validate the data from the user.\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n # Execute the document and annotation creation\n self.perform_create(serializer)\n\n # Get the headers and return a response\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def test_client_document_create(self):\n pass", "def createModel(self, sid=\"\"):\n return _libsbml.SBMLDocument_createModel(self, sid)", "def create(self, model, data, context={}):\n try:\n res = self.object_facade.execute(self.dbname, self.user_id, self.user_passwd,\n model, 'create', data, context)\n return res\n except socket.error, err:\n raise Exception(u'Conexion rechazada: %s!' % err)\n except xmlrpclib.Fault, err:\n raise Exception(u'Error %s en create: %s' % (err.faultCode, err.faultString))" ]
[ "0.6608516", "0.64796597", "0.64517796", "0.61958444", "0.6173862", "0.61243945", "0.61194986", "0.60412973", "0.6032219", "0.6027759", "0.5958546", "0.59557694", "0.59135765", "0.5911795", "0.58485466", "0.5826208", "0.581102", "0.5803531", "0.5795227", "0.5792533", "0.57392025", "0.57029843", "0.56997406", "0.56997406", "0.56997406", "0.56881857", "0.56308556", "0.5624891", "0.56156886", "0.56132007" ]
0.70802784
0
Get a document based on the instance. Raises a DocumentNotFound exception if the document is not found on the index.
def get_document(obj):
    try:
        return ES.get(
            index=obj.get_index_name(), doc_type=obj.get_document_type(), id=obj.pk)
    except NotFoundError:
        raise DocumentNotFound(obj.get_index_name(), obj.pk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_document(self, url: str) -> Optional[Document]:\n return self._index.get(url, None)", "def get_document(self, *args, **kwargs):\n return self._documents_manager.get_document(*args, **kwargs)", "def fetch_search_document(self, index):\n assert self.pk, \"Object must have a primary key before being indexed.\"\n client = get_client()\n return client.get(\n index=index,\n doc_type=self.search_doc_type,\n id=self.pk\n )", "def get_document_by_id(self, index: str, doc_id: str) -> Optional[Dict[str, Any]]:\n try:\n return self.__client__.get(index=index, id=doc_id)\n except elasticsearch.exceptions.NotFoundError:\n return None", "def get_document(self, doc_id, options=None):\n path = urllib.quote_plus(str(doc_id))\n try:\n result = self.request(path, pylastica.request.Request.GET, query=options).data\n except pylastica.exception.ResponseException:\n raise pylastica.exception.NotFoundException(\"Document with id %s not found.\" % doc_id)\n if result['found'] is None or result['found'] == '' or not result['found']:\n raise pylastica.exception.NotFoundException(\"Document with id %s not found.\" % doc_id)\n data = result['_source'] if '_source' in result else {}\n document = pylastica.document.Document(doc_id, data, self.name, self.index)\n document.version = result['_version']\n return document", "def get_document(self, docid):\n raise NotImplementedError", "def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist", "def get_document(self, metadata_field, field_val):\n\n if metadata_field not in self.metadata_fields:\n raise MissingMetadataError([metadata_field])\n\n if metadata_field == \"date\":\n field_val = int(field_val)\n\n for document in self.documents:\n if getattr(document, metadata_field) == field_val:\n return document\n\n raise ValueError(\"Document not found\")", "def get_document(self, doc_uri: str) -> Document:\n return self._docs.get(doc_uri) or self._create_document(doc_uri)", "def GetDocument(self, *args, **kwargs):\n pass", "def obj_get(self, request=None, **kwargs):\n return Document(self.get_collection(request).find_one({\n \"_id\": ObjectId(kwargs.get(\"pk\"))\n }))", "def get_document(self, docid):\n try:\n return self.sql_session.query(Document).get(docid)\n except OperationalError:\n raise IOError(\"Sorry, this database is incompatible with the \"\n \"current version of Luminoso. 
If you want, you can \"\n \"delete the model directory and start again.\")", "def get_document(eid, session: CondorSession) -> Response:\n document = Document.find_by_eid(session, eid)\n if not document:\n return Response(\n {'message': 'The especified eid is not found on database'},\n status=404,\n )\n return Response(sc.Document(document))", "def get_document_or_404(id, db=None):\n try:\n return dbs[DEFAULT_DATABASE if db is None else db][id]\n except ResourceNotFound:\n raise Http404", "def get_document_by_id(document_id):\n return Documents.query.filter_by(id=document_id).first()", "def get_document_by_name(label, doc_type):\n return Documents.query.filter_by(type=doc_type, label=label).first()", "def __get__(self, instance, owner):\n if instance is None:\n # Document class being used rather than a document object\n return self\n\n # Get value from document instance if available\n return instance._data.get(self.name)", "def document(self, document_id):\r\n return doc.Document(self, document_id)", "def as_search_document(self, index='_all'):\n raise NotImplementedError(\n \"{} does not implement 'get_search_document'.\".format(self.__class__.__name__)\n )", "def document(self):\n query = {\"_id\": ObjectId(self.document_id)}\n return Document(get_collection(\"documents\").find_one(query))", "def find_doc(self, doc_type, property_name, property_value):\n try:\n self.client.connect()\n db = self.client[self.db_name]\n selector = {\n '_id': {'$gt': 0},\n 'type': doc_type,\n property_name: property_value\n }\n query = Query(db, selector=selector)\n for doc in query()['docs']:\n return doc\n return None\n finally:\n self.client.disconnect()", "def get_named_document(self, entity, name):\n view = self.db.view(\"%s/name\" % entity, include_docs=True)\n result = view[name]\n if len(result) != 1:\n raise ValueError(\"no such %s document '%s'\" % (entity, name))\n return result.rows[0].doc", "def get_document(self, doc_id: int) -> Optional[Users]:\n try:\n doc = self.session.query(CandidatesDocuments).get(doc_id)\n\n return doc\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get doc: {excpt}')\n\n return None", "def get(self, index, id):\n url = f'{self.host}{index}/_doc/{id}'\n resp = requests.get(url)\n return resp.json()", "def test_get_document(index_with_documents):\n response = index_with_documents().get_document(\"500682\")\n assert isinstance(response, Document)\n assert hasattr(response, \"title\")\n assert response.title == \"The Highwaymen\"", "def getDocument(self, key):\n data = self.client.get(self.name +\"/_all_docs\", {\n \"include_docs\": True,\n \"key\" : \"\\\"%s\\\"\" % util.quote(key),\n }).getBodyData()\n\n try:\n return data[\"rows\"][0]\n except: pass", "def get_doc(doc_id):\n queue = get_doc_queue(app.config)\n data = queue.get_by_id(doc_id)\n if data:\n return jsonify(doc=data)\n return jsonify(err=f\"{doc_id} not found\"), 404", "def find_by_id(cls, doc_id: str):\n document = None\n if doc_id:\n try:\n document = cls.query.get(doc_id)\n except Exception as db_exception: # noqa: B902; return nicer error\n current_app.logger.error('Db2Document.find_by_id exception: ' + str(db_exception))\n raise DatabaseException(db_exception)\n if document:\n document.strip()\n return document", "async def get(self, id=None, alias=None, **kwargs):\n\n from jetengine import Q\n\n if id is None and not kwargs:\n raise RuntimeError(\"Either an id or a filter must be provided to get\")\n\n if id is not None:\n if not isinstance(id, ObjectId):\n id = 
ObjectId(id)\n\n filters = {\"_id\": id}\n else:\n filters = Q(**kwargs)\n filters = self.get_query_from_filters(filters)\n\n instance = await self.coll(alias).find_one(filters, projection=self._loaded_fields.to_query(self.__klass__))\n if instance is None:\n return None\n else:\n doc = self.__klass__.from_son(\n instance,\n # if _loaded_fields is not empty then\n # document is partly loaded\n _is_partly_loaded=bool(self._loaded_fields),\n # set projections for references (if any)\n _reference_loaded_fields=self._reference_loaded_fields,\n )\n if self.is_lazy:\n return doc\n else:\n await doc.load_references()\n return doc", "def get_document_by_name(update, name_or_id):\n sc_api = SmartCAT(SMARTCAT_API_USERNAME, SMARTCAT_API_PASSWORD)\n try:\n document = sc_api.project.get_document_by_name(SMARTCAT_PROJECT_ID, name_or_id)\n except SmartcatException as e:\n logging.error('Error getting document: {0} {1}'.format(e.code, e.message))\n update.message.reply_text(SHIT_HAPPENS)\n return None\n\n if not document:\n logging.warning('Document not found')\n update.message.reply_text(NOTHING_FOUND)\n return None\n\n return document" ]
[ "0.7509461", "0.7387173", "0.73165256", "0.7186353", "0.7047284", "0.691709", "0.67920256", "0.6776412", "0.6762755", "0.6755989", "0.6754873", "0.6712459", "0.6660555", "0.6659591", "0.6637856", "0.6628112", "0.6611464", "0.66052437", "0.6423677", "0.6407396", "0.63751054", "0.6360176", "0.63191724", "0.6307892", "0.6273919", "0.6254734", "0.625013", "0.6230417", "0.6220704", "0.6211642" ]
0.8279071
0
Updates the document from the index. This should be called via a signal whenever the obj gets saved.
def update_document(obj):
    index = obj.get_index_name()
    doc_type = obj.get_document_type()
    body = dict(doc=obj.get_document_body())
    try:
        ES.update(index=index, doc_type=doc_type, body=body, id=obj.pk)
    except NotFoundError:
        raise DocumentNotFound(obj.get_index_name(), obj.pk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_document(self):\n pass", "def update_index(self, document):\n\t\tix = self.get_index()\n\n\t\twith ix.searcher():\n\t\t\twriter = AsyncWriter(ix)\n\t\t\twriter.delete_by_term(self.id, document[self.id])\n\t\t\twriter.add_document(**document)\n\t\t\twriter.commit(optimize=True)", "def update(self, index, id, **kwargs):\n url = f'{self.host}{index}/_doc/{id}/_update'\n data = {'doc': {**kwargs}}\n requests.post(url, json=data)\n self.flush(index)\n return self.get(index, id)", "def update(self, obj):\n self._updater.update(obj)", "def update_document(\n self,\n index: str,\n doc_id: str,\n document: Dict[str, Any],\n partial_update: bool = False,\n ):\n if partial_update:\n self.__client__.update(index=index, id=doc_id, body={\"doc\": document})\n self.__client__.index(index=index, id=doc_id, body=document)", "def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)", "def on_save(sender, **kwargs):\n obj = kwargs[\"instance\"]\n if (\n not hasattr(obj, \"search_document\")\n or not hasattr(obj, \"index_components\")\n or not callable(obj.index_components)\n ):\n return\n transaction.on_commit(make_updater(kwargs[\"instance\"]))", "def save_object(self, obj):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % obj[\"objectID\"]).encode('utf8'), safe='')), self.client.timeout, obj)", "def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")", "def _index_doc(self, db, doc_id):\n doc = db.get(doc_id)\n if doc is None:\n log.warning(\"Unable to find document in database: '%s'\" % doc_id)\n return\n fields = doc.get('solr_fields')\n fields = [\"payload\", \"timesaved\"]\n if not fields:\n log.debug(\"Document '%s' does not define solr_fields\" % doc_id)\n return\n updates = []\n for field in fields:\n if doc.has_key(field):\n self.__normalize(updates, field, doc[field])\n updates.extend([{'type' : 'any'}, {'_id' : doc_id}])\n return updates", "def update(self, using=None):\n\n backend = self.get_backend(using)\n\n if backend is not None:\n backend.update(self, self.index_queryset(using=using))", "def __setitem__(self, idx, doc):\n if doc is None:\n return\n assert isinstance(idx, int)\n assert isinstance(doc, Document)\n path = self.paths[idx]\n doc.save(os.path.join(self.dirpath, path), fmt=self.fmt)", "def save( self, request, idx ) :\n\n if idx != 'None' :\n obj = models.Document.objects.get( id = idx )\n obj.element = self.cleaned_data['element']\n obj.type = self.cleaned_data['type']\n obj.name = self.cleaned_data['name']\n\n else :\n obj = models.Document.objects.get_or_create(element = self.cleaned_data['element'],\n type = self.cleaned_data['type'],\n name = self.cleaned_data['name'],\n author = request.user )[0]\n\n obj.link = self.cleaned_data['link']\n obj.save()\n\n return obj", "def document_update(index_name, doc_type, doc_id, doc=None, new=None):\n if doc:\n resp = es.index(index=index_name, doc_type=doc_type,\n id=doc_id, body=doc)\n print(resp)\n else:\n resp = es.update(index=index_name, doc_type=doc_type,\n id=doc_id, body={\"doc\": new})", "def update(self, docid, doc):\n self.delete(docid)\n wrapper = RedisWrapper(self.dbprefix, 
self.client, docid)\n self._store_doc(doc, wrapper)", "def update_object(self, instance, using=None, **kwargs):\n # Check to make sure we want to index this first.\n if self.should_update(instance, **kwargs):\n backend = self.get_backend(using)\n\n if backend is not None:\n backend.update(self, [instance])", "def updateModel(self):\n pass", "def updateDocument(self, document):\n data = self.updateDocumentAll([document])\n try:\n return data[0]\n except: pass", "def store_new_item(self, doc):\n self._collection.save(doc.document)", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())", "def store(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n idx = doc.features.get(self.idxfeatname())\n if idx is None:\n raise Exception(\"Cannot append document, no __idx_ID feature\")\n self.__setitem__(idx, doc)", "def _update_model(self, idx):\n self._wfield.update(self._choices[idx][0])", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def update(self, request, *args, **kwargs):\n response = super(NoteViewSet, self).update(request, *args, **kwargs)\n obj = self.get_object()\n instance = obj.instance\n # update mongo data\n instance.parsed_instance.save()\n return response", "def index_doc(self, docid, object):\n\n if callable(self.discriminator):\n value = self.discriminator(object, _marker)\n else:\n value = getattr(object, self.discriminator, _marker)\n\n if value is _marker:\n # unindex the previous value\n self.unindex_doc(docid)\n self._not_indexed.add(docid)\n return None\n\n if isinstance(value, Persistent):\n raise ValueError('Catalog cannot index persistent object %s' %\n value)\n\n if docid in self._not_indexed:\n self._not_indexed.remove(docid)\n\n old = self._rev_index.get(docid)\n if old is not None:\n self.unindex_doc(docid)\n\n changed = False\n\n for facet in value:\n L = []\n categories = facet.split(':')\n for category in categories:\n L.append(category)\n facet_candidate = ':'.join(L)\n for fac in self.facets:\n if fac == facet_candidate:\n changed = True\n fwset = self._fwd_index.get(fac)\n if fwset is None:\n fwset = self.family.IF.Set()\n self._fwd_index[fac] = fwset\n fwset.insert(docid)\n revset = self._rev_index.get(docid)\n if revset is None:\n revset = self.family.OO.Set()\n self._rev_index[docid] = revset\n revset.insert(fac)\n\n if changed:\n self._num_docs.change(1)\n\n return value", "def updateItem(self, object):\n pass", "def refresh(self, form=None, **kwargs):\n default = dict(creation=False, refresh_index=True, asAuthor=False, onSaveEvent=False)\n default.update(kwargs)\n self.save(doc, form, **default)", "def update_index_by_name(self, doc_name):\n\t\tdocument = self.get_document_to_index(doc_name)\n\t\tif document:\n\t\t\tself.update_index(document)", "def update_data(self):\n self._model.update()\n self.__refresh()", "def index_fobj(fobj):\n doc = fileobject_to_dict(fobj)\n if doc is not None:\n #print doc\n SOLR.add(doc)\n else:\n pass" ]
[ "0.7923124", "0.7285815", "0.6899564", "0.674032", "0.6725462", "0.6640049", "0.660014", "0.6572434", "0.6434228", "0.6432489", "0.6420817", "0.64181536", "0.6382638", "0.6373966", "0.63086164", "0.6277604", "0.6238722", "0.6227302", "0.62181604", "0.620456", "0.6190446", "0.6180473", "0.61661404", "0.614894", "0.6139121", "0.61244243", "0.6107652", "0.6096093", "0.6076482", "0.60578865" ]
0.7662434
1
Delete a document from the index. This should be called via a signal when the obj gets deleted.
def delete_document(obj):
    index = obj.get_index_name()
    doc_type = obj.get_document_type()
    try:
        ES.delete(index=index, doc_type=doc_type, id=obj.pk)
    except NotFoundError:
        raise DocumentNotFound(obj.get_index_name(), obj.pk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_document(self, index: str, doc_id: str):\n self.__client__.delete(index=index, id=doc_id, refresh=True)", "def delete_document(self):\n pass", "def document_delete(index_name, doc_type, doc_id):\n resp = es.delete(index=index_name, doc_type=doc_type, id=doc_id)\n print(resp)", "def remove_document_from_index(self, doc_name):\n\t\tif not doc_name:\n\t\t\treturn\n\n\t\tix = self.get_index()\n\t\twith ix.searcher():\n\t\t\twriter = AsyncWriter(ix)\n\t\t\twriter.delete_by_term(self.id, doc_name)\n\t\t\twriter.commit(optimize=True)", "def delete(get_index, document_id): \n client, index_name = connection_es()\n resp = client.delete(index = get_index, doc_type=\"nvisnx\", id = document_id)\n return resp", "def delete_document(self, document):\n assert isinstance(document, pylastica.document.Document), \"document must be a Document object: %r\" % document\n options = document.get_options([\n 'version',\n 'version_type',\n 'routing',\n 'parent',\n 'replication',\n 'consistency',\n 'refresh',\n 'timeout'\n ])\n return self.delete_by_id(document.doc_id, options)", "async def remove_doc(self, *args, **kwargs):\n pass", "def delete_index(self):\n es = self.get_es()\n if es.head(self.es_index):\n es.delete(self.es_index)", "def delete(self, obj):\n raise NotImplementedError", "def delete_document(document_id, sync=False):\n q = {'term': {'document_id': document_id}}\n schemata = (DocumentRecord.SCHEMA_PAGE,\n DocumentRecord.SCHEMA_ROW,\n Document.SCHEMA)\n query_delete(entities_read_index(schemata), q, sync=sync)", "def delete(self, obj=None):\n pass", "def deleteDocument(self, document):\n data = self.deleteDocumentAll([document])\n try:\n return data[0]\n except: pass", "def delete_model(self, request, document):\n\n self.send_notification_email(document, request, \n 'email/document_deleted.txt.django')\n document.deleted = True\n document.save()", "def delete(self, index):\n del self.data[index]", "def delete(self, obj):", "def delete(self):\n if not self.id:\n raise AttributeError(\n \"Provide Document ID to delete a document.\"\n \"Assign it to AutomatedDocument object `id` attribute or pass to class constructor.\"\n )\n\n return self._client.delete(\"{}{}/\".format(self._path, self.id))", "def delete(self, obj):\n self.session.delete(obj)", "def remove(self, document_id, namespace, timestamp):\n index, doc_type = self._index_and_mapping(namespace)\n\n action = {\n '_op_type': 'delete',\n '_index': index,\n '_type': doc_type,\n '_id': u(document_id)\n }\n\n meta_action = {\n '_op_type': 'delete',\n '_index': self.meta_index_name,\n '_type': self.meta_type,\n '_id': u(document_id)\n }\n\n self.index(action, meta_action)", "def delete_object(self, object_id):\n if (len(\"%s\" % object_id) == 0):\n raise AlgoliaException(\"object_id is required\")\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"DELETE\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % object_id).encode('utf8'), safe='')), self.client.timeout)", "def delete_at_index(self, index: int) -> T:\n pass", "def delete_at_index(self, index: int) -> T:\n pass", "def _delete(self, model_obj):\n conn = self._get_session()\n\n try:\n model_obj.delete(\n index=self.model_cls._index._name,\n using=conn,\n refresh=True,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def document_delete(document_id):\n\n log(session['login'], 'deleted', 'document {}'.format(document_id))\n\n doc = Document.query.filter(Document.id == 
document_id).first_or_404()\n db.session.delete(doc)\n db.session.commit()\n return redirect(request.referrer)", "def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)", "def delete(self):\n self.solr.delete(q=self.q)", "def test_delete_document(index_with_documents):\n index = index_with_documents()\n response = index.delete_document(\"500682\")\n assert isinstance(response, TaskInfo)\n assert response.task_uid is not None\n index.wait_for_task(response.task_uid)\n with pytest.raises(Exception):\n index.get_document(\"500682\")", "def update_index(self, document):\n\t\tix = self.get_index()\n\n\t\twith ix.searcher():\n\t\t\twriter = AsyncWriter(ix)\n\t\t\twriter.delete_by_term(self.id, document[self.id])\n\t\t\twriter.add_document(**document)\n\t\t\twriter.commit(optimize=True)", "def trigger_delete(cls, instance):\n es_client.delete(instance.blog.index_name(), 'blog_post_index', instance.id)", "def delete(self, obj=None):\n if obj:\n self.__session.delete(obj)", "def delete(self, obj=None):\n if obj:\n self.__session.delete(obj)" ]
[ "0.7964796", "0.78938943", "0.73939353", "0.7126827", "0.7060756", "0.70206016", "0.69494325", "0.68832034", "0.6852863", "0.6832469", "0.6787975", "0.6762144", "0.6753757", "0.67328316", "0.6688438", "0.6654373", "0.66286594", "0.66176105", "0.6594214", "0.6585247", "0.6585247", "0.6577534", "0.6563415", "0.6535019", "0.6516708", "0.65090716", "0.6477987", "0.6458015", "0.643729", "0.643729" ]
0.82214576
0
Initialize all models with statistics about cases. It can either receive an explicit data frame with cases/deaths statistics, or a callable object that receives a model and returns the desired cases. If none of these are passed, it assumes that the cases should be initialized from the region.
def init_cases(self: T, data=None, regions=None, raises=True, **kwargs) -> T:
    kwargs["regions"] = {} if regions is None else regions
    kwargs.setdefault("real", True)
    for i, report in enumerate(self._reports):
        print(i, report.model.region.id, report.model.region.name)
        call_safe_if(raises, report, report.init_cases, data, **kwargs)
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_case_metrics(self) -> None:\n for case in self.cases:\n graph_distance, time_distance = extract_case_distances(self.process_model_graph, case)\n case.graph_distance = graph_distance\n case.time_distance = time_distance", "def setUp(self):\n self.df_casesrecord = pd.DataFrame(\n {\n \"date\": [\n date(2020, 4, 9),\n date(2020, 4, 9),\n date(2020, 4, 10),\n date(2020, 4, 10),\n date(2020, 4, 11),\n date(2020, 4, 11),\n ],\n \"iso_code\": [\"FRA\", \"GBR\", \"FRA\", \"GBR\", \"FRA\", \"GBR\"],\n \"area\": [547557, 241930, 547557, 241930, 547557, 241930],\n \"population\": [\n 67059887,\n 66834405,\n 67059887,\n 66834405,\n 67059887,\n 66834405,\n ],\n \"weekly_avg_cases\": [7000, 4800, 7130, 4400, 7299, 4250],\n }\n )\n self.df_knotdateset = pd.DataFrame(\n {\n \"growth_factor_0_1\": [1.28, 1.29, 1.25, 1.25],\n \"growth_factor_1_2\": [1.1, 1.1, 1.1, 1.05],\n \"growth_factor_2_3\": [0.9, 0.9, 0.95, 0.95],\n \"iso_code\": [\"FRA\", \"FRA\", \"GBR\", \"GBR\"],\n \"knot_date_1\": [\n date(2020, 3, 15),\n date(2020, 3, 15),\n date(2020, 3, 19),\n date(2020, 3, 21),\n ],\n \"knot_date_2\": [\n date(2020, 4, 4),\n date(2020, 4, 5),\n date(2020, 4, 4),\n date(2020, 4, 18),\n ],\n \"weight\": [24, 8, 14, 12],\n }\n )\n self.df_modeldaterange = pd.DataFrame(\n {\n \"initial_date\": [date(2020, 3, 1), date(2020, 3, 2)],\n \"maximum_date\": [date(2020, 6, 8), date(2020, 6, 10)],\n \"first_restrictions_date\": [date(2020, 2, 29), date(2020, 3, 13)],\n \"lockdown_date\": [date(2020, 3, 17), date(2020, 3, 21)],\n \"iso_code\": [\"FRA\", \"GBR\"],\n }\n )\n self.df_possibledateset = pd.DataFrame(\n {\n \"n_days_first_restrictions\": [0, 0, 1, 1, 0, 0, 1, 1],\n \"n_days_lockdown\": [15, 16, 0, 1, 6, 7, 0, 1],\n \"dates_counterfactual_first_restrictions\": [\n date(2020, 2, 29),\n date(2020, 2, 29),\n date(2020, 2, 28),\n date(2020, 2, 28),\n date(2020, 3, 13),\n date(2020, 3, 13),\n date(2020, 3, 12),\n date(2020, 3, 12),\n ],\n \"dates_counterfactual_lockdown\": [\n date(2020, 3, 2),\n date(2020, 3, 1),\n date(2020, 3, 17),\n date(2020, 3, 16),\n date(2020, 3, 15),\n date(2020, 3, 14),\n date(2020, 3, 21),\n date(2020, 3, 20),\n ],\n \"iso_code\": [\"FRA\", \"FRA\", \"FRA\", \"FRA\", \"GBR\", \"GBR\", \"GBR\", \"GBR\"],\n }\n )", "def __init__(self, case_file_path, session_file_path):\n try:\n self.df_cases = pd.read_csv(case_file_path)\n except FileNotFoundError:\n print(\"Case data not found.\")\n\n try:\n self.df_sessions = pd.read_csv(session_file_path)\n except FileNotFoundError:\n print(\"Session data not found\")\n\n self.model = self.create_model()", "def run_models(\n weather_fn: str,\n weather_header_row: int,\n start_date: str,\n start_time: str,\n duration: int,\n selected_models: Dict,\n params_grass: Dict,\n params_mk5: Dict,\n params_vesta: Dict,\n params_vesta_fhr: Dict,\n ) -> Dict:\n start = dt.datetime.now()\n weather_df = get_weather(weather_fn, weather_header_row)\n weather_df = trim_weather(weather_df, start_date, start_time, duration)\n \n\n MODELS = {\n # 'GRASS_Cheney_98': ros_grass_cheney(weather_df, grass_state, grass_curing),\n 'GRASS_Cheney_98': ros_grass_cheney(weather_df, params_grass),\n 'FOREST_Mk5': ros_forest_mk5(weather_df, params_mk5),\n 'FOREST_Vesta': ros_forest_vesta(weather_df, params_vesta),\n 'FOREST_Vesta_FHR': ros_forest_vesta_fhr(weather_df, params_vesta_fhr),\n 'FOREST_Vesta_KT': ros_forest_vesta_kt(weather_df, params_vesta),\n }\n\n model_outputs = {} # model name as key, dataframes as val\n\n models_run = 0\n for key, val in 
selected_models.items():\n if val:\n model_outputs[key] = MODELS[key]\n models_run += 1\n\n time_elapsed = dt.datetime.now()-start\n print(f'{models_run} models run in {time_elapsed}')\n return model_outputs", "def cases(\n case_id,\n institute,\n reruns,\n finished,\n causatives,\n research_requested,\n rerun_monitor,\n is_research,\n status,\n within_days,\n json,\n):\n adapter = store\n\n models = []\n if case_id:\n case_obj = adapter.case(case_id=case_id)\n if case_obj:\n models.append(case_obj)\n else:\n LOG.info(\"No case with id {}\".format(case_id))\n\n else:\n models = adapter.cases(\n collaborator=institute,\n reruns=reruns,\n rerun_monitor=rerun_monitor,\n finished=finished,\n has_causatives=causatives,\n research_requested=research_requested,\n is_research=is_research,\n status=status,\n within_days=within_days,\n )\n models = [case_obj for case_obj in models]\n if len(models) == 0:\n LOG.info(\"No cases could be found\")\n\n if json:\n click.echo(json_lib.dumps(models, default=jsonconverter))\n return\n\n for model in models:\n pp(model)", "def sampling(X_train, y_train, X_test, y_test, sampling_instances, model_instances, func):\n\n metrics = []\n # go through all sampling methods\n for sampling_instance in sampling_instances:\n if sampling_instance is not None:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = sampling_instance.fit_resample(X=X_train, y=y_train)\n else:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = X_train, y_train\n\n # Go through all models\n for model_instance in model_instances:\n print('fitting model ' + str(model_instances.index(model_instance) + 1) + ' on ' +\n str(len(model_instances)), \" : \", type(model_instance).__name__)\n model_instance.fit(X_train1, y_train1)\n metrics.append(func(y_test, model_instance.predict(X_test)))\n\n models = [type(model).__name__ for model in model_instances]\n methods = [type(sampling).__name__ for sampling in sampling_instances]\n index = [model + '_' + method for model in models for method in methods]\n\n #Dry run of compute metrics with return_index=True to get indexes\n columns = func(y_test, y_test, average='weighted', return_index=True)\n metrics = pd.DataFrame(metrics, columns=columns, index=index)\n\n return metrics", "def run_cases(state_data, modifiable_states_ind, acis_state_limits, cases, times, schedule):\n all_dpa_case_results = {}\n all_dea_case_results = {}\n all_psmc_case_results = {}\n all_fp_case_results = {}\n\n dpa_diagnostic_results = {}\n dea_diagnostic_results = {}\n psmc_diagnostic_results = {}\n fp_diagnostic_results = {}\n\n all_dpa_ok = {}\n all_dea_ok = {}\n all_psmc_ok = {}\n all_fp_ok = {}\n\n n = -1\n loop_cases = deepcopy(cases)\n zero_case = cases[0]\n\n max_cases = len(cases)\n\n while len(loop_cases) > 0:\n n = n + 1\n case = loop_cases.pop(0)\n if np.mod(n, 10) == 0:\n print('Running case {} out of {}'.format(n + 1, max_cases))\n\n # Generate new schedule data for CCD and FEP count\n mod_states_ccd_count = deepcopy(state_data['ccd_count'])\n mod_states_ccd_count[modifiable_states_ind] = mod_states_ccd_count[modifiable_states_ind] + np.array(case)\n ccd_count = np.array(list((zip(mod_states_ccd_count, mod_states_ccd_count)))).reshape((-1))\n\n mod_states_fep_count = deepcopy(state_data['fep_count'])\n 
mod_states_fep_count[modifiable_states_ind] = mod_states_fep_count[modifiable_states_ind] + np.array(case)\n fep_count = np.array(list((zip(mod_states_fep_count, mod_states_fep_count)))).reshape((-1))\n\n schedule['fep_count'] = fep_count\n schedule['ccd_count'] = ccd_count\n\n # Run the new profile\n dpa_case_results = run_profile(times, schedule, '1dpamzt', model_specs['1dpamzt'], model_init['1dpamzt'])\n dea_case_results = run_profile(times, schedule, '1deamzt', model_specs['1deamzt'], model_init['1deamzt'])\n psmc_case_results = run_profile(times, schedule, '1pdeaat', model_specs['1pdeaat'], model_init['1pdeaat'])\n fp_case_results = run_profile(times, schedule, 'fptemp', model_specs['fptemp'], model_init['fptemp'])\n\n # Determine the maximum temperatures for this case\n max_dpa = get_max_dwell_mvals(dpa_case_results['1dpamzt'], state_data)\n max_dea = get_max_dwell_mvals(dea_case_results['1deamzt'], state_data)\n max_psmc = get_max_dwell_mvals(psmc_case_results['1pdeaat'], state_data)\n max_fp = get_max_dwell_mvals(fp_case_results['fptemp'], state_data)\n\n # Store these cases (will delete later if bad)\n all_dpa_case_results[case] = max_dpa\n all_dea_case_results[case] = max_dea\n all_psmc_case_results[case] = max_psmc\n all_fp_case_results[case] = max_fp\n\n # Evaluate the current case against all models\n dpa_ok = evaluate_one_case_for_one_msid(acis_state_limits['1dpamzt'], all_dpa_case_results[zero_case],\n max_dpa)\n dea_ok = evaluate_one_case_for_one_msid(acis_state_limits['1deamzt'], all_dea_case_results[zero_case],\n max_dea)\n psmc_ok = evaluate_one_case_for_one_msid(acis_state_limits['1pdeaat'], all_psmc_case_results[zero_case],\n max_psmc)\n fp_ok = evaluate_one_case_for_one_msid(acis_state_limits['fptemp'], all_fp_case_results[zero_case],\n max_fp)\n all_ok = dpa_ok & dea_ok & psmc_ok & fp_ok\n\n if not np.all(all_ok):\n print('Case {} is bad'.format(case))\n all_dpa_case_results.pop(case)\n all_dea_case_results.pop(case)\n all_psmc_case_results.pop(case)\n all_fp_case_results.pop(case)\n\n first_change = case.index(1)\n\n if all_ok[modifiable_states_ind[first_change]] is not True:\n # Eliminate all other cases that use the failing case\n original_len = len(loop_cases)\n loop_cases = [c for c in loop_cases if c[first_change] != 1]\n new_len = len(loop_cases)\n max_cases = max_cases - (original_len - new_len)\n\n else:\n all_dpa_ok[case] = dpa_ok\n all_dea_ok[case] = dea_ok\n all_psmc_ok[case] = psmc_ok\n all_fp_ok[case] = fp_ok\n\n # Store results for later inspection\n dpa_diagnostic_results[case] = {'times': dpa_case_results['1dpamzt'].times,\n 'mvals': dpa_case_results['1dpamzt'].mvals}\n dea_diagnostic_results[case] = {'times': dea_case_results['1deamzt'].times,\n 'mvals': dea_case_results['1deamzt'].mvals}\n psmc_diagnostic_results[case] = {'times': psmc_case_results['1pdeaat'].times,\n 'mvals': psmc_case_results['1pdeaat'].mvals}\n fp_diagnostic_results[case] = {'times': fp_case_results['fptemp'].times,\n 'mvals': fp_case_results['fptemp'].mvals}\n\n diagnostic_results = {'1dpamzt': dpa_diagnostic_results,\n '1deamzt': dea_diagnostic_results,\n '1pdeaat': psmc_diagnostic_results,\n 'fptemp': fp_diagnostic_results}\n\n case_results = {'1dpamzt': all_dpa_ok,\n '1deamzt': all_dea_ok,\n '1pdeaat': all_psmc_ok,\n 'fptemp': all_fp_ok,\n 'ok_cases': all_dpa_ok.keys()} # Only OK cases are kept for all models\n\n return case_results, diagnostic_results", "def model_performance_summary(df, data_all_df, cov_buck, fisc_calender, mod_start, pred_start, summ_start, summ_end):\n 
var = \"pv_yr\" \n agg_col = \"LINE_ORDERS\"\n select_cols = [ \"LINE_ORDERS_ACT\", \"Prediction_Trf\", \"LEAD_MNTS_ACT\", \"LD_UNCLAIMED_ACT\", \"LD_DRIVE_ACT\", \n \"IDV_LEAD_MNTS_Pred\", \"IDV_LD_UNCLAIMED_Pred\", \"IDV_LD_DRIVE_Pred\",\n \"IDV_LEAD_MNTS_ACT\", \"IDV_LD_UNCLAIMED_ACT\", \"IDV_LD_DRIVE_ACT\",\n \"HELPER_MNTS_ACT\", \"HP_UNCLAIMED_ACT\", \"HP_DRIVE_ACT\",\n \"IDV_HELPER_MNTS_Pred\", \"IDV_HP_UNCLAIMED_Pred\", \"IDV_HP_DRIVE_Pred\",\n \"IDV_HELPER_MNTS_ACT\", \"IDV_HP_UNCLAIMED_ACT\", \"IDV_HP_DRIVE_ACT\",\n \"HELPER_OVR_ACT\", \"HPN_UNCLAIMED_ACT\", \"HPN_DRIVE_ACT\",\n \"IDV_HELPER_OVR_Pred\", \"IDV_HPN_UNCLAIMED_Pred\", \"IDV_HPN_DRIVE_Pred\",\n \"IDV_HELPER_OVR_ACT\", \"IDV_HPN_UNCLAIMED_ACT\", \"IDV_HPN_DRIVE_ACT\"\n ]\n\n for i in select_cols:\n if str(i) not in data_all_df.columns:\n data_all_df[str(i)] = np.nan\n\n cov_grp_mm = dp.cov_check(\"RSS_MM\", df, cov_buck, agg_col, fisc_calender, summ_start, summ_end)\n cov_mm = dp.cov_check(\"MM\", df, cov_buck, agg_col, fisc_calender, summ_start, summ_end)\n \n sample = data_all_df[data_all_df.Variable_Type == var].copy()\n sample = pd.merge(sample, cov_grp_mm[[\"RSS_MM\", \"CAT\"]])\n sample.CAT = sample.CAT.astype(int)\n sample[\"MAPE\"] = abs(sample[\"LINE_ORDERS_ACT\"]-sample[\"Prediction_Trf\"])/sample[\"LINE_ORDERS_ACT\"]\n bins= [-1, 0.1, 0.15, 0.20, 0.30, 0.50, 1.00, np.inf]\n labels = [1, 2, 3, 4, 5, 6, 7]\n sample[\"MAPE_BIN\"] = pd.cut(sample['MAPE'], bins=bins, labels=labels)\n wk_list = fisc_calender[fisc_calender.FISC_WK_OF_MTH_ID >= pred_start].FISC_WK_OF_MTH_ID.unique()\n ly_start = fisc_calender[fisc_calender.FISC_WK_OF_MTH_ID == wk_list[4]].LY_FISC_WK_OF_MTH_ID.values\n ly_end = fisc_calender[fisc_calender.FISC_WK_OF_MTH_ID == wk_list[16]].LY_FISC_WK_OF_MTH_ID.values\n train = sample[((sample.FISC_WK_OF_MTH_ID >= mod_start) & (sample.FISC_WK_OF_MTH_ID < pred_start))]\n test_in_train = sample[((sample.FISC_WK_OF_MTH_ID >= ly_start[0]) & (sample.FISC_WK_OF_MTH_ID <= ly_end[0]))]\n # The 4 th index of the list has the prediction start date after excluding the 4 week hold out period\n # for analysis purpose we are considereing only 13 weeks of test period\n test = sample[((sample.FISC_WK_OF_MTH_ID >= wk_list[4]) & (sample.FISC_WK_OF_MTH_ID <= wk_list[16]))]\n fy20 = sample[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n \n ####################################################################################\n \n df_grp_summary = fy20.groupby([\"RSS\"]).sum()[\"LINE_ORDERS_ACT\"].reset_index()\n df_grp_summary[\"LINE_ORDERS_PERC\"] = df_grp_summary[\"LINE_ORDERS_ACT\"]/df_grp_summary[\"LINE_ORDERS_ACT\"].sum()\n ####################################################################################\n \n df_sample_list = []\n df_sample_list = cu.mape_stability(train, \"Train CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(test_in_train, \"Test_in_train CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(test, \"Test CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(fy20, \"FY20 CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n stability = pd.concat(df_sample_list)\n stability.reset_index(inplace = True)\n ####################################################################################\n \n df_sample_res = cu.mape_stability_2d(fy20, \"FISC_MTH_NBR\", \"CAT\")\n grp_sum = 
fy20.groupby(\"FISC_MTH_NBR\").sum()[\"LINE_ORDERS_ACT\"].reset_index()\n monthly_stability_check = df_sample_res.merge(grp_sum)\n ####################################################################################\n \n mape_cov = cu.mape_stability_2d(fy20, \"CAT\", \"MAPE_BIN\")\n ####################################################################################\n \n mape_grp = cu.mape_stability_2d(fy20, \"RSS\", \"MAPE_BIN\")\n mape_grp[\"TYPE\"] = \"MAPE Buckets\"\n ####################################################################################\n \n mape_cov_grp = cu.mape_stability_2d(fy20, \"RSS\", \"CAT\")\n mape_cov_grp[\"TYPE\"] = \"Covariance Category\"\n ####################################################################################\n \n fy20 = cu.sh_mape(fy20)\n \n sh_summary = []\n for grp in fy20[\"RSS\"].unique():\n sample_df = fy20[fy20[\"RSS\"] == grp]\n\n sh_summary = cu.mape_stability(sample_df, grp, \"MAPE_LD_BIN\", \"MAPE_LD\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, grp, \"MAPE_HP_BIN\", \"MAPE_HP\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, grp, \"MAPE_HPN_BIN\", \"MAPE_HPN\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_summary_grp = pd.concat(sh_summary)\n sh_summary_grp.reset_index(inplace = True)\n\n ####################################################################################\n\n sh_summary = []\n var_list = [\"pv_yr\", \"pv_roll_5wk\"]\n for var in var_list:\n sample_df = data_all_df[data_all_df.Variable_Type == var].groupby([\"FISC_WK_OF_MTH_ID\", \"FISC_YR_NBR\", \"MM\"]).sum()[select_cols].reset_index()\n sample_df = pd.merge(sample_df, cov_mm[[\"MM\", \"CAT\"]])\n sample_df = sample_df[sample_df.CAT>0]\n sample_df = sample_df[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n sample_df = cu.sh_mape(sample_df)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_LDA_BIN\", \"MAPE_LD_ACT\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPA_BIN\", \"MAPE_HP_ACT\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPNA_BIN\", \"MAPE_HPN_ACT\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_LD_BIN\", \"MAPE_LD\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HP_BIN\", \"MAPE_HP\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPN_BIN\", \"MAPE_HPN\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_mm_summary_df = pd.concat(sh_summary)\n sh_mm_summary_df.reset_index(inplace = True)\n\n ####################################################################################\n\n sh_summary = []\n for var in data_all_df.Variable_Type.unique():\n sample_df = data_all_df[data_all_df.Variable_Type == var].groupby([\"FISC_WK_OF_MTH_ID\", \"FISC_YR_NBR\", \"MM\"]).sum()[select_cols].reset_index()\n sample_df[select_cols] = sample_df[select_cols].replace({0:np.nan})\n sample_df = pd.merge(sample_df, cov_mm[[\"MM\", \"CAT\"]])\n sample_df = sample_df[sample_df.CAT>0]\n sample_df = sample_df[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n sample_df = cu.sh_mape(sample_df)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_LDAP_BIN\", \"MAPE_LD_ACT_PRED\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPAP_BIN\", \"MAPE_HP_ACT_PRED\", \"TOTAL_HELPER_ACT\", sh_summary)\n 
sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPNAP_BIN\", \"MAPE_HPN_ACT_PRED\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_prop_summary = pd.concat(sh_summary)\n sh_prop_summary.reset_index(inplace = True)\n \n best_mthd = sh_prop_summary.groupby(['IDENTIFIER']).mean()['WEIGHTED MAPE'].reset_index().sort_values(by = [\"WEIGHTED MAPE\"], ascending = True)\n var_list = best_mthd[\"IDENTIFIER\"][:2].to_list()\n ####################################################################################\n\n sample_df = fy20.copy()\n sample_df = sample_df[sample_df.MM.str.contains(\"HD\")]\n sample_df = sample_df.groupby([\"FISC_WK_OF_MTH_ID\", \"MM\"]).sum()[[\"LINE_ORDERS_ACT\", \"Prediction_Trf\"]].reset_index()\n df_grp_all = sample_df.copy()\n df_grp_all[[\"LINE_ORDERS_ACT\"]] = df_grp_all[[\n \"LINE_ORDERS_ACT\"]].replace({0:np.nan})\n df_grp_all[\"MAPE_TA\"] = abs(df_grp_all[\"LINE_ORDERS_ACT\"]-df_grp_all[\"Prediction_Trf\"])/df_grp_all[\"LINE_ORDERS_ACT\"]\n bins= [-1, 0.1, 0.15, 0.20, 0.30, 0.50, 1.00, np.inf]\n labels = [1, 2, 3, 4, 5, 6, 7]\n df_grp_all[\"MAPE_TA_BIN\"] = pd.cut(df_grp_all['MAPE_TA'], bins=bins, labels=labels)\n\n var = \"pv_yr\" \n sample = data_all_df[data_all_df.Variable_Type == var].copy()\n \n sample = sample.merge(cov_grp_mm[[\"RSS_MM\", \"CAT\"]])\n sample = sample[sample.CAT > 0]\n sample = sample[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n \n \n sample = data_all_df[data_all_df.Variable_Type == var].copy()\n sample = pd.merge(sample, cov_mm[[\"MM\", \"CAT\"]])\n sample = sample[sample.CAT > 0]\n sample = sample[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n fy20_mm = sample.groupby([\"FISC_WK_OF_MTH_ID\", \"MM\", \"CAT\"]).sum()[[\"LINE_ORDERS_ACT\", \"Prediction_Trf\"]].reset_index()\n fy20_mm[[\"LINE_ORDERS_ACT\" ]] = fy20_mm[[\"LINE_ORDERS_ACT\"]].replace({0:np.nan})\n fy20_mm[\"MAPE\"] = abs(fy20_mm[\"LINE_ORDERS_ACT\"]-fy20_mm[\"Prediction_Trf\"])/fy20_mm[\"LINE_ORDERS_ACT\"]\n bins= [-1, 0.1, 0.15, 0.20, 0.30, 0.50, 1.00, np.inf]\n labels = [1, 2, 3, 4, 5, 6, 7]\n fy20_mm[\"MAPE_BIN\"] = pd.cut(fy20_mm['MAPE'], bins=bins, labels=labels)\n \n df_sample_list = []\n df_sample_list = cu.mape_stability(fy20, \"FY20 RSS_MM\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(fy20_mm, \"FY20 MM\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_overall_summay = pd.concat(df_sample_list)\n df_overall_summay.reset_index(inplace = True)\n return df_grp_summary, stability, monthly_stability_check, mape_cov, mape_cov_grp, mape_grp, sh_mm_summary_df, sh_summary_grp, sh_prop_summary, df_overall_summay, var_list", "def __init__(self, year=None, month=None, day=None, map_type='Cases'):\n self.covid_df = pd.DataFrame([])\n self.geo_data = pd.DataFrame([])\n self.name_iso2_mapping = {}\n self.countries_centroids = pd.DataFrame([])\n \n try:\n self.date = datetime(year=year, month=month, day=day)\n except:\n print('Invalid/empty date entry (year, month, day take valid int inputs)! Date defaulted to today.')\n self.date = datetime.today()\n \n if self.date > datetime.today():\n print('Can\\'t input future date! 
Date defaulted to today.')\n self.date = datetime.today()\n \n if map_type not in ['Cases', 'Deaths']:\n sys.exit('Please specify either \"Cases\" or \"Deaths\" as map type!')\n else:\n self.map_type = map_type", "def observations(store, loqusdb, case_obj, variant_obj):\n chrom = variant_obj[\"chromosome\"]\n pos = variant_obj[\"position\"]\n ref = variant_obj[\"reference\"]\n alt = variant_obj[\"alternative\"]\n var_case_id = variant_obj[\"case_id\"]\n var_type = variant_obj.get(\"variant_type\", \"clinical\")\n\n composite_id = \"{0}_{1}_{2}_{3}\".format(chrom, pos, ref, alt)\n variant_query = {\n \"_id\": composite_id,\n \"chrom\": chrom,\n \"end_chrom\": variant_obj.get(\"end_chrom\", chrom),\n \"pos\": pos,\n \"end\": variant_obj[\"end\"],\n \"length\": variant_obj.get(\"length\", 0),\n \"variant_type\": variant_obj.get(\"sub_category\", \"\").upper(),\n \"category\": variant_obj[\"category\"],\n }\n obs_data = loqusdb.get_variant(variant_query) or {}\n if not obs_data:\n LOG.debug(\"Could not find any observations for %s\", composite_id)\n obs_data[\"total\"] = loqusdb.case_count()\n return obs_data\n\n user_institutes_ids = set([inst[\"_id\"] for inst in user_institutes(store, current_user)])\n\n obs_data[\"cases\"] = []\n institute_id = variant_obj[\"institute\"]\n for i, case_id in enumerate(obs_data.get(\"families\", [])):\n if i > 10:\n break\n if case_id == var_case_id:\n continue\n # other case might belong to same institute, collaborators or other institutes\n other_case = store.case(case_id)\n if not other_case:\n # Case could have been removed\n LOG.debug(\"Case %s could not be found in database\", case_id)\n continue\n other_institutes = set([other_case.get(\"owner\")])\n other_institutes.update(set(other_case.get(\"collaborators\", [])))\n\n if user_institutes_ids.isdisjoint(other_institutes):\n # If the user does not have access to the information we skip it\n continue\n document_id = parse_document_id(chrom, str(pos), ref, alt, var_type, case_id)\n other_variant = store.variant(document_id=document_id)\n # If the other variant is not loaded we skip it\n if not other_variant:\n continue\n obs_data[\"cases\"].append(dict(case=other_case, variant=other_variant))\n\n return obs_data", "def generate_stats(city, month, day):\n\n print('-'*40)\n df = load_data(city, month, day)\n\n print(get_human_readable_choice(city, month, day))\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n return df", "def observations(store, loqusdb, case_obj, variant_obj):\n composite_id = (\"{this[chromosome]}_{this[position]}_{this[reference]}_\"\n \"{this[alternative]}\".format(this=variant_obj))\n obs_data = loqusdb.get_variant({'_id': composite_id}) or {}\n obs_data['total'] = loqusdb.case_count()\n\n obs_data['cases'] = []\n institute_id = variant_obj['institute']\n for case_id in obs_data.get('families', []):\n if case_id != variant_obj['case_id'] and case_id.startswith(institute_id):\n other_variant = store.variant(variant_obj['variant_id'], case_id=case_id)\n other_case = store.case(case_id)\n obs_data['cases'].append(dict(case=other_case, variant=other_variant))\n\n return obs_data", "def generate_stats(case: Case, min_train_len=1, max_train_len=10, iterations=1_000):\n one_way_stats: Dict[int, float] = {}\n two_way_stats: Dict[int, float] = {}\n\n for i in tqdm(range(min_train_len, max_train_len + 1)):\n one_way_stats[i] = simulate(i, iterations, one_way_approach, case)\n two_way_stats[i] = simulate(i, iterations, two_ways_approach, case)\n\n return 
one_way_stats, two_way_stats", "def default_methods(hist, model, base_year, method_choice=None, **kwargs):\n\n if kwargs.get(\"ratio_method\") is None:\n kwargs[\"ratio_method\"] = \"reduce_ratio_2080\"\n if kwargs.get(\"offset_method\") is None:\n kwargs[\"offset_method\"] = \"reduce_offset_2080\"\n if kwargs.get(\"luc_method\") is None:\n kwargs[\"luc_method\"] = \"reduce_offset_2150_cov\"\n if kwargs.get(\"luc_cov_threshold\") is None:\n kwargs[\"luc_cov_threshold\"] = 10\n\n y = str(base_year)\n try:\n h = hist[base_year]\n m = model[base_year]\n except KeyError:\n h = hist[y]\n m = model[y]\n dH = (h - m).abs() / h\n f = h / m\n dM = (model.max(axis=1) - model.min(axis=1)).abs() / model.max(axis=1)\n neg_m = (model < 0).any(axis=1)\n pos_m = (model > 0).any(axis=1)\n zero_m = (model == 0).all(axis=1)\n go_neg = ((model.min(axis=1) - h) < 0).any()\n cov = hist.apply(coeff_of_var, axis=1)\n\n df = pd.DataFrame(\n {\n \"dH\": dH,\n \"f\": f,\n \"dM\": dM,\n \"neg_m\": neg_m,\n \"pos_m\": pos_m,\n \"zero_m\": zero_m,\n \"go_neg\": go_neg,\n \"cov\": cov,\n \"h\": h,\n \"m\": m,\n }\n ).join(model.index.to_frame())\n\n if method_choice is None:\n method_choice = default_method_choice\n\n ret = df.apply(method_choice, axis=1, **kwargs)\n ret.name = \"method\"\n return ret, df", "def prepare_estimation(model_params_init_file_name, lower, upper):\n\n # Read in data and init file sources\n model_params_df = pd.read_pickle(model_params_init_file_name)\n model_params_df[\"lower\"] = lower\n model_params_df[\"upper\"] = upper\n\n return model_params_df", "def __init__(\n self, api,\n ):\n self._api = api\n self._api_response = self._api.find_cases(range=\"all\", sort=[])\n self._all30_dict = {}\n self._all60_dict = {}\n self._all90_dict = {}\n\n self._data_frame_30days = None\n self._data_frame_60days = None\n self._data_frame_90days = None\n self._data_frame_counts = None\n self._dataset = None", "def derive_model_coverage(self) -> None:\n self.get_model_column_description_coverage()\n self.get_model_test_coverage()", "def __init__(self):\n\n self.name = None\n self.summary = None\n self.cases = []", "def __createCovidModelInstance(self, *args, **kwargs):\n try:\n if 'MODEL_TYPE' in kwargs:\n if kwargs['MODEL_TYPE'] == CovidModel.AGGREGATE_CASES_DECEASED:\n covidModel = CovidAggregateTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n \n if kwargs['MODEL_TYPE'] == CovidModel.MONTHLY_CASES_DECEASED:\n covidModel = CovidMonthlyTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n\n if kwargs['MODEL_TYPE'] == CovidModel.PAST_30_DAYS:\n covidModel = CovidDailyTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n\n if kwargs['MODEL_TYPE'] == CovidModel.MESSAGES:\n covidModel = CovidMessages() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return\n\n if kwargs['MODEL_TYPE'] == CovidModel.LOCATIONS:\n covidModel = CovidLocationInfo() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return\n\n print (\"CovidMessages.__createCovidModelInstance() - did not receive a recognizable model type - no model object instantiated. 
Args received = \",kwargs)\n return None\n except:\n print (\"CovidMessages.__createCovidModelInstance() - unexpected error: \",sys.exc_info()[0])\n return None", "def fit(self) -> None:\n\n levels = self.levels\n TSs = GetAggregateTS(self.data).aggregate(levels)\n models = {}\n residuals = {}\n fcsts = {}\n for bm in self.baseModels:\n model_name = bm.model_name\n if model_name is None: # only residuals and fcsts are provided\n models[bm.level] = None\n residuals[bm.level] = bm.residuals\n fcsts[bm.level] = bm.fcsts\n else:\n m = BASE_MODELS[model_name](\n data=TSs[bm.level],\n params=bm.model_params,\n )\n m.fit()\n models[bm.level] = m\n self.models = models\n self.info_fcsts = fcsts\n self.info_residuals = residuals", "def run_scenario(self):\n self.initialize_random_map()\n self.visualize_environment('initial')\n self.get_tower_target_coverages()\n self.solve_environment()\n self.visualize_environment('solved')", "def __init__(self, env, allocate, pop, params, unit_info):\n \n # Set up references to other model objects\n self._env = env\n self._allocate = allocate\n self._pop = pop\n self._params = params\n \n # Set up unit information (and add 'HOME' as a possible patient location)\n self.unit_info = unit_info\n self.audit_unit_list =list(unit_info['unit']) + ['HOME']\n self.audit_subunit_list =list(unit_info['subunit']) + ['HOME']\n self.unit_info.set_index('subunit', inplace=True)\n \n # Set up audit DataFrames\n self.patient_cols = [\n 'day', 'negative', 'positive', 'recovered', 'inpatient', 'died', 'total', 'unallocated']\n self.patient_audit = pd.DataFrame(columns=self.patient_cols)\n\n self.displaced_cols = ['day', 'number', 'add_time_min', 'add_time_1Q', 'add_time_median', \n 'add_time_3Q', 'add_time_max', 'add_time_total']\n self.displaced_audit = pd.DataFrame(columns=self.displaced_cols) \n\n self.unit_cols = [\n 'day', 'master_unit', 'subunit', 'negative', 'positive', 'recovered', 'neg+rec', \n 'total', 'negative_shifts', 'positive_shifts']\n self.unit_audit = pd.DataFrame(columns=self.unit_cols)\n \n self.inpatient_cols = ['day', 'master_unit', 'subunit', 'inpatients']\n self.inpatient_audit = pd.DataFrame(columns=self.inpatient_cols)", "def _load_results(self, filename):\n cr = CaseReader(filename)\n case = cr.system_cases.get_case(-1)\n\n loaded_outputs = case.outputs._prom2abs['output']\n for name in loaded_outputs:\n self.outputs[name] = {}\n self.outputs[name]['value'] = case.outputs[name]\n self.outputs[name]['units'] = None\n self.outputs[name]['shape'] = case.outputs[name].shape[1:]\n\n # TODO: Get time, state, and control options from the case metadata\n self.time_options = TimeOptionsDictionary()\n self.state_options = {}\n self.control_options = {}\n\n states = [s.split(':')[-1] for s in loaded_outputs if s.startswith('states:')]\n controls = [s.split(':')[-1] for s in loaded_outputs if s.startswith('controls:')]\n\n for s in states:\n self.state_options[s] = StateOptionsDictionary()\n\n for c in controls:\n self.control_options[c] = ControlOptionsDictionary()", "def setUp(self):\r\n super(CategoryStatsTests, self).setUp()\r\n self.cs_overview = CategoryStats(self.overview_map, [self.overview_dm],\r\n [\"Treatment\", \"DOB\"])", "def __init__(self):\n self.model = None\n self.joined_datasets = None\n self.id_col = None\n self.val_col = None\n self.pop_col = None\n self.total_population_per_unit = None\n self.centroids_of_areal_data = None\n self.prepared_data = None\n self.unknown_area_id = None\n\n # Parameters\n self.lags = None\n self.step = None\n 
self.min_no_of_observations = None\n self.max_search_radius = None", "def setUp(self):\n# shapes = [(10,20,30), (40,50,60), (100,100,100)]\n# dtypes = [np.float32, np.float64, np.complex64, np.complex128] \n# self.cases = [{'shape':s, 'dtype':t} for s in shapes for t in dtypes]\n self.cases = make_test_cases.cases", "def setUp(self):\r\n super(CorrelationStatsTests, self).setUp()\r\n self.cs = CorrelationStats([self.overview_dm, self.overview_dm])", "def transform(self):\n select_columns = ['Province/State','Lat','Long']\n # df = global_cases.copy()\n global_cases = self.collect_case()\n df = global_cases.copy()\n df.drop(select_columns,axis=1, inplace=True)\n df = df[df['Country/Region'].apply(lambda x: x in Africa)].T.reset_index()\n df.columns = df.iloc[0]\n df.rename(columns={'Country/Region':'Date'},inplace=True)\n df.drop([0],axis=0,inplace=True)\n \n df['Date'] = pd.to_datetime(df['Date']).dt.strftime('%m-%d-%Y')\n # sort to have the latest update on top row\n df.sort_values('Date',ascending=False, inplace=True)\n african_cases = df.copy()\n\n return african_cases", "def initialise_health_model(self):\n self.water_container = model.water.WaterReserve(S['reserves-water'])\n self.food_container = model.food.FoodReserve(S['reserves-food'])\n #\n self.people = {}\n for person in 'abcde':\n property_name = 'person-{0}-properties'.format(person) if not self.options.alternate else \\\n 'person-{0}-alt-properties'.format(person)\n self.people[person] = model.person.Person(**S[property_name])", "def get_timing_data(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"get_timing_data\")\n\n # initialize the timing values in the dictionary\n case_dict[\"model_cost\"] = \"undefined\"\n case_dict[\"model_throughput\"] = \"undefined\"\n\n timing_dir = case_dict[\"CASEROOT\"] + \"/timing\"\n last_time = \"\"\n if os.path.exists(timing_dir):\n # check if timing files exists\n timing_file_pattern = \"cesm_timing.\" + case_dict[\"CASE\"]\n last_time = max(\n glob.glob(timing_dir + \"/\" + timing_file_pattern + \".*\"),\n key=os.path.getctime,\n )\n if last_time:\n if \"gz\" in last_time:\n # gunzip file first\n with gzip.open(last_time, \"rb\") as fname:\n file_content = fname.readlines()\n else:\n with open(last_time, \"r\") as fname:\n file_content = fname.readlines()\n\n # search the file content for matching lines\n model_cost = [line for line in file_content if \"Model Cost:\" in line]\n model_throughput = [\n line for line in file_content if \"Model Throughput:\" in line\n ]\n\n case_dict[\"model_cost\"] = \" \".join(model_cost[0].split())\n case_dict[\"model_throughput\"] = \" \".join(model_throughput[0].split())\n\n return case_dict" ]
[ "0.6291318", "0.5654613", "0.54874974", "0.54647356", "0.5348363", "0.5319384", "0.52680993", "0.5202522", "0.5159842", "0.5042738", "0.5006515", "0.49600613", "0.49330205", "0.4911519", "0.48664477", "0.4865999", "0.4861922", "0.48572075", "0.4801666", "0.4765036", "0.47423086", "0.47339323", "0.4722308", "0.47117984", "0.469243", "0.46899062", "0.46861538", "0.4681074", "0.4680222", "0.46452397" ]
0.6120037
1
Initialize R0 from cases data.
def init_R0(self: T, *args, raises=False, **kwargs) -> T: for report in self._reports: call_safe_if(raises, report, report.init_R0, *args, **kwargs) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_from_data(self, data):\n self.data = data\n self.norm_data()", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def __init__(self, RA, DEC, AST, I_data, Q_data, U_data, V_data):\n self.RA = RA\n self.DEC = DEC\n self.AST = AST\n self.I_data = I_data\n self.Q_data = Q_data\n self.U_data = U_data\n self.V_data = V_data\n self.fit_p = None\n self.e_fit_p = None\n self.good_fit = None\n self.bad_reasons =\"\"", "def init_from_test_data(self, data_dir):\n assert(os.path.exists(self.intrinsic_path))\n self.test_data_dir = os.path.join(self.base_dir, data_dir)\n rgb_num = int(open(self.intrinsic_path).readline().rstrip())\n total_cam_num = len(glob.glob(os.path.join(self.test_data_dir, '*')))\n depth_cam_num = total_cam_num - rgb_num\n self.rgb_cam_list = range(rgb_num)\n self.rgb_of_depth_cam_list = range(rgb_num - depth_cam_num, rgb_num)\n self.counter = len(glob.glob(os.path.join(self.test_data_dir, '0', '*.jpg')))\n return self", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def init_step(self,u0):\n\n assert len(self.levels) >=1\n assert len(self.levels[0].u) >=1\n\n # pass u0 to u[0] on the finest level 0\n P = self.levels[0].prob\n self.levels[0].u[0] = P.dtype_u(u0)", "def __init__(self, data):\n\n self.__data = np.array(data, dtype=object)\n\n # Get number of rows / columns\n self.__nrows, self.__ncols = self.__data.shape\n\n # Construct the cells\n grid = []\n for i in range(self.__nrows):\n row = []\n for j in range(self.__ncols):\n dcol = self.__data[i, j]\n if dcol is None:\n row.append(cell(i, j, black=True))\n elif dcol == 0:\n row.append(cell(i, j))\n else:\n bot, rig = dcol\n if bot is not None:\n cs = []\n for ii in range(i + 1, self.__nrows):\n if self.__data[ii, j] != 0:\n break\n cs.append((ii, j))\n bot = (bot, tuple(cs))\n if rig is not None:\n cs = []\n for jj in range(j + 1, self.__ncols):\n if self.__data[i, jj] != 0:\n break\n cs.append((i, jj))\n rig = (rig, tuple(cs))\n row.append(cell(i, j, bottom=bot, right=rig))\n grid.append(row)\n self.__tuple = tuple(tuple(row) for row in grid)", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def test_init_data(self):\n r = self.RNA(\"ucagg\")\n # no longer preserves case\n self.assertEqual(r, \"UCAGG\")", "def __init__(self, x0, r):\n self.x, self.r = x0, r", "def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. / math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. 
/ math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)", "def zero(klass):\n return RatTerm(RatNum(0, 1), 0)", "def __init__(self, starting_point=-1):\n self.i_read = starting_point\n self.data = [['fake_chip_id', 'fake_version'],\n [96, 110, 203, 104, 50, 0, 29, 145, 59, 215, 208, 11,\n 232, 38, 42, 255, 249, 255, 172, 38, 10, 216, 189, 16],\n [75],\n [129, 1, 0, 16, 44, 3, 30],\n [76, 60, 128, 129, 49, 128, 94, 120]]", "def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))", "def init_from_data(self, calib_data_dir_name):\n self.dst_dir = os.path.dirname(calib_data_dir_name)\n self.calib_data_dir = calib_data_dir_name\n self.counter = len(glob.glob(os.path.join(self.calib_data_dir, '0', 'cam0', '*.jpg')))\n cam_dirs = sorted(glob.glob(os.path.join(self.dst_dir, 'calib_data/*')))\n self.rgb_cam_list = [int(os.path.basename(e)) for e in cam_dirs if os.path.isdir(e)]\n return self", "def __init__(self, lower, upper):\n\n self.vector = self._initialise(lower, upper)\n self.seeds = 0\n self.year = 0\n self.valid = True", "def reset_initial_seed(self, x0: Dict[str, ArrayType]) -> None:\n self.x0 = self.opt.decision_variables.dict2vec(x0)", "def __init__(self):\r\n # sample ID -> (ref individual count,\r\n # {size -> (estimate, std err, ci_low, ci_high)})\r\n self._data = {}", "def initialize(self):\n # FIX: INITIALIZE PROCESS INPUTS??\n for mech, value in self.initial_values.items():\n mech.initialize(value)", "def initialize(self):\n x0 = [None]*3\n for i in self.free:\n if i=='lengthscale':x0[0]=self.free[i]\n if i=='variance':x0[1]=self.free[i]\n if i=='gstds':x0[2]=self.free[i]\n x0 = [x for x in x0 if x is not None]\n return np.array(x0)", "def test_null_from_data(self):\n # define prior data that R will use\n data = array([[ 4., 6., 5., 4., 3.],\n [ 5., 3., 11., 17., 2.],\n [ 8., 4., 4., 13., 0.],\n [ 0., 0., 9., 8., 10.],\n [ 19., 8., 3., 8., 1.],\n [ 5., 1., 14., 4., 3.],\n [ 2., 7., 3., 4., 0.],\n [ 5., 5., 5., 14., 6.],\n [ 2., 3., 6., 0., 5.],\n [ 6., 1., 1., 2., 2.]])\n Rseed = 0\n tpk = 10\n actual_out = null_from_data(data, tpk, Rseed=Rseed)\n expected_out = array([[ 18., 1., 0., 3., 0.],\n [ 2., 10., 10., 3., 22.],\n [ 1., 8., 4., 1., 12.],\n [ 10., 1., 5., 1., 2.],\n [ 8., 0., 14., 28., 5.],\n [ 5., 16., 3., 11., 0.],\n [ 0., 2., 8., 2., 0.],\n [ 0., 12., 2., 1., 3.],\n [ 0., 0., 6., 2., 5.],\n [ 8., 2., 0., 0., 3.]])\n self.assertEqual(expected_out, actual_out)", "def __init__(self, matlab_data, params: RatDay_Preprocessing_Parameters) -> None:\n self.params = params\n print(\"Reformating data\")\n self.raw_data = self.reformat_data(matlab_data)\n print(\"Cleaning data\")\n self.data = self.clean_recording_data(self.raw_data)\n print(\"Calculating run periods\")\n self.velocity_info = self.calculate_velocity_info()\n print(\"Calculating place fields\")\n np.random.seed(0)\n self.place_field_data = self.calculate_place_fields()\n print(\"DONE\")", "def initialize(self):\n V = np.zeros([len(self.R), 1])\n policy = np.ones([len(self.R), 4]) * 0.25\n return V, policy", "def __init__(self, data=None):\n self.cnt = defaultdict(int)\n if data != None:\n for val in data:\n self + val", "def _init_A(self, Dl, d, Dr, initial, state=0):\n if initial == 'randR':\n return np.array(2 * np.random.rand(Dl, d, Dr) - 1, order='C')\n elif initial == 'randC':\n return np.array((2 * np.random.rand(Dl, d, Dr) - 1) + 1j * (2 * np.random.rand(Dl, d, Dr) - 1), order='C')\n elif initial == 'X':\n A = np.zeros((Dl, d, Dr))\n 
A[0, :, 0] = 1. / np.sqrt(d)\n return np.array(A, order='C')\n else: # == 'Z'\n A = np.zeros((Dl, d, Dr))\n A[0, state, 0] = 1\n return np.array(A, order='C')", "def __init__(self, m,r,v):\n self.m = m\n self.r = r\n self.v = v\n self.rv = np.array([r,0,0,v])", "def __init__(self, geo_model=None):\n self.rex_bytes = bytearray()\n self.n_bytes = 0\n\n self.data_id = 0\n self.geo_model = geo_model", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Rscanpose, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.Rscanpose is None:\n self.Rscanpose = [0.] * 645\n else:\n self.Rscanpose = [0.] * 645", "def _initialize(self, X, resp, *arg, **kwarg):\n n_samples, _ = X.shape\n\n if self.mv_stat:\n weights, params = _estimate_mv_stat_parameters(\n self.stat, X, resp) # self.reg_covar\n else:\n weights, params = _estimate_1d_stat_parameters(\n self.stat, X, resp) # self.reg_covar\n weights /= n_samples\n\n self.weights_ = (weights if self.weights_init is None\n else self.weights_init)\n self.params_ = params if self.params_init is None else self.params_init", "def __init__(self, one_channel_raw_data, number_of_subsamples,\n number_of_levels):\n self._raw_data = one_channel_raw_data\n self._number_of_subsamples = None\n self._number_of_levels = None\n self._down_sample_factor = None\n self._quantization_factor = None\n self._subsamples = None\n self._quantized_subsamples = None\n\n self._set_number_of_subsamples(number_of_subsamples)\n self._set_number_of_levels(number_of_levels)\n self._compute_quantized_subsamples()" ]
[ "0.59208477", "0.56282413", "0.55791855", "0.55220836", "0.5463353", "0.5461818", "0.54125756", "0.54075485", "0.54050404", "0.5377448", "0.5374158", "0.5368257", "0.53553694", "0.53524685", "0.5322364", "0.53080964", "0.53036493", "0.5297168", "0.5273072", "0.5272792", "0.52609044", "0.52580684", "0.52324545", "0.5212459", "0.51998425", "0.5195873", "0.51861465", "0.5155174", "0.51483226", "0.51323813" ]
0.5742876
1
Create a data frame from simulations by extracting all columns in the given list at the selected times.
def report_time_columns_data( self, columns, times=(7, 15, 30, 60), info=None, dtype=None ) -> pd.DataFrame: locs = [t - self._niter - 1 for t in times] col_names = [*map(col_name, columns)] columns = [*map(to_column, columns)] rows = [] n_times = len(times) n_cols = len(columns) for m in self: m = m.clinical() dates = m.dates[locs] row = [None] * (n_times * n_cols) rows.append(row) for i, col in enumerate(columns): values = col(m, dates) row[i::n_cols] = values index = [m.region.id for m in self] col_tuples = ((x, y) for x in times for y in col_names) col_index = pd.MultiIndex.from_tuples(col_tuples) data = pd.DataFrame(rows, index=index, columns=col_index) if dtype: data = data.astype(dtype) prefix = pd.DataFrame( [[m.R0, m.region.name, m.region.population] for m in self], index=index, columns=[("info", "R0"), ("info", "region.name"), ("info", "region.population")], ) data = pd.concat([prefix, data], axis=1) data.columns = pd.MultiIndex.from_tuples(data.columns) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_results_frames(results_df, times):\n # coherence check:\n for t in times:\n assert results_df['t'].iloc[0] <= t <= results_df['t'].iloc[-1], \\\n 'time={} is outside the results_df range'.format(t)\n\n frames = pd.DataFrame(columns=results_df.columns)\n frames.loc[:, 't'] = times\n ignore_columns = {'t'}\n for col in results_df.columns:\n if col not in ignore_columns:\n vals_at_times = np.interp(times, results_df['t'], results_df[col])\n frames.loc[:, col] = vals_at_times\n return frames", "def timingColumns(self, results):\n \n pass", "def column_select(df,returnList = [\"x\",\"y\"]):\n df = df.sort_values(by = 'frame_id')\n return [ list(df[k]) for k in returnList]", "def request_df(request_list, row_template, col_headers):\n \n rows = [row for request_rows in request_list for row in request_rows.rows]\n outer = []\n \n for i in range(len(rows)):\n inner = []\n \n for j in range(len(row_template)):\n \n inner.append(getattr(rows[i].duals[j], row_template[j]))\n \n outer.append(inner)\n \n return pd.DataFrame(outer, columns=col_headers)", "def load_simulations_from_npy(dirs, file_name, columns, times):\n dfs = []\n for d in dirs:\n df = pd.DataFrame(np.load(os.path.join(d, file_name)),\n columns=columns)\n df['t0'] = np.load(os.path.join(d, times))\n dfs.append(df)\n return dfs", "def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]", "def report_time_rows_data(self, columns, times=None, dtype=None) -> pd.DataFrame:\n times = [normalize_times(m, times) for m in self]\n index = [(ref, t) for ref, ts in zip(map(self.ref, self), times) for t in ts]\n data = {\"ref\": [ref for ref, _ in index], \"date\": [t for _, t in index]}\n\n for col in columns:\n get_column = to_column(col)\n column_data = []\n\n for i, report, ts in zip(itertools.count(), self._reports, times):\n if report.is_valid:\n values = get_column(report.model.clinical(), ts)\n column_data.extend(values)\n\n data[col_name(col)] = np.array(column_data)\n\n # Prepare result\n out = pd.DataFrame(data)\n if dtype is not None:\n blacklist = (\"ref\", \"date\")\n dtypes = {col: dtype for col in out.columns if col not in blacklist}\n out = out.astype(dtypes)\n return out", "def extract_cols(lst,indexes):\n return (lst[i] for i in indexes)", "def simulation_to_frame_times(simulation, t, traj_cols=['replicate']):\n sim = simulation\n\n # First, get the discrete window\n\n left_lext = sim.groupby(traj_cols)['start_time'].min()\n start_i = np.searchsorted(t, left_lext)\n # start times that searchsort past the end means that the entire trajectory\n # occured *after* all frames were over, so it's unobservable\n t_rnan = np.append(t, np.nan)\n leftmost_frame = pd.Series(t_rnan[start_i], index=left_lext.index)\n right_rext = sim.groupby(traj_cols)['end_time'].max()\n # normally would be \" - 1\", but we insert nan at start of t instead\n # to automagically catch the case that the rightmost wait end searchsorts\n # left of *all* frames, in which case the trajectory is unobservable\n end_i = np.searchsorted(t, right_rext)\n t_lnan = np.insert(t, 0, np.nan)\n rightmost_frame = pd.Series(t_lnan[end_i], index=right_rext.index)\n\n # Second, build the frames df\n\n # exclude useless waits\n sim = sim[sim['start_time'] < t[-1]]\n sim = sim[sim['end_time'] > t[0]]\n states = sim[traj_cols + ['state']].copy()\n start_times = sim['start_time'].values\n states['start_i'] = np.searchsorted(t, start_times)\n frames = states.groupby(traj_cols + ['start_i'])['state'].last()\n frames = 
frames.reset_index().set_index(traj_cols)\n frames['start_time'] = t[frames['start_i'].values]\n frames['window_start'] = leftmost_frame\n frames['window_end'] = rightmost_frame\n # Before adding the end times we need to remove neighboring wait\n # times that end up corresponding to the same state.\n # HACK: create a special index that tracks which unique trajectory each\n # wait time belongs to, then blindly diff state col, and use the new\n # traj_id col to tell when a lack of diff is due to a redundant wait time\n # (same state as prev one) and when it's just due to a new trajectory.\n traj_id = frames.groupby(traj_cols)['start_time'].first() * 0\n traj_id.iloc[:] = np.arange(len(traj_id))\n frames['traj_id'] = traj_id\n # Need to add a single element start to get size match. First wait time by\n # definition can't be redundant, so add traj_id which will never matches so\n # it can't get marked as _is_redundant.\n frames['traj_id2'] = np.insert(frames['traj_id'].values[:-1], 0, -1)\n states = frames['state'].unique()\n state_id = np.arange(len(states))\n state_map = {s: state_id[i] for i, s in enumerate(states)}\n frames['state_id'] = frames['state'].replace(state_map)\n # insert, so the *second* of the redundant guys is deleted\n state_diff = np.insert(np.diff(frames['state_id']), 0, 1)\n redundant = (state_diff == 0) & (frames['traj_id'] == frames['traj_id2'])\n frames = frames.loc[~redundant]\n # Use the same hack as for the redundant guys, but we need the nan on the\n # other side this time, to match what we'll do to the end time col\n traj_id = frames.groupby(traj_cols)['start_time'].first() * 0\n traj_id.iloc[:] = np.arange(len(traj_id))\n frames['traj_id'] = traj_id\n frames['traj_id2'] = np.append(frames['traj_id'].values[1:], np.nan)\n frames['end_time'] = np.append(frames['start_time'].values[1:], np.nan)\n is_last = frames['traj_id'] != frames['traj_id2']\n frames.loc[is_last, 'end_time'] = frames.loc[is_last, 'window_end']\n # get rid of hacky temporary columns\n del frames['traj_id']\n del frames['traj_id2']\n\n return frames", "def collect_data(default_path, elects_d, widths, lmbdas):\n cols = [\"elect\", \"d\", \"lmbda\", \\\n \"Dx\", \"Dy\", \"Dz\", \"Dxy\", \"Dyz\", \"Dxz\", \"D3d\"]\n index = np.arange(len(lmbdas) * len(widths) * len(elects_d))\n df = DataFrame(index=index, columns=cols)\n\n cnt = 0\n for el in sorted(elects_d.keys()):\n for d in widths:\n for l in lmbdas:\n data = [elects_d[el], d, l]\n fname = \"diffusivity_%s_d%i_l%i.log\" % (el, d, l)\n fpath = default_path + fname\n try:\n f = open(fpath, \"r\").readlines()\n for line in f:\n if \"1d in SI\" in line:\n data.extend(np.array(line.split()[3:]).astype(float))\n if \"2d in SI\" in line:\n data.extend(np.array(line.split()[3:]).astype(float))\n if \"3d in SI\" in line:\n data.extend(np.array(line.split()[3:]).astype(float))\n except FileNotFoundError:\n print(\"File not found: %s.\" % fpath)\n\n if len(data) == 10:\n df.loc[cnt] = data\n cnt += 1\n return df", "def frames_df(task, conditions):\n\n frames_df = pd.DataFrame([])\n\n for subject in subjects:\n for condition in conditions:\n evs = load_evs(subject, task, condition)\n df = pd.DataFrame(evs) # load evs into df\n df['run'] = [0, 1]\n df['subject'] = subject\n df['condition'] = condition\n df['frames'] = condition_frames(evs)\n frames_df = frames_df.append(df, ignore_index=True)\n\n return frames_df", "def calculate_stats(time_list):\n time_array = np.array(time_list)\n\n median = np.median(time_array)\n mean = np.mean(time_array)\n std_dev = 
np.std(time_array)\n max_time = np.amax(time_array)\n min_time = np.amin(time_array)\n quantile_10 = np.quantile(time_array, 0.1)\n quantile_90 = np.quantile(time_array, 0.9)\n \n basic_key = [\"median\",\"mean\",\"std_dev\",\"min_time\",\"max_time\",\"quantile_10\",\"quantile_90\"]\n basic_value = [median,mean,std_dev,min_time,max_time,quantile_10,quantile_90]\n\n dict_basic = dict(zip(basic_key, basic_value))\n\n \n return pd.DataFrame(dict_basic, index = [0])", "def create_static_features(data: pd.DataFrame, column_names: List[str]) -> pd.DataFrame:\n id_columns = _get_id_columns(data)\n columns_to_select = id_columns + column_names\n return data[columns_to_select]", "def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)", "def get_exp_columns(self, wanted_exps):\n # Get the dict.\n exp_id_str = get_id_str(self.req_exps)\n table_name = '%s_datatable_%s' % (self.app_label, exp_id_str)\n # Make experiment unique.\n if not db_table_exists(table_name):\n self.create_base_table(table_name)\n column_names = list(get_columnnames(table_name))\n divisor_col = None\n dividend_col = None\n #exp_cols = []\n #potential_columns = []\n # Iterate over all column names, and make a list of those that hold data.\n datacol_pattern = re.compile(r'_\\d+$')\n dividend_pattern = r'_{}$'.format(str(wanted_exps['dividend'].id))\n divisor_pattern = r'_{}$'.format(str(wanted_exps['divisor'].id))\n for column_name in column_names:\n if datacol_pattern.search(column_name) is not None:\n if divisor_col is None:\n if re.search(divisor_pattern, column_name):\n divisor_col = column_name\n if dividend_col is None:\n if re.search(dividend_pattern, column_name):\n dividend_col = column_name\n \n sql = \"SELECT %s, %s FROM %s;\" % (dividend_col, divisor_col, table_name)\n return from_db(sql, fetch_as='tuple')", "def movie_tbl(band,tranges,verbose=0,framesz=0,retries=20):\n\tif verbose:\n\t\tprint_inline('Populating exposure time table.')\n\ttstarts,tstops,exptimes=[],[],[]\n\tfor trange in tranges:\n\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\ttstarts.append(t0)\n\t\t\ttstops.append(t1)\n\t\t\texptimes.append(dbt.compute_exptime(band,[t0,t1],\n\t\t\t\t\t\t\tverbose=verbose,retries=retries))\n\tcol1 = pyfits.Column(name='tstart',format='E',array=np.array(tstarts))\n\tcol2 = pyfits.Column(name='tstop',format='E',array=np.array(tstops))\n\tcol3 = pyfits.Column(name='exptime',format='E',array=np.array(exptimes))\n\tcols = pyfits.ColDefs([col1,col2,col3])\n\ttbl = pyfits.new_table(cols)\n\n\treturn tbl", "def select_columns(data, columns):\n return data.loc[:, columns]", "def select_columns(variables):\n return relevant_raw_data_df[variables]", "def build_matrix(path_screen, nmols, list_models):\r\n df = pd.DataFrame(columns=list_models, index=nmols)\r\n ntarget = os.path.split(path_screen)[1]\r\n df.index.name = ntarget\r\n df = df.fillna(0)\r\n for num_db in os.listdir(path_screen):\r\n for ff in os.listdir(os.path.join(path_screen, num_db)):\r\n if ff.split('.')[0] in list_models:\r\n pscreenfile = os.path.join(path_screen, num_db, ff)\r\n match_compounds = [int(mol.strip()) for mol in open(pscreenfile).readlines()]\r\n for compound in match_compounds:\r\n df.at[compound, ff.split('.')[0]] = 1\r\n df = df.fillna(0)\r\n return df", "def generate_timeseries(data_list, 
setname=\"MagneticFields\"):\n full_data = TimeSeriesList()\n for seg in sorted(data_list):\n hfile = h5py.File(data_list[seg], \"r\")\n full_data.append(retrieve_data_timeseries(hfile, \"MagneticFields\"))\n hfile.close()\n return full_data", "def get_tap_events(user_id: str, user_session_id: str) -> DataFrame:\n full_df = pd.DataFrame()\n for tap_file in tap_file_names:\n columns = tap_file_important_columns[tap_file]\n data = read_file(user_id, user_session_id, tap_file)\n time_data = pd.DataFrame()\n time_data['Start'] = data[columns[0]]\n time_data['End'] = data[columns[-2]]\n time_data['Type'] = tap_file_to_feature_name[tap_file]\n full_df = pd.concat([full_df, time_data], ignore_index = True)\n return full_df.dropna().sort_values(by = 'Start').reset_index(drop = True)", "def get_data(list_data_tuples):\n \n \n benchmark_symbol=list_data_tuples[0][0]; # First element is the benchmark symbol\n \n #print benchmark_symbol\n \n df=pd.DataFrame(index=list_data_tuples[0][1]['data'].index) # First dataframe index is nothing but date\n \n for tpl in list_data_tuples:\n #print tpl[0]\n df_temp = pd.DataFrame(tpl[1]['data']['Adj. Close'],index=tpl[1]['data'].index)\n df_temp = df_temp.rename(columns={'Adj. Close': tpl[0]}) # tpl[0] is the symbol\n #print df_temp,tpl[0]\n df = df.join(df_temp)\n if tpl[0] == benchmark_symbol: # drop dates SPY did not trade\n df = df.dropna(subset=[benchmark_symbol])\n\n df=df.dropna(axis=0) # This drops any NaN values especially if the stock price has no information\n \n return df", "def create_loadshape_pmult_dataframe_for_simulation(settings: SimulationSettingsModel):\n df = create_loadshape_pmult_dataframe(settings)\n simulation_index = create_datetime_index_from_settings(settings)\n return df.loc[simulation_index]", "def test_8_data_fetching_multiple(self):\n d = {'WorkoutType': 'Running',\\\n 'Minutes': 10.0,\\\n 'CaloriesBurned': 100.9}\n _ = self.fitness.insert_in_database(d, date_time=datetime.utcnow()+timedelta(days=1)+timedelta(minutes=1))\n d = {'WorkoutType': 'Jogging',\\\n 'Minutes': 10.0,\\\n 'CaloriesBurned': 100.9}\n _ = self.fitness.insert_in_database(d, date_time=datetime.utcnow()+timedelta(days=1)+timedelta(minutes=2))\n d = {'WorkoutType': 'Dancing',\\\n 'Minutes': 10.0,\\\n 'CaloriesBurned': 100.9}\n _ = self.fitness.insert_in_database(d, date_time=datetime.utcnow()+timedelta(days=1)+timedelta(minutes=4))\n d1 = date.today() + timedelta(days=1)\n dt1 = datetime(d1.year, d1.month, d1.day) + timedelta(hours=8)\n result, success = self.fitness.get_columns_given_range(dt1,dt1+timedelta(days=1))\n\n self.assertEqual(len(result), 3)\n self.assertTrue(success)\n self.assertEqual(result[0]['WorkoutType'],'Running')\n self.assertEqual(result[1]['WorkoutType'],'Jogging')\n self.assertEqual(result[2]['WorkoutType'],'Dancing')", "def _get_data(self, gas, loc, voltage, speed, trial):\n cols = []\n for g in gas:\n for l in loc:\n try:\n (sub, files) = self._get_sensor_col_files(g, l)\n except OSError as e:\n print('{}\\n Keeping calm and carrying on.'.format(e))\n continue\n for v in voltage:\n for s in speed:\n end = \"_board_setPoint_%s_fan_setPoint_%s_mfc_setPoint_%sppm_p%s\" % (\n self.SensorVoltages[v],\n self.FanSpeeds[s],\n self.GasNames[g],\n self.AltLocs[l])\n filtered = [f.split('/')[-1] for f in files if f.endswith(end)]\n if not filtered:\n if self._args['verbose']:\n print('No valid files found for \"%s\", skipping!' 
% sub)\n continue\n timeStamp = [filt.split('_', 1)[0] for filt in filtered]\n date = [time.strptime(ts, '%Y%m%d%H%M') for ts in timeStamp]\n date = [time.strftime('%Y-%m-%d %H:%M', d) for d in date]\n filtered = [os.path.join(sub, f) for f in filtered]\n for i, filt in enumerate(filtered):\n j = i + 1\n if j in trial:\n p = os.path.sep.join([self.dataloc_prefix,\n self.data_location,\n filt])\n\n cols.append(SensorColumn(data_location=p,\n gas=self.GasNames[g],\n loc=self.Locs[l],\n voltage=self.SensorVoltages[v],\n speed=self.AltFanSpeeds[s],\n trial=j,\n _args=self._args))\n\n if self._args['verbose']:\n print('\\nSelected %i single trial SensorColumns!' % len(cols))\n return cols", "def extract_columns(tracer_file, columns=None):\n table = pd.DataFrame()\n\n if columns is None:\n columns = tables_config.columns\n\n for column in columns:\n col_low = column.lower()\n table[col_low] = tracer_file[column]\n\n # rescale if needed\n scale = tables_config.column_scales.get(col_low)\n if scale is not None:\n table[col_low] *= scale\n\n return table", "def runSimulationDF(cls, **kwargs):\r\n fittedTS = NamedTimeseries(namedArray=cls.runSimulationArr(**kwargs))\r\n return fittedTS.to_dataframe()", "def test_load_points_times_columns():\n cols = leiap.get_points_times(warn='disable').columns.tolist()\n assert 'search_time' in cols\n assert 'dist' in cols", "def gen_test(self):\n count = 500\n dates = []\n value = []\n now = datetime.datetime.now()\n # print(now)\n for i in range(count):\n some_date = now - datetime.timedelta(\n days=randrange(30),\n hours=randrange(20),\n# minutes=randrange(60),\n# seconds=randrange(60),\n )\n some_value = randint(1, 9999)\n dates.append(some_date.strftime(\"%Y-%m-%d %H:%M:%S\"))\n value.append(some_value)\n d = {'Time':dates, 'Value':value}\n return pd.DataFrame(d)", "def times_by_station(self, data, cols):\r\n times = pd.DataFrame(columns=['Id', 'times', 'station'])\r\n # every dictionary has a key value pair therefore for each key value pair we want to update the value\r\n for c in range(1,len(cols)):\r\n x = np.array([cols[c].split('_')[1]])\r\n d = {'Id': data.iloc[:, 0], 'times': data.loc[:,cols[c]], 'station': np.repeat(x, len(data.loc[:, cols[c]]))}\r\n p1 = pd.DataFrame(d)\r\n times = times.append(p1, ignore_index=True)\r\n return times" ]
[ "0.6223679", "0.5822695", "0.54896504", "0.5359699", "0.5312869", "0.52989984", "0.52454346", "0.52427393", "0.52333844", "0.52250195", "0.521591", "0.51858145", "0.516235", "0.51598847", "0.51354957", "0.51323926", "0.5114523", "0.50899047", "0.50888115", "0.50779295", "0.50757307", "0.5059494", "0.5046536", "0.5041101", "0.50371116", "0.5035763", "0.5026208", "0.5020833", "0.5008241", "0.49815878" ]
0.60906285
1
Convert column identifier into a function with signature ``(model, dates) -> data`` used to retrieve data from models.
def to_column(col):\n    if callable(col):\n        return col\n\n    def fn(model, dates):\n        return model[col + ":dates"].loc[dates]\n\n    return fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _column_picker(attr, objects):\n getter = attr\n if not isinstance(getter, collections.Callable):\n getter = functools.partial(_getter, attr=attr)\n data = list(map(getter, objects))\n return data", "def column_expression(self, col):\n return getattr(func, self.impl.as_binary)(\n func.ST_Transform(col, self.app_srid),\n type_=self.__class__.impl(srid=self.app_srid)\n # srid could also be -1 so that the SRID is deduced from the\n # WKB data\n )", "def _convert_fn(dataset: tf.data.Dataset) -> tf.data.Dataset:\n spec = dataset.element_spec\n if isinstance(spec, collections.abc.Mapping):\n return dataset.map(lambda observation: (observation[\"x\"], observation[\"y\"]))\n else:\n return dataset.map(lambda x, y: (x, y))", "def _column_name_getter(self, obj):\n if is_namedtuple(obj):\n # Use namedtuple fields as columns\n def get(obj):\n return list(obj._fields)\n\n elif is_dict_like(obj):\n # Use dictionary keys as columns\n def get(obj):\n return list(obj.keys())\n\n elif is_list_like(obj):\n # Use either predefined columns, or\n # generate range-based column values\n predefined = list(self._columns)\n\n def get(obj):\n count = len(obj)\n if predefined:\n if count > len(predefined):\n raise ValueError(\n f\"Data had more than defined {len(predefined)} columns\"\n )\n return predefined[:count]\n else:\n return list(range(count))\n\n else:\n # Fallback to single column\n def get(_):\n return self._columns[:1] if self._columns else [0]\n\n return get", "def get_data_column ( self, object ):\n return getattr( object, self.name )", "def create_columns(data_frame, function):\n fields = [\"veh_id\", \"mpr\", \"flow\", \"distance\"]\n data_frame[fields] = data_frame.apply(function, axis=1)\n return data_frame", "def _convert_column(self, col, function):\n col_new = []\n for x in self[col]:\n if x == \"\":\n col_new.append(None)\n else:\n col_new.append(function(x))\n self.df[col] = col_new", "def my_dff_model(a: int, b: int) -> int:\n if b == 1:\n return a\n else:\n return b", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Date()", "def select (a_data,a_column) :\n return a_data[a_column]", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def col_name(function, study_best_params):\n\n # Optuna string of indicator\n function_name = function.split(\"(\")[0].replace(\".\", \"_\")\n\n # Optuna string of parameters\n params = re.sub('[^0-9a-zA-Z_:,]', '', str(study_best_params)).replace(\",\", \"_\").replace(\":\", \"_\")\n\n # Concatenate name and params to define\n col = f\"{function_name}_{params}\"\n return col", "def map(\n self,\n arg: Union[Dict, Callable],\n /,\n na_action: Literal[\"ignore\", None] = None,\n dtype: Optional[dt.DType] = None,\n columns: Optional[List[str]] = None,\n ):\n\n if columns is None:\n return super().map(arg, na_action, dtype)\n self._check_columns(columns)\n\n if len(columns) == 1:\n idx = self._data.type().get_child_idx(columns[0])\n return ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[idx].dtype,\n self._data.child_at(idx),\n True,\n ).map(arg, na_action, dtype)\n else:\n if not isinstance(arg, dict) and dtype is None:\n (dtype, _) = dt.infer_dype_from_callable_hint(arg)\n dtype = dtype or self._dtype\n\n def func(*x):\n return arg.get(tuple(*x), None) if isinstance(arg, dict) else arg(*x)\n\n cols = []\n for n in columns:\n idx = self._data.type().get_child_idx(n)\n cols.append(\n ColumnFromVelox.from_velox(\n 
self.device,\n self.dtype.fields[idx].dtype,\n self._data.child_at(idx),\n True,\n )\n )\n\n res = Scope.default._EmptyColumn(dtype)\n for i in range(len(self)):\n if self.is_valid_at(i):\n res._append(func(*[col[i] for col in cols]))\n elif na_action is None:\n res._append(func(None))\n else:\n res._append(None)\n return res._finalize()", "def _make_json_converter(self, column_name):\n return lambda x: {column_name: json.loads(x)}", "def dependent_cols():\n\n return ...", "def getDateColumn(self): \n return self.datecol", "def _transform_col(col, val):\n if dict_values(col.types)[0] in ('int', 'real'):\n return col.asnumeric(), float(val)\n\n # for enums, character, etc...\n return col, val", "def table_function(model_cls):\n return \"Events\"", "def df(x):\n raise NotImplementedError", "def separate_date(x):\n x[\"SALE DAY\"] = x.apply(get_day, axis=1)\n x[\"SALE MONTH\"] = x.apply(get_month, axis=1)\n x[\"SALE YEAR\"] = x.apply(get_year, axis=1)", "def _get_metric(df: DataFrame,\r\n prob_mod: pyspark.ml.Model,\r\n metric: str) ->Tuple[DataFrame, str]:\r\n functions_dict = {'probability': _get_probability}\r\n df, metric_col = functions_dict[metric](df, prob_mod)\r\n return df, metric_col", "def datetime_columns(df, feature):\r\n df['day'] = pd.to_datetime(df[feature]).dt.day\r\n df['month'] = pd.to_datetime(df[feature]).dt.month\r\n df['year'] = pd.to_datetime(df[feature]).dt.year\r\n return df", "def get_unc(param, data):\n if param + \"_unc\" in data.colnames:\n return data[param + \"_unc\"].data\n else:\n return None", "def build_model_fn(self):", "def _column_value_getter(self, obj):\n if is_namedtuple(obj):\n # Get values using properties\n def get(obj, column):\n return getattr(obj, column, None)\n\n elif is_dict_like(obj):\n # Get values using dictionary keys\n def get(obj, column):\n return obj.get(column)\n\n elif is_list_like(obj):\n # Get values using list indexes\n def get(obj, column):\n col = self.column_location(column)\n try:\n return obj[col]\n except IndexError:\n return None\n\n else:\n # Fallback to single column\n def get(obj, _):\n return obj\n\n return get", "def __call__(self) -> pd.Series:\n\n converter = {\"datetime\": self._convert_datetime,\n \"int\": self._convert_int,\n \"bool\": self._convert_bool}\n\n func = converter.get(self.parent.dtype, self._convert)\n\n return func()", "def pd_col_myfun(df=None, col=None, pars={}):\n from source.util_feature import save, load\n prefix = 'col_myfun`'\n if 'path_pipeline' in pars : #### Inference time LOAD previous pars\n prepro = load(pars['path_pipeline'] + f\"/{prefix}_model.pkl\" )\n pars = load(pars['path_pipeline'] + f\"/{prefix}_pars.pkl\" )\n pars = {} if pars is None else pars\n #### Do something #################################################################\n df_new = df[col] ### Do nithi\n df_new.columns = [ col + \"_myfun\" for col in df.columns ]\n cols_new = list(df_new.columns)\n\n prepro = None\n pars_new = None\n\n\n\n ###################################################################################\n if 'path_features_store' in pars and 'path_pipeline_export' in pars:\n save(prepro, pars['path_pipeline_export'] + f\"/{prefix}_model.pkl\" )\n save(cols_new, pars['path_pipeline_export'] + f\"/{prefix}.pkl\" )\n save(pars_new, pars['path_pipeline_export'] + f\"/{prefix}_pars.pkl\" )\n\n col_pars = {'prefix' : prefix , 'path' : pars.get('path_pipeline_export', pars.get('path_pipeline', None)) }\n col_pars['cols_new'] = {\n 'col_myfun' : cols_new ### list\n }\n return df_new, col_pars", "def 
date(self, kind='DataFrame'):\n \"\"\"\n :param kind: \n :return: \n \"\"\"\n if kind.lowe() == 'dataframe':\n return self.df['Fecha']\n elif kind.lower() == 'array':\n return np.array(self.df['Fecha'])\n else:\n print('kind must be equal to: \"DataFrame\" or \"Array\"')", "def _xform_columns(self, columns, xforms):\n new_columns = []\n\n for col in columns:\n if col in xforms:\n if callable(xforms[col]):\n new_columns.append(xforms[col](col))\n else:\n new_columns.append(xforms[col])\n else:\n new_columns.append(col)\n\n return new_columns", "def get_model_function(name: str):\n if name not in REGISTRY:\n names = \", \".join(sorted(REGISTRY.keys()))\n raise KeyError(f\"Model {name} not found in registry. Available names: {names}\")\n return REGISTRY[name]" ]
[ "0.5211079", "0.5170846", "0.5079418", "0.50410134", "0.49328077", "0.4853944", "0.47984892", "0.4774336", "0.47474742", "0.47217572", "0.47154763", "0.46920162", "0.4674512", "0.4672522", "0.46535575", "0.46521583", "0.46270508", "0.46200037", "0.46099943", "0.45872426", "0.4581501", "0.45722646", "0.45626953", "0.4532432", "0.45153093", "0.45047864", "0.45035845", "0.4502593", "0.44901377", "0.44901145" ]
0.7271065
0
Return a column name from its representation.
def col_name(col):\n    if isinstance(col, str):\n        return col\n    return col.__name__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name", "def get_name(self):\n return self.col_name", "def _valid_column(column_name):\n return str(column_name)", "def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")", "def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")", "def column_name(self) -> Optional[str]:\n return pulumi.get(self, \"column_name\")", "def columnName(self):\n return self.__column", "def short_column(name : str) -> str:\n return name.split(\"-\")[1]", "def columnToStr(self, column):\n return unicode(self.__getattribute__(HistoryEntry.COLUMN_STR[column]))", "def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name", "def typed_column(self) -> str:\n\n return \"{}:{}\".format(self.name, self.dtype)", "def _validate_column_name(col_name : str) -> str:\n\n if col_name[0].isdigit():\n return f'\"{col_name}\"'\n return col_name", "def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)", "def _get_column(self, name):\r\n return self.column(name)", "def format_column(self, column, use_table=False, name=None, table_name=None):\n if name is None:\n name = column.name\n if not getattr(column, 'is_literal', False):\n if use_table:\n return self.format_table(column.table, use_schema=False, name=table_name) + \".\" + self.__generic_obj_format(column, name)\n else:\n return self.__generic_obj_format(column, name)\n else:\n # literal textual elements get stuck into ColumnClause alot, which shouldnt get quoted\n if use_table:\n return self.format_table(column.table, use_schema=False, name=table_name) + \".\" + name\n else:\n return name", "def _get_column(cls, name):\r\n return cls._columns[name]", "def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)", "def get_column_def(self):\r\n db_type = self.db_type.format(\r\n self.key_type.db_type,\r\n self.value_type.db_type\r\n )\r\n return '{} {}'.format(self.cql, db_type)", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def encodeColumnName(self, column):\r\n return '\"{}\"'.format(column)", "def act_on_column_name(self, *, arg, value):\n assert isinstance(arg, (pl.DataFrame, type(None)))\n assert isinstance(value, str)\n return PolarsTerm(polars_term=pl.col(value), is_column=True)", "def column(self):\n return self[\"column\"]", "def _output_imei_column(self):\n if self._generate_check_digit:\n imei_col_name = sql.Identifier('imei_norm_with_check_digit')\n else:\n imei_col_name = sql.Identifier('imei_norm')\n return imei_col_name", "def get_column_name(self) -> str:\n if self.is_shared():\n assert self._shared_id is not None\n return self._shared_id\n else:\n return str(id(self))", "def _label(self, column):\n # XXX\n return column", "def column(self, name: str) -> Column:\n resolved_column = self.resolve_column_name(name)\n return Column(resolved_column)", "def capnp_field_name(self, 
field):\n name = field.name\n return as_identifier(name)", "def _get_column_from_name(cls, c_name: str):\n\n cls.__class_validation()\n for column in cls.__table__.columns:\n if c_name.lower().strip() == column.name:\n return column\n return None" ]
[ "0.6985993", "0.6985993", "0.6697623", "0.6697423", "0.66891664", "0.6680047", "0.6680047", "0.65051776", "0.6443857", "0.6407808", "0.64052784", "0.63633984", "0.6293007", "0.62478954", "0.6172242", "0.61684376", "0.6145761", "0.6128707", "0.6106663", "0.6092888", "0.60595965", "0.6033694", "0.6018737", "0.60078293", "0.5983832", "0.59232223", "0.5913354", "0.59089637", "0.5904508", "0.5844189" ]
0.7314486
0
Safely call a function that involves a report. If the function raises an error, log the error with the report.log_error() method.
def call_safe(*args, **kwargs):\n    report, func, *args = args\n    if not callable(func):\n        func = getattr(report, func)\n    try:\n        return func(*args, **kwargs)\n    except Exception as ex:\n        msg = f"{type(ex).__name__}: {ex}"\n        report.log_error(msg, code=ex)\n        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report(self) -> computation_base.Computation:\n return self._report_fn", "def report():\n pass", "def ReportError(text):\n raise IOError(text)", "def safe_run(self, function: Callable) -> Callable:\n\n def wrapper(*args, **kwargs) -> Any:\n result = None\n try:\n result = function(*args, **kwargs)\n except BaseException:\n self._new_error(ExceptionInfo(*sys.exc_info()))\n\n self._show_info()\n return result\n\n return wrapper", "def _clean_onerror(func, path, excinfo):\n print(\"%s encountered error when processing %s: %s\" % (func, path, excinfo))", "def report(self) -> Any:", "def safe_call(func):\r\n\r\n @wraps(func)\r\n def _func(*args, **kwargs):\r\n try:\r\n return func(*args, **kwargs)\r\n except GAEError, e:\r\n raise DatabaseError, DatabaseError(str(e)), sys.exc_info()[2]\r\n return _func", "def pytest_runtest_makereport(item, call):\n outcome = yield\n report = outcome.get_result()\n if report.when == \"call\":\n doc = getattr(getattr(item, \"function\", None), \"__doc__\", None)\n item.report_call = ReportResult(report=report, excinfo=call.excinfo, doc=doc)", "def __safe_call(f, a):\n try:\n return f(a)\n except Exception as e:\n return \"{}@{}({})\".format(a.__class__.__name__, id(a), e)", "def does_not_raise(self, function, *args, **kwargs):\n try:\n return function(*args, **kwargs)\n except Exception as e:\n self.log_error(\"{} did raise {}: {}\".format(\n function.__name__,\n type(e).__name__, e\n ), None)", "def safexec(self, func, params):\n try:\n func(params)\n except Exception as error:\n LOG.error(\"Failed to update the gauge, because \"\n \"of: %(error)s\" % {\"error\": error})", "def call_safe(self, callable, args = [], kwargs = {}):\r\n\r\n try:\r\n # calls the provided callable (method) with the\r\n # provided arguments and keyword arguments returning\r\n # the result to the caller method\r\n return callable(*args, **kwargs)\r\n except BaseException as exception:\r\n # in case there's an exception displays a warning\r\n # about the raised exception and the logs the current\r\n # stack so that the exception is traceable\r\n self.warning(exception)\r\n self.log_stack()", "def safe_run(func, args=[], kwargs={}):\n\n try:\n return func(*args, **kwargs)\n\n except Exception as e:\n\n err_msg = '=' * 80 + '\\n'\n err_msg += 'Time: %s\\n' % datetime.datetime.today()\n err_msg += 'Function: %s %s %s\\n' % (func, args, kwargs)\n err_msg += 'Exception: %s\\n' % e\n err_msg += str(traceback.format_exc()) + '\\n\\n\\n'\n\n with _lock:\n sys.stderr.write(err_msg + '\\n')\n log(err_msg)\n\n return _SafeRunError()", "def run(self, func: Callable, args: tuple) -> ExecReport:\n process = self._prepare(func, args)\n time_usage, stats, killed = self._measure(process)\n return self._report(args, time_usage, stats, killed)", "def report_callback(self, object, report, request):\n ...", "def test_log_extra_no_func(test_df):\n with pytest.raises(ValueError) as e:\n\n @log_step_extra()\n def do_nothing(df, *args, **kwargs):\n return df\n\n test_df.pipe(do_nothing)\n\n assert \"log_function\" in str(e)", "def check_errors(func):\n # noinspection PyBroadException\n @wraps(func)\n def new_func(*args, **kwargs):\n \"\"\"Wrapper around function with exception handling\"\"\"\n try:\n return func(*args, **kwargs)\n except Exception:\n import traceback\n\n traceback.print_exc()\n\n return new_func", "def exception_report(storage_backend=LocalErrorStorage(), output_format=\"html\", data_processor=None):\n\n def _exception_reports(func, *args, **kwargs):\n try:\n return func(*args, 
**kwargs)\n except Exception as e:\n exc_type, exc_value, tb = sys.exc_info()\n\n report_location = create_exception_report(exc_type, exc_value, tb, output_format, storage_backend=storage_backend, data_processor=data_processor)\n\n e = append_to_exception_message(e, tb, f\"[report:{report_location}]\")\n setattr(e, \"report\", report_location)\n\n # We want to raise the original exception:\n # 1) with a modified message containing the report location\n # 2) with the original traceback\n # 3) without it showing an extra chained exception because of this handling (`from None` accomplishes this)\n raise e from None\n\n return decorator(_exception_reports)", "def report_full(*args, **kwargs): # real signature unknown\n pass", "def test_log_extra_not_callable_func(test_df):\n with pytest.raises(ValueError) as e:\n\n @log_step_extra(1)\n def do_nothing(df, *args, **kwargs):\n return df\n\n test_df.pipe(do_nothing)\n\n assert \"callable\" in str(e)\n assert \"int\" in str(e)", "def safe_execution(log_message=None, default=None):\r\n if log_message is None:\r\n log_message = \"\"\r\n\r\n def inner_safe_execution(function):\r\n @functools.wraps(function)\r\n def wrap_inner_safe_execution(*args, **kwargs):\r\n try:\r\n return function(*args, **kwargs)\r\n except (KeyError, IndexError, APIException, Exception) as ex:\r\n message = f\"Exception ocurred in {function.__name__}.{log_message}\\nMore info: {ex}\"\r\n log(message, level=\"ERROR\", start=\"\\n\")\r\n return default\r\n\r\n return wrap_inner_safe_execution\r\n\r\n return inner_safe_execution", "def report():\n Robot.report()", "def set_report_callback(self, func, additive = True):\n if additive == True:\n if func is not None:\n old_func = self.report_func\n if old_func is not None:\n def glue(opt):\n old_func(opt)\n func(opt)\n self.report_func = glue\n else:\n self.report_func = func\n else:\n self.report_func = func", "def _check_call(*args, **kwargs):\r\n kwargs['stderr'] = open(devnull, 'w')\r\n return check_call(*args, **kwargs)", "def 报错(自身, func):\n 自身.错误处理 = func\n return func", "def report_error(error_text):\n client = google.cloud.logging.Client()\n logger = client.logger(\"automated_error_catch\")\n logger.log_text(error_text)", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def try_convert(self, func, *args, **kwargs):\n try:\n func(*args, **kwargs)\n except UnitsError, e:\n if self.show_xml_context_only:\n e.show_xml_context_only()\n if self.warn_only:\n e.warn = True\n e.level = logging.WARNING\n logging.getLogger('units-converter').log(e.level, unicode(e).encode('UTF-8'))", "def _reportError(self, failure):\r\n self._connection.reportError(failure.getErrorMessage())", "def __report(arguments, _):\n ignored_packages, other_packages, invalid_packages, skips = __gather_package_data(\n arguments\n )\n\n packages, invalids = worker.report(\n other_packages,\n maximum_repositories=arguments.maximum_repositories,\n maximum_rez_packages=arguments.maximum_rez_packages,\n )\n\n invalids.extend(invalid_packages)\n\n _print_ignored(ignored_packages)\n print(\"\\n\")\n _print_skips(skips, arguments.verbose)\n print(\"\\n\")\n _print_invalids(invalids, arguments.verbose)\n print(\"\\n\")\n _print_missing(packages, arguments.verbose)\n\n sys.exit(0)" ]
[ "0.5956291", "0.59040755", "0.5768061", "0.56814855", "0.5658449", "0.5552167", "0.55508804", "0.5523072", "0.54855865", "0.5481154", "0.5434947", "0.54322094", "0.54240316", "0.5408172", "0.5373893", "0.5319612", "0.5294064", "0.52918303", "0.52637404", "0.5253317", "0.5244302", "0.5241359", "0.52412456", "0.52332693", "0.5213972", "0.5195767", "0.5180423", "0.5160805", "0.5146129", "0.51357716" ]
0.70517135
0
Load calibrations into the database
def load(db):\n    r = db.truncate_table('calibrations')\n    print "Truncated calibrations table"\n\n    # Allowed columns\n    columns = ['class','asset_uid','start_date','serial','name','value','notes']\n\n    # Read in calibration data\n    file_mask = "repos/asset-management/calibration/*"\n    directory_list = glob.glob(file_mask)\n    for directory in directory_list:\n        file_list = glob.glob(directory + '/*.csv')\n        for ifile in file_list:\n            with open(ifile, 'rb') as csvfile:\n                print "Loading file: " + ifile\n                reader = csv.DictReader(csvfile)\n                for row in reader:\n                    row['class'] = directory.split('/')[-1]\n                    row['asset_uid'] = ifile.split('/')[-1].split('__')[0]\n                    row['start_date'] = ifile.split('/')[-1].split('__')[1].split('.')[0]\n                    data = remove_extraneous_columns(columns, row)\n                    save_cal(db,data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_calibrations(self):\r\n try:\r\n self.calibrations = load_calibrations(os.path.join(os.path.dirname(__file__),\r\n \"..\",\r\n \"calibrations\"))\r\n for key in self.calibrations:\r\n self.ui.calibration_selection.addItem(key,self.calibrations[key])\r\n idx = self.ui.calibration_selection.count()-1\r\n self.ui.calibration_selection.setItemData(idx,\r\n self.calibrations[key][\"tooltip\"],\r\n QtCore.Qt.ToolTipRole)\r\n self.ui.calibration_selection.setCurrentIndex(0)\r\n except (IOError, OSError) as e: \r\n logging.error(\"failure while reading calibration files: \" + \r\n e.strerror+\" at \"+e.filename)", "def init_calibration_databases(inst_lookups=None, procmode=None,\n ucals=None, upload=None):\n # Read the mdf_dict file and create an actual dict with the complete\n # paths to each of the MDF files\n try:\n masks = import_module('.maskdb', inst_lookups)\n mdf_dict = getattr(masks, 'mdf_dict')\n mdf_key = getattr(masks, 'mdf_key')\n except (ImportError, TypeError, AttributeError):\n mdf_dict = None\n mdf_key = None\n else:\n for k, v in mdf_dict.items():\n mdf_dict[k] = path.join(path.dirname(masks.__file__),\n 'MDF', v)\n caldb = UserDB(name=\"manual calibrations\", mdf_dict=mdf_dict,\n mdf_key=mdf_key, user_cals=ucals)\n\n upload_calibs = upload is not None and \"calibs\" in upload\n upload_science = upload is not None and \"science\" in upload\n for cls, db, kwargs in parse_databases():\n kwargs[\"procmode\"] = procmode\n if cls == RemoteDB:\n # Actually storing to a remote DB requires that \"store\" is set in\n # the config *and* the appropriate type is in upload\n kwargs[\"store_science\"] = kwargs[\"store_cal\"] and upload_science\n kwargs[\"store_cal\"] &= upload_calibs\n elif cls == LocalDB:\n kwargs[\"force_init\"] = False\n database = cls(db, name=db, **kwargs)\n caldb.add_database(database)\n return caldb", "def save_cal(db,data):\n id = find_cal(db,data['asset_uid'],data['start_date'],data['name'])\n if id == False:\n res = db.insert('calibrations', data)\n print \"Created calibration: \" +data['asset_uid'] +' Start Date: ' +str(data['start_date']) +' ' +data['name']\n else:\n #data['modified'] = time.strftime('%Y-%m-%d %H:%M:%S')\n res = db.update('calibrations', id, data)\n print \"Updated calibration: \" +data['asset_uid'] +' Start Date: ' +str(data['start_date']) +' ' +data['name']", "def read_calibr_table(self):\n filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Открыть', '.')[0]\n if filename and filename.lower().endswith('.csv'):\n self.set_calibr_table(filename)\n if self.state.ser:\n self.SpinFine.setEnabled(True)\n self.BtnSetFine.setEnabled(True)\n else:\n error_message(\"Файл не выбран или в формате .csv\")", "def set_calibr_table(self, filename: str):\n try:\n with open(filename, encoding='utf-8') as f:\n data = list(csv.reader(f, delimiter=';'))\n states_dict['L1'][1], states_dict['L2'][1], states_dict['L5'][1] = \\\n float(data[0][0].replace(',', '.')), float(data[0][1].replace(',', '.')), \\\n float(data[0][2].replace(',', '.'))\n for row in data[1:]:\n self.calibr_table[int(row[0])] = float(row[1].replace(',', '.'))\n self.SpinDACValue.setMinimum(min(self.calibr_table.keys()))\n self.SpinDACValue.setMaximum(max(self.calibr_table.keys()))\n except Exception as e:\n print(e)\n self.SpinDACValue.setMaximum(65535)\n self.SpinDACValue.setMinimum(0)\n self.calibr_table = dict()\n self.create_message()", "def load_cal(self):\n global DEBUG, dtParameterDesc\n if DEBUG:\n print(self.__class__.__name__+'.load_cal(): calibration is loaded')\n if 
'refatt' in self.parameters and 'refoutpower' in self.parameters:\n self.parameters['refatt'] = dtParameterDesc['refatt']['default']\n self.parameters['refoutpower'] = dtParameterDesc['refoutpower']['default']", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description = expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n db.session.add(expenditure)\n\n db.session.commit()", "def load(self):\n if self.verbosity:\n self.header(\"Loading data files\")\n\n model_list = [\n x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())\n ]\n\n if self.resume_mode:\n # get finished load command logs of last update\n prev_loaded = [\n x.file_name\n for x in self.log_record.called.filter(\n command='loadcalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} models already loaded.\".format(len(prev_loaded)))\n # remove these from model_list\n model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]\n\n if self.verbosity:\n model_list = progress.bar(model_list)\n for model in model_list:\n call_command(\n \"loadcalaccessrawfile\",\n model.__name__,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n app_name=self.app_name,\n )", "def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. 
\"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]", "def mo_parse_cal(self, filepath):\n\n if not filepath.endswith('.cal'):\n raise Exception(f'Not a .cal filetype.')\n\n with open(filepath) as file:\n data = file.read()\n\n for line in data.splitlines():\n key, value = line.split('=')\n key = key.strip()\n value = value.strip()\n\n if 'SERIALNO' in key:\n sn = value\n if self.serial != sn:\n raise Exception(f'File serial number {sn} does not match UID {self.uid}')\n\n elif 'CALDATE' in key:\n cal_date = pd.to_datetime(value).strftime('%Y%m%d')\n self.date.update({len(self.date): cal_date})\n\n elif 'INSTRUMENT_TYPE' in key:\n ctd_type = value[-2:]\n if self.ctd_type != ctd_type:\n raise Exception(f'CTD type {ctd_type} does not match uid {self.uid}.')\n\n else:\n if key.startswith('T'):\n key = key.replace('T', '')\n if key.startswith('C') and len(key) == 2:\n key = key.replace('C', '')\n name = self.mo_coefficient_name_map.get(key.lower())\n if name is not None:\n self.coefficients.update({name: value})\n\n # Now we need to add in the range of the sensor\n name = self.mo_coefficient_name_map.get('prange')\n self.coefficients.update({name: '1450'})", "def load_file_data_from_db(self):\n\n file_objs = self.file_queryset.filter(sip=self.sip, removedtime__isnull=True)\n for file_obj in self._batch_query(file_objs):\n self.file_events = get_file_events(file_obj)\n if not self.file_events:\n return\n try:\n # merge the map_file_data dict with the map_av_data\n mapped_file_info = merge_file_data_dicts(\n map_file_data(file_obj, self.file_events), map_av_data(file_obj)\n )\n self.md_info[\"files\"].append(mapped_file_info)\n self.md_info[\"premis:size\"] = create_package_size(\n mapped_file_info[\"premis:size\"]\n )\n self.md_info[\"amount_of_files\"] += 1\n failed_virus_checks = get_failed_virus_checks(self.file_events)\n if failed_virus_checks:\n self.md_info[\"virus_scan_info\"][\"failed_virus_checks\"].append(\n failed_virus_checks\n )\n passed_virus_checks = get_passed_virus_checks(self.file_events)\n # add info virus_scan_tools if they passed and respect\n # different tools and versions if needed.\n if (\n passed_virus_checks\n and passed_virus_checks\n not in self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"]\n ):\n self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"].append(\n passed_virus_checks\n )\n except KeyError:\n logger.info(\n \"File is no longer present on the filesystem: %s\",\n file_obj.currentlocation,\n )\n continue", "def get_associated_calibrations(filename, nbias=5):\n url = f\"https://archive.gemini.edu/calmgr/{filename}\"\n tree = et.parse(urllib.request.urlopen(url))\n root = tree.getroot()\n prefix = root.tag[:root.tag.rfind('}') + 1]\n\n rows = []\n for node in tree.iter(prefix + 'calibration'):\n cal_type = node.find(prefix + 'caltype').text\n cal_filename = node.find(prefix + 'filename').text\n if not ('processed_' in cal_filename or 'specphot' in cal_filename):\n rows.append((cal_filename, cal_type))\n\n tbl = Table(rows=rows, names=['filename', 'caltype'])\n tbl.sort('filename')\n tbl.remove_rows(np.where(tbl['caltype'] == 'bias')[0][nbias:])\n return tbl", "def load(self):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"load {}\".format(item))\n item.load()", "def load_data(self):", "def load_raw_data(apps, schema_editor):\n from season.import_raw_data import InitialDataProcessor\n matches_path = 
str(BASE_DIR) + '/season/migrations/matches.csv'\n deliveries_path = str(BASE_DIR) + '/season/migrations/deliveries.csv'\n # Initialization path to read data\n load_data = InitialDataProcessor(matches_path=matches_path, deliveries_path=deliveries_path)\n # transform data frame and save the data step by step\n # only support new season import for the first tym when data structure is ready to use\n load_data.transform_input_save()", "def load(self):\n self.data = pd.read_pickle(self.DATE_PKL)\n self.data.index.name = DATE_COL\n\n for hname, h in self.handlers.items():\n print(\"Loading %s\" % hname)\n cur_out = '../'+h.out_path\n df = pd.read_pickle(cur_out).resample('D').ffill() # make daily and forward fill the values\n if hname in self.data.columns:\n # getting to a distinct column:\n i = 2\n while \"%s_%s\" % (hname, i) in self.data.columns:\n i += 1\n print(\"warning: %s was already in the data set, instead we merged new column as %s\" %\n (hname, hname + '_%s' % i))\n self.data = self.data.join(df, how='left', rsuffix=\"_%s\" % i)\n else:\n self.data = self.data.join(df, how='left')", "def read_db_energies( self ):\n for row in self.db.select():\n db_energy = row.get(\"energy\")\n if ( not db_energy is None ):\n self.db_energies.append(db_energy)", "def _read_calibration_data(self):\n #Declare global variables.\n global calT1\n global calT2\n global calT3\n global calP1\n global calP2\n global calP3\n global calP4\n global calP5\n global calP6\n global calP7\n global calP8\n global calP9\n global calP10\n global calH1\n global calH2\n global calH3\n global calH4\n global calH5\n global calH6\n global calH7\n global calGH1\n global calGH2\n global calGH3\n global calResHeatRange\n global calResHeatVal\n global calRangeSwErr\n\n #Temperature calibration.\n calT1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_T1_LSB_REG)\n calT2 = self._read_2bytes_as_short_lsbfirst(self.BME680_T2_LSB_REG)\n calT3 = self._read_register_1sbyte(self.BME680_T3_REG)\n\n #Pressure calibration.\n calP1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_P1_LSB_REG)\n calP2 = self._read_2bytes_as_short_lsbfirst(self.BME680_P2_LSB_REG)\n calP3 = self._read_register_1sbyte(self.BME680_P3_REG)\n calP4 = self._read_2bytes_as_short_lsbfirst(self.BME680_P4_LSB_REG)\n calP5 = self._read_2bytes_as_short_lsbfirst(self.BME680_P5_LSB_REG)\n calP6 = self._read_register_1sbyte(self.BME680_P6_REG)\n calP7 = self._read_register_1sbyte(self.BME680_P7_REG)\n calP8 = self._read_2bytes_as_short_lsbfirst(self.BME680_P8_LSB_REG)\n calP9 = self._read_2bytes_as_short_lsbfirst(self.BME680_P9_LSB_REG)\n calP10 = self._read_register_1ubyte(self.BME680_P10_REG)\n\n #Humidity calibration.\n calH1 = self._read_register_1ubyte(self.BME680_H1_MSB_REG) << 4 | (self._read_register_1ubyte(self.BME680_H1_LSB_REG) & 0x0F)\n calH2 = self._read_register_1ubyte(self.BME680_H2_MSB_REG) << 4 | ((self._read_register_1ubyte(self.BME680_H2_LSB_REG)) >> 4)\n calH3 = self._read_register_1sbyte(self.BME680_H3_REG)\n calH4 = self._read_register_1sbyte(self.BME680_H4_REG)\n calH5 = self._read_register_1sbyte(self.BME680_H5_REG)\n calH6 = self._read_register_1ubyte(self.BME680_H6_REG)\n calH7 = self._read_register_1sbyte(self.BME680_H7_REG)\n\n #Gas calibration.\n calGH1 = self._read_register_1sbyte(self.BME680_GH1_REG)\n calGH2 = self._read_2bytes_as_short_lsbfirst(self.BME680_GH2_LSB_REG)\n calGH3 = self._read_register_1sbyte(self.BME680_GH3_REG)\n\n #Heat calibration.\n calResHeatRange = (self._read_register_1ubyte(self.BME680_RES_HEAT_RANGE) & 0x30) / 
16\n calResHeatVal = self._read_register_1sbyte(self.BME680_RES_HEAT_VAL)\n calRangeSwErr = (self._read_register_1sbyte(self.BME680_RANGE_SW_ERR) & 0xF0) / 16", "def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def load_initial_air_freight_rate(matrix: list):\n print(\"Start loading...\")\n\n # delete all existed records\n models.AirFreightRate.objects.all().delete()\n\n # row index\n index = len(matrix)\n for row in matrix[1:]:\n country = row[0].strip()\n base = row[1].strip()\n rate = row[2]\n danger_rate = row[3]\n t = models.AirFreightRate(\n country=country,\n base=base, \n rate=rate,\n danger_rate=danger_rate \n )\n\n # save models\n t.save()\n\n # return loaded row number\n return index", "def load(input_dir: str) -> None:\n raw_contracts = reader.read_dir(input_dir)\n contracts = tribble.transform.transform(raw_contracts)\n\n LOGGER.info(f'Loading data from {input_dir} in database.')\n loader.load_dataframe(raw_contracts, contract.RawContract)\n loader.load_dataframe(contracts, contract.Contract)\n LOGGER.info('Finished loading data.')", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def load_calibration(self, path: str):\n if not os.path.isdir(path):\n return False, None\n\n filename = os.path.join(path, self._cal_matrix_fn)\n if not os.path.isfile(filename):\n return False, None\n\n return True, pickle.load(open(filename, 'rb'))", "def load_initial_os_rate(matrix: list):\n print(\"Start loading...\")\n\n # delete existed objects\n models.InboundOverseaRate.objects.all().delete()\n\n # reverse mapping of base\n REV_BASE_CHOICE = dict()\n for _i, _s in models.BASE_CHOICE:\n if _i >= 0: # exclude 3rd party\n REV_BASE_CHOICE[_s] = _i\n\n for row in matrix[1:]:\n region = row[0].strip().upper()\n base = REV_BASE_CHOICE[row[1].strip().upper()]\n cc = row[2].strip().upper()\n export_harbor = row[3].strip().upper()\n definition_harbor = row[4].strip().upper()\n os_dm_rate = float(row[5]) if row[5] else None\n cc_rate = float(row[6]) if row[6] else None\n euro_doc_rate = float(row[7]) if row[7] else None\n os_40h_rate = float(row[8]) if row[8] else None\n os_40h_danger_rate = float(row[9]) if row[9] else None\n inter_40h_rate = float(row[10]) if row[10] else None\n inter_40h_danger_rate = float(row[11]) if row[11] else None\n dm_40h_rate = float(row[12]) if row[12] else None\n dm_40h_danger_rate = float(row[13]) if row[13] else None\n delegate = float(row[14]) if row[14] else None\n delegate_danger = float(row[15]) if row[15] else None\n vol_40h = float(row[16]) if row[16] else None\n load_rate = float(row[17]) if row[17] else None\n cpc = float(row[18]) if row[18] else None\n cpc_danger = float(row[19]) if row[19] else None\n\n i = models.InboundOverseaRate(\n region=region,\n base=base,\n cc=cc,\n export_harbor=export_harbor,\n definition_harbor=definition_harbor,\n os_dm_rate=os_dm_rate,\n cc_rate=cc_rate,\n euro_doc_rate=euro_doc_rate,\n os_40h_rate=os_40h_rate,\n os_40h_danger_rate=os_40h_danger_rate,\n inter_40h_rate=inter_40h_rate,\n inter_40h_danger_rate=inter_40h_danger_rate,\n dm_40h_rate=dm_40h_rate,\n dm_40h_danger_rate=dm_40h_danger_rate,\n 
delegate=delegate,\n delegate_danger=delegate_danger,\n vol_40h=vol_40h,\n load_rate=load_rate,\n cpc=cpc,\n cpc_danger=cpc_danger,\n )\n\n # save models\n i.save()", "def add_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n if 'INTERVENTION' in data.columns:\n data = data[data['INTERVENTION'] == 0]\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='append', index=False)\n self.con.commit()", "def _load_ludb(self, path):\n signal, info = wfdb.rdsamp(path)\n self.fs = 500\n self.lead_match = ['I', 'II', 'III', 'aVR', 'aVL', 'aVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']\n self.raw_data = np.transpose(np.array([signal]), (2, 0, 1))\n self.symbol = []\n self.coords = []\n for lead in ['i', 'ii', 'iii', 'avr', 'avl', 'avf', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6']:\n ann_ii = wfdb.rdann(path, extension='atr_{}'.format(lead))\n symbol_1 = ann_ii.symbol\n coords_1 = ann_ii.sample\n if list(np.unique(np.array(symbol_1))) != ['(', ')', 'N', 'p', 't'] and list(np.unique(np.array(symbol_1))) != ['(', ')', 'N', 'p', 't', 'u']:\n print(\"Invalid symbols in ECG annotations.\")\n raise ValueError\n self.symbol.append(symbol_1)\n self.coords.append(coords_1)\n self.label_name = ['(', 'p', ')', '(', 'N', ')', '(', 't', ')']\n self._generate_beatlabel_from_label()", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def load_inputs():\n\n print \"Daily inputs\"\n\n Daily_Input.query.delete()\n\n\n for row in open(\"seed_data/u.input.txt\"):\n row = row.rstrip()\n input_id, date, user_id, sleep, exercise, screen_time, well_being_rating = row.split(\"|\")\n\n date = datetime.strptime(date, \"%m-%d-%y\")\n \n daily_input = Daily_Input(input_id=input_id, date=date, user_id=user_id, sleep=sleep, exercise=exercise, screen_time=screen_time, well_being_rating=well_being_rating)\n db.session.add(daily_input)\n\n db.session.commit()", "def load_budgets():\n\n Budget.query.delete()\n\n with open(budget_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n budget_data = row.split(\",\")\n id = int(budget_data[0])\n budget = budget_data[1]\n category_id = budget_data[2]\n budget_userid = budget_data[3]\n budget_start_date = budget_data[4]\n budget_end_date = budget_data[5]\n\n budget = Budget(\n id = id,\n budget = budget,\n category_id = category_id,\n budget_userid = budget_userid,\n budget_start_date = get_datetime(budget_start_date),\n budget_end_date = get_datetime(budget_end_date)\n )\n\n db.session.add(budget)\n\n db.session.commit()" ]
[ "0.66712004", "0.60240537", "0.5914236", "0.5840371", "0.58154255", "0.576945", "0.5760715", "0.57121146", "0.5657287", "0.54794174", "0.5456236", "0.5436734", "0.5418373", "0.5412157", "0.54064906", "0.5387298", "0.53832376", "0.53711975", "0.53692985", "0.5359947", "0.53554505", "0.53536713", "0.533333", "0.5298514", "0.5290544", "0.5282544", "0.5276827", "0.5272649", "0.52710646", "0.526231" ]
0.7634288
0
Initialize blockchain + open transfers data from a file
def load_data(self):\n    try:\n        with open("blockchain.txt", mode="r") as f:\n            file_content = f.readlines()\n            blockchain = json.loads(file_content[0][:-1]) # OrderedDict\n            updated_blockchain = []\n            for block in blockchain:\n                converted_transfers = [\n                    Transfer(tx["user"], tx["signature"], tx["amount"])\n                    for tx in block["transfers"]\n                ]\n                # converted_transfers = [OrderedDict(\n                #     [('user', tx['user']), ('amount', tx['amount'])]) for tx in block['transfers']]\n                updated_block = Block(\n                    block["index"],\n                    block["previous_hash"],\n                    converted_transfers,\n                    block["proof"],\n                    block["timestamp"],\n                )\n                updated_blockchain.append(updated_block)\n            self.__chain = updated_blockchain\n            open_transfers = json.loads(file_content[1][:-1]) # OrderedDict\n            updated_transfers = []\n            for tx in open_transfers:\n                updated_transfer = Transfer(\n                    tx["user"], tx["signature"], tx["amount"]\n                )\n                # updated_transfer = OrderedDict(\n                #     [('user', tx['user']), ('amount', tx['amount'])])\n                updated_transfers.append(updated_transfer)\n            self.__open_transfers = updated_transfers\n            peer_nodes = json.loads(file_content[2])\n            self.__peer_nodes = set(peer_nodes)\n    except (IOError, IndexError):\n        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, node_id: str, ledger: Ledger):\n self.ledger = ledger\n self.node_id = node_id\n filename = '../files/blockchain' + node_id\n self.file_path = filename + '.txt'\n self.pickle_path = filename + '.pickle'\n self.blockchain = []\n self.saved_blocks = []\n self.create_or_read_file()", "def create_or_read_file(self):\n # make sure the 'files' directory exists\n if not os.path.isdir('../files'):\n os.mkdir('../files')\n try:\n # try to read in files from disk if they exist\n read_file = open(self.pickle_path, 'rb')\n self.blockchain = pickle.load(read_file)\n read_file.close()\n # print('blockchain loaded from file')\n except FileNotFoundError:\n # if no blockchain exists, initialize one with the genesis block\n self.blockchain = [ # Genesis block! as the first block in the chain the hashes are predetermined.\n Block(\n index=0,\n timestamp=str(datetime.datetime.now()),\n transactions=[]\n )\n ]\n self.write_to_disk()", "def load_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='r') as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n updated_blockchain = []\n for block in blockchain:\n converted_tx = [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']]\n converted_chip = [Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']]\n converted_message = [Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']]\n updated_block = Block(\n block['index'], block['previous_hash'], converted_tx, converted_chip, converted_message, block['proof'], block['timestamp'])\n updated_blockchain.append(updated_block)\n self.chain = updated_blockchain\n\n open_transactions = json.loads(file_content[1][:-1])\n # need to convert the loaded data because Transactions should use OrderedDict\n updated_transactions = []\n for tx in open_transactions:\n updated_transaction = Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount'])\n updated_transactions.append(updated_transaction)\n self.__open_transactions = updated_transactions\n\n open_chipsactions = json.loads(file_content[2][:-1])\n # need to convert the loaded data because Chipsactions should use OrderedDict\n updated_chipsactions = []\n for tx in open_chipsactions:\n updated_chipsaction = Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount'])\n updated_chipsactions.append(updated_chipsaction)\n self.__open_chipsactions = updated_chipsactions\n\n open_messsactions = json.loads(file_content[3][:-1])\n # need to convert the loaded data because Messsactions should use OrderedDict\n updated_messsactions = []\n for tx in open_messsactions:\n updated_messsaction = Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature'])\n updated_messsactions.append(updated_messsaction)\n self.__open_messsactions = updated_messsactions\n\n peer_nodes = json.loads(file_content[4])\n self.__peer_nodes = set(peer_nodes)\n except (IOError, IndexError):\n pass\n finally:\n print('Cleanup!')", "def __init__(self, file_hex) -> None:\n self._file_hex = file_hex\n self._local_file_headers, self._central_dir, self._end_central_dir = self._split_zip_file_into_sections(\n file_hex)\n self.central_directory_list = self._split_central_dir()", "def __init__(self, filename, listfile=True):\n if hasattr(filename, 'read'):\n self.file = 
filename\n else:\n self.file = open(filename, 'rb')\n self.header = self.read_header()\n self.hash_table = self.read_table('hash')\n self.block_table = self.read_table('block')\n if listfile:\n self.files = self.read_file('(listfile)').splitlines()\n else:\n self.files = None", "def __init__(self, ledger_file, message_file, stats_file):\n\n self.log = logging.getLogger('blockchain')\n self.log.setLevel(logging.DEBUG)\n\n f_handler = logging.FileHandler('miner.log')\n f_handler.setLevel(logging.DEBUG)\n\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.DEBUG)\n\n # Using Matt's log output format for consistency\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n f_handler.setFormatter(formatter)\n con_handler.setFormatter(formatter)\n\n self.log.addHandler(f_handler)\n self.log.addHandler(con_handler)\n\n self.log.warning(\"=========== Blockchain logging started ==========\")\n\n self.keys = Keys(private_key_file=PRIVATE_KEY_FILE,\n pub_key_file=PUBLIC_KEY_FILE,\n key_directory=KEY_DIRECTORY\n )\n\n self.miner_id = str(hashlib.sha256(self.keys.get_main_pub_key()).hexdigest())\n\n self.message_file = message_file\n self.ledger_file = ledger_file\n self.stats_file = stats_file\n self._create_empty_files()\n\n # Use this lock to protect internal data of this class from the\n # multi-threaded server. Wrap code which modifies the blockchain with\n # \"with self.lock:\". Be careful not to nest these contexts or it will\n # cause deadlock.\n self.lock = threading.Lock()\n\n self.blocks = {} # dictionary of Block.hash -> BlockNode\n self.root = None\n self.latest_block = None # BlockNode to mine on\n self.second_longest_chain = None\n self.mined_block = None # latest block mined by this blockchain.\n self.latest_time = 0 # Timestamp of latest_block\n self.last_update = 0\n self.total_blocks = 0 # Total blocks in our blockchain\n self.mining_flag = GIVEN_BLOCK\n self.message_list = [get_collusion_message(self.keys) for _ in range(MSGS_PER_BLOCK)]\n self.max_depth = 0\n self.messages = set()\n self.message_num = 0\n self.last_msg_update = 0\n self.rejects = {}\n\n self.message_queue = MessageQueue()\n\n self._load_saved_ledger()", "def __init__(self, filename, overwrite=False, autocommit=False):\n if overwrite:\n self.file = diskfile.open2(filename, \"w+b\")\n else:\n try:\n self.file = diskfile.open2(filename, \"r+b\")\n except FileNotFoundError:\n self.file = diskfile.open2(filename, \"x+b\")\n self.autocommit = autocommit\n self.revert()", "def __init__(self, public_key, node_id):\n # Our starting block for the blockchain\n genesis_block = Block(0, '', [], [], [], 100, 0)\n # Initializing our (empty) blockchain list\n self.chain = [genesis_block]\n # Unhandled transactions\n self.__open_transactions = []\n self.__open_chipsactions = []\n self.__open_messsactions = [] \n self.public_key = public_key\n self.__peer_nodes = set()\n self.node_id = node_id\n self.resolve_conflicts = False\n self.load_data()", "def __init__(self, file_name=None):\n self.blocks = []\n self.headers = []\n self.node_block = None\n self.elem_block = None\n self.result_blocks = []\n self.file_name = file_name\n\n if file_name is not None:\n self.load(file_name)", "def new_file():\n if port != '5000':\n values = request.get_json()\n if not values:\n response = {\n 'message': 'No data found'\n }\n return jsonify(response), 400\n if 'name' not in values or 'path' not in values or 'chunk_size' not in values:\n response = {\n 'message': 'Name, path or chunk size was not 
found'\n }\n return jsonify(response), 400\n \n #get the information to create a new file object\n name = values['name']\n path = values['path']\n chunk_size = values['chunk_size']\n\n #reset the file object\n file.chunks = []\n file.fat = []\n file.file_size = 0\n file.name = name\n file.path = path\n file.chunk_size = int(chunk_size)\n file.owner = str(port)\n if file.isFile():\n fat_file = {\n 'file_name': file.name,\n 'file_owner': file.owner,\n 'fat': []\n }\n hash_table.fat.append(fat_file)\n\n #split the file into chunks of data to distribute between peers\n chunk_hashes = file.split_to_chunks()\n\n MT = MerkleTreeHash()\n root_hash = MT.find_merkle_hash(chunk_hashes)\n\n #make a new blockchain for the file that was crreated\n new_blockchain = Blockchain('', '')\n\n #update some of its properties \n new_blockchain.file_name = file.name\n new_blockchain.file_size = file.file_size\n new_blockchain.chunk_number = len(file.chunks)\n new_blockchain.last_chunk_size = file.file_size%int(chunk_size)\n new_blockchain.root_node = root_hash\n\n\n url = 'http://localhost:5000/new-file'\n try:\n response = requests.post(url, json={\n 'file_name': new_blockchain.file_name,\n 'file_size': new_blockchain.file_size,\n 'chunk_number': new_blockchain.chunk_number,\n 'last_chunk_size': new_blockchain.last_chunk_size,\n 'root_node': new_blockchain.root_node})\n if response.status_code == 400 or response.status_code == 500:\n print('Sending files declined')\n return False\n except requests.exceptions.ConnectionError:\n print('connection exception handled')\n return False\n \n\n url = 'http://localhost:5000/get-dht-size'\n value = requests.get(url)\n json_value = value.json()\n dht_size = json_value['dht_size']\n file.distribute_chunks(int(dht_size))\n\n\n\n #update blockchain files with the new information\n \n\n \n response = {\n 'message': 'File added successfully',\n 'file_name': file.name,\n 'file_size': file.file_size,\n 'file_path': path + name,\n 'number_of_chunks': len(file.chunks),\n 'chunks': file.print_chunks(),\n 'chunk_size': chunk_size,\n 'last_chunk_size': file.file_size%int(chunk_size),\n 'file_creator': 'localhost:' + str(port)\n }\n return jsonify(response), 201\n response = {\n 'message': 'Something went wrong'\n }\n return jsonify(response), 400", "def __init__(self):\n with open('config.json', encoding='UTF-8') as json_data_file:\n self.config = json.load(json_data_file)\n self._get_credential()\n self.file_tree = [{}] * 100", "def init(count, neutrino, uri):\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n click.echo('starting btcd')\n start_btcd()\n \n for index in range(0, count):\n node = Node.from_index(index)\n start_lnd(node, neutrino, uri)\n wait_for_file(node.cert())\n init_lnd(node)\n\n first_node = Node.from_index(0)\n wait_for_file(first_node.macaroon())\n lndconnect_node(first_node)\n \n if count > 1:\n mining_node = Node.from_index(1)\n wait_for_file(mining_node.macaroon())\n _set_mining_node(mining_node)\n time.sleep(4)\n # We need at least 100 blocks because coinbase funds can’t be spent until after 100 \n # confirmations, and we need about 300 to activate segwit.\n _block(400)", "def __init__(self,\n wallet_bin_path,\n datastore_path,\n wallet_password,\n ):\n\n self.wallet_bin_path = wallet_bin_path\n self.datastore_path = datastore_path\n self.wallet_password = wallet_password\n\n self._server = None\n self._accounts = None\n self._block_timestamps = {}\n self.ec = EtherChain()", "def __init__(self, pdb_file):\n self.pdb_file = pdb_file\n self.content = 
None\n self.lines = None\n self.atom_section = None\n self.conect_section = None\n self.read_all()", "def __init__(self, filepath):\n\t\tself.transactions = list()\n\t\tself.items = set()\n\n\t\ttry:\n\t\t\tlines = [line.strip() for line in open(filepath, \"r\")]\n\t\t\tlines = [line for line in lines if line] # Skipping blank lines\n\t\t\tfor line in lines:\n\t\t\t\ttransaction = list(map(int, line.split(\" \")))\n\t\t\t\tself.transactions.append(transaction)\n\t\t\t\tfor item in transaction:\n\t\t\t\t\tself.items.add(item)\n\t\texcept IOError as e:\n\t\t\tprint(\"Unable to read dataset file!\\n\" + e)", "def __init__(self, file):\n self.file = file\n\n self.env = None\n self.brain_name = None\n self.action_size = None\n self.state_size = None\n\n self.n_agents = None\n self.state = EnvEnum.idle", "def __init__(self, data_file_path):\n self._inventory = Inventory(data_file_path)\n self._baskets = {}", "def lock_init(self, file_path=None, file_hash=None):\n\n with locks_db.atomic() as transaction: # Opens new transaction.\n try:\n self.lock_obj.lock_init(file_path=file_path, file_hash=file_hash)\n except Exception as ex:\n locks_db.rollback()\n raise ex", "def initFromFile(self,file):\n self.source = file\n file_reader = open(file,\"r\")\n self.isInit = True\n lineCounter = 0\n firstLine = None\n SecondLine = None\n ThirdLine = None\n for line in file_reader:\n if(lineCounter == 0):\n firstLine = line.split()\n self.rowsNumber = int(firstLine[0])\n self.columnsNumber = int(firstLine[1])\n self.routerRangeRadius = int(firstLine[2])\n if(lineCounter == 1):\n SecondLine = line.split()\n self.backBoneCosts = int(SecondLine[0])\n Path.backBoneCost = self.backBoneCosts\n self.routerCosts = int(SecondLine[1])\n self.budget = int(SecondLine[2])\n if(lineCounter == 2):\n ThirdLine = line.split()\n self.firstCell = Cell(int(ThirdLine[0]),int(ThirdLine[1]))\n if(lineCounter>2):\n self.map.append([])\n LINE = line\n columnCounter = 0\n for char in LINE:\n temp = Cell(len(self.map)-1,columnCounter,Cell.getCellType(char))\n self.map[len(self.map)-1].append(temp)\n if(temp.cellType == \"FLOOR\"):\n self.notComputeRouter.append(temp)\n columnCounter += 1\n lineCounter +=1\n self.isInit = True", "def __init__(self, networkFile=\"\", demandFile=\"\"):\n self.numNodes = 0\n self.numLinks = 0\n self.numZones = 0\n self.firstThroughNode = 0\n \n self.node = dict()\n self.link = dict()\n self.ODpair = dict()\n self.path = dict()\n\n if len(networkFile) > 0 and len(demandFile) > 0:\n self.readFromFiles(networkFile, demandFile)", "def __init__(self):\n self.chain = {}\n self.blocks = {}\n self.blocks_spending_input = {}\n self.blocks_containing_tx = {}\n self.all_transactions = {}", "def __init__(self, filepath):\n self.transactions = list()\n self.items = set()\n try:\n lines = [line.strip() for line in open(filepath, \"r\")]\n lines = [line for line in lines if line] # Skipping blank lines\n for line in lines:\n transaction = list(map(str, line.split(\" \")))\n self.transactions.append(transaction)\n for item in transaction:\n self.items.add(item)\n\n except IOError as e:\n print(\"Unable to read dataset file!\\n\" + e)", "def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. 
(We\n # don't need a 'representations' or 'strings' table because we\n # only track file existence, not file contents.)\n self._node_db = _NodeDatabase()\n\n # Start at revision 0 without a root node.\n self._youngest = 0", "def __init__(self):\n\t\t\n\t\tsettings = configparser.SafeConfigParser(allow_no_value=True)\n\t\tlist=settings.read('data/settings.cfg')\n\t\tif not 'data/settings.cfg' in list:\n\t\t\tprint('no configuration file present.. making one')\n\t\t\tself.makeConfigFile(settings)\n\t\t\tshare = ['']\n\t\t\tself.nodes = []\n\t\telse:\n\t\t\tshare, nodes = self.openConfig(settings)\n\t\t\tself.nodes = nodes\n\t\t\n\t\t\n\t\tself.files = self.loadFiles(share)\t\t\n\t\tself.share = share\n\t\tself.kill= False\n\t\tself.downloads = {}\n\t\tself.currentVersion = (0,2,1)\n\t\tself.totalDownloads = 0\n\t\tself.current = 0\n\t\tself.config = settings", "def __init__(self, transactions=None):\n\n self.blocks = []\n if transactions:\n if type(transactions) is not list:\n raise Exception(\"Data must be a list of transactions!\")\n\n for i, tx in enumerate(transactions):\n if i == 0: # Create genesis block\n if not signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n prev_hash = \"0\" # Arbitrary prev_hash for genesis block\n new_block = Block.create_from_transaction(tx, prev_hash)\n self.blocks.append(new_block)\n else:\n if not self.validate_transaction(tx):\n print(\"Transaction is NOT valid.\")\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def __init__(self, *args):\n this = _libSALOME_LifeCycleCORBA.new_SALOME_FileTransferCORBA(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, file):\n\n self.file = file\n self.cmd = None\n self.eof = False\n self.cmd_type = None\n self.cmd_arg1 = None\n self.cmd_arg2 = None\n self.__enter__() # might be a bad idea", "def _load_saved_ledger(self):\n\n with open(self.ledger_file, 'r') as ledger:\n self.log.debug('Loading blocks from local ledger!')\n i = 0\n for block_str in ledger:\n i += 1\n if self._add_block_str(block_str.strip(), False):\n self.log.info(\"Loaded block %d\", i)\n\n # After loading all blocks from file, tell our miner to continue\n self.last_update = self.latest_time\n self.mining_flag = CONTINUE_MINING", "def __init__(self, raw_data_file,):\n self.raw_data_file = raw_data_file\n self.clean_data = self.cleanData()\n self.microtrip_data = []" ]
[ "0.6819916", "0.67035604", "0.66686094", "0.63645846", "0.6158963", "0.60149735", "0.6014652", "0.5977203", "0.59086215", "0.5895487", "0.5874486", "0.5856579", "0.5804675", "0.5782293", "0.57734174", "0.5761372", "0.575894", "0.5711983", "0.5692812", "0.5642049", "0.56329024", "0.55990875", "0.5584735", "0.5561882", "0.556114", "0.5559109", "0.5550441", "0.5540571", "0.55401766", "0.5501707" ]
0.74270946
0
Save blockchain + open transactions snapshot to a file
def save_data(self):
    try:
        with open("blockchain.txt", mode="w") as f:
            dict_chain = []
            for block in self.__chain:
                temp = Block(
                    block.index,
                    block.previous_hash,
                    [tx.__dict__ for tx in block.transfers],
                    block.proof,
                    block.timestamp,
                )
                dict_chain.append(temp.__dict__)
            f.write(json.dumps(dict_chain))
            f.write("\n")
            dict_open_transfers = [tx.__dict__ for tx in self.__open_transfers]
            f.write(json.dumps(dict_open_transfers))
            f.write("\n")
            f.write(json.dumps(list(self.__peer_nodes)))
    except IOError:
        print("Saving Data failed!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:\n # save the block object like a dictionary\n saveable_chain = [block.__dict__ for block in \n [Block(block_el.index, block_el.previous_hash, [tx.__dict__ for tx in block_el.transactions] , block_el.proof, block_el.timestamp) \n for block_el in self.__blockchain]]\n f.write(json.dumps(saveable_chain))\n f.write('\\n')\n saveable_transactions = [tx.__dict__ for tx in self.__open_transactions]\n f.write(json.dumps(saveable_transactions))\n # save the connected nodes\n f.write('\\n')\n f.write(json.dumps(list(self.__peer_nodes))) \n except IOError:\n print('Saving failed')", "def write_to_disk(self):\n text_file = open(self.file_path, \"w\")\n text_file.write(str(self))\n text_file.close()\n # dump to pickle\n pickle.dump(self.blockchain, open(self.pickle_path, \"wb\"))", "def save_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:\n saveable_chain = [block.__dict__ for block in [Block(block_el.index, block_el.previous_hash, \n [tx.__dict__ for tx in block_el.transactions], \n [tx.__dict__ for tx in block_el.chipsactions],\n [tx.__dict__ for tx in block_el.messsactions],\n block_el.proof, block_el.timestamp) for block_el in self.__chain]]\n f.write(json.dumps(saveable_chain))\n f.write('\\n')\n saveable_tx = [tx.__dict__ for tx in self.__open_transactions]\n f.write(json.dumps(saveable_tx))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_chipsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_messsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n f.write(json.dumps(list(self.__peer_nodes)))\n except IOError:\n print('Saving failed!')", "def _save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")", "def saveSnapshot(self, filename): \n\t\tpass", "def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with open('ddos_bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))", "def save(self):\n if not self.fileKey:\n log.error(\"attempted to save a closed wallet\")\n return\n encrypted = self.fileKey.encrypt(tinyjson.dump(self).encode()).hex()\n w = tinyjson.dump({\n \"keyparams\": self.fileKey.params(),\n \"wallet\": encrypted,\n })\n helpers.saveFile(self.path, w)", "def save_block(block_hash, filename):\n\tr = req.get(\"https://blockchain.info/rawblock/\"+block_hash)\n\twith open(filename, \"wb\") as fp:\n\t\tfp.write(r.content)", "def commit(self, snapshot):\n _, checkpoint_id = snapshot\n self.chaindb.commit(checkpoint_id)", "def write_snapshot(self):\n json.dump(self.snapshot, open(paths.RESULTS_FILE, 'w'), indent=4, sort_keys=True)", "def save_snapshot(snapshot_id):\n\n if comm is not None:\n comm.barrier() # if parallel, ensure that we are always in sync, so snapshots are always a consistent set\n\n if ns_args['snapshot_per_parallel_task']:\n rank_id = \"%d\" % rank\n else:\n rank_id = \"ALL\"\n\n if ns_args['snapshot_per_parallel_task'] or rank == 0:\n try:\n snapshot_io = open(ns_args['out_file_prefix']+'snapshot.%s.%s.%s' % (snapshot_id, rank_id, ns_args['config_file_format']), \"w\")\n except:\n snapshot_io = open(ns_args['out_file_prefix']+'snapshot.%d.%s.%s' % (snapshot_id, rank_id, 
ns_args['config_file_format']), \"w\")\n\n root_walkers_write_t0 = time.time()\n for at in walkers:\n at.info['volume'] = at.get_volume()\n at.info['iter']=snapshot_id\n ase.io.write(snapshot_io, at, format=ns_args['config_file_format'])\n print \"root walkers write time \", time.time() - root_walkers_write_t0\n\n if not ns_args['snapshot_per_parallel_task']:\n if comm is not None: # gather other walkers to do I/O on master task\n if rank == 0: # I/O on master task\n for r in range(1,size):\n remote_walkers_recv_t0 = time.time()\n remote_walkers = comm.recv(source=r, tag=2)\n print \"save_snapshot remote walkers recv time \", r, time.time() - remote_walkers_recv_t0\n remote_walkers_write_t0 = time.time()\n for at in remote_walkers:\n at.info['volume'] = at.get_volume()\n at.info['iter']=snapshot_id\n ase.io.write(snapshot_io, at, format=ns_args['config_file_format'])\n print \"save_snapshot remote walkers write time \", r, time.time() - remote_walkers_write_t0\n else: # not master task\n comm.send(walkers, dest=0, tag=2)\n\n if ns_args['snapshot_per_parallel_task'] or rank == 0:\n snapshot_io.close()", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def _save_genesis_block(self, genesis_file=GENESIS):\n genesis = json.load(open(genesis_file))\n self.client.bulk_index(docs=genesis, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)", "def __transactions_file(self):\n log.debug(\"Generating __transaction_file\")\n # Retrieve all the transactions\n transactions = self.session.query(db.Transaction).order_by(db.Transaction.transaction_id.asc()).all()\n # Create the file if it doesn't exists\n try:\n with open(f\"transactions_{self.chat.id}.csv\", \"x\"):\n pass\n except IOError:\n pass\n # Write on the previously created file\n with open(f\"transactions_{self.chat.id}.csv\", \"w\") as file:\n # Write an header line\n file.write(f\"UserID;\"\n f\"TransactionValue;\"\n f\"TransactionNotes;\"\n f\"Provider;\"\n f\"ChargeID;\"\n f\"SpecifiedName;\"\n f\"SpecifiedPhone;\"\n f\"SpecifiedEmail;\"\n f\"Refunded?\\n\")\n # For each transaction; write a new line on file\n for transaction in transactions:\n file.write(f\"{transaction.user_id if transaction.user_id is not None else ''};\"\n f\"{transaction.value if transaction.value is not None else ''};\"\n f\"{transaction.notes if transaction.notes is not None else ''};\"\n f\"{transaction.provider if transaction.provider is not None else ''};\"\n f\"{transaction.provider_charge_id if transaction.provider_charge_id is not None else ''};\"\n f\"{transaction.payment_name if transaction.payment_name is not None else ''};\"\n f\"{transaction.payment_phone if transaction.payment_phone is not None else ''};\"\n f\"{transaction.payment_email if transaction.payment_email is not None else ''};\"\n f\"{transaction.refunded if transaction.refunded is not None else ''}\\n\")\n # Describe the file to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"csv_caption\"))\n # Reopen the file for reading\n with 
open(f\"transactions_{self.chat.id}.csv\") as file:\n # Send the file via a manual request to Telegram\n requests.post(f\"https://api.telegram.org/bot{self.cfg.telegram['token']}/sendDocument\",\n files={\"document\": file},\n params={\"chat_id\": self.chat.id,\n \"parse_mode\": \"HTML\"})\n # Delete the created file\n os.remove(f\"transactions_{self.chat.id}.csv\")", "def _saveState(self, fname=None, save_backup=True):\n if fname is None:\n fname = self.filename\n filepath = Path(fname).resolve()\n\n # it is good to backup this file in caseit exists\n if save_backup:\n if filepath.exists(): # pylint: disable=no-member\n # gets folder/filename.* and transforms into folder/filename_{timestamp}.json\n filepath_backup = Path(filepath).with_name(\n \"{}_{}.json\".format(filepath.stem, timestamp_string()))\n logger.debug(\"Backup %s to %s\", filepath, filepath_backup)\n shutil.copy2(filepath, filepath_backup)\n\n # save to filepath, overwriting\n filepath.touch() # pylint: disable=no-member\n with open(filepath, 'w') as file:\n json_state = self.__toJSON()\n file.write(json.encode(json_state))\n self.__sha256__ = json_state[\"__sha256__\"]\n logger.debug(\"%s's sha: %s\", fname, json_state[\"__sha256__\"])", "def serialize(self, writer: serialization.BinaryWriter) -> None:\n super(Block, self).serialize(writer)\n writer.write_var_int(len(self.transactions) + 1)\n writer.write_serializable(self.consensus_data)\n for tx in self.transactions:\n writer.write_serializable(tx)", "def save_changes(self):\n with open(base_dir + str(self.name) + \".txt\", mode=\"w\") as f:\n f.writelines([str(self.initial_amount)+'\\n', str(self.spent)+'\\n', \" \".join(self.allocs)+'\\n'])\n f.write(\" \".join([str(k) for k in self.alloc_amounts]) + \"\\n\")\n f.write(\" \".join([str(k) for k in self.alloc_spent]) + \"\\n\")\n f.writelines(self.expenditures)\n self.report()", "def saveState(self, file):\n state = self.context.getState(getPositions=True, getVelocities=True, getParameters=True, getIntegratorParameters=True)\n xml = mm.XmlSerializer.serialize(state)\n if isinstance(file, str):\n with open(file, 'w') as f:\n f.write(xml)\n else:\n file.write(xml)", "def _save_state(self):\n with open(self.histFile,'wb') as hf:\n hf.write(self.dbFile.Value)", "def writeBlocks(self):\n dataFile = open(\"chain.txt\", \"w\")\n chainData = []\n for eachBlock in self.chain:\n chainData.append(eachBlock.__dict__)\n dataFile.write(json.dumps(chainData, indent=4))\n dataFile.close()", "def save_snapshot(epoch, net, optimizer, running_loss, snapshot_name):\n\tstate =\t{\n#\t\t'epoch':\tepoch+1,\t#TODO: support saving epoch later - for continue training\n\t\t'model_state_dict': net.state_dict(), \n\t\t'loss': running_loss,\n\t}\n\tif optimizer:\t#this makes the snapshot much bigger\n\t\tstate.update({'optimizer_state_dict': optimizer.state_dict()})\n\ttorch.save(\n\t\tstate,\n\t\tsnapshot_name+'.pth'\n#\t\tsnapshot_name+str(epoch+1)+'.pth'\t#TODO: encode the epoch we stopped at\n\t)", "def commit(hashes_file, bitcoin_key):\n logentry = LogEntry()\n\n for line in open(hashes_file):\n logentry.add_sha256(unhexlify(line.rstrip()))\n logentry.build()\n\n tx_id = logentry.commit(bitcoin_key)\n\n click.echo(\"Log entry committed as transaction with ID: %s\" % tx_id)", "def save_block(self, dataset, dataset_block):\n dataset_gdf = pd.concat(dataset)\n dataset_gdf[\"original_file\"] = dataset_gdf[\"original_file\"].astype(str)\n\n dataset_gdf.to_file(\n str(self.output_directory / f\"floorplans_{dataset_block}.json\"),\n 
driver=\"GeoJSON\",\n )", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def save(self, f):\n self.f = f\n for region in self.regions:\n ext = region.address & 0xFFFF0000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address = region.address - ext\n for chunk in chunks(region.data):\n if address >= 0x10000:\n ext += 0x10000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address -= 0x10000\n self.write_hex_line(HexLine(address, DATA, chunk))\n address += len(chunk)\n self.write_hex_line(HexLine(0, EOF))", "def create_block_file(blockTxns):\n textfile = open(\"/content/block.txt\", \"w\")\n for element in blockTxns:\n textfile.write(element + \"\\n\")\n textfile. close()", "def upgrade_savefile(fn):\n\n if signing_keys is None:\n return\n\n atime = os.path.getatime(fn)\n mtime = os.path.getmtime(fn)\n\n with zipfile.ZipFile(fn, \"a\") as zf:\n\n if \"signatures\" in zf.namelist():\n return\n\n log = zf.read(\"log\")\n zf.writestr(\"signatures\", sign_data(log))\n\n os.utime(fn, (atime, mtime))", "def save(self, name, params=None, file_object=None):\n # Merge in any provided params\n if params:\n for key in params:\n self.params[key] = params[key]\n if file_object:\n f = file_object\n else:\n f = gzip.open(name, 'wb')\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n self.dirty = False\n nhashes = sum(self.counts)\n # Report the proportion of dropped hashes (overfull table)\n dropped = nhashes - sum(np.minimum(self.depth, self.counts))\n print(\"Saved fprints for\", sum(n is not None for n in self.names),\n \"files (\", nhashes, \"hashes) to\", name,\n \"(%.2f%% dropped)\" % (100.0 * dropped / max(1, nhashes)))", "def serialize(transactions, output_filename):\n x = list()\n y = list()\n\n for transaction in transactions:\n x.append(np.array([\n transaction.amount,\n transaction.sell,\n transaction.asks[0][0],\n transaction.asks[0][1],\n transaction.asks[1][0],\n transaction.asks[1][1],\n transaction.asks[2][0],\n transaction.asks[2][1],\n transaction.asks[3][0],\n transaction.asks[3][1],\n transaction.asks[4][0],\n transaction.asks[4][1],\n transaction.asks[5][0],\n transaction.asks[5][1],\n transaction.asks[6][0],\n transaction.asks[6][1],\n transaction.asks[7][0],\n transaction.asks[7][1],\n transaction.asks[8][0],\n transaction.asks[8][1],\n transaction.asks[9][0],\n transaction.asks[9][1],\n transaction.bids[0][0],\n transaction.bids[0][1],\n transaction.bids[1][0],\n transaction.bids[1][1],\n transaction.bids[2][0],\n transaction.bids[2][1],\n transaction.bids[3][0],\n transaction.bids[3][1],\n transaction.bids[4][0],\n transaction.bids[4][1],\n transaction.bids[5][0],\n transaction.bids[5][1],\n transaction.bids[6][0],\n transaction.bids[6][1],\n transaction.bids[7][0],\n transaction.bids[7][1],\n transaction.bids[8][0],\n transaction.bids[8][1],\n transaction.bids[9][0],\n transaction.bids[9][1],\n transaction.d_high,\n transaction.d_low,\n transaction.d_vwap,\n transaction.d_volume\n ]))\n\n y.append(transaction.price)\n\n savemat(output_filename, dict(x=np.array(x), y=np.array(y)))", "def save_graph(self, widget, data=None):\n\t\t#un po' di pulizia prima 
di fare il salvataggio\n\t\tos.system(\"find ./extra/MonitorGraph/ -type f -not -name '*.png' | xargs rm -f\")\n\t\tsnapshotFile =\"./extra/UserOutput/Snapshot\"+time.strftime(\"%Y%m%d-%H%M\", time.gmtime())+\".tar\"\n\t\tos.system(\"tar -cf \"+snapshotFile+\" --exclude def* --directory ./extra/ MonitorGraph/\")\n\t\tprint \"Snapshot saved to\",snapshotFile" ]
[ "0.7076406", "0.69661385", "0.69495887", "0.6570535", "0.6515428", "0.6430769", "0.6341623", "0.61975217", "0.61354846", "0.5901512", "0.5844782", "0.57685465", "0.5766446", "0.5762449", "0.5744786", "0.559941", "0.55762947", "0.5561207", "0.5560014", "0.5559062", "0.5533427", "0.5527508", "0.5527042", "0.5518881", "0.55164933", "0.5491218", "0.5484566", "0.5480861", "0.5455488", "0.5452174" ]
0.71053904
0
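The save_data method in the record above writes three JSON lines to blockchain.txt: the chain, the open transfers, and the peer nodes. A minimal read-back sketch under that assumed file layout (load_data and its return shape are illustrative, not part of the record):

import json

def load_data(path="blockchain.txt"):
    # Reads the three JSON lines written by save_data: chain, open transfers, peer nodes.
    with open(path, mode="r") as f:
        lines = f.readlines()
    chain = json.loads(lines[0])
    open_transfers = json.loads(lines[1])
    peer_nodes = set(json.loads(lines[2]))
    return chain, open_transfers, peer_nodes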
Generate a proof of work for the open transfers, the hash of the previous block and a random number (which is guessed until it fits).
def proof_of_work(self):
    last_block = self.__chain[-1]
    last_hash = hash_block(last_block)
    proof = 0
    # Try different PoW numbers and return the first valid one
    while not Verification.valid_proof(self.__open_transfers, last_hash, proof):
        proof += 1
    print(proof)
    return proof
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proof_of_work(self):\n last_block = self.__chain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not Verification.valid_proof(self.__open_transactions, self.__open_chipsactions, self.__open_messsactions, last_hash, proof):\n proof += 1\n return proof", "def proof_of_work(self, last_block_hash):\n transactions_string = self.get_string_of_transactions() # Plaintext will be self.transactions transformed\n nonce = 0\n found = False\n print(\"Starting proof of work with difficulty %s\" % Miner.DIFFICULTY)\n while not found:\n hash_object = hashlib.sha256(str.encode(\n last_block_hash + transactions_string + str(\n nonce))) # b allows to concert string to binary\n complete_hash = hash_object.hexdigest()\n\n if self.is_accepted_by_difficulty(complete_hash):\n found = True\n else:\n nonce += 1\n print(\"Hash found: '%s'\" % complete_hash)\n return complete_hash, nonce", "def proof_of_work(self, block):\n block.nonce = 0 \n computed_hash = block.compute_hash()\n while not computed_hash.starswith('0' * Blockchain.difficulty):\n block.nonce += 1 \n computed_hash = block.compute_hash()", "def proof_of_work(block):\n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('0' * Blockchain.difficulty):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash", "def proof_of_work(self, block, previous_hash):\r\n # Start WIth Nonce = 1\r\n nonce = 1\r\n\r\n # Loop Till You Find A Valid Nonce\r\n check_proof = False\r\n while check_proof is False:\r\n block['nonce'] = nonce\r\n hash_operation = self.hash(block)\r\n if hash_operation[:4] == '0000': # Check if the current_hash fulfills the required condition\r\n check_proof = True # If it does then exit the loop\r\n else:\r\n nonce += 1 # Else try with the next nonce\r\n\r\n return nonce, hash_operation # Return the nonce and the hash that meet the required condition\r", "def proof_of_work(block):\n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not Blockchain.ifsatisfy_diff(computed_hash):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash", "def create_proof_of_work(previous_proof):\n \tproof = previous_proof + 1\n \twhile (proof + previous_proof) % 7 != 0:\n \tproof += 1\n \treturn proof", "def proof_of_work(self, block):\r\n block.nonce = 0\r\n\r\n computed_hash = block.compute_hash()\r\n while not computed_hash.startswith('0' * Blockchain.difficulty):\r\n block.nonce += 1\r\n computed_hash = block.compute_hash()\r\n\r\n return computed_hash", "def proof_of_work(self, block):\n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('0' * Blockchain.difficulty):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash", "def proof_of_work(header: Header) -> Header:\n logger.info(\n \"Mining block for %s version and %s difficulty\",\n header.version,\n header.difficulty,\n )\n while not Verification.valid_nonce(header):\n header.nonce += 1\n\n return header", "def calculate_pow(self):\n self.header['nonce'] = proof_of_work(\n self.header,\n self.header['difficultyTarget'])", "def mine(self):\n last_block = self.chain[-1]\n\n nonce = self.proof_of_work()\n previous_hash = self.hash(last_block)\n self.create_block(nonce, previous_hash)", "def proof_of_work(block):\n while not Blockchain.valid_proof(block):\n block[\"proof\"] += 1", "def find_curve(self):\n self.set_a()\n while True:\n while not 
self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break", "def proof_of_work():\n data = request.get_json()\n print(data)\n if data['last_proof'] is None:\n return jsonify({'message':\"You didn't send the proof\"})\n \n start = timer()\n \n print(\"Searching for next proof\")\n proof = 0\n # TODO: Your code here\n while not valid_proof(data, proof):\n proof += 1\n #guess = f'{proof}'.encode()\n #guess_hash = hashlib.sha256(guess).hexdigest()\n print(\"Proof found: \" + str(proof) + \" in \" + str(timer() - start))\n response = {\n \"message\":\"Proof Found\",\n \"proof\":proof,\n \"time\":timer() - start,\n \"last_proof\":data\n }\n return jsonify(response),200", "def get_proof(self, transfer):\n hashlock = transfer.lock.hashlock\n transfer = self.locked[hashlock]\n proof_for = sha3(transfer.lock.as_bytes)\n proof = get_proof(self._cached_lock_hashes, proof_for)\n return proof", "def new_block(self, proof, previous_hash=None):\n servers = [\n \"1.us.pool.ntp.org\",\n \"2.us.pool.ntp.org\",\n \"3.us.pool.ntp.org\"\n ]\n\n response = {}\n\n try:\n response = self.c.request('0.us.pool.ntp.org')\n except Exception:\n for server in servers:\n try:\n response = self.c.request(server)\n\n if response:\n break\n\n except Exception:\n print('\\n //// alternate ntp server didnt work')\n\n block = {\n 'message': 'New Block Forged',\n 'index': len(self.chain) + 1,\n 'timestamp': response.tx_time or time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.chain[-1]['hash'],\n }\n\n # Calculate the hash of this new Block\n block['hash'] = self.hash(block)\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def valid_proof(transactions, last_hash, proof):\n # Creates a String containing all the hash inputs.\n guess = (str([transaction.to_ordered_dict() for transaction in transactions]) + str(last_hash) + str(\n proof)).encode()\n # Hashes the String.\n guess_hash = hash_util.hash_string_256(guess)\n # Only a hash based on the above inputs that starts with two 0s is valid for the algorithm.\n # This condition can be changed, but once adding more characters to validate, the more time consuming it is.\n return guess_hash[0:2] == '00'", "def proof_of_work(self, last_proof):\n proof = 0\n while self.valid_proof(last_proof, proof) == False:\n proof += 1\n return proof", "def mine_blocks(self, count):\n\n # Clear out block announcements from each p2p listener\n [x.clear_block_announcements() for x in self.nodes[0].p2ps]\n self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)\n return int(self.nodes[0].getbestblockhash(), 16)", "def proof_of_work(self, last_proof):\n\n proof = 0\n while self.valid_proof(last_proof, proof) is False:\n proof += 1\n \n return proof", "def mine(self):\n new_block = Block(self.block['timestamp'], self.block['car'],\n self.block['id'])\n # link the block to the previous block\n new_block.previous_hash = self._get_previous_hash()\n while True:\n # get a hash\n new_hash = new_block.get_hash()\n # check hash rules, in our case check if the hash starts with\n # self.difficulty number of zeroes\n if new_hash[0] != self.difficulty * \"0\":\n if self.new_block[\"block\"] is None:\n # the hash hasn't been found yet by any other process,\n # 
therefore increase the nonce and continue\n # miners will use a different mining mechanism in order\n # to increase the probability of finding a hash by\n # a different miner\n new_block.increment_nonce(self.id + 1)\n continue\n break\n break\n\n # NOTE: May happen that two processes find the hash at the same time,\n # because there is not a big difficulty, however, it's not a problem,\n # for sake of the demo it's fine\n\n if self.new_block[\"block\"] is None:\n # this process has found the hash first\n print(self.id, \" - the winner hash\", new_hash)\n new_block.hash = new_hash\n self.new_block[\"block\"] = new_block\n print(self.id, \" - mined the block\")\n else:\n # validate the block found by other process (miner)\n if self.new_block[\"validated\"] is not False:\n print(self.id, \" - validating\")\n # check block's validity\n valid = False\n if self.new_block[\"block\"].is_block_valid():\n # check blockchain's validity when we apply the newly\n # mined block\n if self.is_blockchain_valid(self.new_block[\"block\"]):\n valid = True\n self.new_block[\"validated\"] = valid\n else:\n # NOTE: this demo doesn't take into account the number of\n # miners who approved the block, the block will be rejected\n # if any of them rejected it\n # but usually just more than 50% of miners must approve\n print(self.id, \" - the block has been rejected by other miner\")", "async def create_wallet(network):\n\n # ask user for desired M and N\n policy = await choose_policy()\n if policy is None: # user canceled selection\n return\n M, N = policy\n\n # Warn user about private key data on-screen\n if not await sensitive_data_warning():\n return\n\n # roll dice > N times\n rolls = await roll_dice()\n if rolls is None:\n return\n rolls_ints = list(map(int, rolls))\n\n computer_entropy = await get_computer_entropy()\n if computer_entropy is None:\n return\n # get additional entropy from os\n dice_entropy = sha256(bytes(rolls_ints)).digest()\n # xor dice & computer entropy to generate wallet xprv\n combined_entropy = bytes([a ^ b for a, b in zip(computer_entropy, dice_entropy)])\n # generate mnemonic from entropy\n Mnem = Mnemonic()\n mnemonic = Mnem.to_mnemonic(combined_entropy)\n seed = Mnem.to_seed(mnemonic)\n xprv = Mnem.to_hd_master_key(seed, network)\n\n ROLLS_ROWS = len(rolls) // ROLLS_PER_ROW\n rolls_str = \"\"\n for i in range(ROLLS_ROWS + 1):\n rolls_str += format_rolls_row(rolls, i, ROLLS_NUM_COLS, ROLLS_PER_ROW, ROLLS_PER_COL)\n\n msg = f\"\"\"Proof Wallet: Create Wallet\n\nPolicy: {M} of {N} (M of N)\n\nDice rolls\\n\n{rolls_str}\nComputer entropy\n{pprint_entropy(computer_entropy)}\n\nMnemonic\n{display_mnemonic(mnemonic)}\n\nIf this is the first computer in the Proof Wallet protocol, you should \\\nenter the above dice rolls and computer-generated entropy into the second machine \\\nto ensure that the same wallet mnemonic phrase is generated. 
If a different phrase \\\nis generated, you should first try to repeat the process, and if that doesn't work, \\\nabort this process immediately and seek help from a knowledgable party.\n\nIf both mnemonics match, you can proceed to this wallet's menu where you'll be able \\\nto export its extended public key and finalize it by importing cosigner extended public \\\nkeys.\n\nControls\n'x' -- Abort wallet creation process\n[Enter] -- Go to wallet menu\n\"\"\"\n ch = await ux_show_story(msg, ['x', '\\r'])\n if ch == 'x':\n return\n w = Wallet(mnemonic, [], M, N, network)\n WALLETS_GLOBAL.append(w)\n return await wallet_menu(w)", "def new_block(self, proof, previous_hash = None):\n #create a new Block & adds it to the chain.\n \n block = {\n 'index' : len(self.chain) + 1,\n 'timestamp' : time(),\n 'transactions' : self.pending_transactions,\n 'proof' : proof,\n 'previous_hash' : previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.pending_transactions = []\n\n self.chain.append(block)\n return block\n #pass", "def new_block(self, proof, previous_hash=None):\r\n block = {\r\n 'index': len(self.chain) + 1,\r\n 'timestamp': time(),\r\n 'transactions': self.current_transactions,\r\n 'proof': proof,\r\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\r\n }\r\n\r\n # reseta a atual lista de transacoes\r\n self.current_transactions = []\r\n\r\n self.chain.append(block)\r\n return block", "def new_block(self, proof, previous_hash=None):\n \n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n # Add block to existing chain\n self.chain.append(block)\n return block", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions':self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n self.current_transactions = []\n self.chain.append(block)\n return block", "def new_block(self, proof, previous_hash=None):\n\n\t\tblock = {\n\t\t\t'index': len(self.chain) + 1,\n\t\t\t'timestamp': time(),\n\t\t\t'transactions': self.current_transactions,\n\t\t\t'proof': proof,\n\t\t\t'previous_hash': previous_hash or self.hash(self.chain[-1]),\t\t\n\t\t}\n\n\t\t#Reset current list of transactions\n\t\tself.current_transactions = []\n\n\t\tself.chain.append(block)\n\t\treturn block", "def proof_of_work(last_proof):\n incrementor = last_proof + 1\n while not (incrementor % 9 == 0 and incrementor % last_proof == 0):\n incrementor += 1\n\n return incrementor", "def test_proof_of_work_leading_zeros(self):\n proof_assumption = self.blockchain.proof_of_work(self.proof)\n self.assertIsInstance(proof_assumption, int)\n self.assertEqual(proof_assumption, 35293)" ]
[ "0.6767066", "0.66403455", "0.65471375", "0.64007163", "0.6309153", "0.63058364", "0.62753505", "0.6238329", "0.62287176", "0.587455", "0.58743477", "0.58078885", "0.5754991", "0.57458746", "0.5647915", "0.55475503", "0.55412304", "0.5443019", "0.5433007", "0.54295146", "0.5424202", "0.54213655", "0.53924054", "0.53864855", "0.53616637", "0.5355343", "0.5332835", "0.5327399", "0.5314484", "0.53137136" ]
0.7421745
0
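The proof_of_work loop in the record above increments proof until Verification.valid_proof accepts it. A sketch of one common shape for that predicate, assuming a SHA-256 digest with a fixed leading-zero target (the helper name, the serialization of the transfers, and the "00" prefix are assumptions; only the call site appears in the record):

import hashlib

def valid_proof(transfers, last_hash, proof):
    # Hash the open transfers, the previous block hash and the guessed number;
    # accept the guess only when the digest starts with the required prefix.
    guess = (str([t.__dict__ for t in transfers]) + str(last_hash) + str(proof)).encode()
    guess_hash = hashlib.sha256(guess).hexdigest()
    return guess_hash[0:2] == "00"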
Credit points to user. No checks required
def credit_points(self, user, signature, amount=0.0):
    if self.hosting_node == None:
        return False
    transfer = Transfer(user, signature, amount)
    if not Wallet.verify_transfer(transfer):
        return False
    self.__open_transfers.append(transfer)
    # participants.add(user)
    self.save_data()
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def credit(ctx, *args):\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit", "def points(self, event, user):\n if not self.checkPerms(event, \"mod\"):\n return\n event.msg.delete()\n if not str(user) in self.participants.keys():\n message = \"This user has not participated in the event yet.\"\n else:\n message = \"Points so far for {}: {}\\n\"\n\n points = 0\n for rid, report in self.reported_cards.items():\n if str(user) == report[\"author_id\"] and report[\"status\"] != \"Denied\":\n points += self.config.boards[report[\"board\"]][\"points\"]\n event.msg.reply(message.format(self.participants[str(user)], points))", "def Credit(self):\n self.Deposit()\n self.balance += self.amount\n print \"balance credited\"\n print \" Total balance =\",self.balance\n return self.balance", "def credit(self):\n return pn_link_credit(self._impl)", "def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False", "def credits(self):\r\n return credit.Credits(self)", "def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()", "async def admin_credit(self, ctx, target: discord.Member, sum: int = 100):\n if is_registered(target.id):\n \n inventories = get_file(\"inventories\")\n inventories[str(target.id)][\"balance\"] += sum\n update_file(\"inventories\", inventories)\n\n embed = discord.Embed(color=admin_color)\n embed.set_author(name=\"🛠️ Admin\")\n embed.add_field(name=\"💰 Credit\",\n value=f\"{ctx.author.mention}, {target.mention} a été crédité de `{sum}` PO (pièces d'or)\")\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)", "def supply(request, page_name):\n _ = page_name\n _ = request\n\n todays_users = Profile.objects.filter(last_visit_date=datetime.datetime.today())\n\n # Approximate logins by their first points transaction.\n rounds_info = challenge_mgr.get_all_round_info()\n start = rounds_info[\"competition_start\"]\n today = datetime.datetime.today()\n\n users_anno = User.objects.annotate(login_date=Min('pointstransaction__transaction_date'))\n logins = []\n while start <= today:\n result = {}\n result['date'] = start.strftime(\"%m/%d\")\n\n result['logins'] = users_anno.filter(login_date__gte=start,\n login_date__lt=start + datetime.timedelta(days=1)).count()\n logins.append(result)\n start += datetime.timedelta(days=1)\n\n # Find referrals.\n referrals = Profile.objects.filter(referring_user__isnull=False).values(\n 'referring_user__profile__name', 'referring_user__username').annotate(\n referrals=Count('referring_user')\n )\n\n return {\n \"todays_users\": todays_users,\n 'logins': logins,\n \"referrals\": referrals,\n }", "def user_added_credit(self):\n return (self.user.Credit > 0)", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def changeCredit(self,user_ids,credit,changer_admin_name,remote_address,credit_change_comment,loaded_users):\n self.__changeCreditCheckInput(user_ids,credit,changer_admin_name,remote_address,credit_change_comment,loaded_users)\n 
admin_consumed_credit=credit*len(user_ids)\n ibs_query=IBSQuery()\n ibs_query+=admin_main.getActionManager().consumeDeposit(changer_admin_name,admin_consumed_credit)\n try:\n changer_admin_obj=admin_main.getLoader().getAdminByName(changer_admin_name)\n ibs_query+=self.__changeCreditQuery(user_ids,credit)\n ibs_query+=user_main.getCreditChangeLogActions().logCreditChangeQuery(\"CHANGE_CREDIT\",changer_admin_obj.getAdminID(),user_ids,credit,\\\n admin_consumed_credit,remote_address,credit_change_comment)\n\n ibs_query+=ias_main.getActionsManager().logEvent(\"CHANGE_CREDIT\",changer_admin_name,credit,\",\".join(user_ids))\n\n ibs_query.runQuery()\n except:\n admin_main.getActionManager().consumeDeposit(changer_admin_name,-1*admin_consumed_credit,False) #re-add deposit to admin\n raise\n self.broadcastChange(user_ids)", "def charge(self, other):\n self.credit += other\n print(\"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(other,\n self.credit))", "def allowed(self, user, amount):\n return True", "def withdraw_by_username(self,amount,username):\r\n pass", "def grade(self, points):\n credit = -1\n while credit > points or credit < 0:\n try:\n credit = int(input(\"\\nScore out of \" + str(points) + \": \"))\n except:\n credit = -1\n if credit != points:\n self.feedback += \"\\n\\t\" + str(raw_input(\"Describe problem: \"))\n return credit", "def get_user_reward_points(context):\n tries_counter = 0\n succeeded = False\n balance_text_split = []\n while tries_counter < 3 and not succeeded:\n try:\n elem = context.browser.find_element(By.XPATH, \"(//div[@id='reward']/div/table/tbody/tr)[last()]\")\n balance_text_split = elem.text.split(' ')\n succeeded = True\n except Exception:\n tries_counter += 1\n\n if balance_text_split[0] != \"Balance\":\n # No Reward points history for given user -> no rows\n return 0\n # this may hide possible future errors (page layout changes)\n\n points = int(balance_text_split[1])\n print(\"Bob has\", points, \"points\")\n return points", "def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "def userCanAffordItemObj(self, user : bbUser.bbUser, item : bbItem.bbItem) -> bool:\n return user.credits >= item.getValue()", "def __add_credit_cc(self):\n log.debug(\"Displaying __add_credit_cc\")\n # Create a keyboard to be sent later\n presets = self.cfg.ccard[\"payment_presets\"]\n keyboard = [[telegram.KeyboardButton(str(self.Price(preset)))] for preset in presets]\n keyboard.append([telegram.KeyboardButton(self.loc.get(\"menu_all_cancel\"))])\n # Boolean variable to check if the user has cancelled the action\n cancelled = False\n # Loop used to continue asking if there's an error during the input\n while not cancelled:\n # Send the message and the keyboard\n self.bot.send_message(self.chat.id, self.loc.get(\"payment_cc_amount\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait until a valid amount is sent\n selection = self.__wait_for_regex(r\"([0-9]+(?:[.,][0-9]+)?|\" + self.loc.get(\"menu_all_cancel\") + r\")\",\n cancellable=True)\n # If the user cancelled the action\n if isinstance(selection, CancelSignal):\n # Exit the loop\n cancelled = True\n continue\n # Convert the amount to an integer\n value = self.Price(selection)\n # Ensure the amount is within the range\n if value > self.Price(self.cfg.ccard[\"max_amount\"]):\n 
self.bot.send_message(self.chat.id,\n self.loc.get(\"error_payment_amount_over_max\",\n max_amount=self.Price(self.cfg.ccard[\"max_amount\"])))\n continue\n elif value < self.Price(self.cfg.ccard[\"min_amount\"]):\n self.bot.send_message(self.chat.id,\n self.loc.get(\"error_payment_amount_under_min\",\n min_amount=self.Price(self.cfg.ccard[\"min_amount\"])))\n continue\n break\n # If the user cancelled the action...\n else:\n # Exit the function\n return\n # Issue the payment invoice\n self.__make_payment(amount=value)", "def get_credit(self):\n res = self.client.get(\"/v1/credit\")\n\n try:\n return res.data[\"credit\"]\n except:\n raise ValueError(\"returned response not valid\")", "async def balance(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} was not found. \"\n \"Please add them first using points member add\"\n \" <Discord name or nickname>\".format(name.display_name))\n return\n else:\n gain = self.db[server.id][name.id][\"Lifetime Gain\"]\n loss = self.db[server.id][name.id][\"Lifetime Loss\"]\n balance = self.db[server.id][name.id][\"Balance\"]\n await self.bot.say(\"{} has a current balance of {} points. \"\n \"Their lifetime gain is {} and lifetime loss is {}.\"\n .format(name.display_name, balance, gain, loss))", "def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN,\n )", "def credits_purchase(request):\n if request.method==\"POST\":\n \"\"\" POST request can come from card form submit or from initial\n credits amount selection page \"\"\"\n try:\n credits_amount = int(request.POST.get('credits_amount', None))\n except TypeError:\n messages.error(request, \"Amount was invalid\")\n return redirect('add_credits')\n if credits_amount or credits_amount == 0:\n if (credits_amount % 10) is not 0:\n # Credits amount wasn't a multiple of 10, so user bypassed\n # JavaScript form validation\n messages.error(\n request, \n \"Credits can only be added in multiples of 10\"\n )\n return redirect('add_credits')\n credits_cost = \\\n settings.COST_PER_TEN_CREDITS * (credits_amount / 10)\n if \"stripeToken\" in request.POST:\n # POST request came from card form submit\n try:\n customer = stripe.Charge.create(\n amount = int(credits_cost*100),\n currency = \"EUR\",\n description = request.user.email,\n source = request.POST['stripeToken'],\n )\n except stripe.error.CardError:\n messages.error(request, \"Your card was declined!\")\n return redirect('credits_purchase')\n if customer.paid:\n # All is good, so add the chosen amount of credits\n profile = request.user.profile\n profile.credits += credits_amount\n profile.save()\n return render(request, 'credits_added.html')\n else:\n messages.error(request, \"Unable to take payment\")\n return redirect('credits_purchase')\n else:\n \"\"\" POST request came from initial credits selection page\n so now render Stripe card form \"\"\"\n return render(request, \"credits_checkout.html\", {\n 'publishable': settings.STRIPE_PUBLISHABLE,\n 'ten_credit_cost': 
settings.COST_PER_TEN_CREDITS,\n 'credits_amount': credits_amount,\n 'total': credits_cost,\n })\n else:\n messages.error(request, \"No amounts of credits selected\")\n return redirect('add_credits')\n else:\n return redirect('add_credits')", "def charge(self, other):\n if self.flag:\n self.credit += other\n return \"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(\n other, self.credit)\n else:\n return \"Sorry, your card has expired.\"", "def set_user(self, name, balance, credit):\n if balance >= 0:\n text = u\"%s (Balance: \\xA3%.2f\" % (name, balance / 100)\n else:\n text = u\"%s (Balance: -\\xA3%.2f\" % (name, -balance / 100)\n\n if credit > 0:\n text += u\", Pending Credit: \\xA3%.2f\" % (credit / 100)\n\n text += \")\"\n\n self.objects[self.ids.TOPBAR].setText(text)", "async def credits(self, ctx: commands.Context):\r\n await ctx.send(embed=CREDITS_EMBED)", "def userBuyModuleObj(self, user : bbUser.bbUser, requestedModule : bbModule.bbModule):\n if self.userCanAffordItemObj(user, requestedModule):\n self.modulesStock.removeItem(requestedModule)\n user.credits -= requestedModule.getValue()\n user.inactiveShips.addItem(requestedModule)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy module \" + requestedModule.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedModule.getValue()))", "def CashMode(self):\n self.cred_left = 0\n self.is_member = False\n self.cred_id = ''\n self.cred_card = ''\n self.builder.get_object('GuiMode').set_label(\"Payment in Cash\")", "def __repr__(self):\n return \"<Credit({0} {1})>\".format(self.chore_id, self.owner_id)" ]
[ "0.67803556", "0.6295979", "0.6186931", "0.613697", "0.6093839", "0.60716486", "0.60704947", "0.6043647", "0.6004744", "0.59940994", "0.5894286", "0.578384", "0.57367456", "0.57338923", "0.57108366", "0.56967866", "0.5689059", "0.5688745", "0.5649226", "0.5633477", "0.56283885", "0.56060493", "0.55997866", "0.5573243", "0.5567039", "0.5566997", "0.5521552", "0.5512972", "0.55098146", "0.54939854" ]
0.6380664
1
Debit points from user. Need to verify sufficient points.
def debit_points(self, user, signature, amount=0.0):
    if self.hosting_node == None:
        return False
    transfer = Transfer(user, signature, amount)
    if Verification.verify_single_transfer(transfer, self.get_balance):
        self.__open_transfers.append(transfer)
        # participants.add(user)
        self.save_data()
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def debit(ctx, *args):\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n debit = 0\n for arg in args:\n try:\n debit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] -= debit\n else:\n bals[user.id] = -debit", "async def credit(ctx, *args):\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit", "def deposit(account, amount):\n pass", "def debitMoney(user_id, expense_group_id, cursor):\n query = \"\"\"\n SELECT a.user_id, SUM(a.amount) as amount\n FROM accured_expenses AS a, expense AS e\n WHERE a.expense_id = e.id AND e.user_id = ? AND e.expense_group_id = ? AND a.paid = 0\n GROUP BY a.user_id \n \"\"\"\n cursor.execute(query, (user_id, expense_group_id))\n return cursor.fetchall()", "def raise_bet(self, points):\n return self.call(points=points)\n # self.points -= points\n # self.bet += points\n # return points", "def deposit_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.deposit_money(credentials)\n start_again() if result else BankOperationsUi.deposit_money()", "def testDebitTender(self):\n self.setupTransaction()\n if not checkout.pay_card(card_name='Debit'):\n tc_fail(\"Failed to pay with debit tender\")\n # This is an attempt to prevent PHYK-85 from happening\n self.setupTransaction()\n if not checkout.pay_card(card_name='Debit'):\n tc_fail(\"Failed to pay with debit tender\")\n self.handleMessages()", "def points(self, event, user):\n if not self.checkPerms(event, \"mod\"):\n return\n event.msg.delete()\n if not str(user) in self.participants.keys():\n message = \"This user has not participated in the event yet.\"\n else:\n message = \"Points so far for {}: {}\\n\"\n\n points = 0\n for rid, report in self.reported_cards.items():\n if str(user) == report[\"author_id\"] and report[\"status\"] != \"Denied\":\n points += self.config.boards[report[\"board\"]][\"points\"]\n event.msg.reply(message.format(self.participants[str(user)], points))", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: ', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def lose_point(self, points):\n self.points -= points\n print(f\"Oh no! You have lost {points} point(s)! 
That means you now have {self.points} points!\")", "def debit(self):\n debit = 0 #variable to track the remaining debit\n debit = self.total_purchase() - self.total_clearance()\n return debit", "def withdraw(account, amount):\n pass", "async def balance(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} was not found. \"\n \"Please add them first using points member add\"\n \" <Discord name or nickname>\".format(name.display_name))\n return\n else:\n gain = self.db[server.id][name.id][\"Lifetime Gain\"]\n loss = self.db[server.id][name.id][\"Lifetime Loss\"]\n balance = self.db[server.id][name.id][\"Balance\"]\n await self.bot.say(\"{} has a current balance of {} points. \"\n \"Their lifetime gain is {} and lifetime loss is {}.\"\n .format(name.display_name, balance, gain, loss))", "def call(self, points, subpot=None):\n points = int(points)\n if points >= self.points:\n points = self.points\n self.all_in = True\n self.active = False\n self.points -= points\n self.bet += points\n return points", "def get_user_reward_points(context):\n tries_counter = 0\n succeeded = False\n balance_text_split = []\n while tries_counter < 3 and not succeeded:\n try:\n elem = context.browser.find_element(By.XPATH, \"(//div[@id='reward']/div/table/tbody/tr)[last()]\")\n balance_text_split = elem.text.split(' ')\n succeeded = True\n except Exception:\n tries_counter += 1\n\n if balance_text_split[0] != \"Balance\":\n # No Reward points history for given user -> no rows\n return 0\n # this may hide possible future errors (page layout changes)\n\n points = int(balance_text_split[1])\n print(\"Bob has\", points, \"points\")\n return points", "def withdraw_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.withdraw_money(credentials)\n start_again() if result else BankOperationsUi.withdraw_money()", "def execute_deposits(self):\n deposits = [v for v in self.action_register if v['action'] == 'deposit']\n for deposit in deposits:\n self.model.schedule.agents_by_type['Customer'][deposit['unique_id']].euro_wallet += deposit['value']", "def deposit(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(amount)\n else:\n print('deposit error')", "def free_bet(inputs):\n print 'Free bet:'\n free_value = inputs['bet_value']\n free_odds = inputs['bet_odds']\n lay_odds = inputs['lay_odds']\n commission_per_cent = inputs['commission_per_cent']\n # With inputs in place calculate bet\n commission = 0.01 * commission_per_cent\n lay_stake = free_value * (free_odds - 1) / (lay_odds - commission)\n # Calculate profit in both cases (free back wins and free back loses)\n profit_free_wins = free_value * (free_odds - 1) \\\n - lay_stake * (lay_odds - 1)\n profit_lay_wins = lay_stake * (1 - commission)\n # Also calculate free SNR value\n free_SNR = profit_free_wins / free_value\n free_SNR_per_cent = free_SNR * 100\n # Print calculated outputs\n print 'Profit if *back* wins: GBP', round(profit_free_wins,2)\n print 'Profit if *lay* wins: GBP', round(profit_lay_wins,2) \n print 'Free value %: ', round(free_SNR_per_cent,1), '%'\n print 'Lay required: GBP', round(lay_stake,2)", "def deposit(self, 
amount):\n message = self.account.deposit(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"", "def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. Good luck, player!\\n\")\n time.sleep(1)", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def deposit():\n\n if request.method == \"POST\":\n if not request.form.get(\"deposit\"):\n return apology(\"Must enter amount to deposit\")\n\n deposit = request.form.get(\"deposit\")\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n cash = entry[0]['cash'] + float(deposit)\n\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash, id=session['user_id'])\n\n return redirect(url_for(\"index\"))\n\n else:\n return render_template(\"deposit.html\")", "def draw_money(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to withdraw:\"))\n for i in range(0, len(MY_MEMBER)):\n if MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n if MY_MEMBER[i].balance >= amount:\n MY_MEMBER[i].balance -= amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Withdrawing Cash*****\"\n print\"your New Bank balance: %r\" % new_balance\n print\"Amount Withdraw: %r\" % amount\n print\"*************************\"\n\n else:\n print\"your Account Balance is low!! 
\"\n print\"Transaction Failed...\"\n what_to_do(name, bank_id, password)\n return\n what_to_do(name, bank_id, password)", "def grade(self, points):\n credit = -1\n while credit > points or credit < 0:\n try:\n credit = int(input(\"\\nScore out of \" + str(points) + \": \"))\n except:\n credit = -1\n if credit != points:\n self.feedback += \"\\n\\t\" + str(raw_input(\"Describe problem: \"))\n return credit", "def deposit(self, deposit_money):\r\n self.balance += deposit_money", "def landlord_button_deposite_pay(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'supplier',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'outbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }", "def confirm_deposit(self, before, after, params):\n digg = self.manager.badger.digg.token\n\n sharesTransferred = after.get(\"sett.shares\") - before.get(\"sett.shares\")\n sharesTransferredScaled = digg.sharesToScaledShares(sharesTransferred)\n\n totalSupply = before.get(\n \"sett.totalSupply\"\n ) # bDIGG is already at 18 decimal scale\n if totalSupply == 0:\n expected_shares = sharesTransferredScaled\n else:\n poolBefore = before.get(\"sett.shares\")\n poolBeforeScaled = digg.sharesToScaledShares(poolBefore)\n expected_shares = (sharesTransferredScaled * totalSupply) / poolBeforeScaled\n\n params[\"expected_shares\"] = expected_shares\n\n # We need to pass in expected_shares to the core resolver so we call the\n # super method down here.\n super().confirm_deposit(before, after, params)", "def deposits_limit(self):\n limits = self.user.limits\n value = 0\n if limits.exists():\n value = self.user.limits.get(type=Limit.DEPOSIT).value\n return value" ]
[ "0.7162027", "0.6481338", "0.62580574", "0.6227215", "0.62052536", "0.62043357", "0.61842215", "0.6171607", "0.60994", "0.6087336", "0.5968105", "0.59115344", "0.58873826", "0.58712614", "0.5836311", "0.57992375", "0.5780527", "0.5765384", "0.57358664", "0.56935155", "0.568388", "0.5680038", "0.5670659", "0.56502575", "0.5639156", "0.5625422", "0.561658", "0.56072843", "0.5605228", "0.55911803" ]
0.6702249
1
Return a list of all connected peer nodes.
def get_peer_nodes(self): return list(self.__peer_nodes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peer_list_all(self):\n return self.client.call('GET', self.name + 'peer-list/all')", "def peer_list_reachable(self):\n return self.client.call('GET', self.name + 'peer-list/reachable')", "def list_nodes(self):\n return self.ironic_client.node.list()", "def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes", "def getNodes(self):\n return self.__allNodes", "def get_node_list(self):\n return [[node] for node in self.graph.nodes]", "def get_nodes(self):\n\n return list(self.graph.nodes)", "def get_all_nodes(self):\n return self._get_all_nodes()", "def known_nodes(self) -> List[Client]:\n return list(self.in_memory_client_registry.values())", "def list_nodes(self):\n return self.datanodes.keys()", "def list_connections(self):\n return self.network.list_connections()", "def nodes(self):\n return self.graph.nodes", "def all_nodes(self):\n nodes = []\n for layer in self.layers:\n nodes += layer.nodes\n return nodes", "def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def get_all_connected_nodes(self, where_to=OUTGOING):\n\n list_of_all_nodes = []\n\n if not self._directed or where_to == Vertex.OUTGOING:\n for edge in self._outgoing:\n list_of_all_nodes.append(edge.return_other_side(self))\n elif where_to == Vertex.INCOMING:\n for edge in self._incoming:\n list_of_all_nodes.append(edge.return_other_side(self))\n\n return list_of_all_nodes", "def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers", "def peers(self):\n\n peers_data = ''\n for peer in getattr(self._peer, 'peers', []):\n peers_data += peer.config().remote_config\n return peers_data", "def getConnectedPeers(self, peerType):\r\n raise NotImplementedError()", "def nodes(self):\n return list(self.__graph.keys())", "def get_nodes(self):\n try:\n return list(self._adjacency_list.keys())\n except Exception as error:\n print(f'An error occurred: {error}')", "def get_peers_in_established(self):\n est_peers = []\n for peer in self._peers.values():\n if peer.in_established:\n est_peers.append(peer)\n return est_peers", "def connected_components(self) -> List[list]:\n self.reset_tags()\n ans = []\n visited = dict() # A dictionary of visited nodes\n\n for key in self._graph.get_all_v():\n if not visited.get(key):\n path = self.connected_component(key)\n for node in path:\n visited.__setitem__(node.key, True)\n ans.append(path)\n return ans", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. 
if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def connected_components(self) -> List[list]:\n self.__set_all_nodes_unvisited()\n res = self.__tarjan()\n # res.reverse()\n return res", "def peers():\n return flask.jsonify(api_utils.get_peer_conf_and_state())", "def nodes(self):\n\n return list(set(self._graph.keys() + [x for x in itertools.chain.from_iterable(self._graph.values())]))", "def get_all_resources(self) -> typing.List:\n\n session = self.session()\n\n try:\n available_peers = session\\\n .query(\n ResourceTable.peerIp,\n ResourceTable.peerPort,\n ResourceTable.resourcePath,\n ResourceTable.resourceName,\n ResourceTable.resourceHash\n )\\\n .group_by(ResourceTable.peerId, ResourceTable.resourceHash)\\\n .all()\n\n return available_peers\n\n finally:\n session.close()", "def get_nodes():\n with session_for_read() as session:\n res = session.query(\n model.Node\n ).order_by(\n model.Node.started_at.desc()\n )\n return [model.Node(uuid=entry.uuid, version_id=entry.version_id,\n state=entry.state, started_at=entry.started_at,\n finished_at=entry.finished_at, error=entry.error,\n manage_boot=entry.manage_boot)\n for entry in res.all()]", "def get_nodes(self):\n\n return self._nodes" ]
[ "0.7392017", "0.7002911", "0.69784385", "0.69227666", "0.6882579", "0.6802107", "0.6790903", "0.67703485", "0.6725873", "0.6651802", "0.66069186", "0.6584915", "0.6584334", "0.6553957", "0.6539939", "0.6539486", "0.6532221", "0.65297973", "0.6525269", "0.6498614", "0.6494739", "0.64845675", "0.64745325", "0.6450641", "0.6446141", "0.64385927", "0.6436919", "0.6435536", "0.6430818", "0.6429988" ]
0.849466
1
PWM signal generator with direction signal for DC motors. The generated PWM frequency is approximately 25 KHz (25 MHz / 1024). The duty cycle can be fully controlled via a 11bit speed input. pwm Output PWM signal dir Output direction signal en_n Active low output enable signal clk25 25 MHz clock input speed 11bit signed speed value in clock ticks rst_n Active low reset input (resets internal counter when active). Use the speed input to reset the speed of the motor. optocoupled Set to True if outputs should be inverted to account for optocouplers.
def MotorDriver(pwm, dir, en_n, clk25, speed, rst_n, optocoupled): assert speed.min >= -2**10 and speed.max <= 2**10, 'wrong speed constraints' # account for optocouplers LOW_OPTO = LOW if not optocoupled else HIGH HIGH_OPTO = HIGH if not optocoupled else LOW CNT_MAX = 2**10 - 1; @instance def DriveMotor(): """ Generate PWM, dir and brake signals for motor """ # cnt overflows at 25KHz (approximately) cnt = intbv(0, min = 0, max = CNT_MAX + 1) # 10-bit duty cycle duty_cycle = intbv(0)[10:] while True: yield clk25.posedge, rst_n.negedge if rst_n == LOW: cnt[:] = 0 duty_cycle[:] = 0 dir.next = HIGH_OPTO pwm.next = LOW_OPTO en_n.next = LOW_OPTO else: # accept new consign at the beginning of a period if cnt == 0: # extract duty cycle and direction if speed >= 0: duty_cycle[:] = speed dir.next = HIGH_OPTO elif -speed >= CNT_MAX: # handle -1024 case duty_cycle[:] = CNT_MAX dir.next = LOW_OPTO else: duty_cycle[:] = -speed dir.next = LOW_OPTO # reached consign? if cnt >= duty_cycle: pwm.next = LOW_OPTO else: pwm.next = HIGH_OPTO if cnt == CNT_MAX: cnt[:] = 0 else: cnt += 1 en_n.next = LOW_OPTO return instances()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def duty_cycle(self):\n pwm = self._pca.pwm_regs[self._index]\n if pwm[0] == 0x1000:\n return 0xffff\n return pwm[1] << 4", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 
's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def pwm_loop(self, off_sleep = 0.1, Hz = 500, on_sleep = 0.01, signal_t = 300, dc = 50):\n if hasattr(self.p, 'start') == False:\n self.p = GPIO.PWM(self.LED, Hz)\n else:\n self.p.ChangeFrequency(Hz)\n self.p.start(0)\n t = timeit.default_timer()\n nt = t\n self.cycle_t = on_sleep + off_sleep\n while nt - t < signal_t:\n time.sleep(off_sleep)\n self.p.ChangeDutyCycle(dc)\n time.sleep(on_sleep)\n self.p.ChangeDutyCycle(0)\n nt = timeit.default_timer()\n\n self.p.stop()", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def goal_pwm(self, value):\n self._write(MX_GOAL_PWM, value)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def setWheelsSpeed(self, dc_motor_speed):\n self.dcmotorSpeed = dc_motor_speed # changed rightSpeed to dcmotorSpeed and right to\n self.updatePWM()", "def goal_pwm(self):\n return self._read(MX_GOAL_PWM)", "def set_PWM_dutycycle(user_gpio, dutycycle):\n return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle))", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def set_pwm(self, duty_cycle):\n PWM.set_duty_cycle(self.pwm_pin, duty_cycle)", "def pwm(self):\n return self._pwm", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n 
MuleBot.dcMotorPWMDurationRight = pwmDuration", "def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def drive(self, speed=300):\n\t\tself.direction = self.find_direction()\n\t\tio.set_bit(OUTPUT.MOTORDIR, self.direction)\n\t\tio.write_analog(OUTPUT.MOTOR, 2048+4*abs(config.SPEED))\n\t\tself.moving = True", "def set_pwm(self, channel, on, off):\n self.i2cBus.write_byte_data(self.address, LED0_ON_L + 4 * channel, on & 0xFF)\n self.i2cBus.write_byte_data(self.address, LED0_ON_H + 4 * channel, on >> 8)\n self.i2cBus.write_byte_data(self.address, LED0_OFF_L + 4 * channel, int(off) & 0xFF)\n self.i2cBus.write_byte_data(self.address, LED0_OFF_H + 4 * channel, int(off) >> 8)", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n 
servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def initPWM(tmr_channel, divisorCode, firstPart, secondPart, invert):\n divisorCode += 8\n makeTMRpin(tmr_channel)\n writeTMR(tmr_channel, TMR_CTRL, 0x0000)\n writeTMR(tmr_channel, TMR_LOAD, 0x0000)\n writeTMR(tmr_channel, TMR_CNTR, 0x0000)\n if invert:\n sctrl = 0x0007\n else:\n sctrl = 0x0005\n writeTMR(tmr_channel, TMR_SCTRL, sctrl)\n writeTMR(tmr_channel, TMR_CSCTRL, 0x0009)\n writeTMR(tmr_channel, TMR_COMP1, firstPart)\n writeTMR(tmr_channel, TMR_CMPLD1, firstPart)\n writeTMR(tmr_channel, TMR_COMP2, secondPart)\n writeTMR(tmr_channel, TMR_CMPLD2, secondPart)\n ctrl = 0x2024 | (divisorCode << 9)\n writeTMR(tmr_channel, TMR_CTRL, ctrl)", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def pwm_freq(self):\r\n return self._pwm_freq", "def pdm(times, signal,f0=None,fn=None,df=None,Nbin=5,Ncover=2,\n D=0,forbit=None,asini=None,e=None,omega=None,nmax=10):\n T = times.ptp()\n n = len(times)\n \n #-- initialize variables\n xvar = signal.std()**2.\n xx = (n-1) * xvar\n nf = int((fn-f0) / df + 0.001) + 1\n f1 = np.zeros(nf,'d')\n s1 = np.zeros(nf,'d')\n \n #-- use Fortran subroutine\n #-- Normal PDM\n if D is None and asini is None:\n f1, s1 = pyscargle.justel(signal,times,f0,df,Nbin,Ncover,xvar,xx,f1,s1,n,nf)\n #-- PDM with linear frequency shift\n elif asini is None:\n f1, s1 = pyscargle.justel2(signal,times,f0,df,Nbin,Ncover,xvar,xx,D,f1,s1,n,nf)\n #-- PDM with circular binary orbit\n elif asini is not None and (e is None or e==0):\n f1, s1 = pyscargle.justel3(signal,times,f0,df,Nbin,Ncover,xvar,xx,asini,\n forbit,f1,s1,n,nf)\n #-- PDM with eccentric binary orbit\n elif e>0:\n forbit = 2*pi*forbit\n ans,bns = np.array([[__ane__(n,e),__bne__(n,e)] for n in range(1,nmax+1)]).T\n ksins = np.sqrt(ans**2*np.cos(omega)**2+bns**2*np.sin(omega)**2)\n thns = np.arctan(bns/ans*np.tan(omega))\n tau = -np.sum(bns*np.sin(omega))\n f1, s1 = pyscargle.justel4(signal,times,f0,df,Nbin,Ncover,xvar,xx,asini,\n forbit,e,omega,ksins,thns,tau,f1,s1,n,nf,nmax)\n \n \n #-- it is possible that the first computed value is a none-variable\n if not s1[0]: s1[0] = 1. 
\n \n return f1, s1", "def clockwise_rotate(self, speed):\n\t\tif self._last_dir != 'c': # \"c\" indicates that the last rotation of this wheel was clockwise.\n\t\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\t\tGPIO.output(self._dir_pin_2, GPIO.LOW)\n\t\t\tself._last_dir = 'c'\n\n\t\tself._current_dc_val = speed\n\t\tif self._current_dc_val != self._last_dc_val:\n\t\t\tself._motor_pwm.ChangeDutyCycle(speed) # 0.0 - 100.0\n\t\t\tself._last_dc_val = self._current_dc_val" ]
[ "0.72991186", "0.59104633", "0.59067416", "0.5808709", "0.57710034", "0.55783296", "0.5572987", "0.556341", "0.55390674", "0.5538115", "0.55178213", "0.54046714", "0.53235024", "0.52748865", "0.5251518", "0.5233242", "0.52320105", "0.5143564", "0.5135276", "0.51250744", "0.5122773", "0.5069058", "0.50021666", "0.4979485", "0.49595463", "0.49563202", "0.49500108", "0.49456105", "0.49376827", "0.49207234" ]
0.67168146
1
Generate PWM, dir and brake signals for motor
def DriveMotor(): # cnt overflows at 25KHz (approximately) cnt = intbv(0, min = 0, max = CNT_MAX + 1) # 10-bit duty cycle duty_cycle = intbv(0)[10:] while True: yield clk25.posedge, rst_n.negedge if rst_n == LOW: cnt[:] = 0 duty_cycle[:] = 0 dir.next = HIGH_OPTO pwm.next = LOW_OPTO en_n.next = LOW_OPTO else: # accept new consign at the beginning of a period if cnt == 0: # extract duty cycle and direction if speed >= 0: duty_cycle[:] = speed dir.next = HIGH_OPTO elif -speed >= CNT_MAX: # handle -1024 case duty_cycle[:] = CNT_MAX dir.next = LOW_OPTO else: duty_cycle[:] = -speed dir.next = LOW_OPTO # reached consign? if cnt >= duty_cycle: pwm.next = LOW_OPTO else: pwm.next = HIGH_OPTO if cnt == CNT_MAX: cnt[:] = 0 else: cnt += 1 en_n.next = LOW_OPTO
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def MotorDriver(pwm, dir, en_n, clk25, speed, rst_n, optocoupled):\n\n assert speed.min >= -2**10 and speed.max <= 2**10, 'wrong speed constraints'\n\n # account for optocouplers\n LOW_OPTO = LOW if not optocoupled else HIGH\n HIGH_OPTO = HIGH if not optocoupled else LOW\n\n CNT_MAX = 2**10 - 1;\n\n @instance\n def DriveMotor():\n \"\"\" Generate PWM, dir and brake signals for motor \"\"\"\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO\n\n return instances()", "def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # 
Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def motor_rotate_deg(power,deg,port,sampling_time=.01,delay_when_stopping=.05): \n debug = False\n num_motor=len(power) #Number of motors being used\n #print num_motor\n init_val=[0]*num_motor\n curr_val=[0]*num_motor\n final_val=[0]*num_motor\n last_encod=[0]*num_motor\n \n delta=0\n gain=0.005\n idelta=0.0\n alpha=10\n smulti=0\n BrickPiUpdateValues()\n for i in range(num_motor):\n BrickPi.MotorEnable[port[i]] = 1 #Enable the Motors\n power[i]=abs(power[i])\n \n init_val[i]=BrickPi.Encoder[port[i]] #Initial reading of the encoder \n \n final_val[i]=init_val[i]+(deg[i]*2) #Final value when the motor has to be stopped;One encoder value counts for 0.5 degrees\n \n #For running clockwise and anticlockwise\n if deg[i]>0:\n BrickPi.MotorSpeed[port[i]] = power[i]\n elif deg[i]<0:\n BrickPi.MotorSpeed[port[i]] = -power[i]\n else:\n BrickPi.MotorSpeed[port[i]] = 0\n \n \n run_stat=[0]*num_motor\n\n time_start = time.time()\n time_end = time.time()\n time_total = time_end - time_start\n \n while 
True:\n time_end = time.time()\n time_total = time_end - time_start\n if time_total >= ROTATE_DEG_TIMEOUT:\n break\n \n result = BrickPiUpdateValues() #Ask BrickPi to update values for sensors/motors\n time.sleep(sampling_time) #sleep for the sampling time given (default:10 ms)\n i = 0\n #if debug:\n #print \"Result of Update Values: \" + `result`\n if not result :\n for i in range(num_motor): #Do for each of the motors\n #The FIRST thing we should do is check our encoders!\n curr_val[i]=BrickPi.Encoder[port[i]]\n if debug :\n print \"Motor \" + `i` + \" encoder: \" + `curr_val[i]`\n \n if run_stat[i]==1:\n continue\n # Check if final value reached for each of the motors\n if(deg[i]>0 and final_val[i]<=curr_val[i]) or (deg[i]<0 and final_val[i]>=curr_val[i]) :\n #This motor has reached its goal\n run_stat[i]=1\n \n #Now let's hit the breaks by going in reverse for a VERY quick amount of time.\n if deg[i]>0:\n BrickPi.MotorSpeed[port[i]] = -power[i]\n elif deg[i]<0:\n BrickPi.MotorSpeed[port[i]] = power[i]\n else:\n BrickPi.MotorSpeed[port[i]] = 0 \n BrickPiUpdateValues()\n time.sleep(delay_when_stopping)\n #Now let's turn the motor off all together\n BrickPi.MotorEnable[port[i]] = 0\n BrickPiUpdateValues()\n \n if(all(e==1 for e in run_stat)): #If all the motors have already completed their rotation, then stop\n break\n \n #Let's use Proportional Integral Control on the Motors to keep them in Sync\n if i == 1 :\n if curr_val[0] <> 0 and curr_val[1] <>0 : \n if last_encod[0]<>0 and last_encod[1] <>1 :\n if abs(last_encod[0] - init_val[0]) < abs(last_encod[1] - init_val[1]) :\n #Motor 1 is going faster\n delta = abs(curr_val[1]-last_encod[1]) - abs(curr_val[0]-last_encod[0])\n idelta = (abs(curr_val[1]-init_val[1]) - abs(curr_val[0]-init_val[0]))/alpha\n if debug:\n print \"Motor 1 is faster by \" + `delta`\n print \"last_encod = \" + `last_encod[0]` + \" , \" + `last_encod[1]`\n print \"idelta = \" + `idelta`\n print \"Current Encode = \" + `curr_val[0]` + \" , \" + `curr_val[1]`\n\n if int(abs(BrickPi.MotorSpeed[port[0]])) == 255 :\n #Motor 0 CANNOT be sped up\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to slow down Motor 1\n if int(abs(BrickPi.MotorSpeed[port[1]]-smulti)) <= 255 : \n #Target speed is inside the bounds of Motor speed\n BrickPi.MotorSpeed[port[1]] = int (BrickPi.MotorSpeed[port[1]]-smulti)\n elif int (BrickPi.MotorSpeed[port[1]]-smulti) < 0 :\n #Target speed is outside the bounds of -255 to 255\n BrickPi.MotorSpeed[port[1]] = -255\n else :\n BrickPi.MotorSpeed[port[1]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 1 speed : \" + `BrickPi.MotorSpeed[port[1]]`\n print \"Speed Multiplier : \" + `smulti`\n\n else :\n #Motor 0 CAN be sped up\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to speed up Motor 0\n if int(abs(BrickPi.MotorSpeed[port[0]]+smulti)) <= 255 : \n #Target speed is inside the bounds of Motor speed\n BrickPi.MotorSpeed[port[0]] = int (BrickPi.MotorSpeed[port[0]]+smulti)\n elif int (BrickPi.MotorSpeed[port[0]]+smulti) < 0 :\n #Target speed is outside the bounds of -255 to 255\n BrickPi.MotorSpeed[port[0]] = -255 \n else :\n BrickPi.MotorSpeed[port[0]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 0 speed : \" + `BrickPi.MotorSpeed[port[0]]`\n print \"Speed Multiplier : \" + `smulti`\n\n\n elif (last_encod[0] - curr_val[0]) > abs(last_encod[1] - curr_val[1]) :\n #Motor 0 is going faster\n delta= abs(curr_val[0]-last_encod[0])- 
abs(curr_val[1]-last_encod[1]) \n idelta = (abs(curr_val[0]-init_val[0]) - abs(curr_val[1]-init_val[1]))/alpha\n if debug :\n print \"Motor 0 is faster by \" + `delta`\n print \"last_encod = \" + `last_encod[0]` + \" , \" + `last_encod[1]`\n print \"idelta = \" + `idelta`\n print \"Current Encode = \" + `curr_val[0]` + \" , \" + `curr_val[1]`\n\n if abs(BrickPi.MotorSpeed[port[1]]) == 255 :\n #Motor 1 CANNOT be sped up, SLOW DOWN Motor 0\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to slow down Motor 0\n if int(abs(BrickPi.MotorSpeed[port[0]]-smulti)) <= 255 :\n #Target speed is inside the bounds of Motor\n BrickPi.MotorSpeed[port[0]] = int (BrickPi.MotorSpeed[port[0]]-smulti)\n elif int (BrickPi.MotorSpeed[port[0]]-smulti) < 0 :\n #Target speed is outside the -255 to 255 bounds\n BrickPi.MotorSpeed[port[0]] = -255\n else : \n BrickPi.MotorSpeed[port[0]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 0 speed : \" + `BrickPi.MotorSpeed[port[0]]`\n print \"Speed Multiplier : \" + `smulti`\n\n else :\n #Motor 1 CAN be sped up SPEED UP Motor 1\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to speed up Motor 1\n if int(abs (BrickPi.MotorSpeed[port[1]]+smulti)) <= 255 :\n #Target speed is inside the bounds of Motor\n BrickPi.MotorSpeed[port[1]] = int (BrickPi.MotorSpeed[port[1]]+smulti)\n elif int (BrickPi.MotorSpeed[port[1]]+smulti) < 0 :\n #Target speed is outside the -255 to 255 bounds\n BrickPi.MotorSpeed[port[1]] = -255\n else :\n BrickPi.MotorSpeed[port[1]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 1 speed : \" + `BrickPi.MotorSpeed[port[1]]`\n print \"Speed Multiplier : \" + `smulti`\n \n last_encod[0] = curr_val[0]\n last_encod[1] = curr_val[1]\n BrickPi.MotorEnable[MOTOR1] = 1\n BrickPi.MotorEnable[MOTOR2] = 1\n return 0", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def goal_pwm(self, value):\n self._write(MX_GOAL_PWM, value)", "def at_pwm(seq, m1, m2, m3, m4):\n # FIXME: what type do mx have?\n raise NotImplementedError()", "def pwm(self):\n\n ret = []\n for _ in range(self._count_dist_fn()):\n ret.append(self._pwm.pwm())\n return numpy.hstack(ret)", "def main():\n\n # Center positions when joystick is at rest\n center_x_pos = 530\n center_y_pos = 504\n\n 
GPIO.setmode(GPIO.BOARD)\n GPIO.setup([red_led, green_led, blue_led], GPIO.OUT, initial=GPIO.LOW)\n\n pwm_r = GPIO.PWM(red_led, 300)\n pwm_g = GPIO.PWM(green_led, 300)\n pwm_b = GPIO.PWM(blue_led, 300)\n\n pwm_instances = [pwm_r, pwm_g, pwm_b]\n\n for p in pwm_instances:\n p.start(0)\n\n try:\n while True:\n # If joystick switch is pressed down, turn off LEDs\n switch = read_spi_data_channel(mcp3008_switch_channel)\n if switch == 0:\n for p in pwm_instances:\n p.ChangeDutyCycle(0)\n continue\n\n # Read the joystick position data\n x_pos = read_spi_data_channel(mcp3008_x_voltage_channel)\n y_pos = read_spi_data_channel(mcp3008_y_voltage_channel)\n\n # If joystick is at rest in center, turn on all LEDs at max\n if is_joystick_near_center(x_pos, y_pos, center_x_pos, center_y_pos):\n for p in pwm_instances:\n p.ChangeDutyCycle(100)\n continue\n\n # Adjust duty cycle of LEDs based on joystick position\n angle = convert_coordinates_to_angle(x_pos, y_pos, center_x_pos, center_y_pos)\n pwm_r.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'R'))\n pwm_g.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'G'))\n pwm_b.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'B'))\n\n # print(\"Position : ({},{}) -- Angle : {}\".format(x_pos, y_pos, round(angle, 2)))\n\n except KeyboardInterrupt:\n pass\n\n finally:\n for p in pwm_instances:\n p.stop()\n spi.close()\n GPIO.cleanup()", "def goal_pwm(self):\n return self._read(MX_GOAL_PWM)", "def __init__(self, channel):\n self.servo = wpilib.PWM(channel)\n self.close_value = 0\n #self.setBounds(1.0, 1.48, 1.5, 1.52, 2.0)\n self.setBounds(2.0, 1.65, 1.5, 1.35, 1.0)", "def pwm(self):\n pwm_tot = []\n i = 0\n for x in self._motifs:\n pwm_tot.append(x.pwm())\n i += 1\n\n return numpy.hstack(pwm_tot)", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def pwm(self):\n\n pwm_tot = []\n for x in self._motifs:\n pwm_tot.append(x.pwm())\n\n return numpy.hstack(pwm_tot)", "def set_pwm(self, channel, on, off):\n self.i2cBus.write_byte_data(self.address, LED0_ON_L + 4 * channel, on & 0xFF)\n self.i2cBus.write_byte_data(self.address, LED0_ON_H + 4 * channel, on >> 8)\n self.i2cBus.write_byte_data(self.address, LED0_OFF_L + 4 * channel, int(off) & 0xFF)\n self.i2cBus.write_byte_data(self.address, LED0_OFF_H + 4 * channel, int(off) >> 8)", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n 
servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def generate(self): \r\n \r\n self.pfn={} # phase joint functions \r\n self.afn={} # anti phase joint functions\r\n\r\n ## Foot and hip -> Lateral motion\r\n foot_func=SinusoidFunction()\r\n foot_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n foot_func.amplitude= self.parameters[\"foot_amplitude\"]\r\n foot_func.amplitude_offset= self.parameters[\"foot_amplitude_offset\"]\r\n foot_func.phase_offset= self.parameters[\"foot_phase_offset\"]\r\n self.pfn[\"l_foot_joint\"]=foot_func \r\n foot_func_af=foot_func.mirror()\r\n self.afn[\"l_foot_joint\"]=foot_func_af\r\n \r\n hip_func=SinusoidFunction()\r\n hip_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n hip_func.amplitude= self.parameters[\"hip_amplitude\"]\r\n hip_func.amplitude_offset= self.parameters[\"hip_amplitude_offset\"]\r\n hip_func.phase_offset= self.parameters[\"hip_phase_offset\"]\r\n self.pfn[\"l_hip_joint\"]=hip_func\r\n hip_func_af=hip_func.mirror()\r\n self.afn[\"l_hip_joint\"]=hip_func_af\r\n \r\n ## Thigh, ankle and knee -> Frontal motion\r\n thigh_func=SinusoidFunction()\r\n thigh_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n thigh_func.amplitude= self.parameters[\"thigh_amplitude\"]\r\n thigh_func.amplitude_offset= self.parameters[\"thigh_amplitude_offset\"]\r\n thigh_func.phase_offset= self.parameters[\"thigh_phase_offset\"]\r\n self.pfn[\"l_thigh_joint\"]=thigh_func\r\n thigh_func_af=thigh_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_thigh_joint\"]=thigh_func_af\r\n \r\n ankle_func=SinusoidFunction()\r\n ankle_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n ankle_func.amplitude= self.parameters[\"ankle_amplitude\"]\r\n ankle_func.amplitude_offset= self.parameters[\"ankle_amplitude_offset\"]\r\n ankle_func.phase_offset= self.parameters[\"ankle_phase_offset\"]\r\n self.pfn[\"l_ankle_joint\"]=ankle_func\r\n ankle_func_af=ankle_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_ankle_joint\"]=ankle_func_af\r\n \r\n knee_func=SinusoidFunction()\r\n knee_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n knee_func.amplitude= self.parameters[\"knee_amplitude\"]\r\n knee_func.amplitude_offset= self.parameters[\"knee_amplitude_offset\"]\r\n knee_func.phase_offset= self.parameters[\"knee_phase_offset\"]\r\n self.pfn[\"l_knee_joint\"]=knee_func\r\n knee_func_af=knee_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_knee_joint\"]=knee_func_af\r\n \r\n #f3=SinusoidFunction()\r\n #f3.angular_frequency=self.parameters[\"step_frequency\"]\r\n #f3.amplitude=self.parameters[\"step_amplitude\"]\r\n #f3.amplitude_offset=self.parameters[\"step_amplitude_offset\"]\r\n #self.pfn[\"l_thigh_joint\"]= f3\r\n #f33=f3.clone()\r\n #f33.amplitude_offset = self.parameters[\"ankle_amplitude_offset\"]\r\n #f33.amplitude = self.parameters[\"ankle_amplitude\"]\r\n #self.pfn[\"l_ankle_joint\"]=f33\r\n #f4=f3.mirror()\r\n ##f4.amplitude_offset -= 0.4\r\n #self.pfn[\"l_knee_joint\"]=f4\r\n \r\n #f5=f3.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_thigh_joint\"]=f5\r\n \r\n #f6=f33.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_ankle_joint\"]=f6\r\n #f7=f5.mirror()\r\n ##f7.amplitude_offset -= 0.4\r\n #self.afn[\"l_knee_joint\"]=f7\r\n \r\n self.generate_right()\r\n \r\n self.show()", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def cmd_motors(self, motor1, motor2, 
motor3, motor4):\n pass", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def simulation(self, pvmod=True):\r\n \r\n self.Real.Ppv2ac_out, self.Real.Ppv2bat_in, self.Real.Ppv2bat_in0, self.Real.Pbat2ac_out, self.Real.Pbat2ac_out0, self.Real.Ppvbs, self.Real.Pbat, self.Real.soc, self.Real.soc0 = batmod_dc(\r\n self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.Ppv2bat_in0, self.Real.Ppv2bat_in,\r\n self.Real.Pbat2ac_out0, self.Real.Pbat2ac_out, self.Real.Ppv2ac_out, self.Real.Ppvbs, self.Real.Pbat)\r\n\r\n self.Ideal.Pbat, self.Ideal.soc, self.Ideal.soc0 = batmod_dc_ideal(self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)\r\n\r\n # Define missing parameters\r\n self.Real.Ppv2ac = self.Real.Ppv2ac_out # AC output power of the PV2AC conversion pathway\r\n self.Real.Ppv2bat = self.Real.Ppv2bat_in # DC input power of the PV2BAT conversion pathway\r\n\r\n self.Ideal.Ppvbs = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) - (np.minimum(0, self.Ideal.Pbat)) # Realized AC power of the PV-battery system\r\n self.Ideal.Ppv2ac = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) # AC output power of the PV2AC conversion pathway\r\n self.Ideal.Ppv2bat = np.maximum(0, self.Ideal.Pbat) # DC input power of the PV2BAT conversion pathway\r\n\r\n print()" ]
[ "0.68529385", "0.668605", "0.66161764", "0.65924245", "0.6577437", "0.6463951", "0.64577734", "0.6276354", "0.6200667", "0.6139953", "0.6101437", "0.60730547", "0.60481185", "0.5986419", "0.5972765", "0.59718496", "0.5970813", "0.5956315", "0.59275526", "0.5881428", "0.5873132", "0.58503", "0.5847372", "0.5783673", "0.57640225", "0.5736539", "0.57362324", "0.5724102", "0.5701776", "0.56949276" ]
0.71569127
0
List the train files of the `fold`
def train_files(self, fold: int) -> List[str]: all_files = [] for fold_id, inputs in enumerate(self.folds): if fold_id != fold: all_files += inputs return all_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_files(self, fold: int) -> List[str]:\n for fold_id, inputs in enumerate(self.folds):\n if fold_id == fold:\n return inputs\n\n return []", "def get_filenames(is_training, data_dir):\n\n return [ os.path.join(data_dir, 'train_'+str(shard_id)+'.tfrecord') for shard_id in range(_NUM_SHARDS)]", "def list_all(train_dir):\r\n path = train_dir\r\n result = []\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n result.append(fn)\r\n return result", "def get_train_files(self):\n raise NotImplementedError", "def get_filenames(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'LIP_train5.record')]\n else:\n return [os.path.join(data_dir, 'LIP_val5.record')]", "def get_train_files(self):\n train_dir = os.path.join(self.data_dir, \"train_{}\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][0]), int(p[0][1]), int(p[0][2]), p[1]) for p in interm]", "def get_filenames(is_training, data_dir):\n if is_training:\n return [\n os.path.join(data_dir, 'train-%05d-of-01024' % i)\n for i in range(_NUM_TRAIN_FILES)]\n else:\n return [\n os.path.join(data_dir, 'validation-%05d-of-00128' % i)\n for i in range(_NUM_VAL_FILES)]", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def train(self, testing = None):\n IFileList = []\n negWords = []\n posWords = []\n\n if not testing:\n for fFileObj in os.walk(\".\"):\n IFileList = fFileObj[2]\n 
print(IFileList)\n for file in IFileList[2:]:\n if re.search('movies\\u20131', file):\n review = self.loadFile(file)\n yy = self.tokenize(review)\n negWords.append(yy)\n\n elif re.search('movies\\u20135', file):\n review = self.loadFile(file)\n posWords.append(self.tokenize(review))\n\n else:\n print(\"The movie review didn't start with a 1 or 5.\")\n break\n negWords = [item for sublist in negWords for item in sublist]\n posWords = [item for sublist in posWords for item in sublist]\n\n mostCommon = [',', '.', 'the', 'of', 'and', 'to', 'a', 'in', 'for', 'is', 'on', 'that', 'by', 'this', 'with', 'I',\n 'you', 'it', 'not', 'or', 'be', 'are', 'from', 'at', 'as', 'your', 'all', 'have', 'new', 'more', 'an', 'was']\n \n for word in negWords:\n if word in mostCommon:\n negWords.remove(word)\n\n for word in posWords:\n if word in mostCommon:\n posWords.remove(word)\n else: #if training for 10 fold cross validation\n\n mostCommon = [',', '.', 'the', 'of', 'and', 'to', 'a', 'in', 'for', 'is', 'on', 'that', 'by', 'this', 'with', 'I',\n 'you', 'it', 'not', 'or', 'be', 'are', 'from', 'at', 'as', 'your', 'all', 'have', 'new', 'more', 'an', 'was']\n\n for file in testing:\n if \"movies-1\" in file:\n review = self.loadFile(file)\n yy = self.tokenize(review)\n for i in yy:\n if i not in mostCommon:\n negWords.append(i)\n\n elif \"movies-5\" in file:\n review = self.loadFile(file)\n yy = self.tokenize(review)\n for i in yy:\n if i not in mostCommon:\n posWords.append(i)\n\n else:\n print(\"The movie review didn't start with a 1 or 5.\")\n\n #negWords = negWords[0]\n #posWords = posWords[0]\n\n\n\n negFreq = nltk.FreqDist(negWords)\n posFreq = nltk.FreqDist(posWords)\n print posFreq\n print negFreq\n f = open(\"posFreq.dat\", \"w\")\n p = pickle.Pickler(f)\n p.dump(posFreq)\n f.close()\n f = open(\"negFreq.dat\", \"w\")\n p = pickle.Pickler(f)\n p.dump(negFreq)\n f.close()\n self.negWords = negFreq\n self.posWords = posFreq", "def get_filenames_reid(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'train-512-170.tfrecords')]\n else:\n return [os.path.join(data_dir, 'val-512-170.tfrecords')]", "def get_train_test_info(self):\n \n imagePaths = list(paths.list_images(self.cross_val_dir))\n\n train_path_info = defaultdict(list)\n test_path_info = defaultdict(list)\n\n for imagePath in imagePaths:\n path_parts = imagePath.split(os.path.sep)\n fold_number = path_parts[-3][-1]\n label = path_parts[-2]\n if(fold_number==str(self.fold)):\n test_path_info['path_list'].append(imagePath)\n test_path_info['label_list'].append(label)\n else:\n train_path_info['path_list'].append(imagePath)\n train_path_info['label_list'].append(label)\n\n return train_path_info, test_path_info", "def train(self, trainfile):", "def load_training():\n for can in candidates:\n trainings[can] = []\n for subdir, dirs, files in os.walk(os.path.join(corpus_dir, can)):\n for doc in files:\n trainings[can].append(doc)", "def read_train_files(indir, separator=\" \"):\n utils.print_success(\"Reading multiple train files\")\n indir = utils.abs_path_dir(indir) + \"/\"\n groundtruths = []\n features = []\n included_extenstions = [\"csv\"]\n filenames = [fn for fn in os.listdir(indir)\n if any(fn.endswith(ext) for ext in included_extenstions)]\n for index, filename in enumerate(filenames):\n print(str(index + 1) + \"/\" + str(len(filenames)) + \" \" + filename)\n sys.stdout.write(\"\\033[F\") # Cursor up one line \n sys.stdout.write(\"\\033[K\") # Clear line\n with open(indir + filename, \"r\") as filep:\n for row in filep:\n 
line = row.split(separator)\n features.append([float(i) for i in line[:-1]])\n groundtruths.append(line[-1][:-1])\n sys.stdout.write(\"\\033[K\") # Clear line\n return features, groundtruths", "def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]", "def store_training_validation_file_list(data_paths, save_dir, train_num,\n logger):\n training_dir = data_paths[0]\n validation_dir = data_paths[1]\n\n save_list = os.path.join(save_dir, '{}_train_valid_file_list.txt'.format(\n train_num))\n\n\n with open(save_list, \"w\") as f:\n\n def get_images(path):\n\n sub_dirs = [x[0] for x in os.walk(path)]\n sub_dirs.sort()\n\n for sub_dir in sub_dirs:\n images = glob.glob(sub_dir + '/*.jpg')\n \n # for dirs containing jpgs, write the dir path and files to save_list\n if len(images) > 0:\n f.write(sub_dir + \"\\n\")\n for image in images:\n f.write(\" \" + pathlib.Path(image).name + \"\\n\")\n\n f.write(\"LIST OF FILES USED IN RUN {}\\n\".format(train_num))\n f.write(\"===============================\\n\")\n\n f.write(\"TRAINING\\n\")\n f.write(\"--------\\n\")\n\n get_images(training_dir)\n\n f.write(\"VALIDATION\\n\")\n f.write(\"----------\\n\")\n\n get_images(validation_dir)\n\n logger.info(\"File Generation: %s\",\n \"Training and validation files list generated.\")", "def get_training_data(self):\n labels = self.get_labels()\n\n print 'Loading training data from ', self.train_folder , '...'\n train_index = []\n #train_ans = []\n train_text = []\n cnt = 0\n\n for f in listdir(self.train_folder):\n file_path = join(self.train_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n #train_index.append(f[:-4])\n self.train_ans.append(labels[f[:-4]])\n with open(file_path, 'rb') as f:\n train_text.append( f.read() )\n\n return train_text", "def test_train_split(folder_name):\n\n class_folders = glob.glob(os.path.join(folder_name, '*'))\n\n class_names = [i.split('/')[-1] for i in class_folders]\n\n print(class_folders)\n\n train_folder_path = os.path.join(folder_name, 'train_dir')\n validation_folder_path = os.path.join(folder_name, 'val_dir')\n\n if not os.path.exists(train_folder_path):\n os.makedirs(train_folder_path)\n if not os.path.exists(validation_folder_path):\n os.makedirs(validation_folder_path)\n\n # Create the folder structure\n class_folders_train = []\n class_folders_val = []\n for class_name in class_names:\n # Create calss folder in the training directory\n class_folders_train.append(os.path.join(train_folder_path, class_name))\n if not os.path.exists(class_folders_train[-1]):\n os.makedirs(class_folders_train[-1])\n # Create class folder in the validation_directory\n class_folders_val.append(os.path.join(\n validation_folder_path, class_name))\n if not os.path.exists(class_folders_val[-1]):\n os.makedirs(class_folders_val[-1])\n\n class_files = []\n\n for idx, class_folder in enumerate(class_folders):\n class_files = glob.glob(os.path.join(class_folder, '*.jpg'))\n for file in class_files[:int(len(class_files) * 0.7)]:\n copyfile(file, os.path.join(\n class_folders_train[idx], file.split('/')[-1]))\n for file in class_files[int(len(class_files) * 0.7):]:\n print(file)\n print(os.path.join(class_folders_val[idx], file.split('/')[-1]))\n copyfile(file, os.path.join(\n class_folders_val[idx], file.split('/')[-1]))", "def processed_file_names(self):\n # For 'trainval', we use files from 'train' and 'val' to save\n # memory\n if self.stage == 'trainval' and self.val_mixed_in_train:\n return [\n osp.join('train', 
self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n if self.stage == 'trainval':\n return [\n osp.join(s, self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n return [\n osp.join(self.stage, self.pre_transform_hash, f'{w}.h5')\n for w in self.cloud_ids]", "def get_filenames(mode, data_dir):\n if mode == 'train':\n return [os.path.join(data_dir, 'encoder.train.input'), os.path.join(data_dir, 'encoder.train.target'),\n os.path.join(data_dir, 'decoder.train.target')]\n else:\n return [os.path.join(data_dir, 'encoder.test.input'), os.path.join(data_dir, 'encoder.test.target'),\n os.path.join(data_dir, 'decoder.test.target')]", "def get_filenames(self):\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n lookup_name = 'train'\n elif self.mode == tf.estimator.ModeKeys.EVAL:\n lookup_name = 'validation'\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n lookup_name = self.predict_split\n filenames = tf.gfile.Glob(\n os.path.join(self.data_dir, '{}-*-of-*'.format(lookup_name)))\n if tf.estimator.ModeKeys.PREDICT:\n # Sort so that TFRecords will be read out deterministically.\n filenames = sorted(filenames)\n return filenames", "def get_filenames(self):\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n lookup_name = 'train'\n elif self.mode == tf.estimator.ModeKeys.EVAL:\n lookup_name = 'validation'\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n lookup_name = self.predict_split\n filenames = tf.gfile.Glob(\n os.path.join(self.data_dir, '{}-*-of-*'.format(lookup_name)))\n if tf.estimator.ModeKeys.PREDICT:\n # Sort so that TFRecords will be read out deterministically.\n filenames = sorted(filenames)\n return filenames", "def get_file_names(self, prediction=False):\n if prediction is False:\n filename_list = []\n for objects in self.bucket.objects.filter(Prefix=self.training_data_dir):\n filename = str(objects.key).split('/')[-1]\n if filename != \"\":\n filename_list.append(filename)\n return filename_list\n else:\n filename_list = []\n for objects in self.bucket.objects.filter(Prefix=self.prediction_data_dir):\n filename_list.append(str(objects.key).split('/')[-1])\n return filename_list", "def load_input(io_config):\n\n fold = io_config['fold']\n\n train_data_fnames = io_config['file']['train_data']\n\n train_fname = [t[1] for t in train_data_fnames\n if t[0] == fold][0]\n\n return train_fname", "def create_data_folds(src_dir):\n \n # Creating the CV folds from the remaining events\n events = ['ch', 'ebola', 'ferg', 'german', 'gurlitt', 'ottawa', 'putin', 'sydney', 'toronto']\n for fold, event in enumerate(events):\n print(\"\\nCreating fold_{} with {} as test set\\n\".format(fold+1, event) + \"-\"*50 )\n train_labels, test_labels = [],[]\n test_event = event\n train_events = events.copy()\n train_events.remove(event)\n \n print(\"Test set: \\n\" + \"-\"*20)\n test_data_dir = os.path.join(src_dir, test_event)\n test_data_file = '../data/pheme/test_filtered_{}.tsv'.format(fold+1)\n c=0\n with open(test_data_file, 'a+', encoding = 'utf-8', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter='\\t')\n for root, dirs, files in os.walk(test_data_dir):\n for file in files:\n if file.startswith('.') or file.startswith('structure') or root.endswith('reactions'):\n continue\n else:\n if file.startswith('annotation'):\n src_file_path = os.path.join(root, file)\n with open(src_file_path, 'r') as j:\n annotation = json.load(j)\n test_labels.append(convert_annotations(annotation, string = 
False))\n \n else:\n src_tweet_file = os.path.join(root, file)\n with open (src_tweet_file, 'r', encoding = 'utf-8') as j:\n src_tweet= json.load(j)\n text = src_tweet['text'].replace('\\n', ' ')\n text = text.replace('\\t', ' ')\n csv_writer.writerow([text, test_labels[c]])\n c+=1\n if c%100 == 0:\n print(\"{} done...\".format(c))\n true, false, unverif = get_label_distribution(test_labels)\n print(\"\\nTotal test instances = \", len(test_labels))\n print(\"True test labels = {:.2f} %\".format(true*100))\n print(\"False test labels = {:.2f} %\".format(false*100))\n print(\"Unverified test labels = {:.2f} %\".format(unverif*100))\n \n print(\"\\nTrain set: \\n\" + \"-\"*20)\n train_data_file = '../data/pheme/train_{}.tsv'.format(fold+1)\n c=0\n for train_event in train_events:\n train_data_dir = os.path.join(src_dir, train_event)\n with open(train_data_file, 'a+', encoding = 'utf-8', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter='\\t')\n for root, dirs, files in os.walk(train_data_dir):\n for file in files:\n if file.startswith('.') or file.startswith('structure') or root.endswith('reactions'):\n continue\n else:\n if file.startswith('annotation'):\n src_file_path = os.path.join(root, file)\n with open(src_file_path, 'r') as j:\n annotation = json.load(j)\n train_labels.append(convert_annotations(annotation, string = False))\n \n else:\n src_tweet_file = os.path.join(root, file)\n with open (src_tweet_file, 'r', encoding = 'utf-8') as j:\n src_tweet= json.load(j)\n text = src_tweet['text'].replace('\\n', ' ')\n text = text.replace('\\t', ' ')\n csv_writer.writerow([text, train_labels[c]])\n c+=1\n if c%1000 == 0:\n print(\"{} done...\".format(c))\n true, false, unverif = get_label_distribution(train_labels)\n print(\"\\nTotal train instances = \", len(train_labels))\n print(\"True train labels = {:.2f} %\".format(true*100))\n print(\"False train labels = {:.2f} %\".format(false*100))\n print(\"Unverified train labels = {:.2f} %\".format(unverif*100))\n return None", "def _get_model_files(steps, path):\n if not isinstance(steps, list):\n steps = [steps]\n model_files = []\n for step in steps:\n model_pref = ckpt_prefix + str(step)\n model_files.extend([f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f)) and f[:len(model_pref)] == model_pref])\n return model_files", "def read_fivefold_list(dataset_dir, foldList):\n \n foldList_filename = os.path.join(dataset_dir, foldList)\n with tf.gfile.Open(foldList_filename, 'r') as f:\n lines = f.read()\n lines = lines.split('\\n')\n lines = lines[:-1]\n \n return lines", "def load_path_list(image_path, gt_path, batch_size, train = True):\r\n\r\n if train:\r\n print(\"Image Load Started..\")\r\n\r\n path_list = os.listdir(gt_path)\r\n \r\n image_size = len(path_list)\r\n Train_size = image_size // batch_size * batch_size\r\n Validation_size = image_size - Train_size\r\n \r\n if Validation_size < 10:\r\n Train_size -= batch_size\r\n Validation_size += batch_size\r\n \r\n print(\"Train data size : \", Train_size)\r\n print(\"Validation data size : \", Validation_size)\r\n else:\r\n path_list = os.listdir(gt_path)\r\n Train_size = 0\r\n Validation_size = 0\r\n print(\"Test data size : \", len(path_list))\r\n\r\n rd.shuffle(path_list)\r\n\r\n\r\n return path_list, Train_size, Validation_size", "def get_best_model_path(fold: int) -> str:\n def parse_accuracy(filename: str) -> float:\n m = re.search(r\"__fold_\\d+_val_([01]\\.\\d+)\", filename)\n assert(m)\n return float(m.group(1))\n\n models = 
list(glob.glob(\"../models/*__fold_%d_val_*.hdf5\" % fold))\n accuracy = list(map(parse_accuracy, models))\n best = accuracy.index(max(accuracy))\n\n print(\"fold=%d best_model=%s\" % (fold, models[best]))\n return models[best]", "def get_train_data(data, lang):\n train_files = open(os.path.join('traintestsplit', lang+'.trainlist')).read().split()\n return [data[lang][filename+'.npytxt'] for filename in train_files]" ]
[ "0.7803892", "0.7056508", "0.685751", "0.6802941", "0.6719647", "0.6520368", "0.6367892", "0.630924", "0.6265436", "0.6264137", "0.6258419", "0.6230313", "0.6188211", "0.6176572", "0.613282", "0.61127913", "0.61011595", "0.6052456", "0.6017384", "0.6011248", "0.60108817", "0.60108817", "0.6010142", "0.60079545", "0.5989858", "0.5970775", "0.596089", "0.5952251", "0.5923608", "0.5917091" ]
0.85289615
0
List the test files of the `fold`
def test_files(self, fold: int) -> List[str]:
    for fold_id, inputs in enumerate(self.folds):
        if fold_id == fold:
            return inputs
    return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_files(self, fold: int) -> List[str]:\n all_files = []\n for fold_id, inputs in enumerate(self.folds):\n if fold_id != fold:\n all_files += inputs\n\n return all_files", "def get_test_files():\n test_files = os.listdir('./test')\n return [\n create_test_file_name(test_file)\n for test_file in test_files\n if is_valid_test_file(test_files)\n ]", "def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)", "def test(self, absList, modelfilename, fold=None):\n raise NotImplementedError(\"Need to implement test()\")", "def get_test_files():\n repo_fs()\n return TEST_FILES", "def test(self, absList, modelFilename, fold=None):\n raise NotImplementedError(\"Need to implement test()\")", "def _get_test_files(self):\n for dirpath, dirnames, filenames in os.walk(self.TestsDirectory):\n for f in filenames:\n if f.endswith('.py'):\n yield (path.join(dirpath, f), 'Python')", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret", "def test_case_5():\n print(\"*********Test_case_5***********\")\n result = find_files('.c', \"\")\n print(result)", "def get_test_files(self):\n raise NotImplementedError", "def get_test_files(dirname):\n if not os.path.isdir(dirname):\n return []\n path = dirname + \"/{}\"\n return list(map(path.format, sorted(os.listdir(dirname))))", "def generate_test_list(tdir):\n\n # Skip this if it already exists\n if os.path.exists(os.path.join(tdir.name, \"kstest-list\")):\n return\n\n kstest_log = os.path.join(tdir.name, \"kstest.log\")\n with open(kstest_log) as f:\n for line in f.readlines():\n if not line.startswith(\"Running tests: \"):\n continue\n\n tests = [os.path.basename(os.path.splitext(s)[0]) for s in line[15:].split()]\n with open(os.path.join(tdir.name, \"kstest-list\"), \"wt\") as klf:\n for t in tests:\n print(t, file=klf)\n break", "def test_case_1():\n print(\"*********Test_case_1***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('.c', path)\n for file in result:\n print(file)", "def _list_test_files(self, results_list):\n return [results[INPUT_FILE_PATH] for results in results_list]", "def test_examples():\n tests = [d for d in listdir(ex) if path.isdir(path.join(ex, d))]\n for d in tests:\n yield check_examples, d", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def test_case_2():\n print(\"*********Test_case_2***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files(None, path)\n print(result)", "def __generate_test_file_list(self):\n allowed_tests = []\n exclude_tests = self.get_exclusions()\n exclude_tests.append('expected.')\n exclude_tests.append('actual.')\n\n 
#Allowed/exclude can be filenames or directory fragments.\n tests_to_run = []\n added_test = len(tests_to_run)\n allowed_path = ''\n\n #Check local dir first then the root package directory.\n checked_paths = []\n for test_dir in self.get_test_dirs():\n allowed_path = os.path.join(test_dir, self.test_pattern)\n checked_paths.append(allowed_path)\n if os.path.isfile(allowed_path):\n logging.debug(\"Adding file \" + allowed_path)\n tests_to_run.append(TestFile(test_dir, allowed_path))\n elif os.path.isdir(allowed_path):\n logging.debug(\"Iterating directory \" + allowed_path)\n for f in os.listdir(allowed_path):\n full_filename = os.path.join(allowed_path, f)\n if os.path.isfile(full_filename):\n logging.debug(\"Adding file \" + full_filename)\n tests_to_run.append(TestFile(test_dir, full_filename))\n else:\n for f in glob.glob(allowed_path):\n full_filename = os.path.join(allowed_path, f)\n if os.path.isfile(full_filename):\n logging.debug(\"Adding globbed file \" + full_filename)\n tests_to_run.append(TestFile(test_dir, full_filename))\n if tests_to_run:\n break\n\n if added_test == len(tests_to_run):\n logging.debug(\"Could not find any tests for [\" + \"] or [\".join(checked_paths) + \"]. Check the path.\")\n\n logging.debug(\"Found \" + str(len(tests_to_run)) + \" tests to run before exclusions.\")\n\n regexes = []\n for ex in exclude_tests:\n try:\n ex = ex.strip()\n if not ex:\n continue\n regex = re.compile(ex)\n regexes.append(regex)\n except BaseException as e:\n print (\"Error compiling regular expression for test file exclusions: '\" + str(ex) + \"' exception: \" +\n str(e))\n\n final_test_list = list(tests_to_run)\n for test in tests_to_run:\n for regex in regexes:\n if re.search(regex, test.test_path) and test in final_test_list:\n logging.debug(\"Removing test that matched: \" + str(regex))\n final_test_list.remove(test)\n\n logging.debug(\"Found \" + str(len(final_test_list)) + \" tests to run after exclusions.\")\n return sorted(final_test_list, key = lambda x: x.test_path)", "def tests(c):\n results = [test(c, i) for i, test_path in enumerate(TEST_PATHS)]\n print('\\n\\n\\n############## SUMMARY ##############')\n for i, test_path in enumerate(TEST_PATHS):\n print(i, test_path, 'PASSED' if result[i] == 0 else 'FAILED')", "def test_case_3():\n print(\"*********Test_case_3***********\")\n result = find_files('.c', None)\n print(result)", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def get_test_files(self):\n train_dir = os.path.join(self.data_dir, \"test_{}_new\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][1]), int(p[0][2]), p[1]) for p in interm]", "def 
test_003_get_all_games(self):\n __test = chess_storage.ChessStorage()\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __filenames_infolder = []\n for __filename_infolder in os.listdir(__dir_game_saves):\n if __filename_infolder != \"log\":\n __filenames_infolder.append(__filename_infolder)\n __test_filenames = __test.get_all_games()\n self.assertEqual(__test_filenames, __filenames_infolder)", "def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))", "def test_case_6():\n print(\"*********Test_case_6***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir', 't1.c')\n result = find_files('.c', path)\n print(result)", "def testList(self):\n def _check(results):\n self.assertEqual(results[0], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n self.assertEqual(results[1], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n self.assertEqual(results[2], [b'testRemoveFile', b'testRenameFile'])\n self.assertEqual(results[3], [b'.testHiddenFile', b'testRemoveFile',\n b'testRenameFile'])\n self.assertEqual(results[4], [b''])\n d = self.runScript('ls', 'ls ../' + self.testDir.basename(),\n 'ls *File', 'ls -a *File', 'ls -l testDirectory')\n d.addCallback(lambda xs: [x.split(b'\\n') for x in xs])\n return d.addCallback(_check)", "def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__", "def test_collect_files():\n filelist = [\"test/a.ext\", \"test/b.asd\"]\n\n result = loader.collect_files(filelist, lambda x: x, lambda x: np.arange(0, 50))\n\n for k in filelist:\n assert np.array_equal(np.arange(0, 50), result[k])" ]
[ "0.72677034", "0.6791726", "0.6743439", "0.66508114", "0.6589474", "0.6483487", "0.64777064", "0.64353925", "0.63842374", "0.63672245", "0.63392895", "0.6315878", "0.628178", "0.6266603", "0.6184447", "0.61463225", "0.61328787", "0.6130215", "0.6115048", "0.60493886", "0.60454017", "0.6037569", "0.6017042", "0.59832656", "0.59451336", "0.59418404", "0.59318", "0.590452", "0.588297", "0.58824784" ]
0.84272116
0
Write the fold split to the `filepath` as json.
def write_folds_to_json(self, filepath: str):
    with open(filepath, "w") as f:
        json.dump(
            {
                "isH5": self.is_h5_dataset,
                "folds": self.folds,
            },
            f,
            indent=4,
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, filepath):\n with open(filepath, 'w') as f:\n json.dump(self, f, indent=2)", "def to_json_file(self, path):\n with open(path, 'w') as f:\n f.write(self.to_json())", "def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)", "def write_json(self, filename):\n with open(filename, 'a+') as f:\n f.write(json.dumps(self.weights))\n f.write(\"\\n\")", "def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)", "def make_json_file(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4)\n f.close", "def write_json(filepath, data):\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(data, file_obj, ensure_ascii=False, indent=2)", "def write_json_file(self, fname, content):\n pass", "def write_json_file(self, fname, content):\n pass", "def to_json_file(self, path: Union[str, Path]):\n data_dict = self._to_list_dict()\n return json.dump(data_dict, Path(path).open(\"w\", encoding=\"utf8\"), cls=NumpyEncoder, indent=4)", "def to_json(self, filepath, pretty=False):\n # PATH = os.path.abspath(os.path.join(filepath, '..', 'data'))\n with open(filepath, 'w+') as fp:\n if pretty:\n json.dump(self.data, fp, sort_keys=True, indent=4)\n else:\n json.dump(self.data, fp)", "def to_json(self, fpath):\n import json\n with open(fpath, 'w') as fp:\n json.dump(self.to_dict(), fp)", "def write(self, filename, overwrite=False, format='pickle',\n file_per_mode=False):\n def write_dat(fname):\n with open(fname, 'w') as fh:\n for val in cf:\n fh.write(\"{}\\n\".format(val))\n return\n\n try:\n # Get filename\n if not overwrite:\n i = 0\n while True:\n msg = ''\n path = os.path.dirname(filename)\n f = os.path.basename(filename)\n f, ext = os.path.splitext(f)\n\n if os.path.exists(filename):\n msg += '\\033[93mSplittingfunction-file exist,'\n msg += 'not overwriting\\033[0m'\n if i == 0:\n f = \"%s_%s\" % (f, str(i))\n i += 1\n a = \"_%s\" % str(i-1)\n b = \"_%s\" % str(i)\n f = f.replace(a, b)\n filename = os.path.join(path, \"%s%s\" % (f, ext))\n else:\n print(msg)\n break\n\n if format == 'pickle':\n write_pickle(self, filename)\n\n elif format == 'json':\n data = {'SF': {}}\n for m, cst in self.cst.items():\n data['SF'][m] = {}\n for deg, c in cst.items():\n data['SF'][m][deg] = c.tolist()\n with open(filename, 'w') as fh:\n json.dump(data, fh)\n\n elif format == 'dat':\n if file_per_mode is False:\n cf = []\n for mode, c in self.cst.items():\n degs = sorted(list(self.cst[mode].keys()))\n for deg in degs:\n coeff = self.cst[mode][deg]\n if deg == '0':\n cf += [coeff[0]]\n cf += [self.dst[mode]['0'][0]]\n else:\n cf += coeff.tolist()\n write_dat(filename)\n\n else:\n for mode, c in self.cst.items():\n cf = []\n degs = sorted(list(self.cst[mode].keys()))\n for deg in degs:\n coeff = self.cst[mode][deg]\n if deg == '0':\n cf += [coeff[0]]\n cf += [self.dst[mode]['0'][0]]\n else:\n cf += coeff.tolist()\n print(mode)\n write_dat(\"{}.dat\".format(mode))\n\n else:\n raise IOError('Only support for pickle and json files.')\n\n except IOError:\n msg = \"\\033[91mCan't save file\\n\"\n msg += \"Error message: %s\\033[0m\" % sys.exc_info()[1]\n print(msg)\n return", "def save_json(self, file: Union[str, TextIO]) -> None:\n if hasattr(file, 'write'):\n file_ctx = nullcontext(file)\n else:\n file_ctx = open(file, 'w')\n\n with file_ctx as fp:\n for d in self:\n json.dump(d.dict(), fp)\n 
fp.write('\\n')", "def save_info_to_file(filepath, tokens):\n with open(filepath, 'w') as f:\n json.dump(tokens, f)", "def save(self, filepath: str):\n with open(filepath, 'w') as fp:\n json.dump(\n self.dict(),\n fp,\n )", "def write_json(data, filepath):\n with open(filepath, \"w\") as f:\n content = json.dumps(data, indent=3)\n f.write(content + '\\n')", "def write(self, _filepath=None):\n if _filepath is None:\n _filepath = self.filepath\n _json_txt = json.dumps(self.json_dict, indent=2).splitlines()\n # json.dumps() puts a space bwetween :{ rF2 doesn't\n # So strip it out to make it easier to compare before and after\n _whitespace_removed = []\n for _line in _json_txt:\n _line = _line.replace(': {', ':{', 1)\n\n # For some reason rF2 escapes / in values\n _colon = _line.find(':')\n if _colon:\n _line = _line[:_colon] + _line[_colon:].replace('/', r'\\/')\n _whitespace_removed.append(_line)\n _json_txt = '\\n'.join(_whitespace_removed)\n\n super()._write_json_text(_json_txt, _filepath)", "def result_writer(result_poly):\n val = {}\n val[\"type\"] = \"FeatureCollection\"\n val[\"features\"] = result_poly\n with open(output_file_path, 'w') as outfile:\n json.dump(val, outfile, indent=3)\n outfile.close()", "def fullJSON(self, filename=None, downsample=None):\n d = self.robotGridDict(downsample)\n if filename is not None:\n with open(filename, \"w\") as f:\n json.dump(d, f, separators=(',', ':'))\n else:\n return json.dumps(d)", "def SaveJSON(self, filename):\n data = {\n 'files': self._files,\n 'ebuilds': self._ebuilds,\n }\n json.dump(data, open(filename, 'w'))", "def write_to_json(self, export_fp: str):\n # TODO:\n pass", "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def save_data(df, save_folder, filename):\n filepath = os.path.join(save_folder, filename)\n df.to_json(filepath,\n orient='records',\n lines=True)", "def writef(self, fileName):\n\t\tf = open(fileName, \"w\")\n\t\tjson.dump(self.writed(self.__world), f)\n\t\tf.close()", "def write_to_file(sec_in_projects, file_name):\n with open(file_name, 'w') as outfile:\n json.dump(sec_in_projects, outfile)", "def to_json(self, path):\n with open(path, \"w\") as dump:\n json.dump(self.__dict__, dump)", "def output_json(regions, outfile):\n with open(outfile, 'w') as outF:\n json.dump(regions, outF)", "def write(self):\r\n\r\n with open(self.filename + \".json\", mode='w') as json_file:\r\n json.dump(self.data, json_file, separators=(',', ':'))", "def write_json(\n data: Any,\n filepath: types.PathLike,\n *,\n mode: str = \"wt\",\n encoding: Optional[str] = None,\n make_dirs: bool = False,\n lines: bool = False,\n ensure_ascii: bool = False,\n separators: tuple[str, str] = (\",\", \":\"),\n sort_keys: bool = False,\n indent: Optional[int | str] = None,\n) -> None:\n io_utils._validate_write_mode(mode)\n with io_utils.open_sesame(\n filepath, mode=mode, encoding=encoding, make_dirs=make_dirs\n ) as f:\n if lines is False:\n f.write(\n json.dumps(\n data,\n indent=indent,\n ensure_ascii=ensure_ascii,\n separators=separators,\n sort_keys=sort_keys,\n cls=ExtendedJSONEncoder,\n )\n )\n else:\n newline: Union[str, bytes] = \"\\n\" if \"t\" in mode else b\"\\n\"\n for item in data:\n f.write(\n json.dumps(\n item,\n indent=indent,\n ensure_ascii=ensure_ascii,\n separators=separators,\n sort_keys=sort_keys,\n cls=ExtendedJSONEncoder,\n )\n + newline\n )" ]
[ "0.6139056", "0.6109978", "0.605159", "0.60373914", "0.5996056", "0.59922796", "0.5931721", "0.58890903", "0.58890903", "0.58382756", "0.583775", "0.58370245", "0.5836411", "0.5768725", "0.5767711", "0.57568604", "0.5755295", "0.57466227", "0.57201326", "0.5704674", "0.5670355", "0.5658591", "0.5644377", "0.5632221", "0.5629642", "0.5626544", "0.56230414", "0.5617767", "0.5605663", "0.55915827" ]
0.8364435
0
Return environment variables starting with prefix
def env_vars(prefix):
    return {k: v for k, v in os.environ.items() if k.startswith(prefix)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _preload_existing_vars(prefix: str) -> Store:\n if not prefix:\n # If prefix is empty just return all the env variables.\n return environ\n\n prefixed = {}\n\n # Prefix is not empty, do the search and replacement:\n for env_name, env_value in environ.items():\n if not env_name.startswith(prefix):\n # Skip vars with no prefix.\n continue\n\n prefixed[env_name.replace(prefix, '', 1)] = env_value\n\n return prefixed", "def _expand_prefix(prefix, configs):\n return subst_vars(prefix, configs)", "def RSA_KEYPAIR_PREFIX() :\n return os.environ.get( \"ATC_KEYPAIR_PREFIX\", \"atc-dev\" )", "def get_env_prefix(instrument):\n return \"crds://\"", "def _prefix_env_variable(environ, name, paths, subfolders):\n value = environ[name] if name in environ else ''\n environ_paths = [path for path in value.split(os.pathsep) if path]\n checked_paths = []\n for path in paths:\n if not isinstance(subfolders, list):\n subfolders = [subfolders]\n for subfolder in subfolders:\n path_tmp = path\n if subfolder:\n path_tmp = os.path.join(path_tmp, subfolder)\n # skip nonexistent paths\n if not os.path.exists(path_tmp):\n continue\n # exclude any path already in env and any path we already added\n if path_tmp not in environ_paths and path_tmp not in checked_paths:\n checked_paths.append(path_tmp)\n prefix_str = os.pathsep.join(checked_paths)\n if prefix_str != '' and environ_paths:\n prefix_str += os.pathsep\n return prefix_str", "def get_formatted_env_vars() -> str:\n res = \"\"\n for k, v in os.environ.items():\n res += '{0}={1}\\n'.format(k, v)\n return res", "def get_vars_by_prefix(self, prefix):\n\n t_vars = tf.global_variables()\n return [var for var in t_vars if prefix in var.name]", "def _include_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS')\n if not paths:\n return []\n return paths.split(';')", "def get_env_vars():\n return [EnvVar(name=k, value=v) for (k, v) in os.environ.items() if k.startswith('LEAPP_') and k not in ENV_IGNORE]", "def env_prefix(self, path):\n if self.is_default:\n return self.root # FIXME: Is this guaranteed to be the right one?\n\n return os.sep.join([path, PROJECT_ENVS_FOLDER,\n self.default_environment])", "def env(var):\n return os.environ[var]", "def _prepend_env_paths(content, names):\n export_env_vars = ['export %(k)s=%(v)s:${%(k)s}' %dict(\n k=name, v=os.environ.get(name, '')) for name in names]\n return '\\n'.join(export_env_vars + [content])", "def get_var_prefix(self):\n return self._var_prefix", "def getenv(self, var):\n return os.environ[var]", "def _default_getter(environ, metadata, prefix, name):\n ce = metadata[CNF_KEY]\n var = ce.name if ce.name is not None else \"_\".join((*prefix, name)).upper()\n log.debug(\"looking for env var '%s'.\", var)\n try:\n return environ[var]\n except KeyError:\n raise MissingEnvValueError(var) from None", "def env_var_line(key: str) -> str:\n return str(os.environ.get(key) or \"\").strip()", "def show_env():\n envs = [\"PATH\", \"ORACLE_HOME\", \"TNS_ADMIN\", \"NLS_LANG\"]\n result = {}\n for env in envs:\n if env in os.environ:\n result[env] = os.environ[env]\n return result", "def test_multiple_prefixes(monkeypatch):\n monkeypatch.setenv('SOM_TT_VALUE', '1')\n monkeypatch.setenv('ANOTHER_TT_VALUE', '2')\n\n variables = delegator.run('dump-env -p SOM_TT_ -p ANOTHER_TT_')\n assert variables.out == 'VALUE=2\\n'", "def prepend_env_variables(environ, env_var_subfolders, workspaces):\n lines = []\n lines.append(comment('prepend folders of workspaces to environment variables'))\n\n paths = 
[path for path in workspaces.split(os.pathsep) if path]\n\n prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')\n lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))\n\n for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):\n subfolder = env_var_subfolders[key]\n prefix = _prefix_env_variable(environ, key, paths, subfolder)\n lines.append(prepend(environ, key, prefix))\n return lines", "def __getitem__(self, key):\n return os.environ[key]", "def get_environment_vars():\n return {env: os.environ[env] for env in\n params.ENV_DIRS if env in os.environ}", "def test_expand_environment_variables(self):\n include_prefixes = ['-I']\n db = CppProperties(include_prefixes)\n environ['TEST_VARIABLE_TO_EXPAND'] = '/lib_include_dir'\n\n expected = [\n Flag('-I', path.normpath('/lib_include_dir')),\n ]\n\n scope = SearchScope(from_folder=_get_test_folder('environment'))\n self.assertEqual(expected, db.get_flags(search_scope=scope))", "def get_env_vars():\n env_vars = []\n leapp_vars = {k: v for (k, v) in os.environ.items() if k.startswith('LEAPP_') and k not in ENV_IGNORE}\n for k, v in leapp_vars.items():\n if k in ENV_MAPPING:\n env_vars.append(EnvVar(name=ENV_MAPPING.get(k), value=v))\n continue\n env_vars.append(EnvVar(name=k, value=v))\n\n return env_vars", "def get_prefix(self) -> str:\n return self.env_type.value + '_'", "def _env(env):\n return dict((key.strip(), val)\n for line in env.strip().splitlines()\n for key, _, val in [line.partition('=')])", "def getenv(self, key):\n return self._env[key]", "def environ_expansions():\r\n global _environ_expansions\r\n\r\n if _environ_expansions:\r\n return _environ_expansions\r\n\r\n _environ_expansions = {}\r\n for key, value in os.environ.items():\r\n _environ_expansions['ENV_%s' % key] = value\r\n\r\n return _environ_expansions", "def _get_interpreter_values(token='ZEPPELIN_INTERPRETER') -> dict:\n env_variables = {}\n for env_variable in os.environ:\n if env_variable.startswith(token):\n env_variables[env_variable] = os.environ[env_variable]\n\n LOG.info('%s-based token environment variables: %s', token, env_variables)\n\n return env_variables", "def find_environ_config_vars():\n # only allow secret key and database uri for now\n envvars = [\"SQLALCHEMY_DATABASE_URI\", \"SECRET_KEY\"]\n results = {}\n for key in envvars:\n if key in os.environ:\n results[key] = os.environ[key]\n return results", "def get_env_variable(var_name, prefix=VARIABLE_PREFIX, as_bool=False):\n if prefix is not None:\n var_name = prefix + var_name\n\n try:\n value = os.environ[var_name]\n if as_bool:\n value = value.lower() == 'true'\n return value\n except KeyError:\n raise ImproperlyConfigured('Set the %s environment variable' % var_name)" ]
[ "0.72910964", "0.67468673", "0.6712868", "0.65505886", "0.6517432", "0.63044226", "0.6295789", "0.62780964", "0.6245079", "0.62119097", "0.62084186", "0.6165885", "0.6108197", "0.6107439", "0.60423785", "0.60150474", "0.5988261", "0.59737426", "0.59527063", "0.5928965", "0.5927102", "0.5908471", "0.5880616", "0.58514506", "0.58425397", "0.5841613", "0.58239716", "0.580478", "0.579205", "0.57622075" ]
0.8657359
0
Get an object from cache, return `None` if not found.
def cache_get(item: str) -> object:
    item = str(item)
    cache = cache_find(item)

    # cache_find() will return none if the cache does not exist
    # the returned location is guaranteed to exist, so no point checking again.
    if cache is not None:
        try:
            cached = pickle.load(open(cache, "rb"))
        except EOFError as ex:
            # Cache file is corrupted, so print an error and act like it does
            # not exist. We do not delete the cache file incase the user wants
            # to recover the file.
            uux.show_error("Error when loading file from cache: " + str(ex))
            return None
        except Exception as ex:
            raise ex
        uux.show_debug("Cache hit for " + item)
        return cached

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, key):\n if key:\n return self.cache_data.get(key)\n else:\n return None", "def cache_get(self, key: str) -> Optional[bytes]:\n if self.cache is not None:\n return self.cache.get(key)\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data:\n return self.cache_data[key]\n return None", "def get(self, key):\n if key is None:\n return None\n return self.cache_data.get(key, None)", "def get(cls, obj: Model):\n return cache.get(cls._construct_key(obj))", "def get_cache(self, key):\n return self.r.get(key)", "def _get_cached_instance(self):\n\n try:\n identifier = self._get_identifier()\n except (ValueError, ObjectDoesNotExist) as error:\n if self._fail_silently:\n return None\n raise LazyModelObjectError(exc=error) from error\n\n # Get the cache key, basically just namespacing the identifier\n cache_key = model_cache_key(identifier)\n\n cache, timeout = self._cache\n cace: BaseCache\n if cache_key in cache:\n instance = cache.get(cache_key)\n else:\n instance = self._get_instance(identifier)\n cache.set(cache_key, instance, timeout=timeout)\n\n if instance is None and not self._fail_silently:\n raise LazyModelObjectError(f'{identifier} not found.')\n return instance", "def get(self, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n if os.path.isfile(cache_file_path):\n with open(cache_file_path, 'rb') as fp:\n result = pickle.load(fp)\n return result\n\n return None", "def _cache_get(self, objId, methodname):\n self._lock.acquire()\n entry = self.cache.get((objId, methodname))\n if entry is None: \n self._lock.release()\n return None\n if time.time() - entry[0] > CACHE_TIMEOUT: \n del self.cache[(objId, methodname)]\n self._lock.release()\n return None\n self._lock.release()\n return entry[1]", "def get(key):\n return Cache.cache_connector.get(key)", "def get(self, key):\n try:\n\n item = self._item_to_dict(self.client.get_item(**self._prepare_get_request(str(key))))\n\n # If item is empty, nothing in cache\n if not item:\n return None\n\n # If current time beyond expiry, nothing to return\n if time()>float(item[self._expiry_field.name]):\n return None\n\n return self.load_object(b64decode(item.get(self._value_field.name)))\n\n except Exception as e:\n logging.info('Error getting object from DynamoDB table %s (%s): %s',self.table_name,e.__class__.__name__,e)\n return None", "def __getitem__(self,key):\n result = None\n # check if it's tin the cache first\n if key in self._cache:\n result = self._cache[key]\n else:\n # it's not in the cache so retrieve it\n result = self._get_from_tree(key)\n # remove None values\n result = [x for x in result if x is not None]\n self._cache[key] = result\n\n return result", "def cache_get(key, default=None):\n mc = get_cache_client()\n try:\n return decode_value(mc.get(get_key(key))) or default\n except:\n return default", "def get(model_class, id):\n key = build_key(model_class, id)\n user = cache.get(key)\n if user is None: # Not in cache\n logger.info(\" CACHE MISS key=%s\", key)\n user = User.objects.filter(id=id).first()\n if user is not None: # Found in DB\n logger.info(\" CACHE POPULATE key=%s\", key)\n cache.set(key, user) # Add to cache\n else:\n logger.info(\" CACHE HIT key=%s\", key)\n return user", "def get(self, key):\n # Initialize key variables\n result = 
self.cache.get(key)\n\n # Return\n return result", "def get(self, key):\n with self._lock:\n self._check_expire()\n\n obj = self._obj_cache[key]\n self._log.debug(\"getting object '%s' (type=%s). \"\n \"updating access time.\",\n key, type(obj))\n self._obj_last_access[key] = time.time()\n return obj", "def _query_cache(self, key):\n if self._cache:\n cache_key = self._make_cache_key(key)\n try:\n val = self._cache[cache_key]\n self._log.debug('cache hit for key {cache_key} ({key}) '.format(\n cache_key=cache_key, key=key))\n return val\n except KeyError:\n self._log.debug('cache miss for key {cache_key} ({key}) '.format(\n cache_key=cache_key, key=key))\n return None\n else:\n self._log.debug('cache disabled (self._cache is None)')\n return None", "def get(node, user_id):\n key = NodeCache.generate_key(node, user_id)\n db_node_cache = get_db_connector().node_cache.find({\n 'key': key\n }).sort('insertion_date', -1).limit(1)\n caches = list(db_node_cache)\n if len(caches):\n return NodeCache.from_dict(caches[0])\n else:\n return None", "def get_cache(self, key, silent=False):\n value = self.get_local_cache(key)\n if value is not None:\n return value\n\n if self.cache is None:\n return None\n\n cache_key = key.cache_key\n try:\n value = self.cache.get(cache_key)\n except Exception:\n if not silent:\n logger.warn(CACHE_FETCH_ERR, key.name, extra={\n 'key': key.name,\n }, exc_info=True)\n value = None\n\n if value is not None and key.ttl > 0:\n self._local_cache[cache_key] = _make_cache_value(key, value)\n\n return value", "def get_from_cache(self, **kwargs):\n if not self.cache_fields or len(kwargs) > 1:\n return self.get(**kwargs)\n\n pk_name = self.model._meta.pk.name\n key, value = kwargs.items()[0]\n\n # Kill __exact since it's the default behavior\n if key.endswith('__exact'):\n key = key.split('__exact', 1)[0]\n\n if key in self.cache_fields or key in ('pk', pk_name):\n cache_key = self._get_from_cache_key(**{key: value})\n\n retval = cache.get(cache_key)\n if retval is None:\n result = self.get(**kwargs)\n # Ensure we're pushing it into the cache\n self._post_save(instance=result)\n return result\n\n # If we didn't look up by pk we need to hit the reffed\n # key\n if key not in (pk_name, 'pk'):\n return self.get(pk=retval)\n\n return retval", "def get(self, path):\n\t\treturn self.cache.get(path)", "def __getitem__(self, key):\n self._remove_expired()\n\n cache_entry = self._d.get(key, None)\n log.debug(\"__getitem__: {}\".format(cache_entry))\n\n return cache_entry", "def __getitem__(self, key):\n if self.contains(key):\n return self._cache[key][0]\n raise CacheMissException(key)", "def _get_cache(self, course_version_guid):\n if self.request_cache is None:\n return None\n\n return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)", "def get(self, key):\n return self.cache_data.get(key)", "def get(self, key):\n if key is None:\n raise TypeError\n\n index = self.__get_cache_set_index(key)\n cache_set = self.cache_sets[index]\n h_key = self.__ensure_hashable_key(key)\n return cache_set.get(h_key)", "def getCache(self, key):\n return self._cache.get(key, None)", "def get( key ):\n if ACTIVE is False:\n return None\n \n global CACHE, STATS_MISSES, STATS_HITS\n \n \"\"\" Return a key stored in the python instance cache or a None if it has expired or it doesn't exist \"\"\"\n if key not in CACHE:\n STATS_MISSES += 1\n return None\n \n value, expiry = CACHE[key]\n current_timestamp = time.time()\n if expiry == None or current_timestamp < expiry:\n STATS_HITS += 
1\n return value\n else:\n STATS_MISSES += 1\n delete( key )\n return None" ]
[ "0.7674477", "0.76137364", "0.75440174", "0.75440174", "0.75393486", "0.7432243", "0.73063606", "0.72476906", "0.72312814", "0.72097397", "0.7185365", "0.71732783", "0.71528417", "0.713144", "0.7064178", "0.70147896", "0.6986404", "0.697731", "0.69466746", "0.6875618", "0.68736666", "0.6866384", "0.6865083", "0.6738546", "0.6729988", "0.6710537", "0.6707776", "0.67055774", "0.66974634", "0.6692788" ]
0.82087857
0
Save an item to cache, using a hashed ID.
def cache_save_hashed(item: str, obj: object) -> None:
    cache_save(md5(item), obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache_save(item: str, obj: object) -> None:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tcache_create()\n\n\tpickle.dump(obj, open(cache, \"wb\"))\n\tuux.show_debug(\"Cached object to \" + cache)", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key is None or item is None:\n return\n self.cache_data[key] = item", "def put(self, key, item):\n raise NotImplementedError(\"put must be implemented in your cache class\")", "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "def put(self, key, item):\n if key or item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.last))\n del self.cache_data[self.last]\n self.last = key", "def put(self, key, item):\n if key is not None and item is not None:\n self.cache_data[key] = item\n if key not in self.cache_list:\n self.cache_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n popped_key = self.cache_list.pop(0)\n print(f\"DISCARD: {popped_key}\")\n del self.cache_data[popped_key]", "async def save_to_cache(self, item: T):\n path = self._build_cache_path(\n **{a: getattr(item, a) for a in self._unique_attribues}\n )\n if path.is_file():\n raise ValueError(f\"Trying to overwrite cache at {str(path)}\")\n path.parent.mkdir(parents=True, exist_ok=True)\n async with aiofiles.open(str(path), \"w\") as file:\n await file.write(item.to_json())", "def save_hash_file(self):\n\n cid = time.time()\n '''\n f = open(self.file_hash_name, 'w+')\n f.write(str(cid))\n f.close()\n '''\n\n with open(self.file_hash_name, \"w\") as f:\n logger.debug('CID salvo file {}'.format(cid))\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n f.write(str(cid))\n fcntl.flock(f, fcntl.LOCK_UN)\n\n self.set_cache_hash(cid)", "def set(id, model):\n key = build_key(type(model), id) # Get model class from model object\n logger.info(\" CACHE INVALIDATE key=%s\", key)\n cache.delete(key) # Invalidate from cache\n model.id = id\n model.save()", "def __setitem__(self, tid: int, result: bytes):\n if tid in self:\n raise KeyError(f\"transaction {tid} already cached\")\n\n self._cache[tid] = (result, time.monotonic())", "def save_to_cache(self):\n\n ckey = self.cache_key\n\n logger.debug(f\"Saving setting '{ckey}' to cache\")\n\n try:\n cache.set(\n ckey,\n self,\n timeout=3600\n )\n except TypeError:\n # Some characters cause issues with caching; ignore and move on\n pass", "def update(self, cache_key):\r\n self._write_sha(cache_key)", "def add_item (self, item):\n new_item = CacheItem (item)\n cached = self.cache.get(hash(item))\n if cached is None:\n self.evict_or_add (new_item)\n cached.hits += 1", "def put(self, key, item):\n if key and item:\n if key in self.cache_data.keys():\n self.cache_data[key] = item\n self.stack.append(key)\n else:\n if len(self.cache_data.keys()) < self.MAX_ITEMS:\n self.cache_data[key] = item\n self.stack.append(key)\n elif len(self.cache_data.keys()) == self.MAX_ITEMS:\n k = self.stack.pop(-1)\n self.cache_data.pop(k)\n print(\"DISCARD: {}\".format(k))\n self.cache_data[key] = 
item\n self.stack.append(key)", "def cache_get_hashed(item: str) -> object:\n\treturn cache_get(md5(item))", "def __setitem__(self, key, item):\n with self.__lock:\n cache_entry = CacheEntry(item, self._default_duration)\n log.debug(\"__setitem__: {}\".format(cache_entry))\n self._d[key] = cache_entry", "def put(self, item): \n self.__db.rpush(self.key, item)", "def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n # If Key exists, update Cache Item\n if key in self.cache_data:\n for ci in self.LFU:\n if ci.key is key:\n ci.updateItem(item)\n else:\n # Length will be longer than max capacity, make room\n if len(self.cache_data) == self.MAX_ITEMS:\n\n # Determine discarded cache item\n discard = self.LFU[0]\n for x in self.LFU:\n if x.freq < discard.freq or \\\n x.freq == discard.freq and x.age > discard.age:\n discard = x\n\n # Discard Cache Item\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LFU.remove(discard)\n\n # Add new Cache Item\n data = LFUCacheItem(key, item, 0, 0)\n self.LFU.append(data)\n\n # increase age of all items\n for x in self.LFU:\n x.age += 1\n\n self.cache_data[key] = item", "def put(self, id, data):\n assert isinstance(data, dict)\n self._shelf[str(id)] = data", "def store(self, hash, original_url):\n self.r.set(hash, original_url)", "def store(self, key, value):\n self._cache[key] = value", "def add_item_to_cache(self) -> None:\n item = self.get_selected_item(self.tree_db)\n if item is None:\n return\n\n data_node = item.data()\n json_cache = self._data_encoder.encode(data_node.get_instance())\n self.send_data_to_cache(json_cache)", "def store(self, key, obj, timeout=0):\n with self._lock:\n self._check_expire()\n\n self._log.debug(\"storing '%s' (type=%s) with TO=%f\",\n key, type(obj), timeout)\n self._obj_cache[key] = obj\n self._obj_last_access[key] = time.time()\n self._obj_timeouts[key] = timeout", "def save(self, data, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n # Create path directory\n if not os.path.isdir(self.cache_path):\n logging.info(\"Creating cache directory at {}\".format(self.cache_path))\n mkpath(self.cache_path, 0o755)\n\n with open(cache_file_path, 'wb') as fp:\n logging.debug(\"Storing result in cache file at {}\".format(cache_file_path))\n pickle.dump(data, fp)\n\n return True", "def hash_save(db: Redis[bytes], hash: hash_t) -> None:\n db.zadd(HASH_INDEX, {str(hash): \"0\"})", "def put(self, key, item):\n # Do nothing if key is NoneType or item is NoneType\n if (key is None or item is None):\n return\n\n if key in self.cache_data:\n self.LRU = [ci for ci in self.LRU if ci.key != key]\n\n # increase age of all items\n for x in self.LRU:\n x.age += 1\n\n self.cache_data[key] = item\n data = LRUCacheItem(key, item, 0)\n self.LRU.append(data)\n\n # Length is longer than max capacity, make room\n if len(self.cache_data) > self.MAX_ITEMS:\n 
discard = self.LRU[0]\n for x in self.LRU:\n if x.age > discard.age:\n discard = x\n print(\"DISCARD: {}\".format(discard.key))\n del self.cache_data[discard.key]\n self.LRU.remove(discard)", "def set(cls, obj: Model, data):\n cache.set(cls._construct_key(obj), data)" ]
[ "0.6912247", "0.6871242", "0.6871242", "0.68123543", "0.672946", "0.6583027", "0.65691537", "0.65477073", "0.6547111", "0.65152246", "0.6514978", "0.64275014", "0.631806", "0.6274006", "0.6242404", "0.6202362", "0.618209", "0.6162696", "0.61198336", "0.6114743", "0.60722595", "0.60573757", "0.605294", "0.60466063", "0.60381794", "0.60368526", "0.60267276", "0.6010863", "0.6001774", "0.5932004" ]
0.76418275
0
Delete an item from the cache, using a hashed ID.
def cache_remove_hashed(item: str) -> None:
    cache_remove(md5(item))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, cache_key):\r\n pass", "def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed = True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)", "def __delitem__(self, tid: int):\n del self._cache[tid]", "def delete(key):\n return Cache.cache_connector.delete(key)", "def testDeletingItem(self):\n\n data = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n memcache.set('data', data)\n assert memcache.get('data') == data\n memcache.delete('data')\n assert memcache.get('data') == None", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def delete(self, key):\n # Initialize key variables\n result = self.cache.delete(key)\n\n # Return\n return result", "def delete_cache(self, key):\n self.r.delete(key)", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def remove(model_class, id):\n key = build_key(model_class, id)\n logger.info(\" CACHE INVALIDATE key=%s\", key)\n cache.delete(key) # Invalidate from cache\n User.objects.filter(id=id).delete()", "def cache_remove(item: str) -> None:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tif os.path.exists(cache):\n\t\tdelete_file(cache)", "def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()", "def delete(self, key):\n try:\n self._cache.delete(self.prepare_key(key))\n except Exception as err:\n return self.warn_or_error(err)", "def __delitem__(self, key: Hashable) -> None:\n del self.contents[key]\n return", "def delete_cached_account(username, registry):\n hmac_secret = registry.settings[\"userid_hmac_secret\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n cache = registry.cache\n cache_result = cache.delete(cache_key)\n return cache_result", "def remove(self, item):\n del self._dict[item]", "def delete(self, key):\n del self._cache[key]", "def delete_from_cache(self, cache_key, cache_index):\n try:\n del MEM_CACHE[cache_key.lower()][cache_index]\n except KeyError:\n pass", "def delete(self, _id):", "def delete(self):\n result = self._delete_key()\n if self.cacheable:\n # delete cache\n self.init_cache()\n return result", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def dal_delete(key):\n global store\n return store.delete(urllib.quote(key))", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key: T) -> None:\n self.delete(key)", "def delete(self, *item_or_id):\n\n id_set = set()\n for i in item_or_id:\n if isinstance(i, Item):\n id_set.add(i.id)\n else:\n id_set.add(i)\n\n (_, content) = self._request(self.url, body={\n 'c': 0, # unknown\n 's': self.session_key,\n 'fc': id_set,\n 'delexec.x': 1,\n 'delexec.y': 1})", "def __delitem__(self, k):\n j = self._hash_function(k)\n self._bucket_delitem(j, k)\n self._n -= 1", "def delete_cached(task_id, broker=None):\n if not broker:\n broker = get_broker()\n broker.cache.delete('{}:{}'.format(broker.list_key, task_id))", "def delete(self, mapitem_id: int):\n pass", "def delete( key ):\n global CACHE, STATS_KEYS_COUNT\n if key in CACHE:\n STATS_KEYS_COUNT -= 1\n del CACHE[key]" ]
[ "0.73074245", "0.7108162", "0.7069796", "0.6890898", "0.6881778", "0.68032175", "0.6778153", "0.66678333", "0.65715957", "0.65388805", "0.65362626", "0.6529011", "0.6476025", "0.6450751", "0.6441844", "0.6438636", "0.6429002", "0.6412903", "0.6373064", "0.6361154", "0.63561386", "0.6327805", "0.6313344", "0.6313344", "0.6304651", "0.62904924", "0.6284296", "0.6273144", "0.62664324", "0.6257709" ]
0.75325364
0
Download the file from the provided url to the location. Uses the cache.
def download_file_cached(file_url: str, location: str) -> None:
    item = os.path.basename(location)
    local = cache_find(item)

    if local is None:
        # Cached item doesn't exist
        cache_create()
        download_file(file_url, "Cached/" + item)
        copy_file("Cached/" + item, location)
        return

    # Copy file from cache to location
    uux.show_debug("Cache hit for " + item)
    copy_file(local, location)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)", "def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path", "def download_file(self, url, filename):\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()", "def download(url: str, to_dir: str) -> str:\n to_file = os.path.join(to_dir, get_filename_from_url(url))\n logger.debug(\"Download %s to %s\", url, to_file)\n\n h = httplib2.Http(\".cache\")\n (_, content) = h.request(url, \"GET\")\n with open(to_file, 'wb') as f:\n f.write(content)\n return to_file", "def download_file(self, url, path):\n print('\\tDownloading: ', path)\n with open(path, 'w') as outfile:\n try:\n response = self._http_client.get(url)\n outfile.write(response.text)\n finally:\n response.close()\n outfile.close()\n gc.collect()", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. 
Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def download_file(url, local_filename, update=False):\n if os.path.isfile(local_filename):\n if not update:\n return\n else:\n os.remove(local_filename)\n\n r = requests.get(url, stream=True)\n # http://stackoverflow.com/questions/15352668/download-and-decompress-gzipped-file-in-memory\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)", "def download_from_url(path, url):\n filename = url.split(\"/\")[-1]\n found_file = find_file(path, filename, max_depth=0)\n if found_file is None:\n filename = os.path.join(path, filename)\n logging.info(\"Downloading from %s to %s.\" % (url, filename))\n inprogress_filepath = filename + \".incomplete\"\n inprogress_filepath, _ = urllib.request.urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress.\n print()\n tf.gfile.Rename(inprogress_filepath, filename)\n return filename\n else:\n logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n return found_file", "def cache_download(url, path):\n # Prep cache path and make necessary dirs\n cache_path = os.path.join(CAMD_CACHE, path)\n\n # Download and write file\n if not os.path.isfile(cache_path):\n makedirs_p(os.path.split(cache_path)[0])\n r = requests.get(url, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n block_size = 1024 # 1 Kibibyte\n t = tqdm(total=total_size, unit='iB', unit_scale=True)\n with open(cache_path, 'wb') as f:\n for data in r.iter_content(block_size):\n t.update(len(data))\n f.write(data)", "def download(url, filename=None):\n\t# requirements os, shutil, urllib.parse, urllib.request\n\tif not filename:\n\t\turl_parts = urllib.parse.urlparse(url)\n\t\tfilename = os.path.basename(url_parts.path)\n\turl_h = urllib.request.urlopen(url)\n\twith open(filename, 'wb') as file_h:\n\t\tshutil.copyfileobj(url_h, file_h)\n\turl_h.close()\n\treturn", "def get(self, url, recache=False):\n\n cachedir = self._cachedir(url)\n cachefilename = self._cachefilename(cachedir)\n\n # If \"filename\" file exists, it's a hit; read the actual filename\n # from there and return the cached content file\n if cachefilename.exists() and not recache:\n logger.debug(f\"Cache hit for {url}\")\n with open(cachefilename) as f:\n filename = f.readline()\n return cachedir / filename\n\n # Cache miss; attempt to download the URL\n with requests.get(url, allow_redirects=True, stream=True,\n timeout=30.0) as r:\n r.raise_for_status()\n\n # Determine download filename\n filename = None\n cd = r.headers.get('content-disposition')\n if cd:\n filenames = re.findall('filename=([^;]+)', cd)\n if len(filenames) > 0:\n filename = filenames[0]\n if filename is None:\n filename = os.path.basename(urllib.parse.urlparse(url).path)\n logger.info(f\"Caching {url} ({filename})\")\n\n cachefile = cachedir / filename\n try:\n # Download file\n with open(cachefile, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n\n self._writefilename(cachedir, filename)\n\n except:\n if cachefile.exists():\n cachefile.unlink()\n if cachefilename.exists():\n cachefilename.unlink()\n raise\n\n logger.debug(\"Downloaded file\")\n return cachefile", "def url_retrieve(url, output_file):\n r 
= requests.get(url, allow_redirects=True)\n if r.status_code != 200:\n raise ConnectionError(f\"Could not download {url}\\nError code: {r.status_code}\")\n\n output_file.write_bytes(r.content)", "def download_from_url(file_name: str, url: str, download_dir: str, cache_dir: Optional[str] = None):\n if not isinstance(url, str):\n raise TypeError(f\"{url} must be str type.\")\n if not isinstance(file_name, str):\n raise TypeError(f\"{file_name} must be str type.\")\n if not isinstance(download_dir, str):\n raise TypeError(f\"{download_dir} must be str type.\")\n\n if cache_dir is None:\n cache_dir = URDUHACK_DIRECTORY\n\n Path(cache_dir).mkdir(parents=True, exist_ok=True)\n tf.keras.utils.get_file(fname=file_name, origin=url, cache_subdir=download_dir, cache_dir=cache_dir, extract=True)", "def download(self, url):\n try:\n logging.info(self.log_format((\"downloading \" + url)))\n webFile = urllib.urlopen(url)\n localFile = open(self.paths['workspace'] + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n logging.error(self.log_format((\"could not get url \" + url)))", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname", "def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename", "def download_url(filename, url):\n latest_package_url = request.urlopen(url).read().decode(\"utf-8\")\n print(\"Downloading latest package:\\n{}\".format(latest_package_url))\n request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback)", "def cache_file(url, prefix):\n cache_filepath = _get_cached_filepath(\n prefix=prefix,\n url=url,\n )\n # If the file exists, return path.\n if os.path.isfile(cache_filepath):\n logger.info('Returning cached file for {}.'.format(url))\n return cache_filepath\n # If the file does not exist, download and return path.\n else:\n r = requests.get(url, verify=False)\n\n with open(cache_filepath, 'wb') as f:\n f.write(r.content)\n\n logger.info('Caching file for {}.'.format(url))\n return cache_filepath", "def load_file_from_url(self, url: str) -> bytes:\n cached_content = self.cache_get(url)\n if cached_content is not None:\n return cached_content\n try:\n req 
= requests.get(url, timeout=self.requests_timeout)\n req.raise_for_status()\n content = req.content\n self.cache_set(url, content)\n except requests.RequestException as err:\n self.log_error(err)\n repl_content = self.get_replacement_file(url)\n if repl_content is None:\n raise ImageNotFound(err)\n content = repl_content\n return content", "def downloadAndReplaceFile(file_path, download_url):\r\n file = urllib.request.urlopen(download_url)\r\n with open(file_path, 'wb') as output:\r\n output.write(file.read())", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def download_file(url, output_filename):\n print(\"Downloading\", url, \"to\", output_filename)\n r = requests.get(url)\n r.raise_for_status()\n with open(output_filename, 'wb') as f:\n f.write(r.content)", "def _download_file(file_url: str, file_path: str) -> str:\n if os.path.exists(file_path):\n return file_path\n op_desc = f\"Downloading {os.path.basename(file_path)}\"\n try:\n with requests.Session() as req_sess:\n req_res = req_sess.get(file_url, stream=True)\n total_length = int(req_res.headers.get(\"Content-Length\"))\n with tqdm.wrapattr(req_res.raw, \"read\", total=total_length, desc=op_desc) as raw:\n with open(file_path , \"wb\") as file:\n shutil.copyfileobj(raw,file)\n return file_path\n except Exception as network_error:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise network_error", "def _maybe_download(self, url):\n filename = os.path.basename(url)\n download_path = os.path.join(self._model_dir, filename)\n if os.path.exists(download_path):\n return download_path\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n urllib.request.urlretrieve(url, download_path, _progress)\n statinfo = os.stat(download_path)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return download_path", "def download_file_from_url(url, PATH, file_name):\n with requests.get(url) as r:\n with open(PATH+'/'+file_name, 'wb') as f:\n f.write(r.content)", "def download(self, url):\n url = URL(url)\n downloader = getattr(self, 'download_%s' % url.scheme, None)\n if downloader is None:\n msg = \"We haven't implemented the '%s' protocol yet.\" % url.scheme\n raise NotImplementedError(msg)\n fp = None\n else:\n fp = downloader(url)\n return fp", "def download_file(url, file_name):\n conn = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n with conn.request('GET', url, preload_content=False) as resp, open(file_name, 'wb') as out:\n shutil.copyfileobj(resp, out)", "def _download(url, file_name):\n # File length can only be approximated from the resulting GET, unfortunately\n r = requests.get(url, stream=True)\n if 'Content-Length' in r.headers:\n file_len = int(r.headers['Content-Length'])\n elif 'X-Original-Content-Length' in r.headers:\n file_len = int(r.headers['X-Original-Content-Length'])\n else:\n file_len = 0\n r.raw.decode_content = True\n with open(file_name, 'wb') as f:\n _copyfileobj(r.raw, f, chunks=(file_len / (64. * 1024)))\n r.close()\n\n return file_name" ]
[ "0.77907306", "0.769478", "0.7670883", "0.7608222", "0.76022357", "0.75971365", "0.7591167", "0.7481028", "0.7422788", "0.73972017", "0.73849773", "0.7383973", "0.73804367", "0.73578197", "0.7338381", "0.7335585", "0.73251", "0.73097956", "0.7306151", "0.7297983", "0.7275222", "0.72738224", "0.72507995", "0.72412425", "0.72390497", "0.72340584", "0.7225022", "0.7224516", "0.7220617", "0.72034055" ]
0.79674214
0
Return true if a folder at the provided path exists.
def folder_exists(path: str) -> bool:
    return os.path.isdir(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_exists(self, path):\n\t\tos_path = self._get_os_path(path=path)\n\t\treturn is_folder(self.bucket, os_path)", "def is_dir(self, path):\n return self.dir_exists(path)", "def check_if_dir_exists(path):\n\n # From http://stackoverflow.com/questions/8933237/how-to-find-if-directory-exists-in-python\n return os.path.isdir(path)", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def _is_folder_exists() -> bool:\n\n pwd: str = os.getcwd()\n data_folder: str = os.path.join(pwd, \"data\")\n return os.path.isdir(data_folder)", "def exists(self, path):\n path = path.strip(\"/\")\n if not path: # it's a directory, for all narratives\n return True\n return self.file_exists(path)", "def check_folder_exists(path):\n if not os.path.isdir(path):\n raise ValueError(\"Error: \\\"\" + path + \"\\\" is not a folder\")", "def dir_exists(self, path):\n return self._dir_model.get_by_name(name=path) != []", "def DirExists(folder):\n return os.path.isdir(folder)", "def check_folder_exists(location: str) -> bool:\n if os.path.isdir(location):\n return True\n else:\n return False", "def folder_exists(self, path):\n bucket_name, save_path = self.split_name(path)\n if self.bucket_exists(bucket_name):\n try:\n result = self.s3_client.list_objects(Bucket=bucket_name, Prefix=save_path)\n if result[\"Contents\"]:\n return True\n except (botocore.exceptions.ClientError, KeyError):\n # The object does not exist.\n return False\n return False", "def dfs_exists(self, path):\n try:\n df = self.dfs_ls(path)\n except Exception as e:\n if \"No such file or directory\" in str(e):\n return False\n else:\n raise e\n if len(df) == 0:\n # it is a folder\n return True\n ex = df[df.name == path]\n if len(ex) > 0:\n return True\n ex = df[df.apply(lambda r: r[\"name\"].startswith(path + \"/\"), axis=1)]\n if len(ex) > 0:\n return True\n return False", "def exists(self, path):\n return self.dir_exists(path) or self.file_exists(path)", "def exists(path: str) -> bool:\n pass", "def exists(path: str) -> bool:\n return _fs().exists(path)", "def non_empty_folder(folder):\n if not os.path.exists(folder):\n return False\n if not os.path.isdir(folder):\n return False\n if not os.listdir(folder):\n return False\n return True", "def is_dir(path: str) -> bool:\n return _fs().is_dir(path)", "def Exists(self, path: str) -> bool:\n ...", "def is_dir(self, path):", "def isdir(path):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n if path.endswith(\"/\"):\r\n path = path[:-1]\r\n\r\n return samba.folder_exists(os.path.basename(path), os.path.dirname(path))\r\n else:\r\n return os.path.isdir(path)", "def __is_dir(path):\n if path[-2:] == \"..\":\n return False\n try:\n os.listdir(path)\n return True\n except OSError:\n return False", "def check_folder(dir, folder_name): \n \n items = os.listdir(dir)\n if folder_name in items: \n print(' has a folder named: ' + folder_name)\n return True\n else:\n print(' does not have a folder named: ' + folder_name)\n return False", "def is_dir(self, path: PathLike):", "def exists(self, path: str) -> bool:\n pass", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def exists(self, path: str) -> bool:\n return self.fs.exists(self._full_path(path))", "def isdir(self, path):\n return os.path.isdir(path)", "def isFolderValid(folderPath):\n if os.path.isdir(folderPath):\n return True\n else:\n return False", "def folder_is_empty(folder_path):\n\n if os.path.isdir(folder_path):\n return (len(os.listdir(folder_path)) 
== 0)\n \n return True", "def folder_is_empty(folder_path):\n\n if os.path.isdir(folder_path):\n return (len(os.listdir(folder_path)) == 0)\n \n return True" ]
[ "0.8179201", "0.79332864", "0.79051244", "0.79029113", "0.7897626", "0.7854854", "0.7750175", "0.76297736", "0.762717", "0.7530441", "0.74888587", "0.7445432", "0.7384682", "0.7375725", "0.7344606", "0.73166764", "0.7307144", "0.7302061", "0.72769064", "0.72708213", "0.726542", "0.7249166", "0.72412825", "0.72275823", "0.72207636", "0.7193557", "0.7183599", "0.7159562", "0.71484655", "0.71484655" ]
0.88796943
0
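A quick usage sketch for the folder_exists document in the row above; the path passed in is only an illustration, and os must be imported for the call to run.

import os

def folder_exists(path: str) -> bool:
    return os.path.isdir(path)

# Illustrative call: prints True only when "/tmp" exists and is a directory.
print(folder_exists("/tmp"))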
Relabel array with consecutive values.
def relabel_consecutive(lab, start_from=0):
    new_lab = np.empty_like(lab)
    new_lab[:] = np.unique(lab, return_inverse=True)[1]
    new_lab += start_from
    return new_lab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _relabel(self, a):\n\n labels = list(np.unique(a))\n if 0 in labels:\n labels.remove(0)\n\n if len(labels) == 0:\n return a.copy()\n old_values = np.asarray(labels)\n new_values = np.arange(1, len(labels) + 1, dtype=old_values.dtype)\n\n try:\n values_map = np.arange(int(a.max() + 1), dtype=new_values.dtype)\n except ValueError as e:\n raise ValueError(f\"{e}, arange length: {int(a.max() + 1)}\")\n values_map[old_values] = new_values\n\n return values_map[a.copy()]", "def repair_labels(labels):\n ret = np.copy(labels)\n ret[:, 0] = 10 # overwrite length to be stop seq\n ret = np.roll(ret, -1, axis=1) # move first to last\n return ret", "def reformat_label_values(self, labels_array):\n if isinstance(labels_array, np.ndarray):\n reformat_labels_array = labels_array - 1\n return reformat_labels_array\n else:\n print(\"type({}) is not np.ndarray\".format(labels_array))\n return labels_array", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def array2(self):\r\n profbox(whoami())\r\n # research\r\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\r\n labelnode = slicer.mrmlScene.GetNodeByID(inputLabelID)\r\n i = labelnode.GetImageData()\r\n shape = list(i.GetDimensions())\r\n shape.reverse()\r\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\r\n labels = []\r\n val = [[0, 0, 0] for i in range(a.max() + 1)]\r\n for i in xrange(2, a.max() + 1):\r\n w = numpy.transpose(numpy.where(a == i))\r\n # labels.append(w.mean(axis=0))\r\n val[i] = [0, 0, 0]\r\n val[i][0] = w[int(round(w.shape[0] / 2))][2]\r\n val[i][1] = w[int(round(w.shape[0] / 2))][1]\r\n val[i][2] = w[int(round(w.shape[0] / 2))][0]\r\n if val[i] not in self.previousValues:\r\n labels.append(val[i])\r\n self.previousValues.append(val[i])\r\n return labels", "def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels", "def transform_labels(self, y):\n y = np.repeat(y, self.n_ensemble, axis=0)\n return y", "def relabel_with_map_array(image, label_list, measurement_list):\n from skimage.util import map_array\n return map_array(np.asarray(image), np.asarray(label_list), np.array(measurement_list))", "def combine_labels_predicting(output_array):\n shape = output_array.shape[-3:]\n if len(output_array.shape) == 5:\n bs = output_array.shape[0]\n res_array = np.zeros((bs, ) + shape)\n res_array[output_array[:, 0, :, :, :] == 1] = 2\n res_array[output_array[:, 1, :, :, :] == 1] = 1\n res_array[output_array[:, 2, :, :, :] == 1] = 4\n elif len(output_array.shape) == 4:\n res_array = np.zeros(shape)\n res_array[output_array[0, :, :, :] == 1] = 2\n res_array[output_array[1, :, :, :] == 1] = 1\n res_array[output_array[2, :, :, :] == 1] = 4\n 
return res_array", "def range_to_label(arange):\r\n # pass\r\n C = arange.size - 1\r\n label = np.ones((arange[-1], ), dtype=np.int)\r\n for i in xrange(1, C):\r\n label[arange[i]: arange[i+1]] *= (i+1)\r\n return label", "def create_label_array(el):\n num_digits = len(el) # first element of array holds the count\n labels_array = np.ones([MAX_LABELS+1], dtype=int) * 10\n labels_array[0] = num_digits\n for n in range(num_digits):\n if el[n] == 10: el[n] = 0 # reassign 0 as 10 for one-hot encoding\n labels_array[n+1] = el[n]\n return labels_array", "def create_label_array(el):\n num_digits = len(el) # first element of array holds the count\n labels_array = np.ones([MAX_LABELS+1], dtype=int) * 10\n labels_array[0] = num_digits\n for n in range(num_digits):\n if el[n] == 10: el[n] = 0 # reassign 0 as 10 for one-hot encoding\n labels_array[n+1] = el[n]\n return labels_array", "def initialization_based(input_array):\n\n # search for the unique labels in the array\n oh_array = np.unique(input_array, return_inverse=True)[1]\n # set the predicted class on 1, and all the other classes on 0\n out = np.zeros((oh_array.shape[0], oh_array.max() + 1), dtype=int)\n out[np.arange(out.shape[0]), oh_array.ravel()] = 1\n return out", "def _alter(self, label):\n altered = np.full(self.n, -1)\n altered[np.where(self.y_train == label)] = +1\n return altered", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def create_TargetLabel(dataset):\n label_Array = dataset['close_-1_r'].shift(-1)\n label_Array = label_Array.apply(lambda x:1 if x>0.0000 else 0)\n return label_Array", "def labels_array(self):\n return _build_label_vector_rows(\n [[(label, 1)] for label in self.labels], self.training_labels)[1:].T", "def regroup_dataset(labels):\r\n batch_y = labels.copy()\r\n for i, label in enumerate(labels):\r\n if label in [0, 15, 19]:\r\n batch_y[i]=0\r\n if label in [1, 2, 3, 4, 5,]:\r\n batch_y[i]=1\r\n if label in [6]:\r\n batch_y[i]=2\r\n if label in [7,8,9,10]:\r\n batch_y[i]=3\r\n if label in [11,12,13,14]:\r\n batch_y[i]=4\r\n if label in [16,17,18]:\r\n batch_y[i]=5\r\n \r\n print('regrouped label', batch_y.shape)\r\n return batch_y", "def value_to_class_label(bin_arr, val_arr,cla_arr=string.ascii_lowercase):\n\n return [cla_arr[i] for i in value_to_class_index(bin_arr, val_arr)]", "def value_to_class_label(bin_arr, val_arr,cla_arr=string.ascii_lowercase):\n\n return [cla_arr[i] for i in value_to_class_index(bin_arr, val_arr)]", "def fluxes_to_labels(fluxes: np.ndarray) -> np.ndarray:\n return ((1 - fluxes) // 2).astype(np.int8)", "def _relabel(array, renames):\n\n att_renames = []\n dim_renames = []\n\n for k, v in renames.items():\n if k in array.att_names:\n att_renames.extend([k, v])\n elif k in array.dim_names:\n dim_renames.extend([k, v])\n else:\n raise ValueError(\"Invalid array attribute: %s\" % k)\n\n return array.attribute_rename(*att_renames).dimension_rename(*dim_renames)", "def clean_labels(labels):\n\n llabels, slabels = list(labels), set(labels)\n \n for l in slabels:\n if llabels.count(l) <2 and l != max(slabels):\n llabels[llabels.index(l)] = l+1\n return clean_labels(llabels)\n elif llabels.count(l) <2 and l == max(slabels):\n llabels[llabels.index(l)] = l-1\n return clean_labels(llabels)\n else:\n return np.array(llabels)", "def bootstrap_resample(labels):\n 
idxs = np.arange(len(labels))\n num_labels = max(labels) + 1\n bootstrap_idxs = np.zeros_like(idxs)\n ptr = 0\n for i in range(num_labels):\n strat = idxs[labels == i]\n bootstrap_idxs[ptr:ptr + len(strat)] = np.random.choice(strat, len(strat), replace=True)\n ptr += len(strat)\n return bootstrap_idxs", "def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]", "def relabel_partial(df):\n df = df.reset_index()\n\n df['label_shifted'] = df['label'].shift(-1)\n df['label'] = np.where(df['label'] < df['label_shifted'],\n df['label_shifted'],\n df['label'])\n df = df.drop(['label_shifted'], axis=1)\n\n # Make it multiindex\n df['event'] = df.index\n df = df.set_index(['sample_nr', 'event'])\n df = df.reset_index('event', drop=True)\n df = df.set_index(df.groupby(level=0).cumcount().rename('event'), append=True)\n df = df.sort_index()\n\n return df", "def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label", "def encode_labels(self, lilabs):\n\n y = []\n for lab in lilabs:\n y.append([1 if l in lab else 0 for l in self.labels])\n\n return np.array(y, dtype=float)", "def initialization_based(input_array):\n\n # Search for the unique labels in the array\n oh_array = np.unique(input_array, return_inverse=True)[1]\n\n # Define the shape of the one hot encoded array\n out = np.zeros((oh_array.shape[0], oh_array.max() + 1), dtype=int)\n\n # Set the predicted class on 1, and all the other classes stays at 0\n out[np.arange(out.shape[0]), oh_array] = 1\n\n return out", "def renumber_labels_ordered(a,correspondence=0):\n assert amin(a)>=0\n assert amax(a)<=2**25\n labels = sorted(unique(ravel(a)))\n renum = zeros(amax(labels)+1,dtype='i')\n renum[labels] = arange(len(labels),dtype='i')\n if correspondence:\n return renum[a],labels\n else:\n return renum[a]" ]
[ "0.69688505", "0.64422935", "0.6288373", "0.61246604", "0.60274583", "0.59455514", "0.5938729", "0.57887626", "0.5761393", "0.5696665", "0.56867516", "0.56867516", "0.56587917", "0.562061", "0.561991", "0.560544", "0.5561162", "0.5536896", "0.5529303", "0.5529303", "0.5501358", "0.54987705", "0.54945046", "0.54893076", "0.5482258", "0.54755473", "0.54166675", "0.54009014", "0.5389888", "0.53821236" ]
0.72478616
0
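A small worked example of the relabel_consecutive document in the row above; the input array is made up for illustration.

import numpy as np

def relabel_consecutive(lab, start_from=0):
    new_lab = np.empty_like(lab)
    new_lab[:] = np.unique(lab, return_inverse=True)[1]
    new_lab += start_from
    return new_lab

# Labels are ranked by value (3 < 7 < 9) and renumbered consecutively from start_from.
print(relabel_consecutive(np.array([7, 7, 3, 9])))                # [1 1 0 2]
print(relabel_consecutive(np.array([7, 7, 3, 9]), start_from=1))  # [2 2 1 3]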
Relabel according to overlap with reference.
def relabel_by_overlap(lab, ref_lab):
    u1 = np.unique(lab)
    u2 = np.unique(ref_lab)

    if u1.size > u2.size:
        thresh = lab.max() + 1
        lab_shifted = lab + thresh
        lab_corr = find_label_correspondence(lab_shifted, ref_lab)
        lab_shifted = relabel(lab_shifted, new_labels=lab_corr)
        ulab = np.unique(lab_shifted)
        ulab = ulab[ulab >= thresh]
        map_seq = dict(zip(ulab, np.arange(ulab.size) + ref_lab.max() + 1))
        return relabel(lab_shifted, new_labels=map_seq)

    lab_corr = find_label_correspondence(lab, ref_lab)
    return relabel(lab, new_labels=lab_corr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_labels(mask1, mask2):\n # Find the object in mask2 that has maximum overlap with an object in max1,\n # (as a fraction of the objects pixels in mask1)\n def get_max_overlap(mask1, mask2, label1):\n # Count overlapping pixels.\n labels, counts = np.unique(mask2[mask1 == label1], return_counts=True)\n # Sort labels by counts (ascending).\n labels_sorted = labels[np.argsort(counts)]\n counts_sorted = counts[np.argsort(counts)]\n # Select new label with maximum overlap.\n max_overlap = labels_sorted[-1]\n return max_overlap\n \n def main(mask1, mask2):\n if not (mask1.shape == mask2.shape):\n raise ValueError(\"Masks do not have the same shape.\")\n # Initialize blank mask.\n updated_mask = np.zeros(mask2.shape)\n # Go one-by-one through the labels in mask2\n for label in np.unique(mask2)[1:]:\n # Find label in mask1 with maximum overlap with nuc from mask2.\n mask1_besthit = get_max_overlap(mask2, mask1, label)\n # Find reverse: best hit for the mask1 label in mask2.\n mask2_besthit = get_max_overlap(mask1, mask2, mask1_besthit)\n # If the labels are reciprocal best hits, update label in \n # new mask to have the shape of the object in mask 2 with \n # the label propagated from mask1.\n if ((mask2_besthit == label) and (mask1_besthit != 0)):\n updated_mask[mask2 == label] = mask1_besthit\n\n return updated_mask\n return main(mask1, mask2)", "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def relabel_labelmask(labelmask, preserve_order=True):\n mask = np.copy(labelmask)\n # Get all object labels and their counts.\n labels, counts = np.unique(mask, return_counts=True)\n # Get the indexes of sorted counts, descending.\n ordered_indexes = np.argsort(counts)[::-1]\n # Set largest object as background (ID=0).\n background_label = labels[ordered_indexes[0]]\n mask[mask == background_label] = 0\n # Renumber the rest of the objects 1..n.\n obj_num=1\n if (preserve_order):\n oldlabels = labels\n else:\n oldlabels = labels[ordered_indexes]\n for old_label in oldlabels:\n if (old_label != background_label):\n mask[labelmask == old_label] = obj_num\n obj_num = obj_num + 1\n return mask", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def _relabel(self, a):\n\n labels = list(np.unique(a))\n if 0 in labels:\n labels.remove(0)\n\n if len(labels) == 0:\n return a.copy()\n old_values = np.asarray(labels)\n new_values = np.arange(1, len(labels) + 1, dtype=old_values.dtype)\n\n try:\n values_map = np.arange(int(a.max() + 1), dtype=new_values.dtype)\n except ValueError as e:\n raise ValueError(f\"{e}, arange length: {int(a.max() + 1)}\")\n values_map[old_values] = new_values\n\n return values_map[a.copy()]", "def _rectified_relabel(infr, cc_subgraphs):\n # Determine which names can be reused\n from wbia.scripts import name_recitifer\n\n infr.print('grouping names for rectification', 3)\n grouped_oldnames_ = [\n list(nx.get_node_attributes(subgraph, 'name_label').values())\n for count, subgraph in enumerate(cc_subgraphs)\n ]\n # Make sure negatives dont get priority\n grouped_oldnames = [\n [n for n in group if len(group) == 1 or n > 0] for group in grouped_oldnames_\n ]\n 
infr.print(\n 'begin rectification of %d grouped old names' % (len(grouped_oldnames)), 2\n )\n new_labels = name_recitifer.find_consistent_labeling(\n grouped_oldnames, verbose=infr.verbose >= 3\n )\n infr.print('done rectifying new names', 2)\n new_flags = [\n not isinstance(n, int) and n.startswith('_extra_name') for n in new_labels\n ]\n\n for idx in ut.where(new_flags):\n new_labels[idx] = infr._next_nid()\n\n for idx, label in enumerate(new_labels):\n if label < 0 and len(grouped_oldnames[idx]) > 1:\n # Remove negative ids for grouped items\n new_labels[idx] = infr._next_nid()\n return new_labels", "def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]", "def relabel_variables(self, mapping, inplace=True):\n graph = self.graph\n ising_linear_ranges = self.ising_linear_ranges\n ising_quadratic_ranges = self.ising_quadratic_ranges\n\n try:\n old_labels = set(mapping.keys())\n new_labels = set(mapping.values())\n except TypeError:\n raise ValueError(\"mapping targets must be hashable objects\")\n\n for v in new_labels:\n if v in graph and v not in old_labels:\n raise ValueError(('A variable cannot be relabeled \"{}\" without also relabeling '\n \"the existing variable of the same name\").format(v))\n\n if not inplace:\n return Specification(nx.relabel_nodes(graph, mapping, copy=True), # also checks the mapping\n tuple(mapping.get(v, v) for v in self.decision_variables),\n self.feasible_configurations, # does not change\n vartype=self.vartype, # does not change\n ising_linear_ranges={mapping.get(v, v): ising_linear_ranges[v] for v in graph},\n ising_quadratic_ranges={mapping.get(v, v): {mapping.get(u, u): r\n for u, r in neighbors.items()}\n for v, neighbors in ising_quadratic_ranges.items()})\n else:\n # now we need the ising_linear_ranges and ising_quadratic_ranges\n shared = old_labels & new_labels\n\n if shared:\n # in this case we need to transform to an intermediate state\n # counter will be used to generate the intermediate labels, as an easy optimization\n # we start the counter with a high number because often variables are labeled by\n # integers starting from 0\n counter = itertools.count(2 * len(self))\n\n old_to_intermediate = {}\n intermediate_to_new = {}\n\n for old, new in mapping.items():\n if old == new:\n # we can remove self-labels\n continue\n\n if old in new_labels or new in old_labels:\n\n # try to get a new unique label\n lbl = next(counter)\n while lbl in new_labels or lbl in old_labels:\n lbl = next(counter)\n\n # add it to the mapping\n old_to_intermediate[old] = lbl\n intermediate_to_new[lbl] = new\n\n else:\n old_to_intermediate[old] = new\n # don't need to add it to intermediate_to_new because it is a self-label\n\n Specification.relabel_variables(self, old_to_intermediate, inplace=True)\n Specification.relabel_variables(self, intermediate_to_new, inplace=True)\n return self\n\n # modifies graph in place\n nx.relabel_nodes(self.graph, mapping, copy=False)\n\n # this is always a new object\n self.decision_variables = tuple(mapping.get(v, v) for v in self.decision_variables)\n\n # we can just relabel in-place without worrying about conflict\n for v in old_labels:\n if v in mapping:\n ising_linear_ranges[mapping[v]] = ising_linear_ranges[v]\n del ising_linear_ranges[v]\n\n # need to do the deeper level first\n for neighbors in ising_quadratic_ranges.values():\n for v in 
list(neighbors):\n if v in mapping:\n neighbors[mapping[v]] = neighbors[v]\n del neighbors[v]\n\n # now the top level\n for v in old_labels:\n if v in mapping:\n ising_quadratic_ranges[mapping[v]] = ising_quadratic_ranges[v]\n del ising_quadratic_ranges[v]\n\n return self", "def update_ref_label_by_prediction(\n ref_label: torch.Tensor,\n pre_label: torch.Tensor) -> torch.Tensor:\n ref_label = utils.update_ref_label_with_mask(\n ref_label, pre_label == 1, utils.REF_POSSIBLY_FOREGROUND)\n ref_label = utils.update_ref_label_with_mask(\n ref_label, pre_label == 0, utils.REF_POSSIBLY_BACKGROUND)\n return ref_label", "def relabel_partial(df):\n df = df.reset_index()\n\n df['label_shifted'] = df['label'].shift(-1)\n df['label'] = np.where(df['label'] < df['label_shifted'],\n df['label_shifted'],\n df['label'])\n df = df.drop(['label_shifted'], axis=1)\n\n # Make it multiindex\n df['event'] = df.index\n df = df.set_index(['sample_nr', 'event'])\n df = df.reset_index('event', drop=True)\n df = df.set_index(df.groupby(level=0).cumcount().rename('event'), append=True)\n df = df.sort_index()\n\n return df", "def relabel(self, old: int, new: int) -> None:\n if not (isinstance(old, int) and isinstance(new, int)):\n try:\n old = int(old)\n new = int(new)\n except ValueError:\n msg = f'Expecting integer arguments, got {type(old)} and {type(new)}!'\n raise ValueError(msg)\n\n if new in set(self.infos.keys()):\n msg = f'New label < {new} > is in existing labels {set(self.infos.keys())}!'\n raise ValueError(msg)\n\n # modify corresponding SegmentInfo object\n seginfo = self.infos[old]\n seginfo.label_value = new\n # modify array data\n self.data = relabel(self.data, old, new)\n # propagate state changes\n self._update_state_from_infos()", "def updateLabels(correspondance_list,labels_list,i,image):\n l_prev_index = len(labels_list[(i-1)%2])\n l_curr_index = len(labels_list[i%2])\n prev_index = labels_list[(i-1)%2]\n index_changes = []\n ref_image = np.copy(image)\n\n if l_prev_index==l_curr_index: #Same number of cells\n \n for x,y in correspondance_list: \n labels_list[i%2][y] = prev_index[x]\n image[ref_image==y+1] = prev_index[x]\n \n if l_curr_index > l_prev_index: # Apparition\n new_index = max(prev_index)+2\n for x,y in correspondance_list: \n if x<l_prev_index:\n labels_list[i%2][y] = prev_index[x] \n image[ref_image==y+1] = prev_index[x]\n \n else:\n labels_list[i%2][y] = new_index\n image[ref_image==y+1] = new_index\n index_changes.append(new_index)\n new_index+=1\n \n if l_curr_index < l_prev_index: # Disparition\n for x,y in correspondance_list: \n if y<l_curr_index:\n labels_list[i%2][y] = prev_index[x]\n image[ref_image==y+1] = prev_index[x]\n else:\n index_changes.append(prev_index[x])\n return index_changes", "def relabel_consecutive(lab, start_from=0):\n\n new_lab = np.empty_like(lab)\n new_lab[:] = np.unique(lab, return_inverse=True)[1]\n new_lab += start_from\n return new_lab", "def update_label(label1, label2, idx):\n for i in range(0, len(idx)):\n label1[i] = label2[idx[i]]\n return label1", "def relabeled(self, label, memo=None):\n from copy import deepcopy\n self._deepcopy_relabel_ = label\n new = deepcopy(self, memo)\n del self._deepcopy_relabel_\n return new", "def apply_remap_values(labels: np.ndarray, label_map: Dict[int, int]) -> np.ndarray:\n for l1, l2 in label_map.items():\n labels[labels == l1] = l2", "def label(linkage_matrix):\n N = linkage_matrix.shape[0] + 1\n Union = UnionFind(N)\n\n for i in range(N - 1):\n a, b = np.int(linkage_matrix[i, 0]), np.int(linkage_matrix[i, 
1])\n a_new, b_new = Union.find(a), Union.find(b)\n\n # link-by-index: linking the root node with\n # lower index to the root node with higher index\n if a_new < b_new:\n linkage_matrix[i, :2] = [a_new, b_new]\n else:\n linkage_matrix[i, :2] = [b_new, a_new]\n\n linkage_matrix[i, 3] = Union.union(a_new, b_new)\n\n return linkage_matrix", "def change_labels(labels, cluster_name, idx_to_change, target_labels):\n assert(type(idx_to_change) == list)\n assert(type(target_labels) == list)\n assert(len(idx_to_change) == len(target_labels))\n\n sub_list = labels[labels == cluster_name]\n\n for idx, target in zip(idx_to_change, target_labels):\n sub_list[idx] = target\n\n labels[labels == cluster_name] = sub_list\n\n return labels", "def relabel(peak_ids, oldparams, mask):\n spot_data = {}\n peak_num = 1\n for peak in peak_ids:\n #coords = np.where(mask == peak)\n paramsnew = oldparams[peak-1,:] # object 1 will be fitparams row 0\n # Rearrange params from fit function so coordinates lead.\n spot_data[peak_num] = paramsnew[[1,2,3,0,4,5,6]]\n peak_num = peak_num + 1\n return spot_data", "def splitCell(buff,index,ref_label,new_label):\n cell_before = np.copy(buff[:,:,index-1])\n cell_after = np.copy(buff[:,:,index])\n \n mask_after = cell_after ==ref_label\n \n cell_before[np.logical_not(mask_after)] = 0\n \n mask_ref_label = cell_before ==ref_label\n mask_new_label = cell_before==new_label\n \n after_sure_ref = np.logical_and(mask_ref_label,mask_after)\n after_sure_new = np.logical_and(mask_new_label,mask_after)\n after_unsure = np.logical_and(mask_after,np.logical_not(np.logical_or(after_sure_ref,after_sure_new) ) )\n\n xref,yref = np.where(after_sure_ref)\n ref_pts = np.concatenate((xref.reshape(-1,1),yref.reshape(-1,1)),axis=1)\n xnew,ynew = np.where(after_sure_new)\n new_pts = np.concatenate((xnew.reshape(-1,1),ynew.reshape(-1,1)),axis=1)\n \n labels_ref = np.ones(xref.shape[0])\n labels_new = np.zeros(xnew.shape[0])\n labels = np.concatenate((labels_ref,labels_new),axis=0)\n labels.reshape(-1,1)\n X= np.concatenate((ref_pts,new_pts),axis = 0)\n \n xu,yu = np.where(after_unsure)\n u_pts = np.concatenate((xu.reshape(-1,1),yu.reshape(-1,1)),axis=1)\n neigh = KNeighborsClassifier(n_neighbors=5)\n neigh.fit(X, labels)\n pred = neigh.predict(u_pts)\n for i in range(pred.shape[0]):\n #if pred is 1 goes to ref if 0 goes to new\n if pred[i]==1:\n after_sure_ref[u_pts[i,0],u_pts[i,1]]=True\n else:\n after_sure_new[u_pts[i,0],u_pts[i,1]]=True\n #Assigning the new values to the thing:\n buff[after_sure_ref,index] = ref_label\n buff[after_sure_new,index] = new_label", "def autolabel(rects):", "def label_segments(segs, truths, detected):\n for seg in segs:\n for truth in truths:\n if time_overlap(seg, truth): \n seg[\"label\"] = truth[\"label\"]\n for det in detected:\n if time_overlap(seg, det):\n if det[\"label\"] == truth[\"label\"]:\n seg[\"match\"] = True\n else:\n seg[\"match\"] = False\n return segs", "def UpdateLabel(self) -> _n_6_t_0:", "def calc_rpn_label_regr(img_data, width, height, resized_width, resize_height, downsampling_ratio,\n anchor_box_sizes, anchor_box_ratios, rpn_max_overlap, rpn_min_overlap):\n num_anchors = len(anchor_box_sizes) * len(anchor_box_ratios)\n num_anchor_ratios = len(anchor_box_ratios)\n num_bboxes = len(img_data[\"bboxes\"])\n # get the output feature map size based on the model architecture downsampling ratio\n fm_width, fm_height = resized_width / downsampling_ratio, resize_height / downsampling_ratio\n\n # stores the label of each anchor, indicating whether this anchor 
contains an object or not\n y_rpn_label = np.zeros((fm_height, fm_width, num_anchors))\n # stores the validness of each anchor, indicating whether this anchor has a label or not\n y_is_box_valid = np.zeros((fm_height, fm_width, num_anchors))\n # stores the delta regressions of each anchor,\n # [delta_center_x, delta_center_y, delta_width, delta_height]\n y_rpn_regr = np.zeros((fm_height, fm_width, num_anchors * 4))\n\n # number of anchors that one bounding box contains\n num_anchors_for_bbox = np.zeros(num_bboxes).astype(int)\n # the best anchor that one bounding box contains\n # [ feature_map_row_pixel_index, feature_map_column_pixel_index, anchor_ratio_index, anchor_size_index ]\n best_anchor_for_bbox = -1 * np.ones((num_bboxes, 4)).astype(int)\n # the best iou that one bounding box intersects with anchors\n best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32)\n # the best anchor regression target that one bounding box contains\n # [ delta_center_x, delta_center_y, delta_width, delta_height ]\n best_delta_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32)\n\n # convert bounding boxes in original images to that in resized images\n # columns: [ x1, x2, y1, y2 ]\n gta = np.zeros((num_bboxes, 4))\n for index, bbox in enumerate(img_data[\"bboxes\"]):\n gta[index, 0] = bbox[\"x1\"] * (resized_width * 1.0 / width)\n gta[index, 1] = bbox[\"x2\"] * (resized_width * 1.0 / width)\n gta[index, 2] = bbox[\"y1\"] * (resized_height * 1.0 / height)\n gta[index, 3] = bbox[\"y2\"] * (resized_height * 1.0 / height)\n\n # we start to iterate each combination of anchors\n for anchor_size_idx in range(len(anchor_box_sizes)):\n for anchor_ratio_idx in range(num_anchor_ratios):\n # first we determine the (width, height) of the anchor\n anchor_width = anchor_box_sizes[anchor_size_idx] * anchor_box_ratios[anchor_ratio_idx][0]\n anchor_height = anchor_box_sizes[anchor_size_idx] * anchor_box_ratios[anchor_ratio_idx][1]\n\n # then we traverse the feature map plane\n for ix in range(fm_width):\n # the anchor coordinates in resized image input\n anchor_x1 = downsampling_ratio * (ix + 0.5) - anchor_width / 2\n anchor_x2 = downsampling_ratio * (ix + 0.5) + anchor_width / 2\n\n if anchor_x1 < 0 or anchor_x2 > resized_width:\n continue\n\n for jy in range(fm_height):\n # the anchor coordinates in resized image input\n anchor_y1 = downsampling_ratio * (yj + 0.5) - anchor_height / 2\n anchor_y2 = downsampling_ratio * (yj + 0.5) + anchor_height / 2\n\n if anchor_y1 < 0 or anchor_y2 > resized_height:\n continue\n\n # ok, until now we get the specific anchor in resized image \n # (anchor_x1, anchor_x2, anchor_y1, anchor_y2)\n current_anchor_coord = [ anchor_x1, anchor_y1, anchor_x2, anchor_y2 ]\n\n anchor_label = \"neg\"\n best_iou_for_anchor = 0.0\n\n for bbox_idx in range(num_bboxes):\n current_bbox_coord = [ gta[bbox_idx, 0], gta[bbox_idx, 2], gta[bbox_idx, 1], gta[bbox_idx, 3] ]\n current_iou = iou.iou(current_bbox_coord, current_anchor_coord)\n\n # calculate regression target\n center_bbox_x = (gta[bbox_idx, 0] + gta[bbox_idx, 1]) / 2.0\n center_bbox_y = (gta[bbox_idx, 2] + gta[bbox_idx, 3]) / 2.0\n center_anchor_x = (anchor_x1 + anchor_x2) / 2.0\n center_anchor_y = (anchor_y1 + anchor_y2) / 2.0\n bbox_width = gta[bbox_idx, 1] - gta[bbox_idx, 0]\n bbox_height = gta[bbox_idx, 3] - gta[bbox_idx, 2]\n\n delta_center_x = (center_bbox_x - center_anchor_x) / anchor_width\n delta_center_y = (center_bbox_y - center_anchor_y) / anchor_height\n delta_width = np.log(bbox_width / anchor_width)\n delta_height = 
np.log(bbox_height / anchor_height)\n\n # we should help non-background bounding box find a best anchor\n if img_data[\"bboxes\"][bbox_idx][\"class\"] != \"bg\":\n if current_iou > best_iou_for_bbox[bbox_idx]:\n best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx]\n best_iou_for_bbox[bbox_num] = current_ious\n best_delta_for_bbox[bbox_num, :] = [delta_center_x, delta_center_y, delta_width, delta_height]\n\n # if the current iou surpasses the upper threshold, we will set the anchor\n # label to be true\n if current_iou > rpn_max_overlap:\n anchor_label = \"pos\"\n num_anchors_for_bbox[bbox_num] += 1\n # we should find the best regression target\n if current_iou > best_iou_for_anchor:\n best_iou_for_anchor = current_iou\n best_regr = (delta_center_x, delta_center_y, delta_width, delta_height)\n\n # if the current iou is in between lower and upper threshold, we will not\n # set the anchor label\n if current_ious > rpn_min_overlap and current_ious < rpn_max_overlap:\n if anchor_label != \"pos\":\n anchor_label = \"neutral\"\n\n # determine the classification target\n if anchor_label == \"neg\":\n y_is_box_valid[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 1\n y_rpn_label[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 0\n elif anchor_label == \"neutral\":\n y_is_box_valid[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 0\n y_rpn_label[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 0\n elif anchor_label == \"pos\":\n y_is_box_valid[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 1\n y_rpn_label[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 1\n start = 4 * (num_anchor_ratios * anchor_size_idx + anchor_ratio_idx)\n y_rpn_regr[jy, ix, start: start + 4] = best_regr\n\n\n # maybe some ground truth bounding box has no anchors iou more than upper threshold,\n # we should assign the best anchor for the ground truth\n for idx in range(num_anchors_for_bbox.shape[0]):\n if num_anchors_for_bbox[idx] == 0:\n if best_anchor_for_bbox[idx, 0] == -1:\n continue\n jy, ix, ratio_index, size_index = best_anchor_for_bbox[idx, :]\n y_is_box_valid[jy, ix, num_anchor_ratios * size_index + ratio_index] = 1\n y_rpn_label[jy, ix, num_anchor_ratios * size_index + ratio_index] = 1\n start = 4 * (num_anchor_ratios * size_index + ratio_index)\n y_rpn_regp[jy, ix, start: start + 4] = best_delta_for_bbox[idx, :]\n\n y_rpn_label = np.expand_dims(y_rpn_label, axis=0)\n y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0)\n\n positives = np.where(np.logical_and(y_rpn_label[0, :, :, :] == 1, y_is_box_valid[0, :, :, :] == 1))\n negatives = np.where(np.logical_and(y_rpn_label[0, :, :, :] == 0, y_is_box_valid[0, :, :, :] == 1))\n\n num_positives = len(positives[0])\n num_negatives = len(negatives[0])\n\n # normally the rpn has more negatives than positives, so we close some negatives, and limit the\n # total number\n num_regions = 256\n\n if num_positives > num_regions / 2:\n sampled_positives = random.sample(range(num_positives), num_positives - num_regions / 2)\n y_is_box_valid[0, positives[0][sampled_positives], positives[1][sampled_positives], positives[2][sampled_positives]] = 0\n num_positives = num_regions / 2\n\n if num_negatives + num_positives > num_regions:\n sampled_negatives = random.sample(range(num_negatives), num_negatives + num_positives - num_regions)\n y_is_box_valid[0, negatives[0][sampled_negatives], negatives[1][sampled_negatives], negatives[2][sampled_negatives]] = 0\n 
num_negatives = num_regions - num_positives\n\n # the result rpn classification labels, for the last axis, the first half part indicates whether\n # this anchor is a sample of not(contribute to the loss), the second half part indicates the\n # true labels\n result_rpn_labels = np.concatenate([y_is_box_valid, y_rpn_label], axis=3)\n # the result rpn regression targets, for the last axis, the first half part indicates whether the\n # (index + half length) postision should contribute to the regression loss, you know only the\n # anchors containing objects calculate the loss\n result_rpn_regr = np.concatenate([np.repeat(y_rpn_label, 4, axis=3), y_rpn_regr], axis=3)\n\n return np.copy(result_rpn_labels), np.copy(result_rpn_regr)", "def add_label(self, label, name, label_type):\n assert label_type in ['label', 'prediction', 'guide'], \\\n \"{} not in ['label', 'prediction', 'guide']: Must select an acceptable type\".format(label_type)\n check_numpy_table(label, req_fields=('raw_start', 'raw_length', 'reference_index',\n 'kmer', 'posterior_probability'))\n\n # label.sort(order=['raw_start'], kind='mergesort')\n # check the labels are in the correct format\n assert min(label[\"raw_start\"]) >= 0, \"Raw start cannot be less than 0\"\n assert 0 <= max(label[\"posterior_probability\"]) <= 1, \\\n \"posterior_probability must be between zero and one {}\".format(row[\"posterior_probability\"])\n\n # make sure last label can actually index the signal correctly\n try:\n self.scaled_signal[label[-1][\"raw_start\"]:label[-1][\"raw_start\"] + label[-1][\"raw_length\"]]\n except IndexError:\n raise IndexError(\"labels are longer than signal\")\n\n label1 = np.sort(label, order=['raw_start'], kind='mergesort')\n\n # infer strand alignment of read\n if label1[0][\"reference_index\"] >= label1[-1][\"reference_index\"]:\n minus_strand = True\n else:\n minus_strand = False\n if self.minus_strand is not None:\n if label[0][\"raw_start\"] != label[-1][\"raw_start\"]:\n assert self.minus_strand == minus_strand, \"New label has different strand direction, check label\"\n else:\n self.minus_strand = minus_strand\n\n # set label with the specified name\n if label_type == 'label':\n self.label[name] = label\n elif label_type == 'prediction':\n self.prediction[name] = label\n elif label_type == 'guide':\n self.guide[name] = label", "def _remove_and_relabel_blobs(labeled, wanted_blobs):\n labeled = labeled.copy()\n wanted_blobs = np.array(wanted_blobs)\n no_blobs = len(wanted_blobs)\n unwanted_blobs = np.arange(1, no_blobs+1)[np.logical_not(wanted_blobs)]\n wanted_blobs = np.arange(1, no_blobs+1)[wanted_blobs]\n\n for unwanted_blob in unwanted_blobs:\n labeled[labeled == unwanted_blob] = 0\n\n for new_label, wanted_blob in enumerate(wanted_blobs):\n new_label += 1\n labeled[labeled == wanted_blob] = -new_label\n\n return -labeled", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def construct_new_ref_map(labels: np.ndarray, samples: list, ref_map_shape: list):\n new_ref_map = np.zeros(shape=ref_map_shape) + BG_CLASS\n for i, indexes in enumerate(samples):\n new_ref_map[indexes[ROW_AXIS], indexes[COLUMNS_AXIS]] = labels[i]\n return new_ref_map.astype(int)", "def relabel_one_file(file_path: Path,\n result_file: Path = None,\n label_items=LABEL_ITEMS):\n global all_files_labels\n logger.info(f'Processing file: {file_path}')\n file_name = file_path.stem\n\n # Load data\n df = pd.read_csv(file_path, index_col=False, comment='#')\n ts = df['EventTimestamp(ns)'].values\n if 
'CurrentTimeMillis' in df.columns:\n utc_ref_ts = df['CurrentTimeMillis'].values[0]\n else:\n try:\n time_str = file_name.split('-')[1]\n utc_ref = datetime.strptime(time_str, '%Y_%m_%d_%H_%M_%S_%f')\n utc_ref_ts = utc_ref.timestamp()\n except ValueError as e:\n print(e)\n exit(1)\n\n old_labels = df['Activity'].values\n # Old labels to idx\n labels_idx = []\n previous_type = old_labels[0]\n start_idx = 0\n for i, t in enumerate(old_labels[1:], 1):\n if t != previous_type:\n if previous_type != 0:\n labels_idx.append([previous_type, start_idx, i])\n previous_type = t\n start_idx = i\n print(f'Current labels: {labels_idx}')\n\n acc = df[['AccelX', 'AccelY', 'AccelZ']].values\n guess_type_name = file_name.split('-')[0]\n if guess_type_name in AILAB_LABEL_NAMES_CONVERT_MAP.keys():\n guess_type_name = AILAB_LABEL_NAMES_CONVERT_MAP.get(guess_type_name)\n guess_type = LABEL_ITEMS_INDEX_DICT.get(guess_type_name, 0)\n logger.debug(f'Current guess type name: {guess_type_name}:{guess_type}')\n\n labeler = DataLabeler(label_items)\n labels = labeler.process(utc_ref_ts,\n ts,\n acc,\n file_name,\n selected=guess_type,\n labels=labels_idx)\n labels = merge_labels(labels)\n if len(labels) > 0:\n all_files_labels[file_name] = labels\n # Write to record fold label result file\n # Write to global result file\n if result_file is not None:\n with result_file.open('a+') as f_result:\n f_result.write(file_name)\n for v, s, e in labels:\n f_result.write(f',{v}_{ts[s]}_{ts[e]}')\n f_result.write('\\n')\n\n # Write back to data file:\n new_activity = np.zeros(len(df), dtype=np.int)\n for label in labels:\n new_type = label[0]\n start = label[1]\n end = label[2]\n new_activity[start:end] = new_type\n\n df['Activity'] = new_activity.tolist()\n df.to_csv(file_path, index=False)", "def relabeled(self, memo=None, labels=None):\n from copy import deepcopy\n\n self._deepcopy_relabel_ = True\n self._deepcopy_labels_ = labels\n new = deepcopy(self, memo)\n del self._deepcopy_relabel_\n del self._deepcopy_labels_\n return new" ]
[ "0.6455704", "0.6364087", "0.6258506", "0.6226747", "0.60531056", "0.60333496", "0.60238916", "0.59531003", "0.5953097", "0.5881179", "0.5791674", "0.57539934", "0.57377", "0.56943", "0.56618845", "0.5648628", "0.5617306", "0.5584542", "0.55834746", "0.5574411", "0.55666256", "0.55656874", "0.5555395", "0.5543065", "0.5511993", "0.5486078", "0.54733384", "0.5457674", "0.54541844", "0.54533225" ]
0.8048359
0
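The relabel_by_overlap document in the row above delegates to find_label_correspondence and relabel, which are not shown in this section. The sketch below is a simplified numpy-only stand-in for the same idea, with made-up label arrays: each label in lab is renamed to the ref_lab value it overlaps most.

import numpy as np

lab = np.array([1, 1, 2, 2, 3, 3])
ref_lab = np.array([4, 4, 5, 5, 6, 6])

out = lab.copy()
for u in np.unique(lab):
    # Count how often label u co-occurs with each reference label, keep the winner.
    ref_vals, counts = np.unique(ref_lab[lab == u], return_counts=True)
    out[lab == u] = ref_vals[np.argmax(counts)]

print(out)  # [4 4 5 5 6 6]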
Map data in source to target according to their labels. Target labels are sorted in ascending order, such that the smallest label indexes the value at position 0 in `source_val`. If `source_lab` is specified, any label in `target_lab` must be in `source_lab`.
def map_to_labels(source_val, target_lab, mask=None, fill=0, axis=0, source_lab=None):
    if mask is not None:
        if not isinstance(mask, np.ndarray):
            mask = target_lab != mask
        target_lab = target_lab[mask]
        mapped = map_to_labels(source_val, target_lab, axis=axis, source_lab=source_lab)
        return map_to_mask(mapped, mask, fill=fill, axis=axis)

    if axis == 1 and source_val.ndim > 1:
        source_val = source_val.T
    values2d = np.atleast_2d(source_val)

    ulab, idx = np.unique(target_lab, return_inverse=True)
    if ulab.size > values2d.shape[1]:
        raise ValueError('There are more target labels than source values.')

    if source_lab is not None:
        if source_lab.size != values2d.shape[1]:
            raise ValueError('Source values and labels must have same size.')
        if not np.isin(ulab, source_lab).all():
            raise ValueError('Cannot find target labels in source labels.')
        uq_sl, idx_sl = np.unique(source_lab, return_inverse=True)
        if source_lab.size != uq_sl.size:
            raise ValueError('Source labels must have distinct labels.')
        values2d = values2d[:, idx_sl]

    mapped = values2d[:, idx]
    if source_val.ndim == 1:
        return mapped[0]
    if axis == 1:
        return mapped.T
    return mapped
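To make the sorting behaviour described in the query concrete, here is a minimal numpy illustration of the core mapping performed by map_to_labels above, for the simple case with no mask and no source_lab; the values and labels are made up.

import numpy as np

source_val = np.array([10., 20., 30.])   # one value per label
target_lab = np.array([7, 7, 3, 9, 3])   # unique labels sorted ascending: 3, 7, 9

ulab, idx = np.unique(target_lab, return_inverse=True)
mapped = source_val[idx]                 # smallest label (3) takes source_val[0]
print(mapped)                            # [20. 20. 10. 30. 10.]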
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_remap_values(labels: np.ndarray, label_map: Dict[int, int]) -> np.ndarray:\n for l1, l2 in label_map.items():\n labels[labels == l1] = l2", "def convert_label(in_volume, label_convert_source, label_convert_target):\n mask_volume = np.zeros_like(in_volume)\n convert_volume = np.zeros_like(in_volume)\n for i in range(len(label_convert_source)):\n source_lab = label_convert_source[i]\n target_lab = label_convert_target[i]\n if(source_lab != target_lab):\n temp_source = np.asarray(in_volume == source_lab)\n temp_target = target_lab * temp_source\n mask_volume = mask_volume + temp_source\n convert_volume = convert_volume + temp_target\n out_volume = in_volume * 1\n out_volume[mask_volume>0] = convert_volume[mask_volume>0]\n return out_volume", "def convert_label(in_volume, label_convert_source, label_convert_target):\n mask_volume = np.zeros_like(in_volume)\n convert_volume = np.zeros_like(in_volume)\n for i in range(len(label_convert_source)):\n source_lab = label_convert_source[i]\n target_lab = label_convert_target[i]\n if(source_lab != target_lab):\n temp_source = np.asarray(in_volume == source_lab)\n temp_target = target_lab * temp_source\n mask_volume = mask_volume + temp_source\n convert_volume = convert_volume + temp_target\n out_volume = in_volume * 1\n out_volume[mask_volume>0] = convert_volume[mask_volume>0]\n return out_volume", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def _map_dims_(\n cls,\n source_array: List[int],\n target_array: List[int],\n source_idx: int,\n start_target_idx: int,\n source_to_target_map: DIMENSION_MAP,\n target_to_source_map: DIMENSION_MAP,\n ) -> Tuple[bool, int]:\n res, last_target_index = cls._can_reach_number_by_multiply(\n number_to_reach=source_array[source_idx], array=target_array, start_idx=start_target_idx\n )\n if not res:\n return (res, last_target_index)\n source_to_target_map[source_idx] = list(range(start_target_idx, last_target_index + 1))\n for idx in range(start_target_idx, last_target_index + 1):\n target_to_source_map[idx] = [source_idx]\n return (res, last_target_index)", "def map_targets(y, mapping=None):\r\n y_converted = []\r\n\r\n if mapping is None:\r\n y_converted = y\r\n else:\r\n if isinstance(mapping, list) or isinstance(mapping, (np.ndarray, np.generic)):\r\n if isinstance(y[0], list) or isinstance(y[0], (np.ndarray)): # if nested targets\r\n y_converted = y.copy()\r\n\r\n print(\"array of array1\")\r\n\r\n for indy, y_tmp in enumerate(y):\r\n y_converted[indy] = mapping[y_tmp]\r\n else: # if list\r\n print(\"array1\")\r\n\r\n y_converted = np.array(mapping[y])\r\n\r\n elif isinstance(mapping, dict):\r\n if isinstance(y[0], list) or isinstance(y[0], (np.ndarray)): # if nested targets\r\n y_converted = y.copy()\r\n\r\n print(\"array of array2\")\r\n for indy, y_tmp in enumerate(y):\r\n y_converted[indy] = [mapping.get(y_tmp2) for y_tmp2 in y_tmp]\r\n else:\r\n print(\"array2\")\r\n\r\n y_converted = np.array([mapping.get(y_tmp) for y_tmp in y])\r\n else:\r\n raise TypeError('y must be list, ndarray, dict or None')\r\n\r\n return y_converted", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def reduce_by_labels(values, labels, weights=None, target_labels=None,\n red_op='mean', axis=0, dtype=None):\n\n if axis == 1 and values.ndim == 1:\n axis = 0\n\n if target_labels is None:\n uq_tl = np.unique(labels)\n idx_back = None\n else:\n 
uq_tl, idx_back = np.unique(target_labels, return_inverse=True)\n\n if weights is not None:\n weights = np.atleast_2d(weights)\n\n v2d = np.atleast_2d(values)\n if axis == 1:\n v2d = v2d.T\n\n if isinstance(red_op, str):\n fred = _get_redop(red_op, weights=weights, axis=1)\n else:\n fred = red_op\n\n if dtype is None:\n dtype = np.float64\n if red_op in {'min', 'max', 'sum', 'mode'}:\n dtype = values.dtype\n\n mapped = np.empty((v2d.shape[0], uq_tl.size), dtype=dtype)\n for i, lab in enumerate(uq_tl):\n mask = labels == lab\n wm = None if weights is None else weights[:, mask]\n\n if isinstance(red_op, str):\n mapped[:, i] = fred(v2d[:, mask], wm)\n\n else:\n for idx in range(v2d.shape[0]):\n mapped[idx, i] = fred(v2d[idx, mask], wm)\n\n if idx_back is not None:\n mapped = mapped[:, idx_back]\n\n if axis == 1:\n mapped = mapped.T\n\n if values.ndim == 1:\n return mapped[0]\n return mapped", "def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)", "def relabel_with_map_array(image, label_list, measurement_list):\n from skimage.util import map_array\n return map_array(np.asarray(image), np.asarray(label_list), np.array(measurement_list))", "def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, label in numbered_classes}\n new_labels = [new_mapping[numbered[0]] for numbered in numbered_classes]\n\n return new_labels, new_mapping", "def convert_to_one_hot_labels(input, target, val=0):\n \n tmp = input.new(target.size(0), target.max() + 1).fill_(-1)\n tmp.scatter_(1, target.view(-1, 1), 1.0)\n # for some activation functions, e.g. relu\n if val == 0:\n ret = (tmp + 1) / 2\n # for some activation functions, e.g. 
tanh\n if val == -1:\n ret = tmp\n return ret", "def map_measurements_on_labels(labels_layer:\"napari.layers.Labels\", column:str = \"label\", viewer:\"napari.Viewer\" = None) -> \"napari.types.ImageData\":\n import pandas as pd\n import dask.array as da\n from dask import delayed\n from functools import partial\n\n labels = labels_layer.data\n table = pd.DataFrame(labels_layer.properties)\n\n # special treatment for time series\n if len(labels.shape) == 4:\n # determine how the Frame column is called; in case there is any\n frame_column = None\n for potential_frame_column in ['frame', 'Frame']:\n if potential_frame_column in table.keys():\n frame_column = potential_frame_column\n break\n\n # Relabel one timepoint\n output_sample = relabel_timepoint_with_map_array(labels, table, column, frame_column, 0)\n\n lazy_arrays = []\n for i in range(labels.shape[0]):\n # build a delayed function call for each timepoint\n lazy_processed_image = delayed(\n partial(relabel_timepoint_with_map_array, labels, table, column, frame_column, i)\n )\n lazy_arrays.append(\n lazy_processed_image()\n )\n\n # build an array of delayed arrays\n dask_arrays = [\n [da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype)]\n if len(output_sample.shape) == 2\n else da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype\n )\n for delayed_reader in lazy_arrays\n ]\n # Stack into one large dask.array\n stack = da.stack(\n dask_arrays,\n axis=0)\n return stack\n else:\n label_list = np.asarray(table['label']).tolist()\n measurement_list = np.asarray(table[column]).tolist()\n\n return relabel_with_map_array(labels, label_list, measurement_list)", "def fit(self, train: DataFrame, y: Series, target_label=None):\n logger = logging.getLogger(__name__)\n\n self._target_label = target_label\n\n self._labels = None\n self._rule_map = None\n\n self._nominal_column_encoders = None\n self._column_name_list = None\n self._column_name_index_map = None\n self._condition_mat = None\n\n self._gain_n = 0\n\n # Check that X and y have correct shape\n assert train.shape[0] == y.shape[0], 'X, y should have same length'\n\n self._nominal_column_encoders = init_encoder(train)\n encode_nominal(self._nominal_column_encoders, train)\n self._column_name_list = train.columns.values\n\n self._column_name_index_map = {name: index for index, name in enumerate(self._column_name_list)}\n\n unique_labels, counts = np.unique(y, return_counts=True)\n label_count_dict = dict(zip(unique_labels, counts))\n\n assert len(unique_labels) > 1, 'label has only one value'\n\n # Check whenever target_label is given that label has only two values and target_label is one of them\n assert (target_label is None) or (len(unique_labels) == 2), 'Positive label value given but label not binary.'\n assert (target_label is None) or (\n target_label in unique_labels), 'Positive label value given is not a label value.'\n\n self._labels = sorted(unique_labels, key=lambda e: label_count_dict[e],\n reverse=False) # returns list from ndarray;\n # the label values are sorted according to their occurrence; least frequent value first\n # Set reverse=True for reversing that order\n if (target_label is not None) and (self._labels[0] != target_label):\n # reverse value order for binary classification when target_label is not the least frequent\n self._labels.reverse()\n self.default_label = self._labels[-1] # the default value is the last label in the sorting order\n logger.debug('Label value ordering: ' + 
str(self._labels))\n\n self._condition_mat = _encoding_for_parallel(train, self._column_name_index_map)\n self._gain_n = self._condition_mat[0].shape[0]\n\n train = train.to_numpy(dtype=np.dtype(float))\n y = y.values\n\n x_irep_list = train\n y_irep = y\n\n self._rule_map = OrderedDict()\n\n for i in range(len(self._labels) - 1):\n pos = self._labels[i]\n neg = self._labels[i + 1:]\n\n indexer_pos = y_irep == pos\n indexer_neg = np.isin(y_irep, neg)\n\n train_pos = x_irep_list[indexer_pos]\n train_neg = x_irep_list[indexer_neg]\n\n irep_res = self._irep_plus_outer_loop(\n train_pos,\n train_neg,\n d=self.d,\n ratio=2 / 3,\n pruning_threshold=self.pruning_threshold\n )\n if len(irep_res) > 0:\n self._rule_map[pos] = irep_res\n bool_vec_index = unbound_rule_list_index(x_irep_list, self._rule_map[pos])\n x_irep_list = x_irep_list[bool_vec_index]\n y_irep = y_irep[bool_vec_index]\n\n logger.debug('# begin optimization')\n\n for _ in range(self.k):\n x_optimize = train\n y_optimise = y\n for i in range(len(self._labels) - 1):\n if not self._rule_map.get(self._labels[i]):\n continue\n pos = self._labels[i]\n neg = self._labels[i + 1:]\n\n indexer_pos = y_optimise == pos\n indexer_neg = np.isin(y_optimise, neg)\n\n train_pos = x_optimize[indexer_pos]\n train_neg = x_optimize[indexer_neg]\n\n self._rule_map[pos] = self._optimize(\n train_pos,\n train_neg,\n ratio=2 / 3,\n rules_input=self._rule_map[pos]\n )\n\n bool_vec_index = unbound_rule_list_index(x_optimize, self._rule_map[pos])\n x_optimize = x_optimize[bool_vec_index]\n y_optimise = y_optimise[bool_vec_index]\n\n return self", "def change_labels(labels, cluster_name, idx_to_change, target_labels):\n assert(type(idx_to_change) == list)\n assert(type(target_labels) == list)\n assert(len(idx_to_change) == len(target_labels))\n\n sub_list = labels[labels == cluster_name]\n\n for idx, target in zip(idx_to_change, target_labels):\n sub_list[idx] = target\n\n labels[labels == cluster_name] = sub_list\n\n return labels", "def encode_target(df, target_column, label_map):\n df_mod = df.copy()\n targets = df_mod[target_column].unique()\n # map_to_int = {name: n for n, name in enumerate(targets)}\n df_mod[target_column] = df_mod[target_column].replace(label_map)\n\n return (df_mod, targets)", "def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = [0, 1, 2, 3, 4]\n print(len(x_text))\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']] \n label[datasets['target'][i]] = labels[i]\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]", "def set_target(self, target):\n # parse target objects\n res = []\n targets = target.split(',')\n for item in targets:\n res.append(item)\n self.target = res\n \n # create conversion table for new index\n self.conversion = {}\n for i, cat in enumerate(self.target):\n self.conversion[cat] = f'{i}'", "def _lcs_table(source, target):\n rows = len(source)\n cols = len(target)\n lcs_table = [[0] * (cols + 1) for _ in range(rows + 1)]\n for i in range(1, rows + 1):\n for j in range(1, cols + 1):\n if source[i - 1] == target[j - 1]:\n lcs_table[i][j] = lcs_table[i - 1][j - 1] + 1\n else:\n lcs_table[i][j] = max(lcs_table[i - 1][j], lcs_table[i][j - 1])\n return lcs_table", "def train(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def tfds_map(self, example):\r\n if len(self.get_labels()) > 1:\r\n example.label = self.get_labels()[int(example.label)]\r\n 
return example", "def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = []\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']]\n #print('target={}, i={}'.format(datasets['target'], i))\n label[datasets['target'][i]] = 1\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]", "def _convert_to_not_cut(\n cls,\n source_indexes_not_cut_map: List[int],\n target_indexes_not_cut_map: List[int],\n source_to_targets_map: DIMENSION_MAP,\n ) -> DIMENSION_MAP:\n source_to_targets_map_not_cut = {}\n for source_index, target_indexes in source_to_targets_map.items():\n source_index_not_cut = source_indexes_not_cut_map[source_index]\n target_indexes_not_cut = list(map(lambda x: target_indexes_not_cut_map[x], target_indexes))\n source_to_targets_map_not_cut[source_index_not_cut] = target_indexes_not_cut\n return source_to_targets_map_not_cut", "def build_label_mapping(\n grouped_targeted_labels: List[Set[str]],\n nontargeted_labels: Optional[Set[str]] = None,\n) -> Dict[str, int]:\n mapping = {\n label: i + 1\n for i, label_group in enumerate(grouped_targeted_labels)\n for label in label_group\n }\n\n if nontargeted_labels:\n mapping.update({label: 0 for label in nontargeted_labels})\n\n return mapping", "def coupling_to_coupling_cost_matrix(source, target):\n n_source = source.shape[0]\n n_target = target.shape[0]\n\n cost_matrix = np.zeros((n_source*n_target, n_source*n_target))\n\n source_distances = pairwise_squared_distances(source)\n target_distances = pairwise_squared_distances(target)\n\n #UPDATE: vectorize\n for source_1 in range(n_source):\n for target_1 in range(n_target):\n index_1 = source_1*n_target + target_1\n for source_2 in range(source_1, n_source):\n for target_2 in range(n_target):\n index_2 = source_2*n_target + target_2\n \n cost_matrix[index_1, index_2] = (source_distances[source_1, source_2]\n + target_distances[target_1, target_2])\n cost_matrix[index_2, index_1] = cost_matrix[index_1, index_2]\n \n return cost_matrix", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n target_list_1 = []\n target_list_2 = []\n\n for k in range(self.teacher_num):\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n\n target_image_2 = self.random_color_aug(img)\n\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n target_list_1.append(target_image_1)\n target_list_2.append(target_image_2)\n target_list_1 = mx.nd.concat(*target_list_1, dim=0)\n target_list_2 = mx.nd.concat(*target_list_2, dim=0)\n return target_list_1, target_list_2", "def labels(self, labels: MutableMapping[str, str]):\n self._labels = labels", "def tfds_map(self, example):\n if len(self.get_labels()) > 1:\n example.label = self.get_labels()[int(example.label)]\n return example", "def levenshtein(source, target):\n if len(source) < len(target):\n return levenshtein(target, source)\n\n # So now we have len(source) >= len(target).\n if len(target) == 0:\n return len(source)\n\n # We call tuple() to force 
strings to be used as sequences\n # ('c', 'a', 't', 's') - numpy uses them as values by default.\n source = np.array(tuple(source))\n target = np.array(tuple(target))\n\n # We use a dynamic programming algorithm, but with the\n # added optimization that we only need the last two rows\n # of the matrix.\n previous_row = np.arange(target.size + 1)\n for s in source:\n # Insertion (target grows longer than source):\n current_row = previous_row + 1\n\n # Substitution or matching:\n # Target and source items are aligned, and either\n # are different (cost of 1), or are the same (cost of 0).\n current_row[1:] = np.minimum(\n current_row[1:],\n np.add(previous_row[:-1], target != s))\n\n # Deletion (target grows shorter than source):\n current_row[1:] = np.minimum(\n current_row[1:],\n current_row[0:-1] + 1)\n\n previous_row = current_row\n\n return previous_row[-1]", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass" ]
[ "0.56942385", "0.56030554", "0.56030554", "0.54778296", "0.5465966", "0.52517056", "0.52104473", "0.5193827", "0.51464367", "0.5123239", "0.51058954", "0.5102379", "0.50587285", "0.50403357", "0.5035835", "0.50214636", "0.50138366", "0.50128025", "0.5004057", "0.49926925", "0.49408358", "0.49314055", "0.49213868", "0.49068725", "0.48897326", "0.4883276", "0.4871716", "0.4856838", "0.4825154", "0.4820416" ]
0.80103874
0
return a string of verify statement.
def gen_v_stmt(q1n, q2n): return "verify {} {};\n".format(q1n, q2n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify(self):\n\n # tekstlig testing om koden fungerer\n text = self.klar_tekst_start + \" ble sendt til mottaker som krypteringen \" + \\\n self.crypto + \".\\nMottaker dekrypterte dette til \" + self.klar_tekst_slutt\n\n return text", "def format_verification(verification: Optional[str]) -> str:\n if not verification:\n return \"null\"\n return verification.replace('\"', \"`\")", "def verify():", "def matches_password_verify(password, verify):\n if password and not password == verify:\n return \"Your passwords didn't match.\"\n else:\n return \"\"", "def verify(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"verify\")", "async def verify(self, ctx, *, verification_string: str):\r\n\r\n await ctx.message.delete()\r\n\r\n veriflogs_channel = ctx.guild.get_channel(config.veriflogs_chanid)\r\n verification_role = ctx.guild.get_role(config.read_rules_roleid)\r\n verification_wanted = config.verification_code\\\r\n .replace(\"[discrim]\", ctx.author.discriminator)\r\n\r\n # Do checks on if the user can even attempt to verify\r\n if ctx.channel.id != config.verification_chanid:\r\n resp = await ctx.send(\"This command can only be used \"\r\n f\"on <#{config.verification_chanid}>.\")\r\n await asyncio.sleep(config.sleep_secs)\r\n return await resp.delete()\r\n\r\n if verification_role in ctx.author.roles:\r\n resp = await ctx.send(\"This command can only by those without \"\r\n f\"<@&{config.read_rules_roleid}> role.\")\r\n await asyncio.sleep(config.sleep_secs)\r\n return await resp.delete()\r\n\r\n # Log verification attempt\r\n await self.bot.update_logs(\"Verification Attempt\",\r\n ctx.author.id,\r\n veriflogs_channel,\r\n log_text=verification_string,\r\n digdepth=50, result=-1)\r\n\r\n # Check verification code\r\n if verification_string.lower().strip() == verification_wanted:\r\n resp = await ctx.send(\"Success! 
Welcome to the \"\r\n f\"club, {str(ctx.author)}.\")\r\n await self.bot.update_logs(\"Verification Attempt\",\r\n ctx.author.id,\r\n veriflogs_channel,\r\n digdepth=50, result=0)\r\n await asyncio.sleep(config.sleep_secs)\r\n await ctx.author.add_roles(verification_role)\r\n await resp.delete()\r\n else:\r\n resp = await ctx.send(f\"Incorrect password, {str(ctx.author)}.\")\r\n await asyncio.sleep(config.sleep_secs)\r\n await resp.delete()", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def verify(verification_code):\n verification.verify(verification_code)", "def check(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"check\")", "def test_command_verify():\n wozardry.parse_args([\"verify\", kValid1])\n wozardry.parse_args([\"verify\", kValid2])", "async def verify_proof(self, proof_req: dict, proof: dict, schema: dict, claim_def: dict) -> str: # verifier\n\n return json.dumps(\n await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps({ # schemas_json\n claim_uuid: schema for claim_uuid in proof['proofs']\n }),\n json.dumps({ # claim_defs_json\n claim_uuid: claim_def for claim_uuid in proof['proofs']\n }),\n json.dumps({}) # revoc_regs_json\n )\n )", "def verify_str(message):\n filename = f'/tmp/{get_temp_filename()}'\n filename_in = f'{filename}.pem'\n filename_plain = f'{filename}.plain'\n filename_certs = f'{filename}.crt'\n with open(filename_in, 'w') as in_file:\n in_file.write(message)\n in_file.close()\n\n cmd = [\n \"openssl\",\n \"cms\",\n \"-verify\",\n \"-inform\", \"PEM\",\n \"-in\", f'{filename_in}',\n \"-inkey\", server_key_files[\"key\"],\n \"-recip\", server_key_files[\"crt\"],\n \"-CAfile\", server_key_files[\"ca\"],\n \"-out\", f'{filename_plain}',\n \"-certsout\", f'{filename_certs}'\n ]\n try:\n res = exec_cmd(cmd)\n with open(filename_plain, \"r\") as f_plain_text:\n plain_text = f_plain_text.read()\n f_plain_text.close()\n with open(filename_certs, \"r\") as f_certs:\n certs = f_certs.read()\n f_certs.close()\n return {\"content\": plain_text,\n \"certs\": certs,\n \"result\": res.stderr.decode(\"utf8\").find(\"Verification successful\") != -1}\n except OSError as err:\n logging.error(\"verify_str failed: %s\", err)\n finally:\n unlink_filenames = [filename_in, filename_plain, filename_certs]\n for unlink_filename in unlink_filenames:\n os.unlink(unlink_filename)", "async def verify(self, ctx):\r\n embed = discord.Embed(color=0x0C8B18)\r\n self.ctx = ctx\r\n role = discord.utils.get(ctx.guild.roles, name=c.verified)\r\n guild = ctx.message.guild\r\n author = str(ctx.author)\r\n embed.title = f\"{ctx.author.name}\"\r\n if role in ctx.author.roles:\r\n embed.description = f\"🇵🇹 Já estás verificado\\n🇺🇸 You are already verified\"\r\n return await ctx.send(embed=embed)\r\n if os.path.exists(c.detailfile):\r\n for line in open(c.detailfile, 'r'):\r\n data = json.loads(line)\r\n if data[\"Discord\"] == author:\r\n await ctx.author.add_roles(role)\r\n embed.description = f\"🇵🇹 Verificação completa!\\n🇺🇸 Verification complete!\"\r\n return await ctx.send(embed=embed)\r\n\r\n embed.description = f\"🇵🇹 Por favor verifique-se [aqui](https://discordapp.com/oauth2/authorize?response_type=code&client_id=517177680375054336&redirect_uri=http%3A%2F%2F46.101.184.126%3A5000%2Fcallback&scope=identify+email+connections+guilds) e volte a correr o comando `!verify`\\n🇺🇸 Please complete 
the verification [here](https://discordapp.com/oauth2/authorize?response_type=code&client_id=517177680375054336&redirect_uri=http%3A%2F%2F46.101.184.126%3A5000%2Fcallback&scope=identify+email+connections+guilds) and run the `!verify` command again\"\r\n return await ctx.send(embed=embed)\r\n else:\r\n await ctx.send(\"Error, file not exist\")\r\n return \"Error, file\"", "def verified(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"verified\")", "def verify(self):", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def get_verifier_id():\n cmd = (\"rally verify list-verifiers | awk '/\" +\n getattr(config.CONF, 'tempest_verifier_name') +\n \"/ {print $2}'\")\n with subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL) as proc:\n verifier_uuid = proc.stdout.readline().rstrip()\n return verifier_uuid.decode(\"utf-8\")", "def status(self):\n return str(self.verificationRun.status())", "def code_challenge(verifier):\n digest = hashlib.sha256(verifier).digest()\n return base64.urlsafe_b64encode(digest).rstrip(b'=')", "def __get_verify_mode(self):\n ...", "def helptext():\n return \"Validate JWT tokens\"", "def check_body(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"check_body\")", "def verification_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"verification_token\")", "def text_summary_message(self):\n failed = [e for e in self.evaluations if not e.passes]\n if failed == []:\n return \"SUCCESS - all constraints evaluations pass\"\n else:\n return \"FAILURE: %d constraints evaluations failed\" % len(failed)", "def verify(self):\n pass", "def verify(self):\n pass", "def get_verifier_id():\n cmd = (\"rally verify list-verifiers | awk '/\" +\n getattr(config.CONF, 'tempest_verifier_name') +\n \"/ {print $2}'\")\n proc = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n verifier_uuid = proc.stdout.readline().rstrip()\n return verifier_uuid", "def verify_match(password, verify):\n return password == verify", "def verify(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"verify\")" ]
[ "0.6132083", "0.6103445", "0.6096352", "0.57968384", "0.5759049", "0.57580143", "0.5546414", "0.5314555", "0.53042316", "0.53004193", "0.5224447", "0.51929504", "0.51896125", "0.518826", "0.5155716", "0.51303595", "0.512667", "0.5108908", "0.50595164", "0.505352", "0.50520885", "0.5037622", "0.5026522", "0.50229126", "0.5019038", "0.49875018", "0.49875018", "0.49818623", "0.4975317", "0.49276212" ]
0.6741842
0
Prints text to the screen, using the currently active alignment.
def msg(text): for line in text.splitlines(): if JS.alignment == "left": print(demarkup(line)) elif JS.alignment == "center": print(demarkup(line).center(get_terminal_size()[0] - 1)) else: print(demarkup(line).rjust(get_terminal_size()[0] - 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PrintAt(self,x=0,y=0,text=''):\n self.SetCursor(x,y)\n self.Print(text)", "def print_text_line():\n print_indentation()\n print(STYLES[parameters[\"Style\"]][\"Vertical line\"], end=\"\")\n for _ in range(parameters[\"Surrounding spaces\"]):\n print(parameters[\"Fill char\"], end=\"\")\n print(text, end=\"\")\n for _ in range(parameters[\"Surrounding spaces\"]):\n print(parameters[\"Fill char\"], end=\"\")\n print(STYLES[parameters[\"Style\"]][\"Vertical line\"])", "def showText(self, context, text, size=1, color=colors.WHITE, conversion=True):\n context.print(text, self.components, size, color=color, conversion=conversion)", "def print(self,text,position=None,size=None,color=mycolors.WHITE,font=None,conversion=True):\n if conversion:\n if not position: position=(0,0)\n if not size: size=1\n position=self.draw.plane.getToScreen(position,self.draw.window)\n ux,uy=self.draw.plane.units\n size=int(size*ux/50)\n else:\n if not position: position=(10,10)\n if not size: size=20\n self.window.print(text,position,size,color,font)", "def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), color=color, size=size)", "def Print(self, text):\n pass", "def print(self, my_screen, text_string):\n text_bitmap = self.font.render(text_string, True, BLACK)\n my_screen.blit(text_bitmap, [self.x_pos, self.y_pos])\n self.y_pos += self.line_height", "def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')", "def print_step(text):\n\n panel = Panel(Text(text, justify=\"left\"))\n console.print(panel)", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tp = self.options['sep']\r\n\t\tt = self.options['tab']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.abbrev+s+p+s+self.text)", "def display_text(self, text, size=None, colr=None,\r\n x = None, y = None,\r\n new_line = None):\r\n if size is None:\r\n size = self.dt_size\r\n self.size = size\r\n if colr is None:\r\n colr = self.text_color\r\n self.text_color = colr\r\n if new_line is not None:\r\n if x is not None or y is not None:\r\n raise Exeception(\"Must not have new_line and x,y\")\r\n else:\r\n if x is not None or y is not None:\r\n new_line = False\r\n else:\r\n new_line = True\r\n if new_line:\r\n x = self.dt_x = self.disp_left\r\n self.dt_y -= size*self.font_size_to_ch\r\n y = self.dt_y\r\n #print(f\"new_line: y:{y} dt_y:{self.dt_y}\")\r\n else:\r\n if x is None:\r\n x = dt_x\r\n self.dt_x = x\r\n if y is None:\r\n y = self.dt_y\r\n self.dt_y = y\r\n #print(f\"display_text: text:{text} x:{x}, y:{y}\")\r\n tu.penup()\r\n if y < self.disp_bottom + self.disp_boarder:\r\n continue_msg = \"Press ENTER to continue\"\r\n inp = input(continue_msg)\r\n self.clear_text() # Only option \r\n \r\n tu.goto(x,y)\r\n tu.pendown()\r\n \r\n tu.color(colr)\r\n font = (\"Arial\", size, \"normal\")\r\n #print(f\"colr:{colr} text:{text} font:{font}\")\r\n #print(f\"xcor():{tu.xcor()} ycor():{tu.ycor()}\")\r\n tu.write(text, align=\"left\", font=font)", "def display(self):\n row = (' ' * self.__x) + (Rectangle.print_symbol * self.__width) + '\\n'\n print(('\\n' * self.__y) + (row * self.__height), end=\"\")", "def text_output(self):\n print(self.board)\n print()", "def print_space(self,text,width,w=1,h=1):\n texlen = len(text)\n if texlen > width:\n text = text[:width]\n self.lesprint(text,width)", 
"def print_align(aln, seqwidth=59, spacing=2, extra=fasta.FastaDict(),\n out=sys.stdout, order=None):\n\n if order is None:\n order = aln.keys()\n\n namewidth = max(map(len, order)) + spacing\n\n def mkname(name, namewidth):\n name2 = name[:namewidth]\n name2 += \" \" * (namewidth - len(name2))\n return name2\n\n identity = calc_conservation_string(aln)\n\n # print alignment\n for i in xrange(0, len(aln.values()[0]), seqwidth):\n # print sequences\n for name in order:\n print >>out, \"%s %s\" % (mkname(name, namewidth),\n aln[name][i:i+seqwidth])\n\n # print extra\n for name in extra.keys():\n print >>out, \"%s %s\" % (mkname(name, namewidth),\n extra[name][i:i+seqwidth])\n\n # print identity\n print >>out, (\" \"*namewidth) + \" \" + identity[i:i+seqwidth]\n print >>out", "def text_display(self, indent):\r\n result = \" \" * indent + self.name + \"\\n\"\r\n if self.left_child != None:\r\n result += self.left_child.text_display(indent + 4)\r\n if self.right_child != None:\r\n result += self.right_child.text_display(indent + 4)\r\n return result", "def put_text(self, text, color, point):\n x1, y1 = self.pos_shift\n x2, y2 = point\n if not self.in_display((x2 - x1, y2 - y1)):\n return\n font = pygame.font.SysFont(\"monospace\", 18, bold=True)\n label = font.render(text, 1, color)\n self.screen.blit(label, (\n x2 - x1,\n y2 - y1\n ))", "def text_align(text, x_align, y_align, height, d, style= \"TIMES_ITALIC\", rotation=0, color=K_BLACK, mirror=dxfwrite.MIRROR_X):\n text = dxf.text(text, height = height,mirror=mirror,halign=CENTER, alignpoint = (x_align,y_align),\n style=style, layer='LINES', rotation=rotation, color=color, linetype='ByBlock')\n d.add(text)", "def text_draw(self, x, y, text, style={}):", "def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence\n text_obj = font.render(text, True, color)\n text_rect = text_obj.get_rect()\n text_rect.center = (x, y)\n surface.blit(text_obj, text_rect)", "def print_text(self, text1, text2):", "def cool_print(self, text=str, newline=True, margin=21, rate=.02):\n print(\" \" * margin, end='')\n for letter in text:\n sleep(.02)\n stdout.write(letter)\n stdout.flush()\n if newline:\n print()", "def esprint(self,text,w=1,h=1):\n self.dps.set(height=h,width=w)\n self.dps.text(text)", "def display(self):\n disptxt = str(self)\n if self.width == 0 or self.has_output:\n print(disptxt)\n else:\n print(\"\\r\", end='')\n print(disptxt, end='')\n sys.stdout.flush()", "def format_alignment(self, alignment):\n raise NotImplementedError(\"This method should be implemented\")\n ###################################################\n # You MUST implement this method in the subclass. 
#\n ###################################################", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if self.font else self.text", "def _paintText(text, textrect, alignment):\r\n \r\n txt = text.splitlines()\r\n if alignment != wx.ALIGN_LEFT and len(txt):\r\n yorigin = textrect.Y\r\n for t in txt:\r\n w, h = dc.GetTextExtent(t)\r\n plus = textrect.Width - w\r\n if alignment == wx.ALIGN_CENTER:\r\n plus /= 2\r\n dc.DrawLabel(t, wx.Rect(textrect.X + plus, yorigin, w, yorigin+h))\r\n yorigin += h\r\n return\r\n dc.DrawLabel(text, textrect)", "def _print(txt):\n\n # Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Style: DIM, NORMAL, BRIGHT, RESET_ALL\n print('{0}{1}'.format(Style.BRIGHT + txt, Fore.RESET + Back.RESET + Style.RESET_ALL))", "def print(self, assignment):\n letters = self.letter_grid(assignment)\n for i in range(self.crossword.height):\n for j in range(self.crossword.width):\n if self.crossword.structure[i][j]:\n print(letters[i][j] or \" \", end=\"\")\n else:\n print(\"█\", end=\"\")\n print()", "def print(self, assignment):\n letters = self.letter_grid(assignment)\n for i in range(self.crossword.height):\n for j in range(self.crossword.width):\n if self.crossword.structure[i][j]:\n print(letters[i][j] or \" \", end=\"\")\n else:\n print(\"█\", end=\"\")\n print()" ]
[ "0.70358264", "0.6626184", "0.64501715", "0.6403515", "0.63804656", "0.63767767", "0.63763213", "0.63579285", "0.6340895", "0.63053125", "0.62978816", "0.62961745", "0.62925726", "0.6286042", "0.6282825", "0.62492776", "0.62384284", "0.6171509", "0.61666787", "0.614422", "0.6143485", "0.6126223", "0.6115459", "0.61131585", "0.61041254", "0.60874224", "0.60870713", "0.60794646", "0.60725605", "0.60725605" ]
0.68693316
1
Convert Tags to Entities
def get_entities(tags): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bio_tag_to_entity_mentions(bio_tags):\n within_entity_mention = False\n mentions = []\n for i, tag in enumerate(bio_tags):\n if not within_entity_mention:\n # start of entity mention\n if 'B-' in tag:\n start = i\n within_entity_mention = True\n else:\n if tag == 'O':\n end = i\n mentions.append({\n 'start' : start,\n 'end' : end,\n 'labels' : []\n })\n within_entity_mention = False\n # two entities side by side without O tag\n if 'B-' in tag:\n end = i\n mentions.append({\n 'start' : start,\n 'end' : end,\n 'labels' : []\n })\n start = i\n if within_entity_mention:\n end = len(bio_tags)\n mentions.append({\n 'start' : start,\n 'end' : end,\n 'labels' : []\n })\n return mentions", "def convert_all_tags(self):\n self.ratings = self.tag_converter.convert_ratings()\n self.categories = self.tag_converter.convert_categories()\n self.classes = self.tag_converter.convert_classes()\n\n old_characters = self.sql.read_table_to_dict(self.working_original, \"characters\")\n self.characters = self._convert_characters(old_characters)", "def normalize_tags(tags):\n return {normalize(tag) for tag in tags}", "def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in 
tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag", "def parse(self, tokens, pred_tags):\n entities = []\n entity = None\n tag = ''\n for idx, st in enumerate(pred_tags):\n if entity is None:\n if st.startswith('B'):\n entity = {}\n entity['start'] = idx\n tag = st[2:]\n else:\n continue\n else:\n if st == 'O':\n entity['end'] = idx\n name = ''.join(tokens[entity['start']: entity['end']])\n entities.append((name, tag))\n entity = None\n tag = ''\n elif st.startswith('B'):\n entity['end'] = idx\n name = ''.join(tokens[entity['start']: entity['end']])\n entities.append((name, tag))\n entity = {}\n entity['start'] = idx\n tag = st[2:]\n else:\n continue\n return entities", "def extend_entity(cls):\n # Generate the corresponding tag entity\n tag_entity = f\"{cls.__name__}Tag\"\n plural = f\"{cls.__name__.lower()}s\"\n fields = {\n \"_table\": tag_entity,\n plural: Set(cls.__name__),\n \"objects\": property(lambda t: getattr(t, plural, [])),\n }\n #fields['_indexes_'] = [Index(fields['first_name'],fields['last_name'],is_pk=False,is_unique=False)]\n entity = type(tag_entity, (Tag, ), fields)\n cls.db_tags = Set(tag_entity)\n cls.tags = lazy_property(_get_tag_handler)", "def _process_tags(tags: dict):\n\n def process_val(value):\n if isinstance(value, (list, tuple)):\n # Array type of json\n return [process_val(item) for item in value]\n elif isinstance(value, dict):\n # Object type of json\n return {k: process_val(v) for k, v in value.items()}\n elif isinstance(value, (str, int, float, bool)) or value is None:\n # Other supported type of json\n return value\n elif isinstance(value, (torch.Tensor, np.ndarray)):\n return value.tolist()\n # Drop unsupported values.\n\n processed_tags = OrderedDict(process_val(tags))\n\n return processed_tags", "def intf_ENTCHTAG(E):\n if ( not inc.entid_or_LST_of_entids(E.The,3) or\n not inc.TXT(E,2) or not inc.TXT(E,1) ):\n print(\"Input Error: chtag\")\n print(intf_ENTCHTAG.__doc__)\n return # Without doing much of anything.\n refreshview= False # No need unless view attributes (@) have been affected.\n newtag= E.The.StackPop().val\n oldtag= E.The.StackPop().val\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of VALs.\n myeids= [x.val for x in myeids] # Should now be a list of VALs.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n if MMEL.El[myeid].has_tag(oldtag):\n print(\"Untagging entity #%d with tag ''%s''\" % (myeid,oldtag))\n MMEL.El[myeid].del_tag(oldtag)\n MMEL.El[myeid].add_tag(newtag)\n if '@' in oldtag or '@' in newtag: refreshview= True\n else:\n print(\"Warning: No entity #%d. Skipping.\" % myeid)\n if refreshview: OUT.default(MMEL,E) # AUTODUMP ", "def tags(self):\n # See also. 
Sentence.__repr__().\n ch, I,O,B = self.chunk, INSIDE+\"-\", OUTSIDE, BEGIN+\"-\"\n tags = [OUTSIDE for i in range(len(self.sentence.token))]\n for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]\n if tag == WORD:\n tags[i] = encode_entities(self.string)\n elif tag == POS and self.type:\n tags[i] = self.type\n elif tag == CHUNK and ch and ch.type:\n tags[i] = (self == ch[0] and B or I) + ch.type\n elif tag == PNP and self.pnp:\n tags[i] = (self == self.pnp[0] and B or I) + \"PNP\"\n elif tag == REL and ch and len(ch.relations) > 0:\n tags[i] = [\"-\".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations]\n tags[i] = \"*\".join(tags[i])\n elif tag == ANCHOR and ch:\n tags[i] = ch.anchor_id or OUTSIDE\n elif tag == LEMMA:\n tags[i] = encode_entities(self.lemma or \"\")\n elif tag in self.custom_tags:\n tags[i] = self.custom_tags.get(tag) or OUTSIDE\n return tags", "def add_entities(doc):\n\n # Calls function to tokenize the document, stores as list of strings\n tokens = tokenize(doc)\n\n # Calls function to find named entities in the tokens, stores as list of strings\n chunks = chunk(tokens)\n\n return chunks", "def setEntities(self, tags):\n return self._set(entities=tags)", "def tags():", "def tag_mapping(sentences):\n tags = [[char[-1] for char in s] for s in sentences]\n dico = create_dico(tags)\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag", "def transform_tags(self, instance):\n return instance.tags.split(',')", "def tag_mapping(sentences):\n tags = [[word[-1] for word in s] for s in sentences]\n dico = create_dico(tags)\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag", "def extract_entities(event):\n # TODO The text should probably already be tagged and tokenized before this step\n tree = ne_chunk(event.pos_tagged)\n entities = set([])\n\n people = tree.subtrees(lambda x: x.node == \"PERSON\")\n for person in people:\n entities.add(\" \".join([leaf[0] for leaf in person.leaves()]))\n\n places = tree.subtrees(lambda x: x.node == \"GPE\")\n for place in places:\n entities.add(\" \".join([leaf[0] for leaf in place.leaves()]))\n\n organizations = tree.subtrees(lambda x: x.node == \"ORGANIZATION\")\n for org in organizations:\n entities.add(\" \".join([leaf[0] for leaf in org.leaves()]))\n \n return entities", "def intf_ENTDETAG(E):\n if not inc.entid_or_LST_of_entids(E.The,1):\n print(\"Input Error: detag\")\n print(intf_ENTDETAG.__doc__)\n return # Without doing much of anything.\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of ints.\n myeids= [x.val for x in myeids] # Should now be a list of ints.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n MMEL.El[myeid].tags= list()\n else:\n print(\"WARNING: Entity ID# %d does not exist.\" % myeid)\n OUT.default(MMEL,E) # AUTODUMP ", "def find_named_entities(pos_tags):\n contains_proper_noun = False\n tokens = list()\n for tags in pos_tags:\n if tags['tag'] == '^':\n contains_proper_noun = True\n\n if contains_proper_noun:\n for tags in pos_tags:\n if len(tags['token']) == 1:\n tags['token'] = NLPUtils.character_to_unicode(tags['token'])\n tokens.append(tags['token'])\n try:\n text = ' '.join(tokens)\n headers = {\n 
'Accept': 'application/json',\n }\n # print(text)\n data = [\n ('text', text),\n ('confidence', '0.25'),\n ('support', '20')\n ]\n\n r = requests.post('http://model.dbpedia-spotlight.org/en/annotate', headers=headers, data=data,\n timeout=10)\n # print(str(r.content.decode()))\n res = r.json()\n\n entities = list()\n if 'Resources' in res:\n for i in res['Resources']:\n # res_str = str(i).replace(',','\\n')\n # print(res_str)\n\n if i['@types'] is not None:\n original = i['@surfaceForm']\n entity_tmp = i['@URI']\n entity_tmp = re.sub('.*/', '', entity_tmp)\n entity_tmp = re.sub('\\(.*\\)', '', entity_tmp)\n entity = re.sub('_', ' ', entity_tmp).strip()\n\n if entity.lower() in text.lower() and ' ' in entity:\n entities.append((entity, int(i['@offset'])))\n # print(entities)\n new_pos_tags = list()\n curr_pos = 0\n tokens_to_omit = 0\n for tags in pos_tags:\n # if re.match(\"U\\+[a-zA-Z0-9]{1,5}\",tags['token']):\n # print(tags['token'])\n # tags['token'] = NLPUtils.unicode_to_character(tags['token'])\n # print(tags['token'])\n\n token = tags['token']\n for e in entities:\n curr_dict = dict()\n if curr_pos == e[1]:\n tokens_to_omit = len(re.split(' ', e[0]))\n curr_dict['token'] = e[0]\n curr_dict['tag'] = '^'\n new_pos_tags.append(curr_dict)\n # +1 for whitespace\n curr_pos += len(token) + 1\n if tokens_to_omit == 0:\n new_pos_tags.append(tags)\n else:\n tokens_to_omit -= 1\n\n # decode unicode sequence\n new_pos_tags = NLPUtils.unicode_to_character_pos_tagged(new_pos_tags)\n return new_pos_tags\n # decode uniocde character\n pos_tags = NLPUtils.unicode_to_character_pos_tagged(pos_tags)\n except Exception as e:\n print(e)\n return None\n\n return pos_tags", "def intf_ENTTAG(E):\n if not inc.entid_or_LST_of_entids(E.The,2) or not inc.TXT_or_LST_of_TXTs(E.The,1):\n print(\"Input Error: tag\")\n print(intf_ENTTAG.__doc__)\n return # Without doing much of anything.\n refreshview= False # No need unless view attributes (@) have been set.\n mytags= E.The.StackPop().val\n if type(mytags)==type(list()):\n #mytags= map(lambda x:x.val, mytags) # Should now be a list of TXTs.\n mytags= [x.val for x in mytags] # Should now be a list of TXTs.\n else:\n mytags= [ mytags ] # Also a (1 item) list of ints.\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of VALs.\n myeids= [x.val for x in myeids] # Should now be a list of VALs.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n for mytag in mytags:\n if len(mytag) > 1 and '@' == mytag[1]:\n refreshview= True\n existing_att_tags= MMEL.El[myeid].has_tag_starting_with(mytag[0:2])\n if existing_att_tags:\n for et in existing_att_tags:\n MMEL.El[myeid].del_tag(et)\n print(\"Tagging entity #%d with tag ''%s''\" % (myeid,mytag))\n if not MMEL.El[myeid].has_tag(mytag):\n MMEL.El[myeid].add_tag(mytag)\n else:\n print(\"Warning: No entity #%d. 
Skipping.\" % myeid)\n if refreshview: OUT.default(MMEL,E) # AUTODUMP ", "def entity_tag_sentence(sentence):\n return ne_chunk(sentence)", "def generate_entities(self, data):\r\n\t\t# create an empty dictionary to hold entities\r\n\t\tent_dic = {}\r\n\r\n\t\tfor row in data.itertuples():\r\n\t\t\t# feed nlp the first line's set of keywords\r\n\t\t\tdoc = self.nlp(row.keywords)\t\r\n\t\t\t# begin iterating through the nlp's entities\r\n\t\t\tfor ent in doc.ents:\r\n\r\n\t\t\t\t# For each entity, check if the label exists in 'ent_dic'.\r\n\t\t\t\t# If it does, append the entity into the key, value pair.\r\n\t\t\t\t# If it doesn't, create a new key, value pair\r\n\t\t\t\tkey = str(ent.label_) + ''\r\n\t\t\t\tif ent.label_ in ent_dic:\r\n\t\t\t\t\tent_dic[key].append(str(ent)) if not str(ent) in ent_dic[key] else print(f'The entity: {ent} is already in the array')\r\n\t\t\t\telse: \r\n\t\t\t\t\tent_dic[key] = [str(ent)]\r\n\r\n\t\t# return the dictionary of entities\r\n\t\treturn ent_dic", "def entity_tags(self):\n return self._entity_tags", "def encode_tags(taglist, lang_name):\n tagvec = [None]*len(UNIMORPH_CATEGORIES)\n for tag in taglist:\n if tag in UNIMORPH_TAGTYPES:\n tagtype = UNIMORPH_TAGTYPES[tag]\n set_tagtype(tagvec, tagtype, tag, lang_name)\n\n if tag in UNIMORPH_POS_MAP:\n pos = UNIMORPH_POS_MAP[tag]\n set_tagtype(tagvec, \"POS\", pos, lang_name)\n\n return tagvec", "def _make_natural_type(self):\n for tag in self.tags:\n if self.tags[tag] is None or str(self.tags[tag]).strip() == \"\":\n self.tags[tag] = None\n else:\n if tag.lower() in VASP_TAG_INT_LIST:\n try:\n self.tags[tag] = int(self.tags[tag])\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to int\")\n elif tag.lower() in VASP_TAG_FLOAT_LIST:\n try:\n self.tags[tag] = float(self.tags[tag].lower().replace('d','e'))\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to float\")\n elif tag.lower() in VASP_TAG_BOOL_LIST:\n if not self.tags[tag].lower() in ['.true.','.false.']:\n raise IncarError(\"Could not find '\" + tag + \"' : '\" + self.tags[tag].lower() + \"' in ['.true.','.false.']\")\n else:\n self.tags[tag] = (self.tags[tag].lower() == '.true.')\n elif tag.lower() in VASP_TAG_SITEF_LIST + VASP_TAG_SPECF_LIST:\n temp = []\n for value in self.tags[tag].split():\n try:\n item=value.split('*')\n if len(item)==1:\n temp.append(float(value))\n else:\n if item[0] != 0:\n temp.append(str(item[0])+'*'+str(float(item[1])))\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to float list\")\n self.tags[tag] = temp\n elif tag.lower() in VASP_TAG_SPECI_LIST:\n temp = []\n for value in self.tags[tag].split():\n try:\n temp.append(int(value))\n except ValueError:\n raise IncarError(\"Could not convert '\" + tag + \"' : '\" + self.tags[tag] + \"' to int list\")\n self.tags[tag] = temp\n elif tag.lower() in VASP_TAG_STRING_LIST:\n self._check_string_tag(tag,self.tags[tag])", "def extract_entities_from_dependency_parse(dtrees, postag):\n sents = []\n for x in range(0,len(dtrees)):\n tok_list = []\n for node_index in dtrees[x].nodes:\n if node_index != 0:\n node = dtrees[x].nodes[node_index]\n if node['ctag'] == postag:\n tok_list.append((node['word'],postag))\n else:\n tok_list.append((node['word'],'O'))\n sents.append(tok_list)\n return sents", "def tag_sents(self, sents):\n # WORK HERE!!", "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] 
for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def prepare_tags(self, obj):\n return [tag.name for tag in obj.tags.all()]", "def get_tag_objects(session, tags):\n\n tag_objs = []\n\n for key, value in tags:\n tag = TagsDbHandler().get_tag(session, key)\n if tag:\n tag_objs.append(tag)\n\n continue\n\n new_tag = Tags(key, value)\n tag_objs.append(new_tag)\n\n session.add(new_tag)\n\n return tag_objs", "def get_entities(self, clean=False):\n return list(self.iter_entities(clean=clean))" ]
[ "0.62833726", "0.61343277", "0.60737306", "0.5947258", "0.5923205", "0.58831906", "0.58670056", "0.5811215", "0.58097756", "0.57908505", "0.5778241", "0.57619965", "0.575832", "0.5747811", "0.5735009", "0.5708459", "0.5707962", "0.5704061", "0.5694413", "0.56721485", "0.5652814", "0.56041497", "0.5601817", "0.55898386", "0.55725497", "0.5549629", "0.5539022", "0.55226326", "0.5454361", "0.54531837" ]
0.7193571
0