| query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4 to 10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
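The header describes a retrieval-training dataset: each row pairs a natural-language query with one positive document and 30 mined negatives plus their similarity scores, and the per-row metadata declares a (query, document, negatives) triplet objective. A minimal sketch, assuming the Hugging Face `datasets` library and a placeholder dataset path, of expanding rows of this schema into training triplets:

```python
# Sketch only: "user/code-search-triplets" is a placeholder, not the
# real dataset path. Expands each row into (query, positive, negative).
from datasets import load_dataset

ds = load_dataset("user/code-search-triplets", split="train")

def iter_triplets(dataset):
    for row in dataset:
        # Pair the query and its positive document with each mined negative.
        for negative in row["negatives"]:
            yield row["query"], row["document"], negative

query, positive, negative = next(iter_triplets(ds))
```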
Test the correct errors are raised if Biopython is not available. | def test_config_no_biopython(monkeypatch):
monkeypatch.setattr(core, 'HAVE_BIOPYTHON', False)
assert core.HAVE_BIOPYTHON is False
args = Namespace(extended_validation='all')
with pytest.raises(ValueError):
core.Config.from_args(args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_config_have_biopython():\n assert core.HAVE_BIOPYTHON\n args = Namespace(extended_validation='all')\n config = core.Config.from_args(args)\n assert config.extended_validation == 'all'",
"def test_fastqc_notexec():\n try:\n obj = fastqc.FastQC(\"LICENSE\")\n except NotExecutableError:\n return True\n else:\n return False",
"def should_build_ib():\n ib_util_found = False\n ib_lib_found = False\n ib_header_found = False\n\n try:\n # If the command doesn't exist, we can directly return instead of\n # making a subprocess call\n full_cmd_path = get_command_path(IB_DEVINFO_CMD)\n if not full_cmd_path:\n ib_util_found = False\n subprocess.check_output([full_cmd_path, \"--list\"])\n # Here we just would like to simply run the command to test if IB\n # related tools / lib are installed without parsing the output. We\n # will enable IB build as long as the command runs successfully.\n #\n # The output should look like either:\n #\n # > ibv_devinfo --list\n # 0 HCAs founds:\n #\n # or\n #\n # > ibv_devinfo --list\n # 4 HCAs found:\n # mlx5_3\n # mlx5_2\n # mlx5_1\n # mlx5_0\n ib_util_found = True\n except Exception:\n # We just take all the exceptions here without affecting the build\n ib_util_found = False\n\n lib_paths = list(filter(bool, [\n \"/usr/lib/\",\n \"/usr/lib/x86_64-linux-gnu/\",\n \"/usr/lib/powerpc64le-linux-gnu/\",\n \"/usr/lib/aarch64-linux-gnu/\",\n ] + gather_paths([\n \"LIBRARY_PATH\",\n ]) + gather_paths([\n \"LD_LIBRARY_PATH\",\n ])))\n\n include_paths = [\n \"/usr/include/\",\n ]\n\n if IS_CONDA:\n lib_paths.append(os.path.join(CONDA_DIR, \"lib\"))\n include_paths.append(os.path.join(CONDA_DIR, \"include\"))\n\n for path in lib_paths:\n if path is None or not os.path.exists(path):\n continue\n ib_libraries = sorted(glob.glob(os.path.join(path, \"libibverbs*\")))\n if ib_libraries:\n ib_lib_found = True\n break\n\n for path in include_paths:\n if path is None or not os.path.exists(path):\n continue\n if os.path.exists(os.path.join(path, \"infiniband/verbs.h\")):\n ib_header_found = True\n break\n\n return ib_util_found and ib_lib_found and ib_lib_found",
"def test_failure():\n with pytest.raises(ModuleNotFoundError):\n import torch # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import tensorflow # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import horovod # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n from ray import tune # noqa: F401",
"def numba_check():\n numba = importlib.util.find_spec(\"numba\")\n return numba is not None",
"def check_errors(self) -> None:",
"def test_ensure_bzr_available():\n assert is_bzr_installed()",
"def test_bit_driver_error(self):\n\n with pytest.raises(ValueError, match=r\"'b' must be either 0 or 1\"):\n qaoa.bit_driver(range(3), 2)",
"def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())",
"def test():\n if not MpUsbApi.__get_dll():\n return \"Error loading library mpusbapi.dll, it is missing or not installed!\"\n return None\n #end test()",
"def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisOutput, 'error')",
"def test_py2(self):\n if sys.version_info >= self.MIN_SUPPORTED_VERSION:\n return\n try:\n import miflora # noqa: F401 # pylint: disable=unused-import,import-outside-toplevel\n\n self.fail(\"Should have thrown an exception\")\n except ValueError as val_err:\n self.assertIn(\"version\", str(val_err))",
"def check_for_setup_error(self):\n\n # If configuration is incorrect we will get exception here\n self._rpc_call('bdev_get_bdevs')",
"def test_warningsAreErrors(self):\n output = StringIO()\n self.patch(sys, \"stdout\", output)\n self.createFakeSphinxProject()\n with self.sphinxDir.child(\"index.rst\").open(\"a\") as f:\n f.write(b\"\\n.. _malformed-link-target\\n\")\n exception = self.assertRaises(\n SystemExit, self.builder.main, [self.sphinxDir.parent().path]\n )\n self.assertEqual(exception.code, 1)\n self.assertIn(\"malformed hyperlink target\", output.getvalue())\n self.verifyBuilt()",
"def check_requirements():\n process_output = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0] for r in process_output.split()]\n if 'pandas' and 'matplotlib' in installed_packages:\n return True\n else:\n print('You don`t have one of required libralies\\n'\n 'I can`t create histogram\\n'\n 'Required libralies: \\n'\n '->pandas\\n'\n '->matplotlib\\n')\n return False",
"def troubleshoot():\n libraries = (sys, pd, openpyxl, matplotlib, pip)\n for i in libraries:\n try:\n print(str(i), 'version:', i.__version__)\n except AttributeError:\n pass\n except ModuleNotFoundError:\n print('You do not have', str(i), 'installed.')\n print('You can do so via your interpreter or:')\n print('py -m pip install', '-' + str(i))\n print('in command prompt')",
"def test_exc_on_missing_brack(self):\n with self.assertRaises(ExecutionException):\n pyint = Interpreter(limit=1)\n pyint.run(code=BF_MISSING_BRACK)",
"def test_init_success(self):\n found = False\n try:\n pyint = Interpreter()\n except InitializationException: \n found = True\n self.assertFalse(found)",
"def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))",
"def check_supported_features(self):",
"def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')",
"def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)",
"def test_blast_supported_version(self):\r\n acceptable_version = (2, 2, 22)\r\n self.assertTrue(which('blastall'),\r\n \"blast not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = 'blastall | grep blastall'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.strip().split(' ')[1].strip()\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version == acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported blast version. %s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))",
"def test_denoiser_supported_version(self):\r\n\r\n pass_test = True\r\n try:\r\n check_flowgram_ali_exe()\r\n except (ApplicationNotFoundError, ApplicationError):\r\n pass_test = False\r\n\r\n self.assertTrue(pass_test,\r\n \"Denoiser flowgram aligner not found or not \"\r\n \"executable. This may or may not be a problem \"\r\n \"depending on which components of QIIME you plan to \"\r\n \"use.\")",
"def test_umap_unavailable():\n from yellowbrick.text.umap_vis import UMAP\n\n assert UMAP is None\n\n with pytest.raises(\n YellowbrickValueError, match=\"umap package doesn't seem to be installed\"\n ):\n UMAPVisualizer()",
"def test_failToBuild(self):\n # note no fake sphinx project is created\n self.assertRaises(CalledProcessError, self.builder.build, self.sphinxDir)",
"def test_invalid_method():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n pytest.raises(AttributeError, atom.sgd.predict_proba, X_bin)",
"def test_import_allows_multiple_modules_failure(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\", \"_qiskit_module_does_not_exist_\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertFalse(feature)\n check.assert_called_once()",
"def setUpModule():\n args = [\"bowtie-build\",REF_FILES[\"yeast_fasta\"],_ebwt]\n with open(os.devnull,\"a\") as null:\n try:\n subprocess.check_call(args,stdout=null,stderr=subprocess.STDOUT)\n except CalledProcessError as e:\n print(\"Could not build bowtie index for crossmap test.\")\n raise e",
"def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule"
]
| [
"0.60472417",
"0.59132737",
"0.5679767",
"0.55962723",
"0.55735046",
"0.55669904",
"0.5553229",
"0.5509644",
"0.5472223",
"0.54605126",
"0.5453642",
"0.5441282",
"0.54319507",
"0.5415938",
"0.5410456",
"0.5409874",
"0.53916454",
"0.53776276",
"0.53769064",
"0.5348316",
"0.53466266",
"0.5337339",
"0.5335885",
"0.5320768",
"0.5314617",
"0.53033894",
"0.5302445",
"0.53004086",
"0.5290229",
"0.52833664"
]
| 0.62092185 | 0 |
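The row above shows a common pattern for optional dependencies: the library records availability in a module-level flag at import time, and the test flips that flag with pytest's `monkeypatch` fixture to exercise the failure branch. A self-contained sketch of the same pattern; the flag and factory below are illustrative stand-ins, not the real ncbi-acc-download internals:

```python
import sys
import pytest

HAVE_BIOPYTHON = True  # set once at import time in the real library

def make_config(extended_validation):
    # Extended validation needs Biopython, so fail fast without it.
    if extended_validation != "none" and not HAVE_BIOPYTHON:
        raise ValueError("extended validation requires Biopython")
    return {"extended_validation": extended_validation}

def test_no_biopython(monkeypatch):
    # monkeypatch restores the original flag automatically after the test.
    monkeypatch.setattr(sys.modules[__name__], "HAVE_BIOPYTHON", False)
    with pytest.raises(ValueError):
        make_config("all")
```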
Test we detect Biopython. | def test_config_have_biopython():
assert core.HAVE_BIOPYTHON
args = Namespace(extended_validation='all')
config = core.Config.from_args(args)
assert config.extended_validation == 'all' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect():\n pass",
"def _detect(self):\n return True",
"def testIsBiconnected(self):\n self.assertEqual(is_biconnected(self.G1), True)\n self.assertEqual(is_biconnected(self.G2), False)",
"def should_build_ib():\n ib_util_found = False\n ib_lib_found = False\n ib_header_found = False\n\n try:\n # If the command doesn't exist, we can directly return instead of\n # making a subprocess call\n full_cmd_path = get_command_path(IB_DEVINFO_CMD)\n if not full_cmd_path:\n ib_util_found = False\n subprocess.check_output([full_cmd_path, \"--list\"])\n # Here we just would like to simply run the command to test if IB\n # related tools / lib are installed without parsing the output. We\n # will enable IB build as long as the command runs successfully.\n #\n # The output should look like either:\n #\n # > ibv_devinfo --list\n # 0 HCAs founds:\n #\n # or\n #\n # > ibv_devinfo --list\n # 4 HCAs found:\n # mlx5_3\n # mlx5_2\n # mlx5_1\n # mlx5_0\n ib_util_found = True\n except Exception:\n # We just take all the exceptions here without affecting the build\n ib_util_found = False\n\n lib_paths = list(filter(bool, [\n \"/usr/lib/\",\n \"/usr/lib/x86_64-linux-gnu/\",\n \"/usr/lib/powerpc64le-linux-gnu/\",\n \"/usr/lib/aarch64-linux-gnu/\",\n ] + gather_paths([\n \"LIBRARY_PATH\",\n ]) + gather_paths([\n \"LD_LIBRARY_PATH\",\n ])))\n\n include_paths = [\n \"/usr/include/\",\n ]\n\n if IS_CONDA:\n lib_paths.append(os.path.join(CONDA_DIR, \"lib\"))\n include_paths.append(os.path.join(CONDA_DIR, \"include\"))\n\n for path in lib_paths:\n if path is None or not os.path.exists(path):\n continue\n ib_libraries = sorted(glob.glob(os.path.join(path, \"libibverbs*\")))\n if ib_libraries:\n ib_lib_found = True\n break\n\n for path in include_paths:\n if path is None or not os.path.exists(path):\n continue\n if os.path.exists(os.path.join(path, \"infiniband/verbs.h\")):\n ib_header_found = True\n break\n\n return ib_util_found and ib_lib_found and ib_lib_found",
"def bm_and_dvr_supported(self):",
"def hpb_supported(self):",
"def hbnb():\n return \"HBNB\"",
"def hbnb():\n return \"HBNB\"",
"def check_supported_features(self):",
"def test_denoiser_supported_version(self):\r\n\r\n pass_test = True\r\n try:\r\n check_flowgram_ali_exe()\r\n except (ApplicationNotFoundError, ApplicationError):\r\n pass_test = False\r\n\r\n self.assertTrue(pass_test,\r\n \"Denoiser flowgram aligner not found or not \"\r\n \"executable. This may or may not be a problem \"\r\n \"depending on which components of QIIME you plan to \"\r\n \"use.\")",
"def hbnb():\n return 'HBNB'",
"def detect(cls):\n return False",
"def test_config_no_biopython(monkeypatch):\n monkeypatch.setattr(core, 'HAVE_BIOPYTHON', False)\n assert core.HAVE_BIOPYTHON is False\n args = Namespace(extended_validation='all')\n with pytest.raises(ValueError):\n core.Config.from_args(args)",
"def is_mobu():\n\n return 'pyfbsdk' in main.__dict__",
"def HBNB():\n return 'HBNB'",
"def test_ann_features():\n CQT(file_struct, FeatureTypes.ann_beatsync, sr=11025).features",
"def test_blast_supported_version(self):\r\n acceptable_version = (2, 2, 22)\r\n self.assertTrue(which('blastall'),\r\n \"blast not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = 'blastall | grep blastall'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.strip().split(' ')[1].strip()\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version == acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported blast version. %s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))",
"def test_adaptability():\n assert chap2.adaptability()",
"def test_humann_fastq_biom_output(self):\n \n # create a temp directory for output\n tempdir = utils.create_temp_folder(\"fastq\")\n \n # run humann test\n command = [\"humann\",\"--input\",cfg.demo_fastq,\"--output\",tempdir,\n \"--output-format\", \"biom\"]\n utils.run_humann(command)\n \n # check the output files are as expected\n for expression, message in utils.check_output(cfg.expected_demo_output_files_biom, tempdir):\n self.assertTrue(expression,message)\n\n # remove the temp directory\n utils.remove_temp_folder(tempdir)",
"def test_mbd():\n example.control.mbd_comparison('mbd_comparison.pdf')\n assert path.isfile('mbd_comparison.pdf')",
"def testPBSIdentify():\n # Test output from TORQUE\n versionString = \"version: 2.4.16\"\n actual = pbsIdentify(versionString)\n assert actual.arrayFlag == \"-t\"\n assert actual.arrayIDVariable == \"PBS_ARRAYID\"\n assert actual.qdelForceFlags == [\"-W\", \"0\"]\n assert actual.flavour == \"TORQUE\"\n\n versionString = \"pbs_version = PBSPro_11.1.0.111761\"\n actual = pbsIdentify(versionString)\n assert actual.arrayFlag == \"-J\"\n assert actual.arrayIDVariable == \"PBS_ARRAY_INDEX\"\n assert actual.qdelForceFlags == [\"-Wforce\"]\n assert actual.flavour == \"PBSPro\"",
"def test_bay_bridge(self):\n # import the experiment variable from the example\n exp = bay_bridge_example(render=False)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)\n\n # import the experiment variable from the example with inflows\n exp = bay_bridge_example(render=False, use_inflows=True)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)\n\n # import the experiment variable from the example with traffic lights\n exp = bay_bridge_example(render=False, use_traffic_lights=True)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)",
"def test_clang(self):\n self.assertEqual(\n self.ndk.clang,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/clang\",\n )",
"def test_active_inference_SPM_1b(self):",
"def test_python_bool(self):\n\n m = Mothur(**self.init_vars)\n self.set_current_dirs(m)\n m.pcr.seqs(fasta='test_fasta_1.fasta', start=20, keepdots=False)\n m.pcr.seqs(fasta='test_fasta_1.fasta', start=20, keepdots=True)\n\n return",
"def mba_supported():\n return common.MBA_CAP in SYSTEM_CAPS",
"def test_flowgramAli_bin(self):\r\n ali_exe = get_flowgram_ali_exe()\r\n\r\n self.assertTrue(which(ali_exe) is not None, \"The alignment program %s \"\r\n \"is not accessible via the PATH environment variable.\"\r\n % ali_exe)\r\n\r\n # test if its callable and actually works\r\n command = \"%s -h\" % ali_exe\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n\r\n if (proc.wait() != 0):\r\n self.fail(\"Calling %s failed. Check permissions and that it is in fact an executable.\"\r\n % ali_exe)\r\n\r\n result = proc.stdout.read()\r\n # check that the help string looks correct\r\n self.assertTrue(result.startswith(\"Usage\"))",
"def bioinfo():\n\n pass",
"def test_generate_barcode_upca(self):\n pass",
"def is_bias(self):\n if self.is_power_onoff():\n return False\n btest = re.compile('\\Wbias\\W|^bias\\W|^bias$|\\Wbias$', re.IGNORECASE)\n return btest.search(self['target']) != None"
]
| [
"0.60462487",
"0.5662381",
"0.55574375",
"0.55203646",
"0.5468003",
"0.54534155",
"0.53503704",
"0.53503704",
"0.5321535",
"0.52881396",
"0.52752477",
"0.52368546",
"0.52000886",
"0.5146673",
"0.5136819",
"0.51240057",
"0.509844",
"0.50787854",
"0.50721985",
"0.50653446",
"0.5054922",
"0.5007176",
"0.4984226",
"0.49829438",
"0.49675837",
"0.49508962",
"0.49494484",
"0.49460527",
"0.49405316",
"0.49388796"
]
| 0.5940255 | 1 |
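The flag asserted above is typically set by probing for the import at module load time, much like the `numba_check` negative in the previous row. A one-line sketch, assuming Biopython's import name `Bio`:

```python
# Probe for an optional dependency without actually importing it.
import importlib.util

HAVE_BIOPYTHON = importlib.util.find_spec("Bio") is not None
```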
Test appending multiple downloads into a single file. | def test_download_to_file_append(req, tmpdir):
req.get(ENTREZ_URL, text='This works.\n')
outdir = tmpdir.mkdir('outdir')
filename = outdir.join('foo.txt')
expected = outdir.join('foo.txt')
config = core.Config(molecule='nucleotide', verbose=False, out='foo.txt')
core.download_to_file('FOO', config, filename=str(filename), append=False)
core.download_to_file('BAR', config, filename=str(filename), append=True)
core.download_to_file('BAZ', config, filename=str(filename), append=True)
assert expected.check()
assert len(expected.readlines()) == 3 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_download1(self):\n pass",
"def test_download2(self):\n pass",
"def download_files(self):",
"def download(all):\n print(\"Downloading\")",
"def test_download(self):\n pass",
"def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1",
"def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')",
"def test_download(self, file_list, _):\n url = reverse(\"report_download\")\n response = self.client.get(url)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)\n\n url_w_params = url + \"?provider_uuid=1&bill_date=2021-04-01\"\n response = self.client.get(url_w_params)\n body = response.json()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Download Request Task ID\", body)",
"def test_downloading_mocked_mp3_files(\n requests_mock: rm_Mocker,\n mp3_file1_mock: bytes,\n mp3_file2_mock: bytes,\n tmp_path: Path,\n lep_dl: LepDL,\n) -> None:\n test_downloads: LepFileList = LepFileList()\n file_1 = LepFile(\n filename=\"Test File #1.mp3\",\n primary_url=\"https://traffic.libsyn.com/secure/teacherluke/733._A_Summer_Ramble.mp3\", # noqa: E501,B950\n )\n file_2 = LepFile(\n filename=\"Test File #2.mp3\",\n primary_url=\"https://audioboom.com/posts/5678762-episode-169-luke-back-on-zep-part-4.mp3\", # noqa: E501,B950\n )\n test_downloads.append(file_1)\n test_downloads.append(file_2)\n\n requests_mock.get(\n \"https://traffic.libsyn.com/secure/teacherluke/733._A_Summer_Ramble.mp3\", # noqa: E501,B950\n content=mp3_file1_mock,\n )\n requests_mock.get(\n \"https://audioboom.com/posts/5678762-episode-169-luke-back-on-zep-part-4.mp3\", # noqa: E501,B950\n content=mp3_file2_mock,\n )\n\n lep_dl.non_existed = test_downloads\n lep_dl.download_files(tmp_path)\n expected_file_1 = tmp_path / \"Test File #1.mp3\"\n expected_file_2 = tmp_path / \"Test File #2.mp3\"\n assert expected_file_1.exists()\n assert 21460 < expected_file_1.stat().st_size < 22000\n assert expected_file_2.exists()\n assert 18300 < expected_file_2.stat().st_size < 18350\n assert len(lep_dl.downloaded) == 2",
"def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames",
"def start_downloads():\n todownload = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='retrying' \" \\\n \"ORDER BY created_at ASC\")\n todownload += jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='new' \" \\\n \"ORDER BY created_at ASC\")\n\n for file in todownload:\n if can_download():\n dlm_cout.outs(\"Initiating download of %s\" % \\\n os.path.split(file['filename'])[-1])\n\n # Update file status and insert entry into download_attempts\n queries = []\n queries.append(\"UPDATE files \" \\\n \"SET status='downloading', \" \\\n \"details='Initiated download', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n jobtracker.query(queries)\n queries = []\n queries.append(\"INSERT INTO download_attempts (\" \\\n \"status, \" \\\n \"details, \" \\\n \"updated_at, \" \\\n \"created_at, \" \\\n \"file_id) \" \\\n \"VALUES ('%s', '%s', '%s', '%s', %d)\" % \\\n ('downloading', 'Initiated download', jobtracker.nowstr(), \\\n jobtracker.nowstr(), file['id']))\n insert_id = jobtracker.query(queries, fetchone=True)\n attempt = jobtracker.query(\"SELECT * FROM download_attempts \" \\\n \"WHERE id=%d\" % insert_id, fetchone=True)\n \n # download(attempt)\n DownloadThread(attempt).start()\n else:\n break",
"def test_try_auxiliary_download_links(\n requests_mock: rm_Mocker,\n mp3_file1_mock: bytes,\n tmp_path: Path,\n lep_dl: LepDL,\n) -> None:\n test_downloads: LepFileList = LepFileList()\n file_1 = LepFile(\n filename=\"Test File #1.mp3\",\n primary_url=\"https://traffic.libsyn.com/secure/teacherluke/733._A_Summer_Ramble.mp3\", # noqa: E501,B950\n secondary_url=\"https://hotenov.com/d/lep/some_auxiliary_1.mp3\",\n tertiary_url=\"https://hotenov.com/d/lep/some_auxiliary_2.mp3\",\n )\n test_downloads.append(file_1)\n\n requests_mock.get(\n \"https://traffic.libsyn.com/secure/teacherluke/733._A_Summer_Ramble.mp3\", # noqa: E501,B950\n text=\"Response not OK\",\n status_code=404,\n )\n requests_mock.get(\n \"https://hotenov.com/d/lep/some_auxiliary_1.mp3\",\n text=\"Response not OK\",\n status_code=404,\n )\n requests_mock.get(\n \"https://hotenov.com/d/lep/some_auxiliary_2.mp3\",\n content=mp3_file1_mock,\n )\n\n lep_dl.files = test_downloads\n lep_dl.detach_existed_files(tmp_path)\n lep_dl.download_files(tmp_path)\n expected_file_1 = tmp_path / \"Test File #1.mp3\"\n assert expected_file_1.exists()\n assert len(list(tmp_path.iterdir())) == 1\n assert len(lep_dl.downloaded) == 1",
"def download_files(file_uris):\n\n if os.path.exists(LOG_FILE):\n log_file = open(LOG_FILE, \"rU+\")\n downloaded_podcasts = strip_newlines(log_file)\n else:\n log_file = open(LOG_FILE,\"w\")\n downloaded_podcasts = []\n\n for uri in file_uris:\n # if the current file URI is not found in the log, it is a new file, and\n # is thus downloaded\n if uri not in downloaded_podcasts:\n # extract filename from the URI \n uri_split = re.split(\"/\", uri)\n filename = uri_split[len(uri_split) - 1]\n \n # download the file\n if OUTPUT:\n print \"downloading \" + uri\n urllib.urlretrieve(uri, DEST_DIR + os.sep + filename)\n log_file.write(uri + os.linesep)\n\n log_file.close()",
"def download(urls, dest_folder):\n pass",
"def test_scraper(self):\n\n for entry in tests:\n command = ['./mozdownload/scraper.py',\n '--base_url=%s' % self.wdir,\n '--destination=%s' % self.temp_dir]\n p = processhandler.ProcessHandler(command + entry['options'])\n p.run()\n p.wait()\n dir_content = os.listdir(self.temp_dir)\n self.assertTrue(entry['fname'] in dir_content)\n\n mozfile.remove(os.path.join(self.temp_dir, entry['fname']))",
"def test_download_simfile(self):\n scrape_category.download_simfile(self.simfile, self.dest,\n tidy=False,\n use_logfile=True,\n extract=True,\n link=self.link)\n\n # There should now be three files - a download log, a zip, and\n # an unzipped simfile.\n self.check_saved_files(log=True, unzipped=True, zipped=True)\n\n records = {\"100\": self.simfile}\n updated_records = scrape_category.update_records_from_log(records, self.dest)\n assert len(updated_records) == 1\n assert \"100\" in updated_records\n # The records should be updated to reflect where the simfile\n # was actually saved\n assert updated_records[\"100\"].name == \"foo\"",
"def downloadFiles (downloadFileParser, outputFolder):\n counter = 0\n errorCounter = 0 \n url = downloadFileParser.getNextUrl()\n while (url != None):\n outputPath = FileDownloader.getOutputPath(url, outputFolder)\n LogfileIO.writeLogfile(downloadFileParser._filePath, counter, outputFolder, outputPath)\n downloadSuccessful = FileDownloader.downloadFile(url, outputPath)\n if (downloadSuccessful == False):\n DownloadErrors.writeLink(url)\n errorCounter += 1\n counter += 1\n url = downloadFileParser.getNextUrl()\n \n print str(counter-errorCounter) + \" file(s) downloaded\"\n print \"Failed to download \" + str(errorCounter) + \" file(s) - see erroneousLinks.txt\" \n return",
"async def download(urls, num_workers, show_only_success, outputfile, only_urls):\n outputfiledata = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:\n loop = asyncio.get_event_loop()\n futures = []\n response = []\n for url in urls:\n futures.append(loop.run_in_executor(executor, make_request, url))\n\n for response in await asyncio.gather(*futures):\n outputfiledata.append(response)\n if not response['status'] == -1:\n if show_only_success:\n if response['status'] < 400 or response['status'] >= 500:\n if only_urls:\n print(response['url'])\n else:\n print(\n '{:70.70} {}'.format(\n response['url'],\n response['status']))\n else:\n if only_urls:\n print(response['url'])\n else:\n print(\n '{:70.70} {}'.format(\n response['url'],\n response['status']))\n if outputfile:\n for host in outputfiledata:\n if not host['status'] == -1:\n if only_urls:\n outputfile.write('{}\\n'.format(host['url']))\n else:\n outputfile.write(\n '{},{}\\n'.format(\n host['url'], host['status']))",
"def download_second_request(url=None, filename=None, **kwargs):\n with open(filename, 'w') as output:\n output.write('some successful second response XML')",
"def test_get_files_list(self):\n files = self.download.get_files_list()\n self.assertTrue(len(files) > 0)",
"def write_downloaded_links():\n global downloaded_links_fn\n text_file = open(downloaded_links_fn,\"w\")\n for link in downloaded_links.items():\n text_file.write(link[0] + \"\\n\")\n text_file.close()",
"def test_write_chunks(self, mock_open):\n request = mock.Mock(path=\"/tmp/foo\")\n\n chunks = [b\"foo\", b\"bar\", b\"baz\"]\n file_size = sum(len(d) for d in chunks)\n transfer_status = TransferStatus(file_size)\n\n completed_futures = []\n expected_seeks = []\n expected_writes = []\n expected_print_transfer_progresses = []\n\n byte_start = 0\n for chunk in chunks:\n future = mock.Mock(\n result=mock.Mock(return_value=(byte_start, mock.Mock(content=chunk)))\n )\n completed_futures.append(future)\n expected_seeks.append(mock.call(byte_start))\n expected_writes.append(mock.call(chunk))\n\n byte_start += len(chunk)\n expected_print_transfer_progresses.append(\n mock.call(\n byte_start,\n file_size,\n \"Downloading \",\n os.path.basename(request.path),\n dt=mock.ANY,\n )\n )\n\n downloader = _MultithreadedDownloader(mock.Mock(), mock.Mock(), 5)\n downloader._write_chunks(request, completed_futures, transfer_status)\n\n # with open (as a context manager)\n mock_write = mock_open.return_value.__enter__.return_value\n assert expected_seeks == mock_write.seek.call_args_list\n assert expected_writes == mock_write.write.call_args_list\n\n assert sum(len(c) for c in chunks) == transfer_status.transferred\n assert (\n expected_print_transfer_progresses\n == downloader._syn._print_transfer_progress.call_args_list\n )",
"def download_files(service, file_list, out_path):\n total = len(file_list)\n for i, file_id in enumerate(file_list, 1):\n name = get_file(service, file_id)['title']\n print('Downloading {}... ({}/{}) [{}%]'.format(name, i, total,\n round(i / total * 100)))\n path = os.path.join(out_path, name)\n try:\n download_file(service, file_id, path)\n except errors.HttpError as error:\n os.remove(path) # Remove broken file\n print('Could not download file: {}'.format(error), file=sys.stderr)",
"def test_write_file(self):\n test = Server()\n test.cur_dir = os.getcwd()\n inputs = [['write_file', 'test_file1.txt', 'Hello world'],\n ['write_file', 'test_file2.txt', 'Hello world'],\n ['write_file', 'test_file1.txt']]\n response = ['written successfully',\n 'file created and written successfully',\n 'contents erased successfully']\n res = []\n for val in inputs:\n res.append(test.write_file(val))\n self.assertListEqual(res, response)",
"def test_download_links():\n\n # dir to download data to\n out_dir = 'test/download_data'\n\n # remove out_dir if it already exists and make a new one\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.system('mkdir -p %s'%out_dir)\n\n # List of all available fits\n fit_names = surfinBH.fits_collection.keys()\n for name in fit_names:\n surfinBH.DownloadData(name=name, data_dir=out_dir)\n\n # allow for both naming formats surfinBH7dq2 and NRSur7dq4Remnant\n if 'surfinBH' in name:\n name_tag = name.split('surfinBH')[-1]\n else:\n name_tag = name.split('NRSur')[-1].split('Remnant')[0]\n\n # check that it has the right name\n assert(os.path.isfile('%s/fit_%s.h5'%(out_dir, name_tag)))\n # check that the fit_name matches with the name in the attributes\n # of h5 file.\n h5file = h5py.File('%s/fit_%s.h5'%(out_dir, name_tag), 'r')\n assert(name_tag == h5file.attrs['name'].decode('utf-8'))\n h5file.close()",
"def test_download_file(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n assets_calculated_sha = 'notasha'\n sha_dict = {}\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == os.path.basename(TEST_FILENAME):\n\n # the uploaded asset\n request = requests.get(check_asset.browser_download_url)\n open(TEST_DOWNLOAD, 'wb').write(request.content)\n\n # recalc hash of downloaded file\n assets_calculated_sha = Arguments.get_hash(TEST_DOWNLOAD)\n\n elif check_asset.name == sha_filename:\n\n # the sha hash file\n request = requests.get(check_asset.browser_download_url)\n sha_dict = request.json()\n\n assert assets_calculated_sha == sha_dict[os.path.basename(TEST_FILENAME)]",
"def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")",
"def test_download_write_item_asstes(self, mock_get, mock_save):\n # Arrange\n\n collection = {\n \"collection_name\": self.name,\n \"collection_slug\": slugify(self.name),\n \"collection_task_id\": \"123\",\n \"subcollection_name\": self.project,\n \"subcollection_slug\": slugify(self.project),\n }\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n\n # Act\n download_write_item_assets(self.name, self.project, self.item_id)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)",
"def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)",
"def test_send_second_file():\n\n # Generate the blocks for the test file which is not present on the server\n test_file = os.path.join(os.path.dirname(__file__),\n \"../test_files/debashis-rc-biswas-3U4gGsGNsMY-unsplash.jpg\")\n # Ask the server for the hash of the last block\n response = client.get(\"/latest_block_hash\")\n last_block_hash = response.json()[\"last_block_hash\"]\n blocks = generate_blocks(test_file, last_block_hash)\n # Collect all blocks into a single binary file using pickle\n blocks_pickled = pickle.dumps(blocks)\n # Send the collected blocks in a single transfer to the test server\n response = client.post(\"/send\",\n files={\"file\": blocks_pickled})\n assert response.ok\n assert response.json() \\\n == {\"success\": True,\n \"new_file\": True,\n \"hash\": \"415d4f66e1b8b9083014dcdca5ddd7d1dcca3f5a4a120603169b951b1c5fa0c9\",\n \"index_all\": 1704}"
]
| [
"0.6777141",
"0.6730461",
"0.6622148",
"0.64797187",
"0.6425932",
"0.63330853",
"0.6318154",
"0.62658906",
"0.62465984",
"0.61807144",
"0.61734396",
"0.61005056",
"0.6097292",
"0.60680044",
"0.60422486",
"0.60179025",
"0.598955",
"0.59725004",
"0.5953696",
"0.5926606",
"0.5914362",
"0.5898206",
"0.58972466",
"0.587365",
"0.5862284",
"0.58580756",
"0.58523947",
"0.58390427",
"0.5829536",
"0.5819319"
]
| 0.7592539 | 0 |
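The positive document above verifies truncate-then-append semantics: the first call opens the output fresh and later calls extend it. A minimal sketch of the underlying file-mode choice; the function name mirrors the test but is a stand-in for the real implementation, which fetches from Entrez:

```python
# Append-vs-overwrite download sketch; this stand-in just writes text.
def download_to_file(text, filename, append=False):
    mode = "a" if append else "w"
    with open(filename, mode) as handle:
        handle.write(text + "\n")

download_to_file("FOO", "foo.txt", append=False)  # create/truncate
download_to_file("BAR", "foo.txt", append=True)
download_to_file("BAZ", "foo.txt", append=True)
with open("foo.txt") as handle:
    assert len(handle.readlines()) == 3
```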
Test extended validation before writing. | def test_validate_and_write_extended_validation(req):
handle = StringIO()
req.get('http://fake/', text=u'>foo\nMAGIC')
r = requests.get('http://fake/')
config = core.Config(extended_validation='loads', molecule='protein')
core._validate_and_write(r, handle, 'FAKE', config)
assert handle.getvalue() == u'>foo\nMAGIC' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_validate(self):\n pass",
"def __validate():\n # TODO: implement",
"def test_validators():",
"def test_validation(self):\n self.validationFails()",
"def validate():",
"def validate(self):\n ...",
"def validate(self):",
"def validate(self):",
"def test_field_rules():",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\n pass",
"def validate(self):\r\n return 1",
"def validate(self):\r\n return 1",
"def validate(self):\r\n return 1",
"def validate(self):\r\n return 1",
"def validate(self):\r\n return 1",
"def _validate(self):\n pass",
"def __validate__(self):",
"def validate(self):\n return 1",
"def __validate(self):\n pass",
"def test_extensions(self):\n field = TypedFileField(required=False, ext_whitelist=self.good_extensions)\n\n for ext in self.good_extensions:\n name = 'somefooname.%s' % ext\n file = UploadedFile(name=name, size=1)\n assert field.clean(file) is file\n\n for ext in self.bad_extensions:\n name = 'somefooname.%s' % ext\n file = UploadedFile(name=name, size=1)\n with pytest.raises(forms.ValidationError):\n field.clean(file)",
"def validate(self):\n raise NotImplementedError",
"def validate(self):\n raise NotImplementedError",
"def validate(ob):"
]
| [
"0.7042656",
"0.6733468",
"0.66923016",
"0.666434",
"0.6635853",
"0.66091865",
"0.65776545",
"0.65776545",
"0.6520776",
"0.64949775",
"0.64949775",
"0.64949775",
"0.64949775",
"0.64949775",
"0.64949775",
"0.64949775",
"0.64949775",
"0.6398088",
"0.6398088",
"0.6398088",
"0.6398088",
"0.6398088",
"0.63880503",
"0.6347585",
"0.633163",
"0.6317107",
"0.62971884",
"0.62795496",
"0.62795496",
"0.62159324"
]
| 0.74485034 | 0 |
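Both the positive document and several negatives rely on the `requests-mock` pytest plugin, which registers canned responses for `requests` calls (the test above presumably receives it through a local fixture named `req`). A minimal sketch using the plugin's standard `requests_mock` fixture:

```python
import requests

def test_fake_fasta(requests_mock):
    # Any GET to this URL now returns the canned FASTA text.
    requests_mock.get("http://fake/", text=">foo\nMAGIC")
    response = requests.get("http://fake/")
    assert response.text == ">foo\nMAGIC"
```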
Test getting a download stream handles exceptions. | def test_get_stream_exception(req):
req.get(ENTREZ_URL, exc=requests.exceptions.RequestException)
params = dict(id='FAKE')
with pytest.raises(DownloadError):
core.get_stream(ENTREZ_URL, params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_stream(self):\n pass",
"def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))",
"def test_file_download_fail(self):\n with mock.patch(\"JumpScale.j\") as j_mock:\n from JumpScale import j\n import JumpScale.tools.cuisine.CuisineCore\n JumpScale.tools.cuisine.CuisineCore.j = j\n from JumpScale.tools.cuisine.CuisineCore import CuisineCore\n from JumpScale.core.errorhandling import JSExceptions\n executor_mock = mock.MagicMock()\n j.tools.executor.getLocal.return_value = executor_mock\n executor = j.tools.executor.getLocal()\n cuisine = j.tools.cuisine.local\n cuisine_core = CuisineCore(executor, cuisine)\n url = 'http://hallo.com/downloadme.txt'\n to = '/tmp/path'\n cuisine_core.file_exists = mock.MagicMock()\n cuisine_core.file_exists.return_value = False\n cuisine_core.createDir = mock.MagicMock()\n cuisine_core.file_unlink = mock.MagicMock()\n cuisine_core.run = mock.MagicMock()\n cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]\n cuisine_core.touch = mock.MagicMock()\n j.exceptions.RuntimeError = JSExceptions.RuntimeError\n self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)",
"def test_download(self):\n pass",
"def test_download_view_invalid_url(self):\n factory = RequestFactory()\n request = factory.get('/downloads');\n error = False\n try:\n response = views.download(request, 'fake_id');\n except: \n error = True\n\n self.assertTrue(error)",
"def test_download_missing_file(self):\n key = \"badkey\"\n\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader.download_file(key)",
"def test_file_chunk_generator_with_bad_url(mock_get):\n url = 'www.example.com'\n mock_get.return_value.status_code = 404\n chunk = list(presentation.file_chunk_generator(url))\n assert chunk == []\n mock_get.assert_called_once_with(url, stream=True)",
"def test_download(self):\n\n # Test for all correct data\n self.assertEqual(\n download(TestSteelEye.url, self.xmlfilepath, \"sourcefile.xml\"),\n self.xmlfilepath + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect url\n self.assertEqual(\n download(\"http://example.com\", self.xmlfilepath, \"sourcefile.xml\"), \"\"\n )\n\n # Test for different download path\n self.assertEqual(\n download(\n TestSteelEye.url,\n os.path.join(os.getcwd(), \"anotherpath\"),\n \"sourcefile.xml\",\n ),\n os.path.join(os.getcwd(), \"anotherpath\") + os.sep + \"sourcefile.xml\",\n )\n\n # Test for incorrect download path\n self.assertEqual(download(TestSteelEye.url, \"E:\", \"sourcefile.xml\"), \"\")",
"def test_download_nonexistent(client: FlaskClient):\n response = util.download_file(client, DEFAULT_USER, \"test-nonexistent\")\n assert response.status == \"404 NOT FOUND\"",
"def test_download2(self):\n pass",
"def test_get_audio_stream_does_not_raise(self):\n youtube_url = \"https://www.youtube.com/watch?v=jIxas0a-KgM\"\n _ = utils.get_audio_stream(youtube_url)\n assert True # No error",
"def test_download_redirect(self):\n\n fetcher = Fetcher('/unused/root/dir')\n with self.setup_server() as base_url:\n self._URL = base_url\n self.assertFalse(self._URL2_ACCESSED)\n self.assertFalse(self._URL1_ACCESSED)\n\n path = fetcher.download(base_url + '/url2')\n self.assertTrue(self._URL2_ACCESSED)\n self.assertTrue(self._URL1_ACCESSED)\n\n with open(path) as fp:\n self.assertEqual('returned from redirect\\r\\n', fp.read())",
"def test_download_image(self, mock_get):\n\n # Test the good url first\n image_url = self.test_data[\"good_image_url\"][\"url\"]\n image_data = self.test_data[\"good_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(iter_content = [bytes.fromhex(image_data)])\n\n with tempfile.TemporaryDirectory() as dir_name: \n full_filename = self.retriever._download_image(dir_name, image_url)\n with open(full_filename, \"rb\") as read_back_file:\n chunk = read_back_file.read(100)\n\n self.assertEqual(bytes.fromhex(image_data), \\\n chunk, msg = \"For the 'good' image URL, the image data written must match the test data\")\n\n # Test the url that missing file name\n image_url = self.test_data[\"bad_image_url\"][\"url\"]\n image_data = self.test_data[\"bad_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(iter_content = [bytes.fromhex(image_data)])\n\n with tempfile.TemporaryDirectory() as dir_name:\n with self.assertRaises(ValueError, msg = \"URLs without file name in them must raise an exception\"):\n full_filename = self.retriever._download_image(dir_name, image_url)\n\n # Test the rection to a HTTP error\n image_url = self.test_data[\"good_image_url\"][\"url\"]\n image_data = self.test_data[\"good_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(status = 500, raise_for_status = HTTPError('Server-side error'))\n\n with tempfile.TemporaryDirectory() as dir_name: \n with self.assertRaises(Exception, msg = \"HTTP errors must raise an exception\"):\n full_filename = self.retriever._download_image(dir_name, image_url)",
"def test_download1(self):\n pass",
"def make_get_request(session, url, headers, stream=False):\n response = session.get(url, headers=headers, stream=stream, timeout=60)\n\n if response.status_code == 401:\n raise BaseSpaceDownloadError(f\"Authentication failed on URL {url}\")\n elif response.status_code == 404:\n raise BaseSpaceDownloadError(f\"BaseSpace file {url} not found\")\n elif response.status_code != 200:\n raise BaseSpaceDownloadError(f\"Failed to retrieve content from {url}\")\n\n return response",
"def test_download_artifact_as_stream(fake_client):\n artifacts = Artifacts(fake_client, \"base\")\n artifacts.download_artifact(\n \"org_slug\", \"pipe_slug\", \"build_no\", 123, \"artifact\", as_stream=True\n )\n url = \"base/organizations/org_slug/pipelines/pipe_slug/builds/build_no/jobs/123/artifacts/artifact/download/\"\n fake_client.get.assert_called_with(\n url, headers={\"Accept\": \"application/octet-stream\"}, as_stream=True\n )",
"def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file):\n base_url = 'http://www.example.com/somepackage.tgz'\n contents = b'downloaded'\n download_hash = hashlib.new('sha1', contents)\n link = Link(base_url + '#sha1=' + download_hash.hexdigest())\n\n session = Mock()\n session.get = Mock()\n response = session.get.return_value = MockResponse(contents)\n response.headers = {'content-type': 'application/x-tar'}\n response.url = base_url\n\n download_dir = mkdtemp()\n try:\n downloaded_file = os.path.join(download_dir, 'somepackage.tgz')\n create_file(downloaded_file, 'some contents')\n\n unpack_http_url(\n link,\n 'location',\n download_dir=download_dir,\n session=session,\n hashes=Hashes({'sha1': [download_hash.hexdigest()]})\n )\n\n # despite existence of downloaded file with bad hash, downloaded again\n session.get.assert_called_once_with(\n 'http://www.example.com/somepackage.tgz',\n headers={\"Accept-Encoding\": \"identity\"},\n stream=True,\n )\n # cached file is replaced with newly downloaded file\n with open(downloaded_file) as fh:\n assert fh.read() == 'downloaded'\n\n finally:\n rmtree(download_dir)",
"def test_download_host(self):\n pass",
"def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)",
"def test_GET_fetcher_fail():\n bad_url = GET_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_read_http_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.read(data_url)\n assert 'HTTP error: 300' in capsys.readouterr().out",
"def test_download(api):\n # upload file prior to download\n # with pytest.raises(APIConnectionError):\n uploaded_file = api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # remove the uploaded file from the os\n remove('tests/test_file.txt')\n\n # download and save the file\n api.download(tag='test_upload')\n\n # check that file was saved in a filesystem\n assert path.isfile('tests/test_file.txt')",
"def test_download_write_asset_item_error(self, mock_get):\n # Arrange\n mock_resp = MockResponse({}, 200, content=Exception(\"boom\"))\n mock_get.return_value = mock_resp\n m = mock_open()\n\n with patch(\"__main__.open\", m, create=True):\n\n # Act\n abc = download_write_collection_item_asset(\"dumy/image/url\", \"foo\")\n\n # Assert\n self.assertEquals(abc, False)",
"def test_file_chunk_generator(mock_get):\n url = 'www.example.com'\n mock_data = ['This', 'is', 'to', 'test', 'streaming', 'data.']\n mock_get.return_value.status_code = 200\n mock_get.return_value.iter_content.return_value = mock_data\n chunk = list(presentation.file_chunk_generator(url))\n assert chunk == mock_data\n mock_get.assert_called_once_with(url, stream=True)",
"def test_url_missing_streams(self):\n with self.assertRaises(NotImplementedError):\n EventStreams()",
"def test_download_to_file_retry(req, tmpdir):\n req.get(ENTREZ_URL, response_list=[\n {\"text\": u'Whoa, slow down', \"status_code\": 429, \"headers\": {\"Retry-After\": \"0\"}},\n {\"text\": 'This works.'},\n ])\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()",
"def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called",
"def test_download(self):\n fake_contents = \"This is the file contents\"\n fake_key_name = 'fake_key_name'\n\n with patch('iceit.backends.TemporaryFile', spec=True) as mock_file:\n string_file = StringIO()\n string_file.write(fake_contents)\n mock_file.return_value = string_file\n\n with patch('iceit.backends.Key', spec=True) as mock_key:\n mock_key.return_value = mock_key\n backend = self.test_init_valid()\n result = backend.download(fake_key_name)\n\n assert result is string_file\n self.assertEqual(result.tell(), 0)\n self.assertEqual(result.read(), fake_contents)\n mock_key.assert_called_once_with(backend.bucket, fake_key_name)\n mock_key.get_contents_to_file.assert_called_once_with(string_file)",
"def test_download_syscall_error(caplog, error_no, result_status):\n caplog.set_level(logging.DEBUG, \"snowflake.connector\")\n mock_resource = MagicMock()\n mock_resource.download_file.side_effect = OpenSSL.SSL.SysCallError(error_no)\n client_meta = {\n \"cloud_client\": mock_resource,\n \"stage_info\": {\"location\": \"loc\"},\n }\n meta = {\n \"name\": \"f\",\n \"stage_location_type\": \"S3\",\n \"self\": SFResourceMeta(**client_meta),\n \"sha256_digest\": \"asd\",\n \"src_file_name\": \"f\",\n \"src_file_size\": 99,\n \"get_callback_output_stream\": None,\n \"show_progress_bar\": False,\n \"get_callback\": None,\n }\n meta = SnowflakeFileMeta(**meta)\n with mock.patch(\n \"snowflake.connector.s3_util_sdk.SnowflakeS3Util._get_s3_object\",\n return_value=mock_resource,\n ):\n SnowflakeS3Util._native_download_file(meta, \"f\", 4)\n assert meta.last_error is mock_resource.download_file.side_effect\n assert meta.result_status == result_status",
"def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")"
]
| [
"0.7340208",
"0.6803314",
"0.67575866",
"0.67487544",
"0.66739756",
"0.66731954",
"0.66725594",
"0.6651754",
"0.64714664",
"0.6464837",
"0.63992447",
"0.63616765",
"0.6331959",
"0.63270634",
"0.63261515",
"0.63195616",
"0.62991345",
"0.6268872",
"0.61475223",
"0.61062175",
"0.6089896",
"0.60820735",
"0.60813093",
"0.6052344",
"0.6038667",
"0.6033081",
"0.6023304",
"0.6012185",
"0.6008595",
"0.6004245"
]
| 0.76500154 | 0 |
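The pattern under test here: transport-level failures from `requests` are caught and re-raised as a domain-specific `DownloadError`, so callers only handle one exception type. A hedged sketch of such a wrapper, illustrative rather than the library's actual implementation:

```python
import requests

class DownloadError(Exception):
    """Raised when the upstream HTTP request fails."""

def get_stream(url, params):
    try:
        # stream=True defers downloading the body until the caller iterates.
        return requests.get(url, params=params, stream=True)
    except requests.exceptions.RequestException as exc:
        raise DownloadError(str(exc)) from exc
```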
Test getting a download stream handles bad status codes. | def test_get_stream_bad_status(req):
req.get(ENTREZ_URL, text=u'Nope!', status_code=404)
params = dict(id='FAKE')
with pytest.raises(InvalidIdError):
core.get_stream(ENTREZ_URL, params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_stream_exception(req):\n req.get(ENTREZ_URL, exc=requests.exceptions.RequestException)\n params = dict(id='FAKE')\n with pytest.raises(DownloadError):\n core.get_stream(ENTREZ_URL, params)",
"def test_file_chunk_generator_with_bad_url(mock_get):\n url = 'www.example.com'\n mock_get.return_value.status_code = 404\n chunk = list(presentation.file_chunk_generator(url))\n assert chunk == []\n mock_get.assert_called_once_with(url, stream=True)",
"def test_GET_fetcher_fail():\n bad_url = GET_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_read_http_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.read(data_url)\n assert 'HTTP error: 300' in capsys.readouterr().out",
"def url_check_tester(client, url, status_code):\n response = client.get(url)\n assert response.status_code == status_code, \\\n f'Unexpected status code for {url}'\n assert response.data == b''",
"def test_download_nonexistent(client: FlaskClient):\n response = util.download_file(client, DEFAULT_USER, \"test-nonexistent\")\n assert response.status == \"404 NOT FOUND\"",
"def test_request_failure(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test', status=500)\n r = CkanResource('http://somewhere.com/test', None, {'offset': None, 'limit': None})\n try:\n r._get_response(200, 20)\n assert_true(False, \"Expected exception StreamError\")\n except StreamError:\n pass",
"def test_failing(self):\n request = self.factory.get(\n '/path', HTTP_RANGE='bytes=17-20'\n )\n response = RangedFileResponse(\n request, io.BytesIO(b'sui2khiau2tsiang5'), content_type='audio/wav'\n )\n self.assertEqual(response.status_code, 416)",
"def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))",
"def test_http_get_metadata_non_200_status_code(self, mock_urllib2):\n mock_response = Mock(name=\"Always non-200 Status Code\")\n mock_response.getcode.return_value = 400\n mock_urllib2.return_value = mock_response\n with self.assertRaises(IOError) as exception:\n ef_utils.http_get_metadata(\"ami-id\")\n self.assertIn(\"Non-200 response\", str(exception.exception))",
"def test_get_stream(self):\n pass",
"def test_download_view_invalid_url(self):\n factory = RequestFactory()\n request = factory.get('/downloads');\n error = False\n try:\n response = views.download(request, 'fake_id');\n except: \n error = True\n\n self.assertTrue(error)",
"def test_404_url():\n def not_found(request):\n request.send_error(404)\n\n with test_server(handler=not_found, methods=(\"post\", \"get\"),\n port=\"random\") as server:\n stream = TweetStream(\"foo\", \"bar\", url=server.baseurl)\n assert_raises(ConnectionError, stream.next)\n\n stream = FollowStream(\"foo\", \"bar\", [1, 2, 3], url=server.baseurl)\n assert_raises(ConnectionError, stream.next)\n\n stream = TrackStream(\"foo\", \"bar\", [\"opera\"], url=server.baseurl)\n assert_raises(ConnectionError, stream.next)",
"def __check_status_code(cls, status_code):\n if status_code >= 400:\n raise IOError(\"error status_code: %d\" % status_code)",
"def test_404_url(self):\r\n url = 'http://lococast.net/archives/001'\r\n read = ReadUrl.parse(url)\r\n\r\n self.assertTrue(\r\n read.status == 404, \"The status is 404: \" + str(read.status))\r\n self.assertTrue(\r\n not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(\r\n read.content is None, \"Content should be none\")",
"def test_get_audio_stream_does_not_raise(self):\n youtube_url = \"https://www.youtube.com/watch?v=jIxas0a-KgM\"\n _ = utils.get_audio_stream(youtube_url)\n assert True # No error",
"def test_get_stream_too_many_requests(req):\n req.get(ENTREZ_URL, text=u'Whoa, slow down', status_code=429, headers={\"Retry-After\": \"2\"})\n params = dict(id='FAKE')\n with pytest.raises(TooManyRequests):\n core.get_stream(ENTREZ_URL, params)",
"def test_download_missing_file(self):\n key = \"badkey\"\n\n with self.assertRaises(AzureReportDownloaderError):\n self.downloader.download_file(key)",
"def test_status_code(self):\n self.assertEquals(self.response.status_code, 404)",
"def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)",
"def test_file_url_status_error(self):\n file_url = \"file_url\"\n status = FileManagementStatus(\n FileManagementStatusType.ERROR,\n FileManagementErrorType.MALFORMED_URL,\n )\n expected_topic = (\n self.factory.common_topic + WAPMF.FILE_URL_DOWNLOAD_STATUS\n )\n expected_payload = json.dumps(\n {\n \"fileUrl\": file_url,\n \"status\": status.status.value,\n \"error\": status.error.value,\n }\n )\n expected_message = Message(expected_topic, expected_payload)\n serialized_message = self.factory.make_from_file_url_status(\n file_url, status\n )\n\n self.assertEqual(expected_message, serialized_message)",
"def testStreamParseError(self):\n\n def _testStreamError(res):\n self.assertEqual(True, isinstance(res.value, httpb_client.HTTPBNetworkTerminated))\n self.assertEqual(res.value.body_tag.getAttribute('condition', None), 'remote-connection-failed')\n\n def _failStreamError(res):\n self.fail('Expected a remote-connection-failed error')\n\n def _testSessionCreate(res):\n self.sid = res[0]['sid']\n self.server_protocol.triggerInvalidXML()\n return self.send().addCallbacks(_failStreamError, _testStreamError)\n\n return self.proxy.connect(self.get_body_node(connect=True)).addCallback(_testSessionCreate)",
"def test_read_unexpected_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.read(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out",
"def test_get_fail(self):\n response = self.second_client.get(self.url)\n self.assertEquals(response.status_code, 400)",
"def test_download_syscall_error(caplog, error_no, result_status):\n caplog.set_level(logging.DEBUG, \"snowflake.connector\")\n mock_resource = MagicMock()\n mock_resource.download_file.side_effect = OpenSSL.SSL.SysCallError(error_no)\n client_meta = {\n \"cloud_client\": mock_resource,\n \"stage_info\": {\"location\": \"loc\"},\n }\n meta = {\n \"name\": \"f\",\n \"stage_location_type\": \"S3\",\n \"self\": SFResourceMeta(**client_meta),\n \"sha256_digest\": \"asd\",\n \"src_file_name\": \"f\",\n \"src_file_size\": 99,\n \"get_callback_output_stream\": None,\n \"show_progress_bar\": False,\n \"get_callback\": None,\n }\n meta = SnowflakeFileMeta(**meta)\n with mock.patch(\n \"snowflake.connector.s3_util_sdk.SnowflakeS3Util._get_s3_object\",\n return_value=mock_resource,\n ):\n SnowflakeS3Util._native_download_file(meta, \"f\", 4)\n assert meta.last_error is mock_resource.download_file.side_effect\n assert meta.result_status == result_status",
"def test_download_image(self, mock_get):\n\n # Test the good url first\n image_url = self.test_data[\"good_image_url\"][\"url\"]\n image_data = self.test_data[\"good_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(iter_content = [bytes.fromhex(image_data)])\n\n with tempfile.TemporaryDirectory() as dir_name: \n full_filename = self.retriever._download_image(dir_name, image_url)\n with open(full_filename, \"rb\") as read_back_file:\n chunk = read_back_file.read(100)\n\n self.assertEqual(bytes.fromhex(image_data), \\\n chunk, msg = \"For the 'good' image URL, the image data written must match the test data\")\n\n # Test the url that missing file name\n image_url = self.test_data[\"bad_image_url\"][\"url\"]\n image_data = self.test_data[\"bad_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(iter_content = [bytes.fromhex(image_data)])\n\n with tempfile.TemporaryDirectory() as dir_name:\n with self.assertRaises(ValueError, msg = \"URLs without file name in them must raise an exception\"):\n full_filename = self.retriever._download_image(dir_name, image_url)\n\n # Test the rection to a HTTP error\n image_url = self.test_data[\"good_image_url\"][\"url\"]\n image_data = self.test_data[\"good_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(status = 500, raise_for_status = HTTPError('Server-side error'))\n\n with tempfile.TemporaryDirectory() as dir_name: \n with self.assertRaises(Exception, msg = \"HTTP errors must raise an exception\"):\n full_filename = self.retriever._download_image(dir_name, image_url)",
"def test_POST_fetcher_fail():\n bad_url = POST_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_nonexistant_file(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/nofilewiththisnameright.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n\n # Test response\n message = self.client_socket.recv(1024)\n response = self.parser.parse_response(message)\n self.assertEqual(response.code, 404)\n self.assertEqual(response.body, \"404 \" + webhttp.consts.REASON_DICT[404])",
"def test_non_existing_progress(self):\n code, res = get_progress(data={PROGRESS_ID: 42})\n self.assertEqual(code, 204)\n self.assertEqual(res, {})",
"def test_fetch_url_not_ok():\n with patch(\"cheddar.index.remote.get\") as mocked:\n mocked.return_value = MagicMock()\n mocked.return_value.status_code = codes.bad_request\n with assert_raises(NotFoundError):\n fetch_url(\"http://example.com\", TIMEOUT, getLogger())"
]
| [
"0.71951884",
"0.68888",
"0.67313933",
"0.66616815",
"0.6567206",
"0.6535835",
"0.64916086",
"0.64071435",
"0.635155",
"0.6336012",
"0.6319616",
"0.6313055",
"0.6244992",
"0.6199207",
"0.61868465",
"0.61866546",
"0.6169734",
"0.6151099",
"0.61127555",
"0.6112462",
"0.61009485",
"0.60770434",
"0.6075308",
"0.6036433",
"0.6036243",
"0.6013448",
"0.60112464",
"0.6003928",
"0.597028",
"0.5946434"
]
| 0.7255718 | 0 |
Test URL generation for API key | def test_generate_url_with_api_key():
config = core.Config(api_key='FAKE')
expected = "{}?{}".format(ENTREZ_URL, "retmode=text&id=FAKE&db=nucleotide&api_key=FAKE&rettype=gbwithparts")
assert expected == core.generate_url("FAKE", config)
config.format = 'gff3'
expected = "{}?{}".format(SVIEWER_URL, "retmode=text&id=FAKE&db=nucleotide&api_key=FAKE&report=gff3")
assert expected == core.generate_url("FAKE", config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_api_key(self):\n pass",
"def test_api_key(self):\n self.assertEqual(self.route4me.key, '11111111111111111111111111111111')",
"def test_add_api_key_to_org(self):\n pass",
"def test_generate_api_key():\n\n key = auth.generate_api_key() # returns a NamedTuple with api_key and hashed_key\n hashed_api_key = sha256(key.api_key.encode('utf-8')).hexdigest()\n assert hashed_api_key == key.hashed_key",
"def get_url():\n key = _get_key()\n return key.generate_url(300)",
"async def test_dev_fetch_api_key(client):\n params = [('username', '[email protected]')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/api/v1/dev_fetch_api_key',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_validate_api_key(app, seed_data, key, result):\n user_id, api_key = seed_data\n if key == 'use-valid-key':\n key = api_key\n with app.app_context():\n assert auth.validate_api_key(user_id, key) == result",
"async def test_fetch_api_key(client):\n params = [('username', '[email protected]'),\n ('password', 'abcd1234')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/api/v1/fetch_api_key',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_client_build_url():\n eq_(\"{0}/{1}\".format(client.BASE_URL, \"v1/charges/\"), client.build_url(\"v1/charges/\"))",
"def test_get_test_organization_api_key(self):\n pass",
"def test_get_user_api_keys(self):\n pass",
"def test_add_with_key(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'}, follow=True)\n self.assertShortURLCreated(response, 'example')",
"def get_api_key(api_key):\n api.get(api_key)",
"def _url_builder(url_root,api_key,path,params):\n params['api_key'] = api_key\n url_end = urlencode(params)\n url = \"%s%s%s\" % (url_root,path,url_end)\n return url",
"def test_aws_service_api_keypair_generate_post(self):\n pass",
"def test_get_cloud_organization_api_key(self):\n pass",
"def api_url(url_base):\n return f\"{url_base}/api/v2\"",
"def test_url_construction(self):\n\n a = api.InvenTreeAPI(\"http://localhost:1234\", connect=False)\n\n tests = {\n 'part': 'http://localhost:1234/api/part/',\n '/part': 'http://localhost:1234/api/part/',\n '/part/': 'http://localhost:1234/api/part/',\n 'order/so/shipment': 'http://localhost:1234/api/order/so/shipment/',\n }\n\n for endpoint, url in tests.items():\n self.assertEqual(a.constructApiUrl(endpoint), url)",
"def test_delete_api_key(self):\n pass",
"def test_api_url(self):\n url = 'http://api.shopstyle.com/action/apiVisitRetailer?id=471281504&pid=uid3600-33034440-48'\n assert extract_product_id_from_api_url(url) == '471281504'",
"def make_url(api_key, url, args=None):\n if args is None:\n args = []\n argsep = '&'\n if '?' not in url:\n argsep = '?'\n if '?apiKey=' not in url and '&apiKey=' not in url:\n args.insert(0, ('apiKey', api_key))\n return url + argsep + '&'.join(['='.join(t) for t in args])",
"def key_id(cls, url: str):\r\n ...",
"def api_url(self, url_key):\n dic = self.api_endpoints()\n return dic.get(url_key)",
"def a_valid_api_key(configuration):\n configuration.api_key[\"apiKeyAuth\"] = os.getenv(\"DD_TEST_CLIENT_API_KEY\", \"fake\")",
"def test_get_datafile_url__sdk_key_and_template_provided(self, _):\n test_sdk_key = 'optly_key'\n test_url_template = 'www.optimizelydatafiles.com/{sdk_key}.json'\n expected_url = test_url_template.format(sdk_key=test_sdk_key)\n self.assertEqual(\n expected_url, config_manager.PollingConfigManager.get_datafile_url(test_sdk_key, None, test_url_template),\n )",
"def test_set_api_key(self):\n\n api_key = 'abc'\n project_id = '123'\n\n kaput.init(api_key, project_id)\n\n self.assertEqual(api_key, kaput._API_KEY)\n self.assertEqual(project_id, kaput._PROJECT_ID)\n self.assertFalse(kaput._DEBUG)\n self.assertEqual(kaput._handle_exception, sys.excepthook)",
"def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")",
"def generate_api_key(key_length: int = settings.api_app_auth_key_length) -> str:\n return secrets.token_urlsafe(64)[:key_length]",
"def test_api_key (key):\n\tdb = getattr(g,'db', None)\n\n\tif isinstance(key, unicode):\n\t\tkey = key.encode('utf-8')\n\n\tqry = \"SELECT apikey FROM api_keys WHERE apikey=%s;\"\n\twith db as cur:\n\t\treturn 0 < cur.execute(qry, (key,))",
"def test_get_datafile_url__sdk_key_and_url_and_template_provided(self, _):\n test_sdk_key = 'optly_key'\n test_url_template = 'www.optimizelydatafiles.com/{sdk_key}.json'\n test_url = 'www.myoptimizelydatafiles.com/my_key.json'\n\n # Assert that if url is provided, it is always returned\n self.assertEqual(\n test_url, config_manager.PollingConfigManager.get_datafile_url(test_sdk_key, test_url, test_url_template),\n )"
]
| [
"0.77718914",
"0.7511353",
"0.7062115",
"0.70539737",
"0.7016188",
"0.7004324",
"0.69317126",
"0.69125515",
"0.6899556",
"0.6894859",
"0.68166417",
"0.6765619",
"0.6725878",
"0.6664452",
"0.6595064",
"0.6595008",
"0.658102",
"0.6571271",
"0.6566767",
"0.6563885",
"0.65369767",
"0.6524253",
"0.6502395",
"0.64437723",
"0.64377177",
"0.6436086",
"0.64286786",
"0.6426092",
"0.6398573",
"0.6396235"
]
| 0.79853916 | 0 |
Test that the small tree contains a root value. | def test_root_value(small_tree):
assert small_tree.root.value == 3 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_small_tree_has_root_value(small_tree):\n assert small_tree.root.right.value == 11",
"def test_root_large_tree_value(large_tree):\n assert large_tree.root.value == 11",
"def test_small_tree_has_no_root(small_tree):\n assert small_tree.root.left is None",
"def test_val_root_filled_tree(depth_one_tree):\n assert depth_one_tree.root.val is 0",
"def test_empty_tree(height_one_tree):\n assert height_one_tree.root.val is 0",
"def test_bst_root(bst_ten_values_random):\n assert bst_ten_values_random.root.val == 5",
"def test_tree_with_one_node_root_exists(one_t):\n assert one_t.root",
"def test_tree_with_one_node_has_correct_value(one_t):\n assert one_t.root.value == 10",
"def test_tree_initiates_with_empty_root(empty_t):\n assert not empty_t.root",
"def test_small_tree_has_right_child_child(small_tree):\n assert small_tree.root.right.right.value == 27",
"def test_bst_empty_root(bst_empty):\n assert bst_empty.root == None",
"def test_empty_tree_contains(empty_t):\n empty_t.contains(1) == \"An empty tree has no values.\"",
"def test_root():\n gnroot = 's51001'\n root = germanet_data.get_synset_by_id(gnroot)\n np.testing.assert_equal(root.is_root(), True)\n np.testing.assert_equal(root.is_leaf(), False)",
"def test_bst_single_node():\n assert BST(1).root is None",
"def test_value_in_tree_returns_true(balanced_7_nodes):\n assert balanced_7_nodes.contains(7)",
"def test_contains_returns_false_on_empty_tree(bst_empty):\n assert bst_empty.contains(4) is False",
"def is_root(self, p):\n return self.root() == 0",
"def test_tree_with_one_node_root_no_children(one_t):\n assert not one_t.root.left\n assert not one_t.root.right",
"def test_contains_returns_true_on_tree_with_value_left(bst_all_to_left):\n assert bst_all_to_left.contains(3) is True\n assert bst_all_to_left.contains(1) is True\n assert bst_all_to_left.contains(2) is True",
"def test_tree_balace_root():\n result = balance.generate_tree('test_input.txt')\n expected = 'tknk'\n\n assert expected == result['root'][0]",
"def test_left_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(5)\n assert empty_t.root.left\n assert not empty_t.root.right",
"def test_init_root():\n from bst import BST\n bst = BST()\n # import pdb; pdb.set_trace()\n assert bst.root is None",
"def is_root(self, p):\n return self.root() == p",
"def test_size_is_correct_on_empty_tree(bst_empty):\n assert bst_empty.size() == 0",
"def test_empty_tree_size_is_zero(empty_t):\n assert empty_t.size() == 0",
"def is_root(self, u) -> bool:\n return (\n self.num_samples(u) >= self.root_threshold and self.parent(u) == tskit.NULL\n )",
"def test_minimal_tree_creation():\n t = Tree(None)\n\n assert t.data is None\n assert t.parent is None\n assert len(t) == 0",
"def test_empty_tree_depth(empty_t):\n assert empty_t.depth() == \"Empty tree has no depth.\"",
"def test_size_is_correct_on_filled_tree(bst_100_rand):\n assert bst_100_rand.size() == 100",
"def test_depth_empty():\n bst = BST()\n assert bst.depth() == 0"
]
| [
"0.90258086",
"0.8421181",
"0.81683797",
"0.80954325",
"0.79102343",
"0.78552735",
"0.784492",
"0.7693691",
"0.7687923",
"0.75692046",
"0.75289965",
"0.73862946",
"0.7377434",
"0.72751",
"0.72711587",
"0.70437205",
"0.6977386",
"0.69767857",
"0.69623435",
"0.69555503",
"0.695512",
"0.6910889",
"0.68317753",
"0.679384",
"0.6788121",
"0.66928756",
"0.66859704",
"0.6672994",
"0.66695386",
"0.6665476"
]
| 0.88009286 | 1 |
Test that there is no left child of the small tree. | def test_small_tree_has_no_root(small_tree):
assert small_tree.root.left is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tree_with_one_node_root_no_children(one_t):\n assert not one_t.root.left\n assert not one_t.root.right",
"def has_left(self):\n return self.left != None",
"def test_left_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(5)\n assert empty_t.root.left\n assert not empty_t.root.right",
"def _is_left(self):\n if self.parent is None:\n return None\n else:\n return self is self.parent.left",
"def is_left_child(self):\n if self.parent == None:\n return False\n\n return self.parent.left == self",
"def has_left(self):\n return self.__left != None",
"def test_delete_left_tree_single_child(bst_all_to_left):\n bst_all_to_left.delete(4)\n assert bst_all_to_left.search(3).val == 3\n assert bst_all_to_left.search(4) is None",
"def _has_left(self, index):\r\n return self._left(index) < len(self)",
"def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...",
"def test_empty_tree(height_one_tree):\n assert height_one_tree.root.val is 0",
"def _has_left(self, j):\n return self._left(j) < len(self._data)",
"def has_left_child(self, index):\n return self.get_left_child_index(index) < len(self.heap)",
"def test_tree_initiates_with_empty_root(empty_t):\n assert not empty_t.root",
"def has_left(self):\n return self.l is not None",
"def isLeft(self):\n return self.left",
"def has_left(self, position):\n return self.left_child(position) is not None",
"def test_depth_empty():\n bst = BST()\n assert bst.depth() == 0",
"def is_left_child(self):\n is_left_child = False\n parent = self.get_parent()\n if parent is not None:\n is_left_child = parent.get_left() == self\n\n return is_left_child",
"def remove_left(self):\n temp = self._leftchild\n self._leftchild.set_parent(None)\n self.set_leftchild(None)\n return temp",
"def leaf(self):\n if not self._leftchild and not self._rightchild:\n return True\n return False",
"def leaf(self):\n if not self.left and not self.right:\n return True\n return False",
"def test_right_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(15)\n assert empty_t.root.right\n assert not empty_t.root.left",
"def test_search_returns_none_when_value_notin_tree_left(bst_all_to_left):\n assert bst_all_to_left.search(0) is None",
"def test_minimal_tree_creation():\n t = Tree(None)\n\n assert t.data is None\n assert t.parent is None\n assert len(t) == 0",
"def test_balance_left_tree(bst_all_to_left):\n assert bst_all_to_left.balance() == -1",
"def test_small_tree_has_right_child_child(small_tree):\n assert small_tree.root.right.right.value == 27",
"def test_lacking_parent(self):\n pass",
"def is_leaf(self):\n return not self.children.exists()",
"def _has_left(self, j):\n return (2 * j + 1) < len(self)",
"def test_contains_returns_false_on_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.contains(25) is False"
]
| [
"0.7628269",
"0.7396116",
"0.73536694",
"0.73440623",
"0.7323851",
"0.7244781",
"0.7193488",
"0.70159173",
"0.69852084",
"0.6982524",
"0.6960716",
"0.6857339",
"0.68238616",
"0.67927694",
"0.679249",
"0.67556167",
"0.6754398",
"0.67338854",
"0.67301023",
"0.672847",
"0.67097545",
"0.6702047",
"0.66935027",
"0.6629092",
"0.662082",
"0.66101736",
"0.66077006",
"0.65990824",
"0.6593273",
"0.6587321"
]
| 0.82072014 | 0 |
Test that there is a value of the right child in the small tree. | def test_small_tree_has_root_value(small_tree):
assert small_tree.root.right.value == 11 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_small_tree_has_right_child_child(small_tree):\n assert small_tree.root.right.right.value == 27",
"def test_tree_with_one_node_has_correct_value(one_t):\n assert one_t.root.value == 10",
"def test_root_value(small_tree):\n assert small_tree.root.value == 3",
"def test_val_root_filled_tree(depth_one_tree):\n assert depth_one_tree.root.val is 0",
"def test_root_large_tree_value(large_tree):\n assert large_tree.root.value == 11",
"def test_right_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(15)\n assert empty_t.root.right\n assert not empty_t.root.left",
"def test_has_correct_value(self):\n self.assertEqual(self.node.value, 7)",
"def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...",
"def test_contains_returns_true_on_tree_with_value_right(bst_right_balance):\n assert bst_right_balance.contains(6) is True\n assert bst_right_balance.contains(2) is True",
"def test_contains_returns_true_on_tree_with_value_left(bst_all_to_left):\n assert bst_all_to_left.contains(3) is True\n assert bst_all_to_left.contains(1) is True\n assert bst_all_to_left.contains(2) is True",
"def is_internal(self):\n # TODO: Check if either left child or right child has a value\n return ... or ...",
"def test_left_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(5)\n assert empty_t.root.left\n assert not empty_t.root.right",
"def test_empty_tree(height_one_tree):\n assert height_one_tree.root.val is 0",
"def test_value_in_tree_returns_true(balanced_7_nodes):\n assert balanced_7_nodes.contains(7)",
"def is_right_child(self):\n if self.parent == None:\n return False\n\n return self.parent.right == self",
"def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1",
"def test_bst_root(bst_ten_values_random):\n assert bst_ten_values_random.root.val == 5",
"def test_small_tree_has_no_root(small_tree):\n assert small_tree.root.left is None",
"def test_depth_returns_correct_value_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.depth() == 3",
"def right_left_most_has_right_child():\n from bbst import Bst\n return Bst([1, 5, 3, 10, 8, 6, 20, 7])",
"def has_right_child(self, index):\n return self.get_right_child_index(index) < len(self.heap)",
"def test_balance_right_tree(bst_right_balance):\n assert bst_right_balance.balance() == 0",
"def test_depth_returns_correct_value_right_balanced_tree(bst_right_balance):\n assert bst_right_balance.depth() == 3",
"def has_right(self):\n return self.right != None",
"def has_right(self, position):\n return self.right_child(position) is not None",
"def getValue(self):\n r = 1 if self.left.getValue() != self.right.getValue() else 0\n return r",
"def test_contains_returns_false_on_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.contains(25) is False",
"def test_contains_returns_false_on_right_balanced_tree(bst_right_balance):\n assert bst_right_balance.contains(25) is False",
"def contains(self,value,parent= None):\n if value == self.node.value: \n return True\n if (value < self.node.value):\n if (self.node.left):\n return self.node.left.contains(value, self.node)\n else: \n return False\n else:\n if (self.node.right):\n return self.node.right.contains(value, self.node)\n else:\n return False",
"def _has_right(self, j):\n return self._right(j) < len(self._data)"
]
| [
"0.8579176",
"0.69598657",
"0.69145495",
"0.6835501",
"0.6834478",
"0.67782515",
"0.66623455",
"0.66231143",
"0.66171",
"0.65439016",
"0.65047574",
"0.6443494",
"0.6417503",
"0.63988703",
"0.63982207",
"0.63054246",
"0.62891006",
"0.6267407",
"0.6237089",
"0.6206176",
"0.6206087",
"0.6156969",
"0.61484134",
"0.6138329",
"0.61149675",
"0.61024475",
"0.6093481",
"0.60835856",
"0.6046669",
"0.60036993"
]
| 0.7728338 | 1 |
Test that there is a right child of the small tree. | def test_small_tree_has_right_child_child(small_tree):
assert small_tree.root.right.right.value == 27 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_right_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(15)\n assert empty_t.root.right\n assert not empty_t.root.left",
"def is_right_child(self):\n if self.parent == None:\n return False\n\n return self.parent.right == self",
"def test_left_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(5)\n assert empty_t.root.left\n assert not empty_t.root.right",
"def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1",
"def is_right_child(self):\n is_right_child = False\n parent = self.get_parent()\n if parent is not None:\n is_right_child = parent.get_right() == self\n\n return is_right_child",
"def has_right(self, position):\n return self.right_child(position) is not None",
"def test_small_tree_has_root_value(small_tree):\n assert small_tree.root.right.value == 11",
"def right_left_most_has_right_child():\n from bbst import Bst\n return Bst([1, 5, 3, 10, 8, 6, 20, 7])",
"def has_right(self):\n return self.right != None",
"def get_rightchild(self):\n return self._rightchild",
"def has_right_child(self, index):\n return self.get_right_child_index(index) < len(self.heap)",
"def set_rightchild(self, newright):\n if newright is not None and not isinstance(newright, BNode):\n return False\n self._rightchild = newright",
"def has_right(self):\n return self.__right != None",
"def test_small_tree_has_no_root(small_tree):\n assert small_tree.root.left is None",
"def test_handle_root_deletion(right_left_most_has_right_child):\n right_left_most_has_right_child.delete(1)\n assert tuple(right_left_most_has_right_child.in_order()) == (\n 3, 5, 6, 7, 8, 10, 20\n )",
"def has_right(self):\n return self.r is not None",
"def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1",
"def rightChild(self, pos):\n return (2 * pos) + 1",
"def rightChild(self, pos):\n return (2 * pos) + 1",
"def test_tree_with_one_node_root_no_children(one_t):\n assert not one_t.root.left\n assert not one_t.root.right",
"def right_child(self, position):\n child = 2 * position + 2\n if child > len(self.table) - 1:\n return None\n return child",
"def _has_right(self, index):\r\n return self._right(index) < len(self)",
"def right_child(self, pos): \n return (2 * pos) + 1",
"def _restructure_rightchild(self):\n right = self._rightchild\n if right.full(): # If right has both children\n if right._leftchild._height > right._rightchild._height:\n right._rotate_leftchild() # Double rotate if left unbalanced\n elif right._leftchild and not right._rightchild:\n right._rotate_leftchild()\n self._rotate_rightchild()",
"def test_depth_returns_correct_value_right_balanced_tree(bst_right_balance):\n assert bst_right_balance.depth() == 3",
"def test_depth_returns_correct_value_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.depth() == 3",
"def _has_right(self, j):\n return self._right(j) < len(self._data)",
"def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...",
"def internal(self):\n if self._leftchild or self._rightchild:\n return True\n return False",
"def test_tree_with_one_leaf_node_left_of_right_depth(balanced_3_nodes):\n balanced_3_nodes.insert(13)\n assert balanced_3_nodes.depth() == 2"
]
| [
"0.7641996",
"0.74434966",
"0.717954",
"0.7019246",
"0.6967774",
"0.69477737",
"0.6912281",
"0.68781763",
"0.6797199",
"0.6698184",
"0.6681731",
"0.66418415",
"0.66311455",
"0.6553698",
"0.65529317",
"0.64987636",
"0.6466696",
"0.64635324",
"0.64635324",
"0.64625084",
"0.64589816",
"0.64553434",
"0.6441989",
"0.64208597",
"0.6414384",
"0.6405544",
"0.6383746",
"0.63682854",
"0.63038486",
"0.62999445"
]
| 0.8620883 | 0 |
Generate a set with a minimal number of banknotes | def generate(self):
# prepare data
banknote_quantity_max = [int(math.floor(self.money / self.banknotes[i])) for i in range(0, self.n)]
# model
mdl = Model(name='MinSetGenerator')
# decision variables
mdl.banknote_quantity = {i: mdl.integer_var(lb=0, ub=banknote_quantity_max[i]) for i in range(0, self.n)}
# decision expressions
money_amount = mdl.sum(mdl.banknote_quantity[i] * self.banknotes[i] for i in range(0, self.n))
notes_quantity = mdl.sum(mdl.banknote_quantity[i] for i in range(0, self.n))
# constraints
mdl.add_constraint(money_amount == self.money)
# strategy
mdl.minimize(notes_quantity)
# solve model: return quantity of each banknotes and a set with a minimal number of banknotes
if not mdl.solve():
print('*** No solution!')
return None, None
else:
return [int(mdl.banknote_quantity[i].solution_value) for i in range(0, self.n)], \
[self.banknotes[i] for i in range(0, self.n) if mdl.banknote_quantity[i].solution_value > 0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_biomarkes(abundant, xxx):\n cc = []\n bk = set()\n lvl = 0\n\n for _, t in abundant:\n cc.append(t.split('.'))\n\n while lvl < len(max(cc)):\n bk = set()\n\n for c in cc:\n if lvl < len(c):\n bk |= set([c[lvl]])\n\n if len(bk) >= xxx:\n break\n\n lvl += 1\n\n return bk",
"def gen_all_holds(hand):\n ans_set = set([()])\n\n for dummy_idx in range(len(hand)):\n temp_set = set([()])\n for seq in ans_set:\n for item in hand:\n new_seq = list(seq)\n if hand.count(item) > new_seq.count(item):\n new_seq.append(item)\n new_seq = sorted(new_seq)\n temp_set.add(tuple(new_seq))\n ans_set = temp_set\n return ans_set",
"def apriori_gen(Ls):\n Lks = Ls[len(Ls) -1] #L(k-1)\n LLength = len(Ls)\n Lc = combinations(Lks, r = LLength+1)\n fs = frozenset([i for i in Lc])\n\n Ck =[] #L(k)\n for s in fs:\n ckItem = frozenset()\n for ss in s:\n ckItem = ckItem.union(ss)\n if not has_infrequent_subset(ckItem, Lks):\n Ck.append(ckItem)\n\n# print \"Ck:\",Ck\n return Ck",
"def initial_S(domains):\n\n return set([('0',)*len(domains)])",
"def allPossibleSlates(m,k,mdoc):\n total=combin(m,k)\n return [addNullDoc(random.choices(mdoc,k=k)) for i in range (total)]",
"def __init__(self, maxNumbers: int):\n self.nums = set(range(maxNumbers))",
"def __init__(self, maxNumbers):\n self.numbers = set(range(maxNumbers))",
"def gen_all_holds(hand):\n \n answer_set = set([()])\n for dummy_idx in range(len(hand)):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in range(1,len(hand)+1):\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n if set(tuple(new_sequence)).issubset(set(range(1,len(hand)+1))):\n temp_set.add(tuple(set(new_sequence)))\n answer_set = answer_set.union(temp_set)\n answer_set2 = set([()])\n for seq in answer_set:\n temp_seq = []\n for element in seq: \n temp_el = hand[element -1]\n temp_seq.append(temp_el)\n answer_set2.add(tuple(temp_seq))\n return answer_set2",
"def sampleset():\n pos = [(0, i) for i in range(50)]\n neg = [(1, i) for i in range(10)]\n return pos + neg",
"def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))",
"def gen_all_holds(hand):\n all_holds_set = [()]\n for entry in hand:\n for subset in all_holds_set:\n # create subsets of hand set\n all_holds_set = all_holds_set + [tuple(subset) + (entry,)]\n return set(sorted(all_holds_set))",
"def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)",
"def generate_superset(self, number):\n \n superset = []\n for i in range(0, 2**(self.dim)):\n if (number & i)==number:\n superset.append(i)\n return superset",
"def kmer_set(s, k):\n kmer = set([])\n n = len(s)\n #n-k+1 is the available range of values or probablities.\n for x in range(0, n - k + 1):\n kmer.add(s[x:x + k])\n return kmer",
"def test_heads(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertTrue([1, 4], list(s.heads()))",
"def non_mcnugget():\n nugget = [0, 6, 9, 20]\n mcnugget = set([6, 9, 20])\n\n while True:\n mcnugget = set([m+n for m in mcnugget for n in nugget])\n\n for m in mcnugget:\n found = all([m+j in mcnugget for j in range(6)])\n if found:\n return [k for k in range(1, m) if k not in mcnugget]",
"def subsets(n):\n binary = lambda x: x>0 and binary(x>>1) + [x&1] or []\n pad = lambda l: [0]*(n-len(l)) + l #Always returns a list of length 'n'\n return [pad(binary(i)) for i in range(1, 2**n)]",
"def gen_all_holds(hand):\n from_hand = [()]\n for item in hand:\n for subset in from_hand:\n from_hand = from_hand + [tuple(subset) + (item, )]\n \n return set(from_hand)",
"def gen_all_holds(hand):\r\n possible_holds = set([()])\r\n \r\n for dice in hand:\r\n temp_holds = possible_holds.copy()\r\n for hold in temp_holds:\r\n temp_seq = list(hold)\r\n temp_seq.append(dice)\r\n possible_holds.add(tuple(temp_seq))\r\n \r\n return possible_holds",
"def generate_Lk_by_Ck(data_set, Ck, min_support, support_data):\r\n Lk = set()\r\n item_count = {}\r\n for t in data_set:\r\n for item in Ck:\r\n if item.issubset(t):\r\n if item not in item_count:\r\n item_count[item] = 1\r\n else:\r\n item_count[item] += 1\r\n t_num = float(len(data_set))\r\n for item in item_count:\r\n if (item_count[item] ) >= min_support:\r\n Lk.add(item)\r\n support_data[item] = item_count[item] / t_num\r\n return Lk",
"def create_C1(data_set):\n C1 = set()\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n return C1",
"def get_set(dim, maximum):\n\n i = 0\n numbers = []\n while i**2 <= maximum:\n n = i**2\n counter = 0\n while n <= maximum and counter < dim:\n numbers += [i**2]\n n += i**2\n counter += 1\n i += 1\n return numbers",
"def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))",
"def vertices(size):\n return set(range(size))",
"def create_C1(data_set):\r\n C1 = set()\r\n for t in data_set:\r\n for item in t:\r\n item_set = frozenset([item])\r\n C1.add(item_set)\r\n return C1",
"def lego_sets():\n # you must replace this line and return your own list\n return []",
"def build_possible_naked_sets(c, setlength=2):\n ns = {}\n pairs = [p for p in c.values() if len(p) == setlength]\n for k, v in c.items():\n if v in pairs:\n ns[k] = sorted(v)\n return ns",
"def gen_all_holds(hand):\n\n mask = sorted(gen_all_sequences((1,0), len(hand)))\n answer_set = []\n for current_mask in mask:\n temp = []\n for indx in range(len(current_mask)):\n if current_mask[indx] == 1:\n temp.append(hand[indx]);\n answer_set.append(tuple(temp))\n return set(answer_set)",
"def create_ngram_set(input_list, ngram_value=2):\n return set(zip(*[input_list[i:] for i in range(ngram_value)]))",
"def make_pool(num_snp):\r\n\tc=0\r\n\tpool=[]\r\n\tfor i in xrange(0,num_snp+1):\r\n\t\ts=make_str(i, num_snp)\r\n\t\tpool+=map(\"\".join, itertools.permutations(s, num_snp))\r\n\treturn list(set(pool))"
]
| [
"0.61206543",
"0.60467297",
"0.5945952",
"0.5873081",
"0.5808728",
"0.58037686",
"0.5771521",
"0.5753275",
"0.56810796",
"0.56564367",
"0.5638397",
"0.56355566",
"0.5576355",
"0.5565222",
"0.5560541",
"0.5557775",
"0.55472076",
"0.55464345",
"0.55194867",
"0.5493678",
"0.5491451",
"0.5488751",
"0.54786146",
"0.54784715",
"0.54745394",
"0.54696864",
"0.5463394",
"0.5451693",
"0.5442912",
"0.54340506"
]
| 0.70151883 | 0 |
Check whether another set with a minimal number of banknotes exists | def is_exist_another_solution(self):
# prepare data
notes_quantity_min = sum(self.banknote_quantity)
banknote_quantity_max = [int(math.floor(self.money / self.banknotes[i])) for i in range(0, self.n)]
# model
mdl = Model(name='MinSetChecker')
# decision variables
mdl.banknote_quantity = {i: mdl.integer_var(lb=0, ub=banknote_quantity_max[i]) for i in range(0, self.n)}
# decision expressions
money_amount = mdl.sum(mdl.banknote_quantity[i] * self.banknotes[i] for i in range(0, self.n))
notes_quantity = mdl.sum(mdl.banknote_quantity[i] for i in range(0, self.n))
# constraints
mdl.add_constraint(money_amount == self.money)
mdl.add_constraint(notes_quantity == notes_quantity_min)
mdl.add_constraint(
mdl.sum(mdl.banknote_quantity[i] == self.banknote_quantity[i] for i in range(0, self.n)) != self.n
)
# solve model: return True if it exists, False if not
if not mdl.solve():
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_infrequent_subset(Ck, Lks):\n ksSubset = combinations(Ck, r=len(Ck)-1)\n for ks in ksSubset:\n if not Lks.issuperset([frozenset(ks)]):\n return True\n return False",
"def test_contains(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertTrue(2 in s)\n self.assertTrue(5 in s)",
"def contains_duplicate_full_slow_set(self, nums: List[int]) -> bool:\n return len(nums) != len(set(nums))",
"def fullIn(C, g):\n for set in C:\n if not fullCmpSets(set, g):\n return 1",
"def primary_set_length(self):\n return sum([1 for item in self._results if len(item.in_sets) == 1])",
"def match_sets(required, potential):\n\tif required.issubset(potential):\n\t\treturn len(potential) - len(required)\n\telse:\n\t\treturn -1",
"def tie_exists(self):\n return len(self.marks) == 9",
"def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet):\n _itemSet = set()\n localSet = defaultdict(int)\n for item in itemSet:\n for transaction in transactionList:\n if item.issubset(transaction):\n freqSet[item] += 1\n localSet[item] += 1\n for item, count in localSet.items():\n support = float(count)/len(transactionList)\n if support >= minSupport:\n _itemSet.add(item)\n\n return _itemSet",
"def numcheck(list1, list2):\r\n set1 = set(list1)\r\n set2 = set(list2)\r\n #set3 contains all items common to set1 and set2\r\n set3 = set1.intersection(set2)\r\n # return number of matching items\r\n return len(set3)",
"def _valid_sbu_combination(self, incidence, sbu_set):\n if incidence is None:\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n if set(sorted([i.degree for i in sbu_set])) == set(sorted(incidence)):\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n return False",
"def check_subset(P1,P2,k=-1):",
"def containsDuplicateSet(self, nums):\n distinct_nums = set()\n for number in nums:\n if number in distinct_nums:\n return True\n distinct_nums.add(number)\n return False",
"def checkStrictSuperset(a, n):\n for i in range(n):\n b = set(map(int, input().split()))\n if not a.issuperset(b):\n return False\n if not any(a.difference(b)):\n return False\n\n return True",
"def all_in_set(the_set, the_list):\n return True",
"def contains_duplicate_fast_set(self, nums: List[int]) -> bool:\n visited = set()\n for i in nums:\n if i in visited:\n return True\n visited.add(i)",
"def is_apriori(Ck_item, Lksub1):\n for item in Ck_item:\n sub_Ck = Ck_item - frozenset([item])\n if sub_Ck not in Lksub1:\n return False\n return True",
"def canBeWritten(n):\n for a in abundantNumbersList:\n if a >= n: break\n if (n - a) in abundantNumbersSet:\n return True\n return False",
"def find_nb(self, ox1, atoms, r1, r2):\n nb_check = [{}, \"\"]\n for k in atoms:\n dox = Vector.length(ox1[1][1] - atoms[k][1])\n if (k != ox1[0] and ox1[1][2] != atoms[k][2] and\n dox <= (r1 + r2)):\n nb_check[0][k] = atoms[k]\n if dox <= r2:\n nb_check[1] = ''.join([nb_check[1], atoms[k][0]])\n return nb_check",
"def check_poset(poset):\n if not set(poset._elements) == set(range(1, poset.cardinality() + 1)):\n return False\n\n for i in range(1, poset.cardinality() + 1):\n stop = False\n for j in range(i - 1, 0, -1):\n if not poset.le(j, i):\n stop = True # j does not precede i so no j'<j should\n elif stop:\n return False\n stop = False\n for j in range(i + 1, poset.cardinality() + 1):\n if not poset.le(j, i):\n stop = True # j does not precede i so no j'>j should\n elif stop:\n return False\n return True",
"def ValidateB(a_as_set, b, n):\n\n assert isinstance(a_as_set, set)\n\n assert len(b) == n, \"Incorrect length\"\n assert min(b) >= MIN_AB and max(b) <= MAX_AB, \"Values out of range\"\n b_as_set = set(b)\n assert len(b) == len(b_as_set), \"Non-unique elements\"\n assert len(b_as_set) == len(b_as_set - a_as_set), \"Elements from A appear in B\"\n assert (sum(a_as_set) + sum(b)) % 2 == 0, \"Odd sum\"",
"def supportCk(ckItem, transactions):\n count = 0\n for trans in transactions:\n if ckItem.issubset(frozenset(trans['itemId'])):\n count += 1\n return count",
"def dictsub_strict(subset,superset):\n\treturn all(item in superset.items() for item in subset.items())",
"def gen_all_holds(hand):\n ans_set = set([()])\n\n for dummy_idx in range(len(hand)):\n temp_set = set([()])\n for seq in ans_set:\n for item in hand:\n new_seq = list(seq)\n if hand.count(item) > new_seq.count(item):\n new_seq.append(item)\n new_seq = sorted(new_seq)\n temp_set.add(tuple(new_seq))\n ans_set = temp_set\n return ans_set",
"def is_apriori(Ck_item, Lksub1):\r\n for item in Ck_item:\r\n sub_Ck = Ck_item - frozenset([item])\r\n if sub_Ck not in Lksub1:\r\n return False\r\n return True",
"def get_biomarkes(abundant, xxx):\n cc = []\n bk = set()\n lvl = 0\n\n for _, t in abundant:\n cc.append(t.split('.'))\n\n while lvl < len(max(cc)):\n bk = set()\n\n for c in cc:\n if lvl < len(c):\n bk |= set([c[lvl]])\n\n if len(bk) >= xxx:\n break\n\n lvl += 1\n\n return bk",
"def testContains(self):\n\n N = randint(20,100)\n for i in xrange(N):\n self.s.insert(i,True)\n N-=(i in self.s)\n\n self.assertEqual(N,0)",
"def verify_entry(b):\n for f in minimalfields:\n if f not in b:\n return False\n return True",
"def is_subset(a, b):\n return any(map(lambda x: b[x:x + len(a)] == a, range(len(b) - len(a) + 1)))",
"def SetFunction():\r\n s2 = []\r\n s3 = []\r\n s4 = []\r\n s2 = { i for i in range(21) if i%2 == 0}\r\n s3 = { i for i in range(21) if i%3 == 0}\r\n s4 = { i for i in range(21) if i%4 == 0}\r\n s2 = set(s2)\r\n s3 = set(s3)\r\n s4 = set(s4)\r\n print s3.issubset(s2)\r\n print s4.issubset(s2)",
"def test_identify_duplicates_4(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)"
]
| [
"0.59751385",
"0.59414506",
"0.5938108",
"0.58064103",
"0.58059067",
"0.5717115",
"0.56468505",
"0.56367284",
"0.5627375",
"0.5582969",
"0.55733943",
"0.5560441",
"0.5472907",
"0.54553425",
"0.5448051",
"0.54383254",
"0.54306436",
"0.54231006",
"0.54181796",
"0.54130954",
"0.54100674",
"0.54066676",
"0.53999263",
"0.5387984",
"0.5385953",
"0.53848845",
"0.5379837",
"0.537742",
"0.53661996",
"0.5350749"
]
| 0.625606 | 0 |
From Sqlmap Returns (basic conversion) HTML unescaped value >>> htmlunescape('a&lt;b') 'a<b' | def htmlunescape(value):
retVal = value
if value and isinstance(value, str):
        codes = (("&lt;", '<'), ("&gt;", '>'), ("&quot;", '"'),
                 ("&nbsp;", ' '), ("&amp;", '&'), ("&apos;", "'"))
retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)
try:
retVal = re.sub(
r"&#x([^ ;]+);", lambda match: chr(int(match.group(1), 16)), retVal)
except ValueError:
pass
return retVal | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unescape(t):\r\n return (t\r\n .replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\r\n .replace(\"'\", \"´\").replace(\""\", '\"').replace(''',\"'\")\r\n )",
"def html_unescape(text):\n return html.unescape(text)",
"def xhtml_unescape(value):\r\n return re.sub(r\"&(#?)(\\w+?);\", _convert_entity, _unicode(value))",
"def htmldecode(s):\n\ts = s.replace(\"<\", \"<\")\n\ts = s.replace(\">\", \">\")\n\ts = s.replace(\""\", \"\\\"\")\n\ts = s.replace(\"'\",\"'\")\n\ts = s.replace(\"&\", \"&\")\n\treturn s",
"def unescape(s):\r\n s = s.replace(\"&\", \"&\")\r\n s = s.replace(\"<\", \"<\")\r\n s = s.replace(\">\", \">\")\r\n s = s.replace(\""\", '\"')\r\n s = s.replace(\"'\", \"'\")\r\n return s",
"def unescape(s):\n\n\tif s is None:\n\t\treturn \"\"\n\n\t# html entities\n\ts = s.replace(\" \", \"\\r\")\n\n\t# standard html\n\ts = s.replace(\"<\", \"<\")\n\ts = s.replace(\">\", \">\")\n\ts = s.replace(\"&\", \"&\") # this has to be last\n\n\treturn s",
"def HtmlUnescape(text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def __html_unescape(self, text):\n\n return re.sub(\"&(%s);\" % \"|\".join(name2codepoint),\n lambda m: unichr(name2codepoint[m.group(1)]),\n text)",
"def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&')\n\t\tunsafe = string.replace(unsafe, '<', '<')\n\t\treturn string.replace(unsafe, '>', '>')",
"def html_unescape(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return chr(int(text[3:-1], 16))\n else:\n return chr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = chr(html.entities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def unescape_html_entities(self, text):\n text = html.unescape(text)\n return text",
"def unescape_tweet(tweet):\r\n return html.unescape(tweet)",
"def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output",
"def unescape(s):\n return (\n s.replace(\"&\", \"&\")\n .replace(\"<\", \"<\")\n .replace(\">\", \">\")\n .replace(\""\", '\"')\n .replace(\"'\", \"'\")\n )",
"def unescape(text):\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == '&#':\r\n try:\r\n if text[:3] == '&#x':\r\n return unichr(int(text[3:-1], 16)).encode('utf-8')\r\n return unichr(int(text[2:-1])).encode('utf-8')\r\n except ValueError:\r\n logger.info('error de valor')\r\n\r\n else:\r\n try:\r\n import htmlentitydefs\r\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode('utf-8')\r\n except KeyError:\r\n logger.info('keyerror')\r\n except:\r\n pass\r\n\r\n return text\r\n\r\n return re.sub('&#?\\\\w+;', fixup, text)",
"def unicode2html(_unicrap):\n xlate = {u'\\u0022': '"',\nu'\\u0026': '&',\nu'\\u0027': ''',\nu'\\u003C': '<',\nu'\\u003E': '>',\nu'\\u00A0': ' ',\nu'\\u00A1': '¡',\nu'\\u00A2': '¢',\nu'\\u00A3': '£',\nu'\\u00A4': '¤',\nu'\\u00A5': '¥',\nu'\\u00A6': '¦',\nu'\\u00A7': '§',\nu'\\u00A8': '¨',\nu'\\u00A9': '©',\nu'\\u00AA': 'ª',\nu'\\u00AB': '«',\nu'\\u00AC': '¬',\nu'\\u00AD': '­',\nu'\\u00AE': '®',\nu'\\u00AF': '¯',\nu'\\u00B0': '°',\nu'\\u00B1': '±',\nu'\\u00B2': '²',\nu'\\u00B3': '³',\nu'\\u00B4': '´',\nu'\\u00B5': 'µ',\nu'\\u00B6': '¶',\nu'\\u00B7': '·',\nu'\\u00B8': '¸',\nu'\\u00B9': '¹',\nu'\\u00BA': 'º',\nu'\\u00BB': '»',\nu'\\u00BC': '¼',\nu'\\u00BD': '½',\nu'\\u00BE': '¾',\nu'\\u00BF': '¿',\nu'\\u00C0': 'À',\nu'\\u00C1': 'Á',\nu'\\u00C2': 'Â',\nu'\\u00C3': 'Ã',\nu'\\u00C4': 'Ä',\nu'\\u00C5': 'Å',\nu'\\u00C6': 'Æ',\nu'\\u00C7': 'Ç',\nu'\\u00C8': 'È',\nu'\\u00C9': 'É',\nu'\\u00CA': 'Ê',\nu'\\u00CB': 'Ë',\nu'\\u00CC': 'Ì',\nu'\\u00CD': 'Í',\nu'\\u00CE': 'Î',\nu'\\u00CF': 'Ï',\nu'\\u00D0': 'Ð',\nu'\\u00D1': 'Ñ',\nu'\\u00D2': 'Ò',\nu'\\u00D3': 'Ó',\nu'\\u00D4': 'Ô',\nu'\\u00D5': 'Õ',\nu'\\u00D6': 'Ö',\nu'\\u00D7': '×',\nu'\\u00D8': 'Ø',\nu'\\u00D9': 'Ù',\nu'\\u00DA': 'Ú',\nu'\\u00DB': 'Û',\nu'\\u00DC': 'Ü',\nu'\\u00DD': 'Ý',\nu'\\u00DE': 'Þ',\nu'\\u00DF': 'ß',\nu'\\u00E0': 'à',\nu'\\u00E1': 'á',\nu'\\u00E2': 'â',\nu'\\u00E3': 'ã',\nu'\\u00E4': 'ä',\nu'\\u00E5': 'å',\nu'\\u00E6': 'æ',\nu'\\u00E7': 'ç',\nu'\\u00E8': 'è',\nu'\\u00E9': 'é',\nu'\\u00EA': 'ê',\nu'\\u00EB': 'ë',\nu'\\u00EC': 'ì',\nu'\\u00ED': 'í',\nu'\\u00EE': 'î',\nu'\\u00EF': 'ï',\nu'\\u00F0': 'ð',\nu'\\u00F1': 'ñ',\nu'\\u00F2': 'ò',\nu'\\u00F3': 'ó',\nu'\\u00F4': 'ô',\nu'\\u00F5': 'õ',\nu'\\u00F6': 'ö',\nu'\\u00F7': '÷',\nu'\\u00F8': 'ø',\nu'\\u00F9': 'ù',\nu'\\u00FA': 'ú',\nu'\\u00FB': 'û',\nu'\\u00FC': 'ü',\nu'\\u00FD': 'ý',\nu'\\u00FE': 'þ',\nu'\\u00FF': 'ÿ',\nu'\\u0152': 'Œ',\nu'\\u0153': 'œ',\nu'\\u0160': 'Š',\nu'\\u0161': 'š',\nu'\\u0178': 'Ÿ',\nu'\\u0192': 'ƒ',\nu'\\u02C6': 'ˆ',\nu'\\u02DC': '˜',\nu'\\u0391': 'Α',\nu'\\u0392': 'Β',\nu'\\u0393': 'Γ',\nu'\\u0394': 'Δ',\nu'\\u0395': 'Ε',\nu'\\u0396': 'Ζ',\nu'\\u0397': 'Η',\nu'\\u0398': 'Θ',\nu'\\u0399': 'Ι',\nu'\\u039A': 'Κ',\nu'\\u039B': 'Λ',\nu'\\u039C': 'Μ',\nu'\\u039D': 'Ν',\nu'\\u039E': 'Ξ',\nu'\\u039F': 'Ο',\nu'\\u03A0': 'Π',\nu'\\u03A1': 'Ρ',\nu'\\u03A3': 'Σ',\nu'\\u03A4': 'Τ',\nu'\\u03A5': 'Υ',\nu'\\u03A6': 'Φ',\nu'\\u03A7': 'Χ',\nu'\\u03A8': 'Ψ',\nu'\\u03A9': 'Ω',\nu'\\u03B1': 'α',\nu'\\u03B2': 'β',\nu'\\u03B3': 'γ',\nu'\\u03B4': 'δ',\nu'\\u03B5': 'ε',\nu'\\u03B6': 'ζ',\nu'\\u03B7': 'η',\nu'\\u03B8': 'θ',\nu'\\u03B9': 'ι',\nu'\\u03BA': 'κ',\nu'\\u03BB': 'λ',\nu'\\u03BC': 'μ',\nu'\\u03BD': 'ν',\nu'\\u03BE': 'ξ',\nu'\\u03BF': 'ο',\nu'\\u03C0': 'π',\nu'\\u03C1': 'ρ',\nu'\\u03C2': 'ς',\nu'\\u03C3': 'σ',\nu'\\u03C4': 'τ',\nu'\\u03C5': 'υ',\nu'\\u03C6': 'φ',\nu'\\u03C7': 'χ',\nu'\\u03C8': 'ψ',\nu'\\u03C9': 'ω',\nu'\\u03D1': 'ϑ',\nu'\\u03D2': 'ϒ',\nu'\\u03D6': 'ϖ',\nu'\\u2002': ' ',\nu'\\u2003': ' ',\nu'\\u2009': ' ',\nu'\\u200C': '‌',\nu'\\u200D': '‍',\nu'\\u200E': '‎',\nu'\\u200F': '‏',\nu'\\u2013': '–',\nu'\\u2014': '—',\nu'\\u2018': '‘',\nu'\\u2019': '’',\nu'\\u201A': '‚',\nu'\\u201C': '“',\nu'\\u201D': '”',\nu'\\u201E': '„',\nu'\\u2020': '†',\nu'\\u2021': '‡',\nu'\\u2022': '•',\nu'\\u2026': '…',\nu'\\u2030': '‰',\nu'\\u2032': '′',\nu'\\u2033': '″',\nu'\\u2039': '‹',\nu'\\u203A': '›',\nu'\\u203E': '‾',\nu'\\u2044': '⁄',\nu'\\u20AC': '€',\nu'\\u2111': 'ℑ',\nu'\\u2118': '℘',\nu'\\u211C': 'ℜ',\nu'\\u2122': '™',\nu'\\u2135': 'ℵ',\nu'\\u2190': '←',\nu'\\u2191': 
'↑',\nu'\\u2192': '→',\nu'\\u2193': '↓',\nu'\\u2194': '↔',\nu'\\u21B5': '↵',\nu'\\u21D0': '⇐',\nu'\\u21D1': '⇑',\nu'\\u21D2': '⇒',\nu'\\u21D3': '⇓',\nu'\\u21D4': '⇔',\nu'\\u2200': '∀',\nu'\\u2202': '∂',\nu'\\u2203': '∃',\nu'\\u2205': '∅',\nu'\\u2207': '∇',\nu'\\u2208': '∈',\nu'\\u2209': '∉',\nu'\\u220B': '∋',\nu'\\u220F': '∏',\nu'\\u2211': '∑',\nu'\\u2212': '−',\nu'\\u2217': '∗',\nu'\\u221A': '√',\nu'\\u221D': '∝',\nu'\\u221E': '∞',\nu'\\u2220': '∠',\nu'\\u2227': '∧',\nu'\\u2228': '∨',\nu'\\u2229': '∩',\nu'\\u222A': '∪',\nu'\\u222B': '∫',\nu'\\u2234': '∴',\nu'\\u223C': '∼',\nu'\\u2245': '≅',\nu'\\u2248': '≈',\nu'\\u2260': '≠',\nu'\\u2261': '≡',\nu'\\u2264': '≤',\nu'\\u2265': '≥',\nu'\\u2282': '⊂',\nu'\\u2283': '⊃',\nu'\\u2284': '⊄',\nu'\\u2286': '⊆',\nu'\\u2287': '⊇',\nu'\\u2295': '⊕',\nu'\\u2297': '⊗',\nu'\\u22A5': '⊥',\nu'\\u22C5': '⋅',\nu'\\u2308': '⌈',\nu'\\u2309': '⌉',\nu'\\u230A': '⌊',\nu'\\u230B': '⌋',\nu'\\u27E8': '⟨',\nu'\\u27E9': '⟩',\nu'\\u25CA': '◊',\nu'\\u2660': '♠',\nu'\\u2663': '♣',\nu'\\u2665': '♥',\nu'\\u2666': '♦'}\n\n strOut = \"\"\n if _unicrap is not None:\n for i in _unicrap:\n if i in xlate:\n strOut += xlate[i]\n else:\n strOut += str(i)\n return strOut",
"def convertHTML(self, text):\n return text.replace(''', \"'\")",
"def unescape_html(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unicode_char(int(text[3:-1], 16))\n else:\n return unicode_char(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unicode_char(htmlentities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n\n return re.sub(r\"&#?\\w+;\", fixup, text)",
"def _decode_html_entities(text: str) -> str:\n return html.unescape(text)",
"def html_decode(row_, text_column):\n htmlCodes = (\n (\"'\", '''),\n ('\"', '"'),\n ('>', '>'),\n ('<', '<'),\n ('&', '&')\n )\n text = row_[text_column]\n for code in htmlCodes:\n text = text.replace(code[1], code[0])\n return text",
"def _escape(html):\n return encoding.force_unicode(html).replace('&', '&').replace('<', '<').replace('>', '>').replace('\"', '"').replace(\"'\", ''')",
"def html_decode(s):\n htmlCodes = (\n (\"'\", '''),\n ('\"', '"'),\n ('>', '>'),\n ('<', '<'),\n ('&', '&')\n )\n for code in htmlCodes:\n s = s.replace(code[1], code[0])\n return s",
"def html_decode(s):\n html_codes = (\n (\"'\", '''),\n ('\"', '"'),\n ('>', '>'),\n ('<', '<'),\n ('&', '&')\n )\n for code in html_codes:\n s = s.replace(code[1], code[0])\n return s",
"def escape(cls, html):\n return (\"%s\" % (html)).replace('&', '&').replace('<', '<').replace('>', '>').replace('\"', '"').replace(\"'\", ''')",
"def test_unescape__great_than(self) -> None:\n escaped: str = \">\"\n unescaped: str = \">\"\n\n self.assertEqual(first=unescape(value=escaped), second=unescaped)",
"def html_escape(u):\n u = _DEFAULT_TAG_ESCAPE(u)\n return u.replace(\"'\", ''')",
"def html_decode(s):\n htmlCodes = (\n (\"'\", '''),\n ('\"', '"'),\n ('>', '>'),\n ('<', '<'),\n ('&', '&')\n )\n for code in htmlCodes:\n s = s.replace(code[1], code[0])\n return s",
"def html_decode(s):\n htmlCodes = (\n (\"'\", '''),\n ('\"', '"'),\n ('>', '>'),\n ('<', '<'),\n ('&', '&')\n )\n for code in htmlCodes:\n s = s.replace(code[1], code[0])\n return s",
"def html_decode(s):\n htmlCodes = (\n (\"'\", '''),\n ('\"', '"'),\n ('>', '>'),\n ('<', '<'),\n ('&', '&')\n )\n for code in htmlCodes:\n s = s.replace(code[1], code[0])\n return s",
"def decode_high(self, text):\n h = HTMLParser()\n text = '&#%s;' % text\n return h.unescape(text)"
]
| [
"0.70825934",
"0.7071044",
"0.69811505",
"0.68802",
"0.6750165",
"0.6678649",
"0.6585287",
"0.6580559",
"0.6545425",
"0.6541923",
"0.653157",
"0.6513827",
"0.6476411",
"0.64493966",
"0.6419319",
"0.63503563",
"0.635006",
"0.63456255",
"0.631522",
"0.6315084",
"0.63005626",
"0.6296756",
"0.6291327",
"0.6286913",
"0.62631017",
"0.6259212",
"0.6252681",
"0.6252681",
"0.6252681",
"0.6220509"
]
| 0.7275439 | 0 |
From Sqlmap Returns filtered page content without script, style and/or comments or all HTML tags >>> getFilteredPageContent(u'<html><title>foobar</title><body>test</body></html>') u'foobar test' | def get_filtered_page_content(page, onlyText=True, split=" "):
retVal = page
# only if the page's charset has been successfully identified
if isinstance(page, str):
retVal = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>%s" %
(r"|<[^>]+>|\t|\n|\r" if onlyText else ""), split, page)
retVal = re.sub(r"%s{2,}" % split, split, retVal)
retVal = htmlunescape(retVal.strip().strip(split))
return retVal | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_page(page):\n content = utils.any2unicode(page, 'utf8').strip()\n content = re.sub(r\"[^a-zA-Z]\", \" \", content)\n \n return content",
"def fix_page_content(filename, content):\n return JournalStaticPage(filename, content).body",
"def get_text_from_page(page_content: str) -> str:\n\n soup = BeautifulSoup(page_content, 'lxml')\n\n for tag in soup.findAll([\"script\", \"style\"]):\n tag.extract()\n\n lines = (line.strip() for line in soup.get_text().splitlines())\n\n return '\\n'.join(line for line in lines if line) # remove empty lines",
"def clean_content(self) -> str:",
"def get_file_contents(self):\n with open(self.sql_file, 'r') as sql:\n text = sql.read()\n # text = text.replace('\\n', '\\n\\n')\n # text=sql.read()\n # TODO: fix some text replacement issues here\n # https://github.com/andialbrecht/sqlparse/issues/313\n return self.filter_text(text)",
"def extract_page_text(html):\n soup = bs4.BeautifulSoup(html)\n\n # Remove <script/> and <style/> content\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n text = soup.get_text()\n\n # Strip leading and trailing whitespace from each line, then join all the\n # non-empty lines together.\n lines = (line.strip() for line in text.splitlines())\n text = '\\n'.join(line for line in lines if line)\n\n return text",
"def get_massage():\n # Javascript code in ths page generates HTML markup\n # that isn't parsed correctly by BeautifulSoup.\n # To avoid this problem, all document.write fragments are removed\n my_massage = copy(BeautifulSoup.MARKUP_MASSAGE)\n my_massage.append((re.compile(u\"document.write(.+);\"), lambda match: \"\"))\n my_massage.append((re.compile(u'alt=\".+\">'), lambda match: \">\"))\n return my_massage",
"def _filter(self, text, context, encoding):\n\n content = []\n soup = bs4.BeautifulSoup(text, self.parser)\n soup = self.get_sub_node(soup)\n blocks, attributes, comments = self.to_text(soup)\n if self.comments:\n for c, desc in comments:\n content.append(filters.SourceText(c, context + ': ' + desc, encoding, self.type + 'comment'))\n if self.attributes:\n for a, desc in attributes:\n content.append(filters.SourceText(a, context + ': ' + desc, encoding, self.type + 'attribute'))\n for b, desc in blocks:\n content.append(filters.SourceText(b, context + ': ' + desc, encoding, self.type + 'content'))\n return content",
"def test_filter_content(self):\n bs = self.get_soup(baseUrl + 'food/filter/')\n self.assertOneExists(bs, \"#page_filter\")",
"def content_pages(self):\n pages = []\n for block in self.contents: # pylint: disable=not-an-iterable\n if block.value:\n pages.append(block.value.specific)\n return pages",
"def get_page_source(self):\n return self.driver.page_source.replace('xmlns=\"http://www.w3.org/1999/xhtml\"', '')",
"def sanitize_content(content):\r\n\r\n # stripping all HTML TAGS\r\n content = BeautifulSoup(content).find(\"div\", {\"id\": \"content\", \"role\": \"main\"}).get_text()\r\n\r\n if CASE_FOLDING:\r\n # case folding\r\n content = content.lower()\r\n if PUNCTUATION_REMOVAL:\r\n\r\n # Removing punctuation.\r\n content = re.sub(PUNCTUATIONS_REGEX_COMMA_1, \"\", content, 0)\r\n content = re.sub(PUNCTUATIONS_REGEX_COMMA, \"\", content, 0)\r\n content = re.sub(PUNCTUATIONS_REGEX, \"\", content, 0).replace(\"'\", \"\");\r\n\r\n return re.sub(r\"([\\n\\t])+\", \" \", content, 0)",
"def get_visible_text(_text):\n #text = _text.decode('utf-8', 'ignore').lower() # Don't get hung up on unicode chars in foreign languages\n text = _text.lower()\n text = re.compile(r'<').sub(' <',text) # These two lines keep words from getting smushed\n text = re.compile(r'>').sub('> ',text) # together when two they are only separated by tags.\n soup = BeautifulSoup(text, 'lxml')\n\n # decompose removes the tag and it's text content completely\n for s in soup(['script','code','style']):\n s.decompose()\n\n text = soup.get_text()\n # compress space to reduce footprint and fit on one line so it neatly fits in csv file\n text = re.compile(r'\\s+').sub(' ',text).strip()\n return text",
"def ProcessPage (self, page):\n\t\tcontent = BeautifulSoup (open(page), 'lxml')\n\n\t\t# Find and replace script tags with local version\n\t\tfor script in content.find_all ('script'):\n\t\t\tif script.get ('src'):\n\t\t\t\tscript ['src'] = self._register (script.get ('src'))\n\n\t\tfor link in content.find_all ('link'):\n\t\t\tif link.get ('href'):\n\t\t\t\tlink ['href'] = self._register (link.get ('href'))\n\n\t\treturn content.prettify().encode('utf-8')",
"def extractContent(content):\n soup = BeautifulSoup(content, 'html.parser')\n return soup.get_text()",
"def get_content(self):\r\n if self.typ == 'fk':\r\n if not self.content:\r\n return None\r\n\r\n options = stemplates.get_field(self.page.template, self.title)[2]\r\n\r\n app_label = options.get('app', 'stoat')\r\n model_name = options.get('model', 'Page')\r\n model = get_model(app_label, model_name)\r\n\r\n try:\r\n return model.objects.get(id=self.content)\r\n except model.DoesNotExist:\r\n return None\r\n elif self.typ == 'bool':\r\n return True if int(self.content) else False\r\n else:\r\n return self.content",
"def __filter( self, text ):\n return text",
"def contentRaw(request):\n paste = Paste.get(request.matchdict['idContent'])\n # TODO type/mime\n return paste.content",
"def clean_content(self):\n content = self.cleaned_data.get('content', None)\n if '<script' in content:\n raise forms.ValidationError(\"These tags are not allowed\")\n return content",
"def collect_content_old_0(web_page: req.Response\n ) -> Union[article_content, None]:\n try:\n this_soup = BeautifulSoup(web_page.text, 'lxml')\n article = this_soup.find('table', {'class': 'storycontent'})\n head = article.find('h1').text\n head = tidy_string(head)\n body_area = article.find('td', {'class': 'storybody'})\n body = collect_body(body_area)\n return article_content(head, body)\n except (AttributeError, IndexError):\n return collect_content_old_1(web_page)",
"def extract_page_text(self, bs_object):\n\n # kill all script and style elements\n for script in bs_object([\"script\", \"style\", \"head\"]):\n script.extract() # rip it out\n\n # get text\n text = bs_object.get_text()\n\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text_list_gen = (chunk for chunk in chunks if chunk)\n text_list = list(text_list_gen)\n # print \"TEXT LIST >>>\\n\", text_list\n \n return text_list",
"def processed_content(miscobj):\n\n content = load_html_content(miscobj)\n if content:\n return process_highlighting(content)\n else:\n return content",
"def contentextract(text):\n stopword=stopwords.words('english')\n punctuation=['.','?','!',',',';',\"''\",'\"\"',\"'\",\"--\",\"``\",\"|\",\"<\",\">\",\"...\",\"......\",\"'s\",':','[',']',\n '(',')','#','*','$','%','@','^','-','+','=','/','{','}','\\\\','\"','&']\n content=[w for w in text if w.lower() not in stopword]\n content=[w for w in content if w not in punctuation]\n return content",
"def get_webpage_content(url):\n request = urllib2.Request(url)\n page = urllib2.urlopen(request)\n soup = BeautifulSoup(page.read())\n return unicode(soup)",
"def striphtml(content):\n\tif not isinstance(content, basestring):\n\t\treturn u''\n\tcontent = re_script.sub(u'',content)\n\tdoc = html.fragment_fromstring(content, create_parent=True)\n\tclean.clean_html(doc)\n\treturn unicode(re_nl.sub(u'', doc.text_content()))",
"def getpage(self, page: str, limit: int = 5, lang: str = \"en\") -> str:\n\n tags = BeautifulSoup(\n requests.get(self.url.format(lang, page)).text, \"lxml\"\n ).select(self.selector)\n res = \"\"\n for i in range(min(limit, len(tags))):\n res += tags[i].text + \"\\n\\n\"\n return res",
"def content(self, **args):\n return self.pageConfig['content'] % self.pageConfig",
"def _get_main_page(website):\n return BeautifulSoup(requests.get(website).content, 'html.parser')",
"def get_article(self, url):\n response = self.opener.open(url)\n doc = lxml.html.document_fromstring(response.read())\n content = doc.find_class(\"post\")[0] # Select content by CSS class \n cleaned_content = clean_html(content)\n str_cleaned_content = lxml.html.tostring(cleaned_content)\n # self.__save_article_to_file(str_cleaned_content)\n return str_cleaned_content",
"def clean_content(sender, instance, **kwargs):\r\n if kwargs.get('raw'):\r\n # We're in loaddata (or something similar).\r\n return\r\n\r\n page = instance\r\n fields = dict(stemplates.get_fields_bare(page.template))\r\n current_contents = list(page.pagecontent_set.all())\r\n\r\n for content in current_contents:\r\n if content.title not in fields or fields[content.title] != content.typ:\r\n content.delete()\r\n\r\n existing_contents = dict([(pc.title, pc.typ)\r\n for pc in page.pagecontent_set.all()])\r\n\r\n for title, typ in fields.items():\r\n if title not in existing_contents or existing_contents[title] != typ:\r\n PageContent(page=page, title=title, typ=typ, content='').save()"
]
| [
"0.61187595",
"0.6044113",
"0.5769857",
"0.54977685",
"0.5493789",
"0.54862213",
"0.5450206",
"0.5276105",
"0.5263728",
"0.5217712",
"0.5206948",
"0.51723135",
"0.5163936",
"0.5148837",
"0.51385295",
"0.51373094",
"0.510455",
"0.5087979",
"0.50860703",
"0.5041471",
"0.5037026",
"0.50232637",
"0.5018846",
"0.5002505",
"0.4996583",
"0.49891666",
"0.49883646",
"0.49712253",
"0.493636",
"0.49310052"
]
| 0.6476379 | 0 |
Check that an event admin for a different event can't get a tag. | def test_get_event_admin_correct_event(self):
self.seed_static_data()
params = {'id': 1, 'event_id': 1}
response = self.app.get('/api/v1/tag', headers=self.user2_headers, data=params)
self.assertEqual(response.status_code, 403) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_post_event_admin(self):\n self.seed_static_data()\n params = {\n 'event_id': 2,\n 'tag_type': 'RESPONSE',\n 'name': {\n 'en': 'English Tag 2 Event 2',\n 'fr': 'French Tag 2 Event 2',\n },\n 'description': {\n 'en': 'English Tag 2 Event 2 Description',\n 'fr': 'French Tag 2 Event 2 Description',\n }\n }\n # User 1 is not an event admin for event 2\n response = self.app.post(\n '/api/v1/tag', \n headers=self.user1_headers, \n data=json.dumps(params), \n content_type='application/json')\n self.assertEqual(response.status_code, 403)",
"def test_get_event_admin(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user3_headers, data=params)\n self.assertEqual(response.status_code, 403)",
"def tag_check():\n\n async def check(ctx):\n if ctx.author.id == ctx.bot.owner_id:\n return True\n\n is_allowed = (\n ctx.author.guild_permissions.administrator\n or await ctx.bot.get_guild_setting(ctx.guild.id, \"tag_creation_allowed\")\n )\n\n if is_allowed:\n return True\n else:\n raise exceptions.DemocracivBotException(\n message=f\"{config.NO} Only Administrators can add or remove tags on this server.\"\n \" Administrators can change this setting in \"\n f\"`{config.BOT_PREFIX}server tagcreation`.\"\n )\n\n return commands.check(check)",
"def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)",
"def check_event_update_safe(old_event: event_models.Event,\n new_event: event_models.Event) -> bool:\n old_event.status = event_models.EventStatusEnum.cancelled\n return old_event == new_event",
"def test_unpublish_event_as_publisher_when_event_does_not_exist(self):\n e = self.make_event()\n e.published=True\n e.save()\n other_id = ObjectId()\n resp = self.request_with_role('/admin/events/unpublish/%s' % other_id, role='publisher',\n method='POST', follow_redirects=True)\n self.assertIn('Invalid event id', resp.data)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(Event.objects(published=False).count(), 0)",
"def test_tags_tag_search_invalid_tag(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n global NON_EXISTENT_TAG\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(NON_EXISTENT_TAG)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code\" % (NON_EXISTENT_TAG) \\\n + \"on the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))",
"def test_forbidden_non_taggers(self):\n phenotype_taggers = Group.objects.get(name='phenotype_taggers')\n self.user.groups.remove(phenotype_taggers)\n response = self.client.get(self.get_url(self.trait.pk))\n self.assertEqual(response.status_code, 403)",
"def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})",
"def check_tags(self):\n if(self.tags is None or not self.tags.get('subscriber', False)):\n self.filters |= Filters.NonSubs\n\n if(self.tags is None or not self.tags.get('user-type', 0) > 0):\n self.filters |= Filters.NonMods",
"def tag_event(request, tag_id, event_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n event = Event.objects.get(id=event_id)\n except:\n error += 'Couldn\\'t retrieve event ' + event_id + '.'\n\n if tag in event.tags.all():\n error += 'This event has already been tagged.'\n\n if not error:\n try:\n event.tags.add(tag)\n message += 'Tagged event ' + str(event.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag event.'\n return render_to_response('feedback/taglist.html', {\n 'object': event, 'type': 'event', 'error': error,\n }, context_instance=RequestContext(request))",
"def raise_for_disabled(self, disabled_tags: Collection[str]):\n tok = self.token()\n if tok.type == TOKEN_TAG and tok.value in disabled_tags:\n raise DisabledTagError(\n f\"{tok.value} usage is not allowed in this context\",\n linenum=tok.linenum,\n )",
"def untag_event(request, tag_id, event_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n event = Event.objects.get(id=event_id)\n except:\n error += 'Couldn\\'t retrieve event ' + event_id + '.'\n\n if tag not in event.tags.all():\n error += 'This event isn\\'t tagged with this tag.'\n\n if not error:\n try:\n event.tags.remove(tag)\n except:\n error += 'Couldn\\'t remove tag from comment.'\n return render_to_response('feedback/taglist.html', {\n 'object': event, 'type': 'event', 'error': error,\n }, context_instance=RequestContext(request))",
"def __lt__(self, event):\n\t\treturn True",
"def cant_view_event(self, event, request):\n return render(request, self.template_name, {\n 'error': \"Not a public event\",\n 'event': None,\n })",
"def too_few_tags(request):\n if request.method == 'POST':\n form = forms.EventEditTagsForm(request.POST)\n if form.is_valid():\n event = get_object_or_404(Event, id=form.cleaned_data['event_id'])\n assert request.user.is_active\n if is_contributor(request.user):\n assert event.privacy != Event.PRIVACY_COMPANY\n\n if not EventRevision.objects.filter(event=event).count():\n EventRevision.objects.create_from_event(event)\n\n value = set([\n x.strip()\n for x in form.cleaned_data['tags'].split(',')\n if x.strip()\n ])\n prev = set([x.name for x in event.tags.all()])\n for tag in prev - value:\n tag_obj = Tag.objects.get(name=tag)\n event.tags.remove(tag_obj)\n added = []\n for tag in value - prev:\n try:\n tag_obj = Tag.objects.get(name__iexact=tag)\n except Tag.DoesNotExist:\n tag_obj = Tag.objects.create(name=tag)\n except Tag.MultipleObjectsReturned:\n tag_obj, = Tag.objects.filter(name__iexact=tag)[:1]\n event.tags.add(tag_obj)\n added.append(tag_obj)\n EventRevision.objects.create_from_event(\n event,\n user=request.user\n )\n messages.success(\n request,\n 'Thank you for adding: %s' % ', '.join(x.name for x in added)\n )\n return redirect('main:too_few_tags')\n\n zero_tags = (\n Event.objects.scheduled_or_processing()\n .exclude(id__in=Event.tags.through.objects.values('event_id'))\n )\n few_tags = (\n Event.tags.through.objects\n .filter(event__status=Event.STATUS_SCHEDULED)\n .values('event_id')\n .annotate(count=Count('event'))\n .filter(count__lt=2)\n )\n\n assert request.user.is_active\n if is_contributor(request.user):\n few_tags = few_tags.exclude(event__privacy=Event.PRIVACY_COMPANY)\n zero_tags = zero_tags.exclude(privacy=Event.PRIVACY_COMPANY)\n\n count = zero_tags.count()\n count += few_tags.count()\n try:\n event, = zero_tags.order_by('?')[:1]\n except ValueError:\n event = None\n if few_tags.count():\n try:\n first, = few_tags.order_by('?')[:1]\n event = Event.objects.get(id=first['event_id'])\n except ValueError:\n # there's nothing!\n event = None\n assert count == 0\n\n context = {\n 'count': count,\n 'event': event,\n }\n if event:\n initial = {\n 'tags': ', '.join(x.name for x in event.tags.all()),\n 'event_id': event.id,\n }\n context['form'] = forms.EventEditTagsForm(\n initial=initial,\n instance=event\n )\n\n return render(request, 'main/too_few_tags.html', context)",
"def test_publish_event_as_publisher_when_event_does_not_exist(self):\n e = self.make_event()\n e.save()\n other_id = ObjectId()\n resp = self.request_with_role('/admin/events/publish/%s' % other_id, role='publisher',\n method='POST', follow_redirects=True)\n self.assertIn('Invalid event id', resp.data)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(Event.objects(published=True).count(), 0)",
"def test_atevent_disabled(self):\n from plone.api.exc import InvalidParameterError\n with self.assertRaises(InvalidParameterError):\n self.event = api.content.create(\n type='Event',\n title=u'Invalid event',\n container=self.lc\n )",
"def __ne__(self, other):\n if not isinstance(other, SafetyEvent):\n return True\n\n return self.to_dict() != other.to_dict()",
"def _verify_tags(self):\n for tag in self.tags:\n if tag.lower() in VASP_TAG_LIST:\n continue\n else:\n print((\"Warning: unknown INCAR tag '\" + tag + \"' with value '\" + str(self.tags[tag]) + \"'\"))",
"def cant_find_event(self, request, slug):\n return render(request, self.template_name, {\n 'error': \"Event not found\",\n 'event': None\n })",
"def _entry_tag_is_valid(entry: _LexiconEntry) -> None:\n tag = _tag_of(entry)\n\n if tag not in tags.VALID_TAGS:\n raise InvalidLexiconEntryError(\n \"Entry 'tag' field has invalid value. It can only be one of the valid\"\n \" tags that are defined in 'morphotactics_compiler/tags.py'.\")",
"def bad_events(self) -> Optional[pulumi.Input['ServiceLevelEventsBadEventsArgs']]:\n return pulumi.get(self, \"bad_events\")",
"async def try_get_tag(self, ctx: Context) -> None:\n tags_cog = self.bot.get_cog(\"Tags\")\n if not tags_cog:\n log.debug(\"Not attempting to parse message as a tag as could not find `Tags` cog.\")\n return\n tags_get_command = tags_cog.get_command_ctx\n\n maybe_tag_name = ctx.invoked_with\n if not maybe_tag_name or not isinstance(ctx.author, Member):\n return\n\n ctx.invoked_from_error_handler = True\n try:\n if not await self.bot.can_run(ctx):\n log.debug(\"Cancelling attempt to fall back to a tag due to failed checks.\")\n return\n\n if await tags_get_command(ctx, maybe_tag_name):\n return\n\n if not any(role.id in MODERATION_ROLES for role in ctx.author.roles):\n await self.send_command_suggestion(ctx, maybe_tag_name)\n except Exception as err:\n log.debug(\"Error while attempting to invoke tag fallback.\")\n if isinstance(err, errors.CommandError):\n await self.on_command_error(ctx, err)\n else:\n await self.on_command_error(ctx, errors.CommandInvokeError(err))",
"def cant_find_event(self, request, slug):\n return staticpage(request, slug)",
"def __ne__(self, other):\n if not isinstance(other, EventType):\n return True\n\n return self.to_dict() != other.to_dict()",
"def test_no_tagging_button(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertNotContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))\n self.assertFalse(context['show_tag_button'])",
"def __ne__(self, other: Event) -> bool:\n return not self.__eq__(other)",
"def __ge__(self, other: Event) -> bool:\n return not self.__lt__(other)",
"def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)"
]
| [
"0.65669644",
"0.6325694",
"0.5844845",
"0.5678775",
"0.55033827",
"0.5427271",
"0.54185647",
"0.536466",
"0.5351754",
"0.5336185",
"0.52735984",
"0.5238865",
"0.5234522",
"0.5223319",
"0.52078164",
"0.52011263",
"0.5167887",
"0.51529324",
"0.51504195",
"0.51264316",
"0.51249003",
"0.51063293",
"0.5105492",
"0.50859755",
"0.50662166",
"0.50614476",
"0.5046017",
"0.5044917",
"0.49996498",
"0.49994025"
]
| 0.6942056 | 0 |
Parse file into list of strings, some of which are hoped to have valid XML for a docket. Returns list of all 'good' dockets, where good means the string is well-formed XML. If any string in list has '<docket>' or '</docket>' in it, entire file will be rejected. | def parseBadFileAsString(self,myfile):
def removeTopDocketTags(string):
return re.sub(r'<dockets>\n<docket>','',string)
def removeBottomDocketTags(string):
return re.sub(r'</docket>\n</dockets>$','',string)
def makeListOfDocketsAsText(string):
text = removeTopDocketTags(string)
text = removeBottomDocketTags(text)
return re.split(r'</docket>\n<docket>',text)
def splitFileIntoListOfStringsOrThrowError(fileObject,myfile):
docketListAsText = makeListOfDocketsAsText(fileObject.read())
regex = re.compile('</*docket>')
badDockets = []
counter = 0
for d in docketListAsText:
counter += 1
for m in [regex.search(d)]:
if m:
self.logger.error("****Docket # %s has %s in it:\n\t%s****" % (counter, m.group(0), d))
badDockets.append(m.group(0))
#badDockets = [m.group(0) for d in docketListAsText for m in [regex.search(d)] if m]
if badDockets == []:
return docketListAsText
else:
self.logger.info(
"There were %s dockets with '<docket>' or '</docket>' inside the docket-specific string.\n\t\t=>This file will have no output.",
len(badDockets)
)
raise JBGSyntaxError('JBGSyntaxError')
def initializeRoot():
return ET.Element("root")
def initializeLists():
self.listOfGoodDockets = []
self.listOfBadDockets = []
self.listOfBadDocketNumbers = []
#########################################################
##### MAIN PROCEDURAL BLOCK OF parseBadFileAsString #####
#########################################################
with open(myfile) as f:
initializeLists()
root = initializeRoot()
try:
docketListAsText = splitFileIntoListOfStringsOrThrowError(f,myfile)
for d in docketListAsText:
self.allDocketsCounter += 1
d = d.strip()
try:
tree = ET.fromstring('<docket>%s</docket>' % d)
self.goodDocketsCounter += 1 #has to be after parse or we will count bad dockets here as well
root.append(tree)
self.listOfGoodDockets.append(tree)
except ET.XMLSyntaxError:
self.badDocketsCounter += 1
self.logger.info(
" --> XMLSyntaxError for docket # %s", self.allDocketsCounter
)
self.listOfBadDocketNumbers.append(self.allDocketsCounter)
self.listOfBadDockets.append(d)
except JBGSyntaxError:
pass
self.logger.info("Total number of all dockets in this file was %s", self.allDocketsCounter)
self.logger.info("Total number of good dockets in this file was %s", self.goodDocketsCounter)
self.logger.info("Total number of bad dockets in this file was %s", self.badDocketsCounter)
self.logger.info(
"List of bad dockets' text starts on next line:\n" +
'\n'.join(["Next bad docket is number %s:\n\t%s" % (self.listOfBadDocketNumbers[index], badDocket) for index,badDocket in enumerate(self.listOfBadDockets)])
# '\n'.join(['Next bad docket is number ' + self.listOfBadDocketNumbers[index] + ':\n\t' + badDocket for index,badDocket in self.listOfBadDockets])
)
return ET.ElementTree(root) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def threatCheck(file):\n\tglobal threatList\n\tthreatList = []\n\twith open(file) as f:\n\t\tfor line in f.readlines():\n\t\t\tentry = line.split(',')\n\t\t\tif entry[3] == 'Vulnerable' or entry[3] == 'Endangered' or entry[3] == 'Critically Endangered':\n\t\t\t\tthreatList.append(entry[2])\n\treturn len(threatList)",
"def recipe12_8():\n from xml.parsers.xmlproc import utils, xmlval, xmldtd\n def validate_xml_file(xml_filename, app=None, dtd_filename=None):\n # build validating parser object with appropriate error handler\n parser=xmlval.Validator()\n parser.set_error_handler(utils.ErrorPrinter(parser))\n if dtd_filename is None:\n # DTD fiel specified, laod and set it as the DTD to use\n dtd=xmldtd.load_dtd(dtd_filename)\n parser.val.dtd = parser.dtd = parser.ent = dtd\n if app is not None:\n # Application processing requested, set application object\n parser.set_application(app)\n # everything being set correctly, finally perform the parsing\n parser.parse_resource(xml_filename) \n # if XML data is in a string s, use instead\n # parser.feed(s)\n # parser.close(s)",
"def ValidateXML(file):\n #TODO validate against DTD\n re_escape_quotes=re.compile('\"')\n s=re_escape_quotes.sub('\\\\\"', f)\n return getstatusoutput(\"echo \\\"%s\\\" | xmllint --valid - 2>&1 > /dev/null\" % s)[0]",
"def correct(filename):\n with open(filename) as f:\n html = f.read()\n lines = html.split('\\n')\n\n for line in lines:\n l = re.findall(r'<[^>]+>', line)\n if len(l) == 1: continue # deal with lines containing only 1 tag\n correct = check_tags(l)\n if not correct:\n print \"Incorrect:\", line",
"def __read_white_list(file_path):\n assert os.path.exists(file_path), 'FILE \"{}\" NOT FOUND,' \\\n ' PLEASE GIVE THE CORRECT FILE PATH'.format(file_path)\n white_list = []\n my_file = open(file_path, 'r')\n for line in my_file.readlines():\n white_list.append(''.join(line.split('\\n')))\n return white_list",
"def get_bad_word_list(self):\n self.bad_words = self.read_word_file(self.bad_word_file)",
"def validate(file_in) :\n\tname = str(file_in.name)\n\tif name[-4:] != \".xml\" and name[-4:] != \".XML\" :\n\t\treturn False\n\txsd = open('wcdb/WorldCrises.xsd.xml', 'r')\n\txmlFile = open('wcdb/temp.xml', 'w')\n\txmlFile.write(file_in.read())\n\txmlFile = open('wcdb/temp.xml', 'r')\n\ttry:\n\t\tpsvi = pyxsval.parseAndValidate(\"wcdb/temp.xml\",\n\t\t\t\"wcdb/WorldCrises.xsd.xml\", xmlIfClass=pyxsval.XMLIF_ELEMENTTREE)\n\t\ttree = psvi.getTree()\n\texcept pyxsval.XsvalError, e:\n\t\treturn 'Validation aborted. ' + str(e)\n\texcept GenXmlIfError, e:\n\t\treturn 'Parsing aborted. ' + str(e)\n\texcept Exception as e:\n\t\t# catch all\n\t\treturn 'Exception. ' + str(e)\n\t#handle invalid case\n\treturn tree",
"def from_xml(xml_file):\r\n\tevents = []\r\n\txmldoc = minidom.parse(xml_file)\r\n\teventlist = xmldoc.getElementsByTagName('event')\r\n\tfor e in eventlist:\r\n\t\ttry:\r\n\t\t\tevents.append(parseEvent(e))\r\n\t\texcept ValueError:\r\n\t\t\tprint \"Event is being skipped!\"\r\n\treturn events",
"def parse_xml(content, file_name):\n\n patents = []\n\n xml_block = ''\n for line in iter(content):\n line_str = line.decode()\n if line_str.startswith('<?xml') and xml_block != \"\": # this means we are done with one xml block/patent\n\n patents.append(parse_single_patent(xml_block, file_name))\n xml_block = \"\"\n\n else:\n xml_block += line_str\n\n patents.append(parse_single_patent(xml_block, file_name))\n return patents",
"def read_xml(filename, from_string=False, write_to_file=False):\n # TODO: write the stripped xml to file for OBT (Oslo Bergen Tagger)\n print 'reading xml'\n if write_to_file:\n pass\n\n if from_string:\n try:\n tree = ElTree.fromstring(filename)\n no_tags = ElTree.tostring(tree, encoding='utf8', method='text')\n except Exception as inst:\n return {'Error reading xml': inst}\n else:\n tree = etree.parse(filename)\n no_tags = etree.tostring(tree, encoding='utf-8', method='text')\n no_tags = re.sub(ur'[^a-zA-Z0-9]', ' ', no_tags, re.UNICODE)\n\n return create_word_list(no_tags)",
"def test_parse_restricted_tags():\n invalid_tags = {'*', '**', '***', 'a*', '*a', 'a*a*', '*a*a', '*aa*', 'a**a', '}'}\n combined_tags = valid_tags | invalid_tags\n\n # Function under test\n resultant_tags = searchtag.parse_restricted_tags(\" \".join(combined_tags))\n\n # Verify that we have the tags in the valid list\n assert resultant_tags == valid_tags",
"def extract_cuewords(cuewords, xml_file_path):\n\n try:\n file_output = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE, 'w', encoding='utf8')\n file_output_pos_tagged = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE_POS_TAGGED,\n 'w', encoding='utf8')\n\n except FileNotFoundError:\n print('Please set correct filenames')\n\n # Empty lists for collecting data per file\n cueword_ids = []\n cuewords = []\n\n # Empty list to collect data for all files\n all_cuewords = []\n all_cuewords_pos_tagged = []\n\n print('Extracting cuewords from:', xml_file_path, 'to:', CUEWORDS_DATA_PATH+CUEWORDS_FILE)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n file_input = open(file, 'r', encoding='utf8')\n file_input = BeautifulSoup(file_input, 'xml')\n\n # Collect frames, get ids\n for frame in file_input.find_all('frame', {'name' : NEGATION_FRAME_NAME}):\n for target in frame.find_all('target'):\n for fenode in target.find_all('fenode'):\n cueword_ids.insert(0, fenode.get('idref'))\n\n # Find all splitwords\n for splitword in file_input.find_all('splitword'):\n cueword_ids.insert(0, splitword.get('idref'))\n\n # Find all terminals, check if its ID is in cueword_ids[]\n for terminal in file_input.find_all('t'):\n if terminal.get('id') in cueword_ids:\n all_cuewords.insert(0, terminal.get('word').lower())\n all_cuewords_pos_tagged.insert(0, terminal.get('word').lower()+\n '\\t'+terminal.get('pos'))\n\n # clear list for next document\n cueword_ids = []\n cuewords = []\n\n # Sort final list\n all_cuewords = sorted(set(all_cuewords))\n all_cuewords_pos_tagged = sorted(set(all_cuewords_pos_tagged))\n\n # Write cuewords without duplicates to file:\n for cueword in all_cuewords:\n file_output.write(cueword+'\\n')\n\n for cueword in all_cuewords_pos_tagged:\n file_output_pos_tagged.write(cueword+'\\n')\n\n file_output.close()\n file_output_pos_tagged.close()\n\n print('Cuewords extracted to:', file_output.name)\n print('Cuewords extracted and POS tagged to:', file_output_pos_tagged.name)\n print('Done!')",
"def get_tags_from_file(tag_file):\n tags = []\n with open(tag_file) as f:\n lines = f.readlines()\n for line in lines:\n if not line.startswith(\" \") and not line.startswith(\"\\n\"):\n tag = line.strip()\n tags.append(tag)\n return tags",
"def parse_requirements(filename):\r\n lineiter = (line.strip() for line in open(filename))\r\n return [line for line in lineiter if line and not line.startswith(\"#\")]",
"def parse_requirements(filename):\n lines = (line.strip() for line in open(filename))\n return [line.strip() for line in lines if line and not line.strip().startswith(\"#\")]",
"def read_gdbot_file(str_infile):\r\n lst_para = list()\r\n lst_good_rules = list()\r\n lst_bad_rules = list()\r\n with open(str_infile, 'r') as f:\r\n for line_raw in f:\r\n dic_r = str_to_rule(line_raw)\r\n if dic_r['valid']:\r\n if dic_r['type']=='rule':\r\n lst_good_rules.append(dic_r)\r\n elif dic_r['type']=='para':\r\n lst_para.append(dic_r)\r\n elif dic_r['type']=='null':\r\n pass # various forms of empty lines, comments, etc.\r\n else:\r\n lst_bad_rules.append(dic_r)\r\n else:\r\n lst_bad_rules.append(dic_r)\r\n if len(lst_bad_rules)>0:\r\n for rule in lst_bad_rules:\r\n log.warning('Counted bad lines: '+str(len(lst_bad_rules)))\r\n return (lst_para,lst_good_rules,lst_bad_rules)",
"def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith((\"#\", \"--\"))]",
"def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith('#')]",
"def _check_xml_syntax_error(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True",
"def wellformedness_check(tokens):\n stack = []\n for token in tokens:\n if token.is_a(Start):\n stack.append(token)\n elif token.is_a(End):\n try:\n start = stack.pop()\n except IndexError:\n raise WellformednessError('Extra end tag found: \"%s\"' % token.xml)\n if start.name != token.name:\n raise WellformednessError('\"%s\" matched by \"%s\"' % (start.xml, token.xml))\n elif token.is_a(Error):\n raise MarkupError(token.xml + tokens.next().xml)\n yield token",
"def parse_wos_xml(fp, good_cf, bad_cf, global_year=None):\n events = (\"start\", \"end\")\n tree = cET.iterparse(fp, events)\n context = iter(tree)\n event, root = next(context)\n rec_ = \"REC\"\n it = 0\n\n for event, pub in context:\n if event == \"end\" and pub.tag == rec_:\n ans = parse_record(pub, global_year)\n if ans[0]:\n good_cf.push(ans[1])\n else:\n msg = (\n \" parse_wos_xml() : wos_id {0} failed \"\n \"to parse, placed in the bad heap\".format(ans[1][\"id\"])\n )\n logging.error(msg)\n bad_cf.push(ans[1])\n if good_cf.stop():\n break\n root.clear()\n it += 1",
"def parse_requirements(filename):\n try:\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n except OSError:\n return []",
"def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not\n line.startswith(\"#\")]",
"def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]",
"def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]",
"def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]",
"def _check_sanity(self, cands: List[str], n_space: int):\n for cand in cands:\n # Count the number of space special character\n if cand.count(\"▁\") != n_space:\n continue\n # Check whether candidate XML is valid\n try:\n etree.fromstring(cand)\n return cand\n except:\n continue\n return False",
"def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data",
"def get_all_data(file, list): \n list = []\n with open(file, \"r\") as list:\n list = [row for row in list if len(row.strip()) > 0]\n return list",
"def _ProcessTagsForFileUse(self) -> List[str]:\n return list(self.tags)"
]
| [
"0.54615986",
"0.54525554",
"0.54438585",
"0.5365079",
"0.5323219",
"0.53079414",
"0.529013",
"0.52852976",
"0.5273892",
"0.5187629",
"0.5176548",
"0.51764387",
"0.5174369",
"0.51624626",
"0.5141824",
"0.51382256",
"0.5135575",
"0.51249886",
"0.51228625",
"0.51198125",
"0.51041204",
"0.5104024",
"0.510239",
"0.5094995",
"0.5094995",
"0.5094995",
"0.50943077",
"0.50657874",
"0.501454",
"0.50136924"
]
| 0.69328964 | 0 |
Overriding a form's __init__ method to perform validation checks on the verb category level within the taxonomy | def initialise(self, FormClass, *args, **kwargs):
self.taxonomy = kwargs.pop('taxonomy', None)
# The original/old category, for update view case
self.old_category = kwargs.pop('category', None)
super(FormClass, self).__init__(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, *args, **kwargs):\n super(CategoryForm, self).__init__(*args, **kwargs)\n\n if self.initial.get('protected'):\n self.fields['name'].disabled = True\n self.fields['protected'].disabled = True",
"def clean_level_(self):\n try:\n # Get the verb categories of the taxonomy\n verb_cats = VerbCategory.objects.filter(taxonomy=self.taxonomy)\n except Taxonomy.DoesNotExist:\n raise Http404('The taxonomy does not exist!')\n else:\n\n # Check categories for the entered level value\n submitted_level = self.cleaned_data.get('level', None)\n\n # if updating, need to allow the original level value to be re-entered\n old_level = None if not self.old_category else self.old_category.level\n\n if submitted_level in [cat.level for cat in verb_cats.all()\\\n if cat.level != old_level]:\n culprit = verb_cats.get(level=submitted_level)\n raise forms.ValidationError(f'The verb category \"{culprit.title}\" \\\n already has this value!')\n\n return submitted_level",
"def __init__(self, *args, **kwargs):\n\n self._caffe = kwargs.pop('caffe')\n\n kwargs.setdefault('label_suffix', '')\n super(FullExpenseForm, self).__init__(*args, **kwargs)\n self.fields['expense'].label = 'Przeznaczenie'\n self.fields['amount'].label = 'Kwota'\n self.fields['expense'].empty_label = None\n self.fields['expense'].queryset =\\\n Expense.objects.filter(caffe=self._caffe)",
"def __init__(self, *args, **kwargs):\n\n self._caffe = kwargs.pop('caffe')\n\n kwargs.setdefault('label_suffix', '')\n super(ExpenseForm, self).__init__(*args, **kwargs)\n self.fields['name'].label = 'Nazwa'\n self.fields['company'].label = 'Firma'\n self.fields['company'].required = False\n self.fields['company'].queryset =\\\n Company.objects.filter(caffe=self._caffe)",
"def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]",
"def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')",
"def __init__(field, form, content):",
"def __init__(field, form, content):",
"def __init__(self, cat_name):\n super(Cat, self).__init__(cat_name)",
"def clean(self):\n # Do not use self.long_slug here, it might don't exists yet,\n # use self._make_long_slug() instead.\n try:\n category_list = Category.objects.filter(\n site=self.site,\n blog=self.blog,\n long_slug=self._make_long_slug()\n )\n except ObjectDoesNotExist:\n pass\n else:\n if self.pk:\n category_list = category_list.exclude(pk=self.pk)\n if category_list.exists():\n raise ValidationError(\n _('The slug chosen already exists. Please try another.'))\n finally:\n super(Category, self).clean()",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['cd_identifier'].required = True\n self.fields['library'].required = True",
"def clean(self):\n return super(CharacterSkillForm, self).clean()",
"def test_category_form_class(self):\n # Check that we can import CategoryForm.\n import rango.forms\n self.assertTrue('CategoryForm' in dir(rango.forms), f\"{FAILURE_HEADER}The class CategoryForm could not be found in Rango's forms.py module. Check you have created this class in the correct location, and try again.{FAILURE_FOOTER}\")\n\n from rango.forms import CategoryForm\n category_form = CategoryForm()\n\n # Do you correctly link Category to CategoryForm?\n self.assertEqual(type(category_form.__dict__['instance']), Category, f\"{FAILURE_HEADER}The CategoryForm does not link to the Category model. Have a look in the CategoryForm's nested Meta class for the model attribute.{FAILURE_FOOTER}\")\n\n # Now check that all the required fields are present, and of the correct form field type.\n fields = category_form.fields\n\n expected_fields = {\n 'name': django_fields.CharField,\n 'views': django_fields.IntegerField,\n 'likes': django_fields.IntegerField,\n 'slug': django_fields.CharField,\n 'type' : django_fields.CharField,\n 'description' : django_fields.CharField,\n 'recommend_buy' : django_fields.IntegerField,\n 'url' : django_fields.URLField,\n 'picture' : django_fields.ImageField,\n }\n\n for expected_field_name in expected_fields:\n expected_field = expected_fields[expected_field_name]\n\n self.assertTrue(expected_field_name in fields.keys(), f\"{FAILURE_HEADER}The field '{expected_field_name}' was not found in your CategoryForm implementation. Check you have all required fields, and try again.{FAILURE_FOOTER}\")\n self.assertEqual(expected_field, type(fields[expected_field_name]), f\"{FAILURE_HEADER}The field '{expected_field_name}' in CategoryForm was not of the expected type '{type(fields[expected_field_name])}'.{FAILURE_FOOTER}\")",
"def test_valid_form(self):\n\n data = {'category': ['103','109'] }\n form = CategoriesForm(data=data)\n self.assertTrue(form.is_valid())",
"def __init__(self, serverID, *args, **kwargs):\n super(AddAttributeForm, self).__init__(*args, **kwargs)\n types = []\n\n # Get a list of attributetypes we cant have more than one of..\n serverObj = get_object_or_404(Server, id=serverID)\n ignoreList = serverObj.attributemapping_set.filter(attributeType__multiple_allowed=False)\n ignoredAttributes = [ a.attributeType.id for a in ignoreList ]\n\n for t in AttributeType.objects.all().order_by('name'):\n if t.id not in ignoredAttributes:\n types.append((t.id, t.name))\n validTypes = tuple(types)\n self.fields['attrtype'] = forms.ChoiceField(label='Type', choices=validTypes)\n self.fields['value'] = forms.CharField(max_length=255)",
"def __init__(self, *args, **kwargs):\n\n self._caffe = kwargs.pop('caffe')\n\n kwargs.setdefault('label_suffix', '')\n super(CompanyForm, self).__init__(*args, **kwargs)\n self.fields['name'].label = 'Nazwa'",
"def __init__(self, *args, **kwargs):\n\n\t\tsuper(CustomStatusFormset, self).__init__(*args, **kwargs)\n\n\t\tfor form in self.forms:\n\t\t\tfor field in form.fields:\n\t\t\t\tform.fields[field].widget.attrs.update({'class': 'form-control'})",
"def __init__(self, *args, **kwargs):\n super(EnterpriseCustomerAdminForm, self).__init__(*args, **kwargs)\n\n self.fields['catalog'] = forms.ChoiceField(\n choices=self.get_catalog_options(),\n required=False,\n help_text=\"<a id='catalog-details-link' href='#' target='_blank'\"\n \"data-url-template='{catalog_admin_url}'> View catalog details.</a>\".format(\n catalog_admin_url=utils.get_catalog_admin_url_template(),\n )\n )",
"def __post_init__(self) -> None:\n arity = self.root.arity\n length = len(self.children)\n if arity != length:\n raise ValueError(\n 'Incorrect number of child terms: '\n f'Expected {arity}, found {length}'\n )",
"def test_ensure_likes_dislikes_are_positive(self):\n testFailedCheck = False\n category_params = {'name': 'n', 'views': -1, 'likes': -1, 'dislikes': -2, 'likesDislikesDefault': 0, 'slug': str(random.randint(1, 1000))}\n cat = CategoryForm(category_params)\n self.assertFalse(cat.is_valid())",
"def __init__(self, base):\n Category_realization_of_parent.__init__(self, base)",
"def __init__(self, base):\n Category_realization_of_parent.__init__(self, base)",
"def validate_form(form, collection):\r\n\r\n # variable initialization\r\n max_title = 50\r\n max_ingredients = 500\r\n max_method = 1500\r\n max_recipe_URL = 250\r\n max_servings = 100\r\n max_category_name = 50\r\n max_category_URL = 250\r\n max_review = 250\r\n error_list = []\r\n\r\n # validates recipe form\r\n if collection == 'recipe':\r\n if not form['title'] or len(form['title']) > max_title:\r\n error_list.append(\r\n 'Title must not be empty or more than {} characters!'\r\n .format(max_title)\r\n )\r\n\r\n ingredient = form['ingredients']\r\n if not ingredient or len(ingredient) > max_ingredients:\r\n error_list.append(\r\n 'Ingredients must not be empty or more than {} characters!'\r\n .format(max_ingredients)\r\n )\r\n\r\n if not form['method'] or len(form['method']) > max_method:\r\n error_list.append(\r\n 'Method must not be empty or more than {} characters!'\r\n .format(max_method)\r\n )\r\n\r\n if 'appliance_categories' not in form:\r\n error_list.append(\r\n 'At least one of the appliances should be checked!'\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_recipe_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!!'\r\n .format(max_recipe_URL)\r\n )\r\n\r\n try:\r\n if not form['servings'] or int(form['servings']) > max_servings:\r\n error_list.append(\r\n 'Servings must not be empty or more than {}!'\r\n .format(max_servings)\r\n )\r\n\r\n except ValueError:\r\n error_list.append('Servings is not a number!')\r\n\r\n # validates recipe category form\r\n elif collection == 'recipe_category':\r\n if not form['name'] or len(form['name']) > max_category_name:\r\n error_list.append(\r\n 'Category name must not be empty or more than {} characters!'\r\n .format(max_category_name)\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_category_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!'\r\n .format(max_category_URL)\r\n )\r\n\r\n # validates review form\r\n elif collection == 'review':\r\n if not form['review'] or len(form['review']) > max_review:\r\n error_list.append(\r\n 'Review must not be empty or more than {} characters!'\r\n .format(max_review)\r\n )\r\n\r\n # returns errors on an empty list\r\n return error_list",
"def test_check_category_input_2(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(3, choices) == False",
"def __init__(self, handler=None, formdata=None, obj=None, prefix='', **kwargs):\n if handler:\n self._handler = handler\n super(Form, self).__init__(formdata=TornadoInputWrapper(self._handler), obj=obj, prefix=prefix, **kwargs)",
"def __init__(self):\n self.clean_optional()",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super(TaxonomyCreateView, self).form_valid(form)",
"def __validate__(self):",
"def test_required_term(self):\n schema = yaml.load(self.yaml_multiple_term, Loader=yaml.FullLoader)\n\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n val.validate(document)\n self.assertEqual(val.errors, {'eventDate': ['required field']})",
"def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )"
]
| [
"0.6305087",
"0.6184841",
"0.5756757",
"0.5699593",
"0.567558",
"0.56258833",
"0.53758687",
"0.53758687",
"0.52496165",
"0.52444685",
"0.5201543",
"0.5200204",
"0.51989377",
"0.51971245",
"0.5184488",
"0.51700205",
"0.5154692",
"0.5148399",
"0.5117388",
"0.5108232",
"0.50853014",
"0.50853014",
"0.50826395",
"0.5075775",
"0.5071255",
"0.50696844",
"0.50642544",
"0.50642127",
"0.50619674",
"0.5061751"
]
| 0.7239037 | 0 |
Ensure the user does not input a level value equal to that of a preexisting verb category of the taxonomy | def clean_level_(self):
try:
# Get the verb categories of the taxonomy
verb_cats = VerbCategory.objects.filter(taxonomy=self.taxonomy)
except Taxonomy.DoesNotExist:
raise Http404('The taxonomy does not exist!')
else:
# Check categories for the entered level value
submitted_level = self.cleaned_data.get('level', None)
# if updating, need to allow the original level value to be re-entered
old_level = None if not self.old_category else self.old_category.level
if submitted_level in [cat.level for cat in verb_cats.all()\
if cat.level != old_level]:
culprit = verb_cats.get(level=submitted_level)
raise forms.ValidationError(f'The verb category "{culprit.title}" \
already has this value!')
return submitted_level | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_input__(self):\n # | - __check_input__\n tmp = set(self.tree_level_labels)\n input_diff = tmp.symmetric_difference(self.level_entries.keys())\n if not input_diff == set():\n undefined_labels = []\n for i in input_diff:\n undefined_labels.append(i)\n\n print(\"\\n\")\n message = \"Did not fill out level entries dict properly\" + \"\\n\"\n message += \"The following properties need to be defined\" + \"\\n\"\n message += str(undefined_labels)\n raise ValueError(message)\n # __|",
"def test_ensure_likes_dislikes_are_positive(self):\n testFailedCheck = False\n category_params = {'name': 'n', 'views': -1, 'likes': -1, 'dislikes': -2, 'likesDislikesDefault': 0, 'slug': str(random.randint(1, 1000))}\n cat = CategoryForm(category_params)\n self.assertFalse(cat.is_valid())",
"def test_check_category_input_2(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(3, choices) == False",
"def test_does_not_have_value(self) -> None:\n self.assertFalse(LogLevels.has_value(1))",
"def test_check_category_input_1(self):\n assert validation.check_category_input(1, []) == False",
"def check_level(ctx, param, value):\n try:\n return int(value)\n except ValueError:\n return value.upper()",
"def check_level(level, n_level):\n if level is None:\n level = list(range(1, n_level + 1))\n else:\n if isinstance(level, int):\n if 0 < level <= n_level:\n level = [level]\n else:\n raise ValueError(\n \"Level is out of range: [1:{}]!\".format(n_level))\n elif isinstance(level, list) or isinstance(level, tuple):\n level = [(i if (0 < i <= n_level) else 0) for i in level]\n if 0 in level:\n raise ValueError(\n \"Level is out of range: [1:{}]!\".format(n_level))\n else:\n raise ValueError(\"Level-input format is incorrect!\")\n return level",
"def show_terms_if_not_agreed(context, slug=DEFAULT_TERMS_SLUG, field=DEFAULT_HTTP_PATH_FIELD):\n request = context['request']\n terms = TermsAndConditions.get_active(slug)\n agreed = TermsAndConditions.agreed_to_terms(request.user, terms)\n\n # stop here, if terms has been agreed\n if agreed:\n return {}\n\n # handle excluded url's\n url = urlparse(request.META[field])\n protected = is_path_protected(url.path)\n\n if (not agreed) and terms and protected:\n return {'terms': terms}\n\n return {}",
"def test_None_no_change(self, treant):\n treant.categories['bark'] = 'smooth'\n treant.categories['bark'] = None\n\n assert treant.categories['bark'] == 'smooth'\n\n treant.categories.add(bark=None)\n\n assert treant.categories['bark'] == 'smooth'",
"def test_blank_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n rv = self.category('')\n self.assertIn(b'Field must be between 1 and 50 characters long.', rv.data)",
"def _verify_basic_categories(self):\n for cat in CATEGORIES:\n if not Category.objects.filter(name=cat).exists():\n self.add_category(cat)",
"def test_required_level_of_education_missing(self):\r\n self.url_params['level_of_education'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'A level of education is required',\r\n )",
"def test_category_invalid(self):\n # wiki and questions\n ques = QuestionFactory(title=u'q1 audio')\n ques.tags.add(u'desktop')\n ans = AnswerFactory(question=ques)\n AnswerVoteFactory(answer=ans, helpful=True)\n\n d1 = DocumentFactory(\n title=u'd1 audio',\n locale=u'en-US',\n category=10,\n is_archived=False,\n tags=[u'desktop'])\n ApprovedRevisionFactory(document=d1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(2, json.loads(response.content)['total'])",
"def set_LevelLimit(self, value):\n super(GetCategoriesInputSet, self)._set_input('LevelLimit', value)",
"def input_level():\n\tlevel_instructions = \"\\n\" + \"Please select a game difficulty by typing it in!\" + \"\\n\"\n\tlevel_options = [\"easy\", \"medium\", \"hard\"]\n\terror_level = \"\\n\" + \"That's not an option! \" + \"\\n\"\n\tlevel = \"\" \n\tprint level_instructions\n\tlevel = raw_input('Possible choices include: easy, medium, or hard: ')\n\tlevel = level.lower()\n\twhile check_level(level, level_options) == None:\n\t\tprint \"\\n\" + error_level + \"\\n\"\n\t\tprint \"\\n\" + level_instructions + \"\\n\"\n\t\tlevel = raw_input('Possible choices include: easy, medium, or hard: ')\n\t\tcheck_level(level, level_options)\n\tprint \"\\n\" + \"You've chosen %s!\" % level\n\tlevel_index = level_options.index(level)\n\treturn level_index",
"def test_general_subset_invalid_level():\n pass",
"def is_allowed_to_have_child_terms(self):\n return self._is_allowed_to_have_child_terms",
"def validate_blank(level, blanks):\n for blank in blanks:\n if blank in level:\n return blank\n return None",
"def test_bad_probabilities(self):\n categories = {\"asdfa\": 0.05, 2: 0.2, 3: 0.3, 4: 0.4}\n with pytest.raises(ValueError):\n Categorical(\"yolo\", categories, shape=2)",
"def test_get_depth_category(self):\n self.assertEqual(self.category.get_depth(), 0)",
"def remove_category(self,cat):\n if isinstance(cat,Category):\n cat = cat.name\n if cat.name in self.__categories:\n del self.__categories[cat.name]\n return True\n else:\n warnings.warn('Category \\'{}\\' is not in Gradebook.'.format(cat.name))\n return False",
"def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)",
"def normalize(level):\n if level in LogLevel.levels:\n return level\n for l in LogLevel.levels:\n if l[0] == level:\n return l\n raise ValueError('invalid log level')",
"def _coerceLevel(self,level):\n result = level\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n level = level.lower()\n result = Trace.traceLevels.get(level)\n # Need an explicit test for None in the following if condition because\n # trace levels \"none\" and \"off\" map to a level with a value of 0\n if (result == None):\n raise TraceLevelException(\"Unknown trace level: %s Valid trace levels: %s\" % (level,Trace.traceNames))\n #endIf\n #endIf\n return result",
"def test_check_category_input_3(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(2, choices) == True",
"def level(score):\n user_level = \"\"\n if score < 20:\n user_level = \"elementary\"\n elif score < 30:\n user_level = \"intermediate\"\n elif score < 35:\n user_level = \"upper intermediate\"\n else:\n user_level = \"advanced\"\n return user_level",
"def set_threshold_levels(self, event_name, val):\n if self.validate_supply_name(event_name, \"events/\") and val:\n self.console.runcmd(f\"echo {val} > events/{event_name}\")\n else:\n assert (\n False\n ), \"A valid event name or the value, is not given while setting levels\"",
"def test_add_category_with_wrong_perms(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=4).save()\n response = self.client.post('/categories/add', {}, follow=True)\n self.assertTemplateUsed(response, 'unauthorized.html')",
"def choose_level():\n level_choices = [\"html\", \"css\", \"python\"]\n while True:\n player_prompt = (\"\"\"\\n Please type in (and then press enter) your choice of quiz topic:\n html\n css\n python\n -> \"\"\")\n user_choice = raw_input(player_prompt).lower()\n if user_choice not in level_choices:\n print \" Sorry, I didn't catch that.\"\n else:\n level = user_choice\n player_prompt = \" You chose the {} quiz! Are you sure? (Y/N) \".format(level)\n user_choice = raw_input(player_prompt).lower()\n if user_choice == \"y\" or user_choice == \"yes\":\n print \"\\n Welcome to the {} questions!\".format(level)\n return level",
"def category_choice_input(self):\n self.category = input(fr.FR[8])\n try:\n if self.category == \"q\":\n self.leave_category_choice -= 1\n elif 1 <= int(self.category) <= len(config.CATEGORIES):\n print(self.category)\n self.products = self.product_table.get_list_product(\n self.category)\n self.products_sub = self.product_table.get_list_product(\n self.category)\n self.choice_product()\n self.leave_category_choice -= 1\n except ValueError:\n print(fr.FR[10])"
]
| [
"0.5470051",
"0.5153077",
"0.5141412",
"0.50964123",
"0.50700456",
"0.5001281",
"0.4957744",
"0.49151474",
"0.48706883",
"0.4863167",
"0.48525065",
"0.4838317",
"0.48381013",
"0.48132944",
"0.48017558",
"0.4790142",
"0.47901177",
"0.47769713",
"0.47293702",
"0.4697029",
"0.466129",
"0.4659292",
"0.4650231",
"0.4646461",
"0.4630078",
"0.46221924",
"0.4615904",
"0.46111324",
"0.460571",
"0.46020976"
]
| 0.7883397 | 0 |
dexterity should be installed when we install documentgenerator | def test_dexterity_is_dependency_of_documentgenerator(self):
dependencies = self.portal.portal_setup.getProfileDependencyChain('collective.documentgenerator:default')
self.assertTrue(u'profile-plone.app.dexterity:default' in dependencies) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def docs(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)",
"def documento():\r\n\tpass",
"def setup_doc(request):\n if not request.user.is_superuser:\n messages.info(request, \"Logon to set up VNS\")\n return HttpResponseRedirect('/login/')\n do_setup_doc()\n messages.info(request, \"Refreshed documentation\")\n return HttpResponseRedirect('/')",
"def generate(env):\n## doxyfile_scanner = env.Scanner(## DoxySourceScan,\n## \"DoxySourceScan\",\n## scan_check = DoxySourceScanCheck,\n##)\n\n if targz.exists(env):\n srcdist_builder = targz.makeBuilder(srcDistEmitter)\n\n env['BUILDERS']['SrcDist'] = srcdist_builder",
"def docs():",
"def miktex_install_deps():\n raise NotImplementedError",
"def test_doc():\n pass",
"def document(self):\n ...",
"def documentation_only():\n pass",
"def test_docdir(self):\n self.chck_triple('docdir')",
"def Init(self, *args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Init(self, *args)",
"def deploy_nucleondocs():\n\n # Copy generated docs to docs_webserver on target machine\n rsync_project(\n remote_dir= '/srv/docs_webserver/docs/nucleon/',\n local_dir=join(dirname(__file__), 'docs/_build/html/'),\n delete=True)",
"def _create_documenter(env: sphinx.environment.BuildEnvironment,\n documenter_cls: Type[sphinx.ext.autodoc.Documenter],\n name: str) -> sphinx.ext.autodoc.Documenter:\n bridge = _FakeBridge(env)\n documenter = documenter_cls(bridge, name)\n assert documenter.parse_name()\n assert documenter.import_object()\n if documenter_cls.objtype == 'class':\n bridge.genopt['special-members'] = [\n '__eq__',\n '__getitem__',\n '__setitem__',\n # '__hash__',\n '__init__',\n '__class_getitem__',\n '__call__',\n '__array__',\n ]\n try:\n documenter.analyzer = sphinx.pycode.ModuleAnalyzer.for_module(\n documenter.get_real_modname())\n # parse right now, to get PycodeErrors on parsing (results will\n # be cached anyway)\n documenter.analyzer.find_attr_docs()\n except sphinx.pycode.PycodeError:\n # no source file -- e.g. for builtin and C modules\n documenter.analyzer = None\n return documenter",
"def docx():\n env.file_ext = \".docx\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --bibliography={bib_file} --csl={csl_file} --toc\".format(**env))",
"def get_documentation(self, *args, **dargs):\n pass",
"def has_doc() -> None:",
"def _ensure_ctypesgen(self):\n try:\n subprocess.check_call(\n ['ctypesgen.py', '--help'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except OSError:\n sys.stderr.write(\n 'ctypesgen.py not found in $PATH, attempting installation'\n )\n install_package(['ctypesgen'])\n except subprocess.CalledProcessError:\n sys.stderr.write(\n 'ctypesgen.py is installed, but not functioning properly, '\n 'consider reinstalling.\\n'\n )\n sys.exit(1)",
"def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()",
"def test() -> None:\n docx2python(\"resources/example.docx\")",
"def test_noDocumentsFound(self):\n self.assertRaises(\n NoDocumentsFound,\n self.builder.build, \"1.2.3\", self.howtoDir, self.howtoDir,\n self.templateFile)",
"def setup(self):\n pass # pragma: no cover",
"def test_buildPDF(self):\n bookPath = self._setupTeXFiles()\n outputPath = FilePath(self.mktemp())\n\n builder = BookBuilder()\n builder.buildPDF(bookPath, self.howtoDir, outputPath)\n\n self.assertTrue(outputPath.exists())",
"def deploy_sphinx_docs():\n require('docs_root', 'docs_install_dir')\n sphinx.build_html_docs(env.docs_root)\n sudo('mkdir -p {}'.format(env.docs_install_dir))\n sphinx.deploy_html_docs(env.docs_root,\n env.docs_install_dir)",
"def Setup(self):\n return True",
"def consistent_documentation():\n\n return 3",
"def test(self):\n self.skipped_test('doctest module has no DocTestSuite class')",
"def install_deps():\n dist = check_distribution()\n if dist == Distribution.TEXLIVE:\n texlive_install_deps()\n elif dist == Distribution.MIKTEX:\n miktex_install_deps()\n\n install_pygments()",
"def fini_doc(self):\n raise NotImplementedError()",
"def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")",
"def genlangs(self):\r\n raise NotImplementedError"
]
| [
"0.5717058",
"0.5526926",
"0.5411139",
"0.53990114",
"0.5289636",
"0.5268645",
"0.523585",
"0.5222095",
"0.52014726",
"0.5199993",
"0.51526654",
"0.5081674",
"0.5065004",
"0.50619376",
"0.50328755",
"0.5009421",
"0.50085336",
"0.5003667",
"0.49927482",
"0.4989397",
"0.49832636",
"0.49766886",
"0.49504185",
"0.4940791",
"0.4938723",
"0.4934319",
"0.49188784",
"0.49187735",
"0.48952246",
"0.48933962"
]
| 0.6871886 | 0 |
z3cform.datagridfield should be installed when we install documentgenerator | def test_z3cformdatagridfield_is_dependency_of_documentgenerator(self):
dependencies = self.portal.portal_setup.getProfileDependencyChain('collective.documentgenerator:default')
self.assertTrue(u'profile-collective.z3cform.datagridfield:default' in dependencies) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def db_fields(self):",
"def __init__(self):\n self.fields = [ \n \n# plugins.FieldWidget(\"widget\", descr=\"Start from widget\",\n# default=\"/\"),\n# plugins.FieldMarker(\"markersearch\", descr=\"Search for marker\"),\n# plugins.FieldMarker(\"markerreplace\", descr=\"Replace with marker\"),\n plugins.FieldBool(\"character\", descr=\"Plot character of bands\")\n ]",
"def dtype() -> ExtensionDtype:\n return skfda.representation.grid.FDataGridDType(\n grid_points=[\n np.arange(10),\n np.arange(10) / 10,\n ],\n dim_codomain=3,\n )",
"def define_fields(cls, dbmanager):\n return []",
"def addProductFields(form, forCreation=False, restWriter=None, hasOptions=False):\n form.addField('code', formal.String(required=True, strip=True))\n form.addField('title', formal.String(required=True, strip=True))\n\n images = formal.Group('images')\n form.add( images )\n images.add( formal.Field('mainImage', formal.File(required=forCreation), \n widgetFactory=formal.widgetFactory( formal.FileUploadWidget,\n convertibleFactory=contenttypeutil.KeyToFileConverter,\n originalKeyIsURL=True),description='click to change') )\n images.add( formal.Field('ndgrad', formal.File(), \n widgetFactory=formal.widgetFactory( formal.FileUploadWidget,\n convertibleFactory=contenttypeutil.KeyToFileConverter,\n originalKeyIsURL=True),description='click to change') )\n\n\n availability = formal.Group('availability')\n form.add( availability )\n\n availability.add( formal.Field('show', formal.Boolean()))\n availability.add( formal.Field('available', formal.Boolean()) )\n availability.add( formal.Field('availabilityDescription', formal.String()) )\n\n metadata = formal.Group('metadata')\n form.add( metadata )\n\n metadata.add( formal.Field('date', formal.Date(), formal.widgetFactory(formal.DatePartsInput, dayFirst=True)))\n metadata.add( formal.Field('location', formal.String()) )\n \n lensOptions = [\n \"80mm Schneider Super Symmar XL f/4.5\",\n \"110mm Schneider Super Symmar XL f/5.6\",\n \"150mm Rodenstock Sironar S f/5.6\",\n \"240mm Fujinon A f/9\",\n \"360mm Nikkor T*ED f/8\",\n \"360mm Nikkor T*ED f/11\",\n ]\n metadata.add( formal.Field('lens', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=lensOptions) ) )\n \n # this is a redundant field... need to remove if possible\n metadata.add( formal.Field('speedaperture', formal.String()) )\n \n speedOptions = ['1/500', '1/250','1/125','1/60','1/30','1/15','1/8','1/4','1/2','1s','2s','4s','8s','15s','30s','1m','2m']\n metadata.add( formal.Field('speed', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=speedOptions),description='If you enter a text value please use the same format as the existing values e.g. 
6s, 1/3, 2m' ) )\n \n \n apertureOptions = ['f/5.6','f/6.3','f/8','f/8⅓','f/8½','f/8⅔','f/16','f/16⅓','f/16½','f/16⅔','f/22','f/22⅓','f/22½','f/22⅔','f/32','f/32⅓','f/32½','f/32⅔','f/45','f/45⅓','f/45½','f/45⅔']\n metadata.add( formal.Field('aperture', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=apertureOptions) ) ) \n metadata.add( formal.Field('tiltswing', formal.String()) )\n metadata.add( formal.Field('fronttilt', formal.Integer()) )\n metadata.add( formal.Field('reartilt', formal.Integer()) )\n metadata.add( formal.Field('risefall', formal.String()) )\n ndfilters = ['0.3S','0.45S','0.6S','0.75S','0.9S','0.3H','0.45H','0.6H','0.75H','0.9H']\n metadata.add( formal.Field('ndfilters', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=ndfilters)) )\n otherfilters=['81A','81B','81C','Polariser']\n metadata.add( formal.Field('otherfilters', formal.String(), formal.widgetFactory(formal.SelectOtherChoice, options=otherfilters)) )\n\n \n \n \n data_strings = [\n (0, '-'),\n (1, '*'),\n (2, '**'),\n (3, '***'),\n (4, '****'),\n (5, '*****'),\n ] \n \n metadata.add( formal.Field('rating', formal.Integer(), formal.widgetFactory(formal.SelectChoice, options=data_strings)) )\n\n\n description = formal.Group('description')\n form.add( description )\n parsers = [('markdown','MarkDown'),('xhtml','XHTML'),('plain','Plain Text')]\n description.add( formal.Field('summary', formal.RichTextType(required=True),\n widgetFactory=formal.widgetFactory(richtextarea.RichTextArea, parsers=parsers),\n cssClass=' '.join(['imagepicker','preview','itemselector']) ) )\n description.add( formal.Field('description', formal.RichTextType(required=True),\n widgetFactory=formal.widgetFactory(richtextarea.RichTextArea, parsers=parsers),\n cssClass=' '.join(['imagepicker','preview','itemselector']) ) )\n description.add( formal.Field('categories', formal.Sequence(formal.String()), \n widgetFactory=categorieswidget.FormalCheckboxTreeMultichoice ) )\n\n\n\n if not hasOptions:\n pricing = formal.Group('pricing')\n form.add( pricing )\n pricing.add( formal.Field('price', formal.Decimal(required=True)) )\n\n\n seo = formal.Group('seo')\n form.add( seo )\n seo.add( formal.Field('titleTag', formal.String()) )\n seo.add( formal.Field('metaDescription', formal.String()) )\n seo.add( formal.Field('metaKeywords', formal.String()) )",
"def create_datamapper(self):\n self.logger.debug(\"Create data widget mapper for fields\")\n self.datamapper = QDataWidgetMapper(self)\n self.datamapper.setModel(self._parent.model_fields)\n self.datamapper.addMapping(self.lblPacketFolder, 0, \"text\") # \"text\" property name must be added for QLabel to work with QDataWidgetmapper\n self.datamapper.addMapping(self.inpProductId, 1)\n self.datamapper.addMapping(self.inpProductName, 2)\n self.datamapper.addMapping(self.editDesc, 3)\n self.datamapper.addMapping(self.editAdvice, 4)\n self.datamapper.addMapping(self.cmbProductType, 5)\n self.datamapper.addMapping(self.inpProductVer, 6)\n self.datamapper.addMapping(self.inpPackageVer, 7)\n self.datamapper.addMapping(self.sldPrio, 8)\n self.datamapper.addMapping(self.cmbLicense, 9)\n self.datamapper.addMapping(self.inpScrSetup, 10)\n self.datamapper.addMapping(self.inpScrUninstall, 11)\n self.datamapper.addMapping(self.inpScrUpdate, 12)\n self.datamapper.addMapping(self.inpScrAlways, 13)\n self.datamapper.addMapping(self.inpScrOnce, 14)\n self.datamapper.addMapping(self.inpScrCustom, 15)\n self.datamapper.addMapping(self.inpScrUserLogin, 16)\n self.datamapper.toFirst()\n\n self.logger.debug(\"Create data widget mapper for dependencies\")\n self.datamapper_dependencies = QDataWidgetMapper(self)\n self.datamapper_dependencies.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)\n self.datamapper_dependencies.setModel(self._parent.model_dependencies)\n self.datamapper_dependencies.addMapping(self.cmbDepAction, 0)\n self.datamapper_dependencies.addMapping(self.cmbDepProdID, 1)\n self.datamapper_dependencies.addMapping(self.cmbDepReqAction, 2)\n self.datamapper_dependencies.addMapping(self.cmbDepInstState, 3)\n self.datamapper_dependencies.addMapping(self.cmbDepRequirement, 4)\n self.datamapper_dependencies.toFirst()\n\n self.logger.debug(\"Create data widget mapper for properties\")\n self.datamapper_properties = QDataWidgetMapper(self)\n self.datamapper_properties.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)\n self.datamapper_properties.setModel(self._parent.model_properties)\n self.datamapper_properties.addMapping(self.inpPropName, 0)\n self.datamapper_properties.addMapping(self.cmbPropType, 1)\n self.datamapper_properties.addMapping(self.cmbPropMulti, 2)\n self.datamapper_properties.addMapping(self.cmbPropEdit, 3)\n self.datamapper_properties.addMapping(self.inpPropDesc, 4)\n self.datamapper_properties.addMapping(self.inpPropVal, 5)\n self.datamapper_properties.addMapping(self.inpPropDef, 6)\n self.datamapper_properties.addMapping(self.cmbPropDef, 6)\n self.datamapper_properties.toFirst()",
"def get_pkg_meta_field(self, pkg, field, version=None):\n pass",
"def __init__(self):\n self.label = \"Change Field Name\"\n self.alias = \"Field name\"\n\n # List of tool classes associated with this toolbox\n self.tools = [FieldName]",
"def DatasetDictFieldWidget(field, request):\n return FieldWidget(field, DatasetDictWidget(request))",
"def __init__(self):\n self.fields = [ \n \n #plugins.FieldWidget(\"widget\", descr=\"Start from widget\",\n # default=\"/\"),\n #plugins.FieldMarker(\"markersearch\", descr=\"Search for marker\"),\n #plugins.FieldMarker(\"markerreplace\", descr=\"Replace with marker\"),\n #plugins.FieldBool(\"character\", descr=\"Plot character of bands\")\n ]",
"def __init__(self):\n self.fields = [ \n \n #plugins.FieldWidget(\"widget\", descr=\"Start from widget\",\n # default=\"/\"),\n #plugins.FieldMarker(\"markersearch\", descr=\"Search for marker\"),\n #plugins.FieldMarker(\"markerreplace\", descr=\"Replace with marker\"),\n #plugins.FieldBool(\"character\", descr=\"Plot character of bands\")\n ]",
"def add_field(self, field_data):\n def_field = {'id':None,\n 'ref':None,\n 'posx':'0',\n 'posy':'0',\n 'size':'50',\n 'text_orientation':'H',\n 'visible':'V',\n 'text_align':'L',\n 'props':'CNN'\n }\n\n field = dict(list(def_field.items()) + list(field_data.items()))\n #field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field",
"def setup(app: Sphinx):\n\n # Sphinx 0.5 support\n if sphinx.__version__.startswith('0.5'):\n app.add_directive('exceltable', ExcelTableDirective, 0, (0, 0, 0))\n else:\n app.add_directive('exceltable', ExcelTableDirective)",
"def mk_custom_field(self):\n msg = \"Do you want to generate a minefield with these options?\"\n self.popup.set_text(msg)\n self.popup.set_title(\"GENERATE FIELD?\")\n self.set_focused_ui(self.popup)\n self.popup.set_enabled(True)",
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def define_fields(cls, dbmanager):\n\n # ATTN: UNFINISHED\n fieldlist = [\n # standard primary id number field\n mdbfield.DbfPrimaryId('id', {\n 'label': \"The primary key and id# for this group\"\n }),\n # globally unique resource reference\n mdbmixins.dbfmixin_gobselfreference(),\n ]\n\n return fieldlist",
"def get_dashmanager_field_components(doctype):\n\tfields_list, fields_component_list = get_fields_component_list(doctype)\n\treturn {\n\t\t\"fields\" : json.dumps(fields_list),\n\t\t\"fields_components\" : json.dumps(fields_component_list)\n\t}",
"def test_set_fields():\n\n document = DocumentFactory.create(\n charfield=\"some chars\",\n textfield=\"some text\",\n decimalfield=0.0815,\n integerfield=42,\n )\n\n assert document.charfield == \"some chars\"\n assert document.textfield == \"some text\"\n assert document.decimalfield == 0.0815\n assert document.integerfield == 42",
"def create_user_defined_fielddesc(sbmfield,config,inst):\n\tel_dict = {\"alephcode\":0,\\\n\t\t\t\"marccode\":1,\\\n\t\t\t\"type\":2,\\\n\t\t\t\"size\":3,\\\n\t\t\t\"rows\":4,\\\n\t\t\t\"cols\":5,\\\n\t\t\t\"maxlength\":6,\\\n\t\t\t\"val\":7,\\\n\t\t\t\"fidesc\":8,\\\n\t\t\t\"cd\":9,\\\n\t\t\t\"md\":10,\\\n\t\t\t\"modifytext\":11,\\\n\t\t\t\"fddfi2\":12,\\\n\t\t\t\"cookie\":13}\n\t\n\tsbm_dict = {\"fieldname\":0,\\\n\t\t\t \"fielddesc\":1,\\\n\t\t\t \"mo\":2,\\\n\t\t\t \"order\":3,\\\n\t\t\t \"placeholder\":4}\n\t\n\thgf_field = sbmfield[sbm_dict[\"fieldname\"]]\n\tif hgf_field.startswith(\"hgf\"): \n\t\telement = config[\"fielddesc\"][hgf_field] # we have to read the fielddescriptor from confg file, because all fielddescriptors in database will be redefined to \"user defined fields\" at the end of this function\n\telse: \n\t\tif hgf_field in config[\"default_form\"]: element = get_field_from_sbmfielddesc(hgf_field)[1:]\n\t\telse: return \"\",\"O\" #non hgf-fields (defined collections,...)\n\tplaceholder = \"\" #initialise\n\tfieldlabel = \"\" #initialise\n\tif len(sbmfield) == sbm_dict[\"placeholder\"] +1: placeholder = sbmfield[sbm_dict[\"placeholder\"]] #get placeholder\n\t\n\tif hgf_field == \"hgf_start\": \t\t\n \t# define a fieldset which can then be used for internal element\n \t# placement relative to that div so we end up with a table-less\n \t# form doing arrangement entirely in CSS\n\t\tif read_javascript_includes():\n\t\t\tfieldlabel = read_javascript_includes()\n\t\tfieldlabel += '<fieldset id=\"submissionfields\"><legend id=\"submissionlegend\">%s</legend><div id=\"loadingMsg\"><img src=\"/img/search.png\" alt=\"Loading...\" />Loading data. Please stand by... </div>' %sbmfield[sbm_dict[\"fielddesc\"]]\n\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\t\n\tif hgf_field == \"hgf_end\":\n \t\t# close the main fieldset\n\t\tfieldlabel = '</fieldset>'\n\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\t\n\tif hgf_field == \"hgf_comment\": #technical field\n\t\tif sbmfield[1] == \"hidden\": pass# 'hidden' is generated by create_mask function\n\t\telse:\t \n\t\t\tfieldlabel = \"<span class=\\\"Comment\\\" id=\\\"hgf_comment\\\">%s</span>\" % sbmfield[sbm_dict[\"fielddesc\"]] \n\t\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\t\t\n\tif hgf_field == \"hgf_preview\": #mathjax title preview\n\t\tfieldlabel = \"\"\n\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\n\tif element[el_dict[\"marccode\"]] == \"\": #no marccode\n\t\tunique_id = sbmfield[sbm_dict[\"fieldname\"]] # i.e. hgf_import is Input-field, but not MARC\n\t\tid1 = \"\"\n\t\tid2 = \"\"\n\telse : \n\t\tid1 = element[el_dict[\"marccode\"]][0:3]\n\t\tid2 = element[el_dict[\"marccode\"]]\n\t\tunique_id = hgf_field.replace(\"hgf_\",\"\")\n\tsize,rows,cols = element[3:6]\n\tvalue = element[el_dict[\"val\"]]\n\tif value == \"NULL\": value = \"\"\n\tfieldtext = sbmfield[sbm_dict[\"fielddesc\"]]\n\tfieldtype = \"D\" #change fieldtype to user defined input. 
IMPORTANT: whole information about the field (spans, fieldname, input-field, textarea) are stored in the fieldlabel in the sbmFIELD herefore fidesc in sbmFIELDDESC has to be \"\" and eltype \"D\")\n\t\n\t\n\tif inst != \"default\":\n\t\tsuffix = \"#\" + inst.upper() + \"_font\" # suffix for twiki page at GSI\t\n\telse: suffix = \"\"\n\t#Insert Helptext#\n\twiki_base = \"\"\n\tif (\"CFG_HGF_WIKI_BASE_URL\" in globals()):\n \t# Twiki needs all page titles to start with a capital letter.\n \t# Therefore, capitalize() the uniq_id when constructing the URL.\n \t\twiki_base = CFG_HGF_WIKI_BASE_URL \n else:\n\t\twiki_base = \"http://invenio-wiki.gsi.de/cgi-bin/view/Main/\"\n\thelp_text = '<span class=\"Helptext\" id=\"%(unique_id)s%(suffix)s\"><a href=\"%(wiki_base)s%(unique_id)s%(suffix)s\" alt=\"Help\" target=\"_blank\"><img src=\"/img/hgfinfo.png\"></a></span>' %{'unique_id':unique_id.capitalize(),\"suffix\":suffix,\"wiki_base\":wiki_base}\n\n\tmog = \"\" #this variable is set for group dependent mandatory fields \n\tif element[el_dict[\"type\"]].upper() == \"I\": #Input text box\n\t\tgroupclass = get_groupclass(sbmfield[sbm_dict[\"mo\"]]) #get groupclass in case of fieldlevel=m1,m2,m3...... if no groupclass, then return \"\"\n\t\tif groupclass != \"\": mog = \"MOG\"\n\t\tif sbmfield[sbm_dict[\"mo\"]].lower().startswith(\"m\"):#fieldlevel\n\t\t\tfieldlabel = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"><label for=\"I%(unique_id)s\" class=\"L%(unique_id)s ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %(groupclass)s\"></input></span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\telse: \t\n\t\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]: #no marccode but Input-field\n\t\t\t\tfieldlabel = '<span class=\"G G%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I %(groupclass)s\"></input> </span>' % {'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\t\telse:\n\t\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I%(id2)s I%(id1)s I %(groupclass)s\"></input> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\telif element[el_dict[\"type\"]].upper() == \"T\":\t# Textarea\n\t\tgroupclass = get_groupclass(sbmfield[sbm_dict[\"mo\"]])\n\t\tif groupclass != \"\": mog = \"MOG\"\n\t\tif sbmfield[sbm_dict[\"mo\"]].lower().startswith(\"m\"):#fieldlevel\n\t\t\tfieldlabel = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\" >%(fieldtext)s</label> %(help_text)s <textarea name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI%(id2)s I%(id2)s 
MI%(id1)s I%(id1)s MI I %(groupclass)s\" cols=\"%(cols)s\" rows=\"%(rows)s\"></textarea> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'rows':rows,'cols':cols,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\telse:\n\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G G%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s <textarea name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I%(id2)s I%(id1)s I %(groupclass)s\" cols=\"%(cols)s\" rows=\"%(rows)s\"></textarea> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'rows':rows,'cols':cols,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\telif element[el_dict[\"type\"]].upper() == \"H\": #hidden field\n\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]:\n\t\t\tfieldlabel = '<span class=\"G\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\"></label> <input type=\"hidden\" name=\"%(hgf_name)s\" id=\"I%(unique_id)s\" value=\"%(value)s\" class=\"I\"></input> </span>' % {'unique_id':unique_id,'value':value,'hgf_name':hgf_field}\n\t\telse:\n\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L%(id2)s L%(id1)s L\"></label> <input type=\"hidden\" name=\"%(hgf_name)s\" id=\"I%(unique_id)s\" value=\"%(value)s\" class=\"I%(id2)s I%(id1)s I\"></input> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'value':value,'hgf_name':hgf_field}\n\telif element[el_dict[\"type\"]].upper() == \"F\": #File field\n\t\tgroupclass = get_groupclass(sbmfield[sbm_dict[\"mo\"]])\n\t\tif groupclass != \"\": mog = \"MOG\"\n\t\tif sbmfield[sbm_dict[\"mo\"]].startswith(\"m\"):#fieldlevel\n\t\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]: #no marccode but Input-field\n\t\t\t\tfieldlabel = '<span class=\"MG MG%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s ML\">%(fieldtext)s</label> %(help_text)s <input type=\"file\" name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI %(groupclass)s\"></input> </span>' % {'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\t\telse:\t\n\t\t\t\tfieldlabel = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"><label for=\"I%(unique_id)s\" class=\"ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\">%(fieldtext)s</label> %(help_text)s <input type=\"file\" name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %(groupclass)s\"></input></span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\telse: \t\n\t\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]: #no marccode but Input-field\n\t\t\t\tfieldlabel = '<span class=\"G G%(unique_id)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\">%(fieldtext)s</label> %(help_text)s <input type=\"file\" name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I\"></input> </span>' % 
{'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'placeholder':placeholder}\n\t\t\telse:\n\t\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" type=\"file\" id=\"I%(unique_id)s\" class=\"I%(id2)s I%(id1)s I\"></input> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'placeholder':placeholder}\n\telif element[el_dict[\"type\"]].upper() == \"C\": #check box\n\t\tfieldlabel = make_specialfields(unique_id,id1,id2,size,fieldtext,hgf_field,help_text,sbmfield,config,\"checkbox\",inst)\n\telif element[el_dict[\"type\"]].upper() == \"R\": #Radio button Warninig invenio default for \"R\" would be Response Element\n\t\tfieldlabel = make_specialfields(unique_id,id1,id2,size,fieldtext,hgf_field,help_text,sbmfield,config,\"radio\",inst)\n\telse: \treturn \"\",\"O\" #other hgf-field with marccode (if exists)\n\t\n\teltype = get_eltype_from_sbmfielddesc(hgf_field)\n\tfidesc = \"\"\n\tmodification_text = fieldlabel #modification text\n\tif eltype != fieldtype: update_eltype_in_sbmfielddesc(hgf_field,fieldtype,modification_text,fidesc) #redefine fielddescriptor in database\n\t\n\tif len(sbmfield[sbm_dict[\"mo\"]])>1: fieldlevel = sbmfield[sbm_dict[\"mo\"]][0].upper() #prevent submitting irregular values into DB for fieldlevel, only M,O possible \n\telse: fieldlevel = sbmfield[sbm_dict[\"mo\"]].upper() \n\treturn fieldlabel,fieldlevel",
"def test_document_form_has_right_fields(self):\n form = DocumentForm()\n self.assertEqual(len(form.fields.keys()), 2)\n self.assertIn('file', form.fields.keys())\n self.assertIn('doctype', form.fields.keys())",
"def make_fields(self):\n #Let's first get fields in material_information printer_information\n metadata = GUI.api.get_metadata()\n field_correct_form = filter(lambda field: field['form_name']=='material_information' or field['form_name'] == 'printer_information', metadata)\n rows_w_fields = []\n for field in field_correct_form:\n #make label\n row = []\n key = field['field_name']\n type = field['field_type']\n row.append(sg.Text(text = field['field_label'], key=key+\"_label\"))#keys for labels are key_label (ex. record_id_label)\n if(type == 'radio' or type == \"dropdown\"):\n options = utils.get_options(field)\n row.append(sg.Combo(options, key=key, disabled= True, metadata=True, enable_events=True))\n elif(type == \"yesno\"):\n options = [\"Yes\", \"No\"]\n row.append(sg.Combo(options, key=key, disabled= True, metadata=True, enable_events=True))\n elif(type == \"text\"):\n row.append(sg.Input(key=key, disabled=True, metadata=True))\n else:#descirptive\n row[0] = sg.Text(text = field['field_label'], key=key, metadata=True)#we only need text in this case\n rows_w_fields.append(row)\n return rows_w_fields",
"def schema(self):",
"def create_course_featured_registry():\n\n reg = FieldRegistry(\n 'Course Featured', description='Course Featured',\n extra_schema_dict_values={\n 'className': 'inputEx-Group new-form-layout'})\n\n # Course level settings.\n reg.add_property(SchemaField('id', 'Course id', 'string', editable=False))\n reg.add_property(SchemaField('title', 'Title', 'string', editable=False))\n reg.add_property(SchemaField('featured', 'Featured', 'boolean', optional=True))\n return reg",
"def __init__(field, form, content):",
"def __init__(field, form, content):",
"def make_form(self):",
"def get_fields(dgid, metadata=None, computed_columns=None):\n # NOTE: metadata does not contain computed_columns yet\n if metadata is None:\n conn = get_database_connection(dgid)\n metadata = get_metadata(conn)\n\n # Used to evaluate computed columns\n unify_computed_columns(computed_columns)\n columns = list(metadata.keys())\n select_expr_as = [get_field_name(column, metadata) for column in columns]\n databases = [\"datagrid\"]\n\n if computed_columns:\n # Only passed in when calling from endpoint\n update_state(computed_columns, metadata, databases, columns, select_expr_as)\n # Now metadata has computed columns\n\n fields = {}\n for column in metadata:\n datatype = metadata[column][\"type\"]\n field_name = get_field_name(column, metadata)\n qbtype = datatype_to_qbtype(datatype)\n if qbtype is None:\n continue\n\n if datatype in [\"FLOAT\", \"INTEGER\", \"ROW_ID\"]:\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n # name, datatype, min, max, avg, variance, total, stddev, other\n if (metadata[column][\"minimum\"] is not None) and (\n metadata[column][\"minimum\"] is not None\n ):\n min_value = metadata[column][\"minimum\"]\n max_value = metadata[column][\"maximum\"]\n fields[field_name][\"fieldSettings\"] = {\n \"min\": min_value,\n \"max\": max_value,\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"DATETIME\":\n field_exp = \"datetime(%s, 'unixepoch')\" % field_name\n fields[field_exp] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n if (metadata[column][\"minimum\"] is not None) and (\n metadata[column][\"minimum\"] is not None\n ):\n min_value = metadata[column][\"minimum\"]\n max_value = metadata[column][\"maximum\"]\n fields[field_exp][\"fieldSettings\"] = {\n \"min\": min_value,\n \"max\": max_value,\n # \"dateFormat\": \"DD-MM-YYYY\",\n # \"timeFormat\":\n # \"valueFormat\":\n }\n fields[field_exp][\"valueSources\"] = [\n \"value\",\n \"field\",\n \"func\",\n ] # adds Now, and Relative\n\n elif datatype == \"BOOLEAN\":\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n fields[field_name][\"fieldSettings\"] = {\n \"labelYes\": \"True\",\n \"labelNo\": \"False\",\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"TEXT\":\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"JSON\":\n # Asset metadata columns are named\n # 'COLUMN_NAME.metadata' or 'COLUMN_NAME--metadata'\n fields[field_name] = {\n \"label\": column.replace(\".metadata\", \"\").replace(\"--metadata\", \"\"),\n \"field\": field_name,\n \"tooltip\": \"The '%s' column (type 'JSON') from the data grid\"\n % (column,),\n \"type\": \"!struct\",\n \"subfields\": {},\n }\n subfields = ast.literal_eval(metadata[column][\"other\"])\n # Only filterable keys are in subfields\n for key in subfields:\n # Query Builder filter types: \"text\", \"number\", \"boolean\", or \"list-of-text\"\n qbtype = subfields[key][\"type\"]\n if qbtype == \"list-of-text\":\n 
field_exp = \"json_extract(%s, '$.%s')\" % (field_name, key)\n fields[field_name][\"subfields\"][field_exp] = {\n \"type\": \"text\",\n \"label\": key,\n \"field\": field_name,\n \"tableName\": \"1\", # special signal for JSON queries in our QueryBuilder\n \"operators\": [\"like\"],\n }\n else:\n field_exp = \"json_extract(%s, '$.%s')\" % (field_name, key)\n fields[field_name][\"subfields\"][field_exp] = {\n \"type\": qbtype,\n \"label\": key,\n \"field\": field_name,\n \"tableName\": \"1\", # special signal for JSON queries in our QueryBuilder\n }\n if \"values\" in subfields[key]:\n fields[field_name][\"subfields\"][field_exp][\"type\"] = \"select\"\n fields[field_name][\"subfields\"][field_exp][\"fieldSettings\"] = {\n \"listValues\": sorted(subfields[key][\"values\"])\n }\n\n return fields",
"def fields(self):\n ...",
"def data() -> ExtensionArray:\n data_matrix = np.arange(1, 100 * 10 * 10 * 3 + 1).reshape(100, 10, 10, 3)\n grid_points = [\n np.arange(10),\n np.arange(10) / 10,\n ]\n\n return skfda.FDataGrid(data_matrix, grid_points=grid_points)",
"def dataproduct() -> None:\n pass"
]
| [
"0.51919913",
"0.5081313",
"0.5059667",
"0.5010525",
"0.5000373",
"0.49492037",
"0.48477876",
"0.48170084",
"0.4805563",
"0.4780437",
"0.4780437",
"0.47034585",
"0.4697584",
"0.46762303",
"0.46655083",
"0.46433333",
"0.46174502",
"0.46126264",
"0.45991832",
"0.45922363",
"0.45652747",
"0.4554951",
"0.45423704",
"0.45253995",
"0.45253995",
"0.44859555",
"0.44848514",
"0.44782358",
"0.4474335",
"0.44562787"
]
| 0.7025836 | 0 |
Reset the exponent alpha. | def reset_alpha(self, alpha):
self.alpha, old_alpha = alpha, self.alpha
priorities = [(self.tree.get_val(i) + self.__e) ** -
old_alpha for i in range(self.tree.filled_size())]
self.priority_update(range(self.tree.filled_size()), priorities) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [self.tree.get_val(i) ** -old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [self.tree.get_val(i) ** -old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [self.tree.get_val(i)**-old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def set_alpha(self, alpha=1.0):\r\n self.unif[17] = alpha",
"def fill_alpha(self) -> Number:\r\n from apysc.type import value_util\r\n self._initialize_fill_alpha_if_not_initialized()\r\n fill_alpha: Number = value_util.get_copy(value=self._fill_alpha)\r\n return fill_alpha",
"def reset(self):\n self.tot = 0\n self.cnt = [0.0 for _ in range( self.alpha.getLen() )]",
"def escE(self) :\n #self.logdebug(\"RESET\")\n self.resets += 1",
"def set_decay_rate(self,alpha):\n if not isinstance(alpha,np.ndarray):\n alpha = alpha*np.ones(self.grid.Ncells())\n self.alpha=alpha",
"def unsetExponent(self):\n return _libsbml.Unit_unsetExponent(self)",
"def setExponent(self, *args):\n return _libsbml.Unit_setExponent(self, *args)",
"def setAlpha ( self, newalpha ):\n if isinstance( newalpha, int ):\n raise ValueError('Expects a float value in the [ 0.0 - 1.0 ] range!')\n if newalpha > 1.0:\n newalpha = 1.0\n if newalpha < 0.0:\n newalpha = 0.0\n self.a = newalpha\n self.hsla[3] = newalpha\n self.rgba[3] = newalpha",
"def set_alpha(self, alpha):\n if alpha < 0 or alpha > 255:\n raise ValueError(\"alpha must be betweeen 0 and 255\")\n\n self.alpha = alpha\n self.draw_alpha()",
"def alpha_mask(self, alpha):\n for mask, _ in self.masked_parameters:\n mask.fill_(alpha)",
"def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha",
"def Alpha(self, alpha):\r\n if alpha >= 0 and alpha <= 1:\r\n self._alpha = alpha\r\n else:\r\n raise ValueError(\"Alpha must be between 0 and 1.\")",
"def _initialize_fill_alpha_if_not_initialized(self) -> None:\r\n if hasattr(self, '_fill_alpha'):\r\n return\r\n self._fill_alpha = Number(1.0)",
"def reset(self):\r\n self._p = self._p_init\r\n self._r = self._r_init\r\n self._v = self._v_init\r\n self._w = self._w_init\r\n self._a = self._a_init\r\n self._alpha = self._alpha_init",
"def set_alpha(self, alpha, bm='Normal'):\n gs = self.add_ext_gstate({'ca': alpha, 'CA': alpha, 'BM': bm})\n self._set_ext_gstate(gs)",
"def reset(self):\n self.epsilon = self.epsilon_start",
"def SetAlpha(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUS_SetAlpha(self, *args)",
"def correctalpha(desiredalpha, level):\n \n correctedalpha = 1 - (1 - desiredalpha) ** (1.0 / level)\n \n return correctedalpha",
"def reset(self):\n self.epsilon = self.start",
"def reset ( self ):\n self.hex = ''\n self.r = 0.0\n self.g = 0.0\n self.b = 0.0\n self.h = 0.0\n self.s = 0.0\n self.l = 0.0\n self.a = 1.0\n self.rgb = []\n self.hsl = []\n self.rgba = []\n self.hsla = []\n return self",
"def resetOpacity(self):\n opa = (0,)\n for i in range(1,256):\n opa += (i,)\n if self._displayPjt:\n self._displayPjt.setOpacityPalette(opa)\n if self._displayUsr:\n self._displayUsr.setOpacityPalette(opa)\n if self._displayVtk:\n self._displayVtk.setOpacityPalette(opa)",
"def test_exp_decay(self, alpha: float):\n x = np.linspace(0, 1, 100)\n y = np.exp(alpha * x)\n\n alpha_guess = guess.exp_decay(x, y)\n\n self.assertAlmostEqualAbsolute(alpha_guess, alpha)",
"def dd_xpowalpha(cls,grid,alpha,cutoff=False):\n grid.l.info('bc.hom: Setting initial data to (-x)^alpha.')\n grid.l.debug('bc.hom: Parameters to dd_xpowalpha: alpha={},cutoff={}'.format(alpha,cutoff))\n if alpha is 0:\n def tmp(x): return float(x[1]<=0)\n return cls._tpl(grid, tmp) \n\n if cutoff:\n def tmp(x):\n return sum(pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n else:\n def tmp(x):\n return sum(pow(float(x[i]>=0)*x[i],alpha)-pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n return cls._tpl(grid, tmp)",
"def addExponent(self):\n\t\t# if the exponent part is not set and this number is allowed an exponent\n\t\tif(self.exponent == None and self.allowExponent):\n\t\t\t# set the exponent to another number (disallowing exponents since we can't\n\t\t\t# have an exponent with an exponent\n\t\t\tself.exponent = Number(allowExponent = False)",
"def reset(self):\n self.c_count = 0\n self.a_count = -1\n self.epsilon = self.init_epsilon",
"def reset(self):\n self.z[:] = 0",
"def power_normalize(xx, alpha = 0.5):\r\n\treturn np.sign(xx) * np.abs(xx) ** alpha"
]
| [
"0.68466955",
"0.68466955",
"0.6816263",
"0.6559019",
"0.6155329",
"0.6124676",
"0.5975661",
"0.59635854",
"0.5957908",
"0.5916377",
"0.581707",
"0.5794807",
"0.5793535",
"0.5769495",
"0.5699174",
"0.56960946",
"0.562096",
"0.5600996",
"0.55926716",
"0.55768263",
"0.55442667",
"0.5538169",
"0.55379",
"0.55188906",
"0.54728884",
"0.5453604",
"0.54527557",
"0.5425192",
"0.5398296",
"0.5395447"
]
| 0.69440967 | 0 |
you can provide a key or a session to sign the URL; if none is provided, the global Session.SECRET will be used. salt is some salt that can be used in signing if desired. variables_to_sign is a list of variables to be included in the signature. | def __init__(self, session=None, key=None, salt=b"", variables_to_sign=None):
super().__init__() # Yes, I know that this currently doesn't do anything.
self.session = session
self.key = key or Session.SECRET
self.salt = salt
self.variables_to_sign = variables_to_sign or []
assert "_signature" not in self.variables_to_sign | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign_vars(self, url, vars):\n vars[\"_signature\"] = self._sign(url, vars)",
"def signWithSecret(self, secret):\r\n self.link(secret)\r\n self.sign()",
"def _sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):\n url = self._uri\n return url, {'params': {'test_param': \"authenticated_test_user\"}}",
"def signing_base(self, request, consumer, token):\r\n sig = (\r\n escape(request.method),\r\n escape(OAuthHook.get_normalized_url(request.url)),\r\n escape(OAuthHook.get_normalized_parameters(request)),\r\n )\r\n\r\n key = '%s&' % escape(consumer.secret)\r\n if token is not None:\r\n key += escape(token.secret)\r\n raw = '&'.join(sig)\r\n return key, raw",
"def sign(self, params: Dict[str, Any]) -> str:\n\n assert self.secret is not None, \"A client secret is required to sign requests.\"\n\n query = urlencode(params)\n signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)\n\n return signature.hexdigest()",
"def make_signer(self, salt=None):\n if salt is None:\n salt = self.salt\n return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)",
"def signSignWithSecondSecret(self, secondSecret):\r\n self.link(None, secondSecret)\r\n self.signSign()",
"def get_signed(self, **payload):\n param = ''\n for k in payload:\n param += '&' + k + '=' + str(payload[k])\n param = param.lstrip('&')\n signature = hmac.new(self.secret, param, digestmod=hashlib.sha256).hexdigest()\n\n return signature",
"def signing_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"signing_secret\")",
"def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))",
"def _sign(self, oauth_payload, request):\n\t\t# merge params\n\t\t# use oauth_payload to update request params might avoid \n\t\t# some oauth params's accidental overriding\n\t\tpayload = dict( request.params )\n\t\tpayload.update( oauth_payload )\n\n\t\t# here I assume that all keys contain only 'a-zA-Z_.-'\n\t\t# thus there is no necessity to percent-encode them\n\t\t# will now sort them according to their original value\n\n\t\tkeylist = sorted( payload.keys() )\n\t\trawlist = []\n\t\tfor k in keylist:\n\t\t\tencoded_value = percent_encode( payload[k] )\n\t\t\trawlist.append( \"%s=%s\" % (k, encoded_value) )\n\n\t\t# craft base string\n\t\tbase_string = request.method.upper()\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode(request.base_url)\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode( '&'.join( rawlist ) )\n\n\t\tself._print( \"Base string:\\n\" + base_string )\n\t\t# craft signing key\n\t\tif self.has_user():\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.a_secret) )\n\t\telse:\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.token_secret) )\n\n\t\t# sign base_string\n\t\thashed = hmac.new(signing_key, base_string, hashlib.sha1)\n\t\tsignature = binascii.b2a_base64(hashed.digest())[:-1]\n\t\t\n\t\t# append signature field\n\t\toauth_payload[\"oauth_signature\"] = signature\n\n\t\t# prepare relevant oauth values\n\t\toauth_entry = []\n\t\tfor k in oauth_payload.keys():\n\t\t\tencoded_value = percent_encode( oauth_payload[k] )\n\t\t\toauth_entry.append( '%s=\"%s\"' % (k, encoded_value) )\n\n\t\toauth_str = 'OAuth ' + ','.join(oauth_entry)\n\t\tself._print( \"OAuth header:\\n\" + oauth_str )\n\t\t# field crafted\n\t\treturn { \"Authorization\" : oauth_str }",
"def _sign_request(secret, method, url, timestamp, content_hash=None):\n message = f'{timestamp}{url}{method}{content_hash}'\n\n return hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha512).hexdigest()",
"def signed_request(params):\n has_signature = False\n keys = params.keys()\n if \"signature\" in keys:\n has_signature = True\n keys.remove(\"signature\")\n keys.sort()\n if has_signature:\n keys.append(\"signature\")\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") for key in keys)\n return query_string",
"def generate_access_key(self):\n\t\tfrom app import app\n\t\ts = JSONWebSignatureSerializer(app.config['SECRET_KEY'])\n\t\taccess_key = s.dumps({'username': self.username}) \n\t\tself.access_key = access_key",
"def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'):\r\n merchant_id = settings.CC_PROCESSOR['CyberSource'].get('MERCHANT_ID', '')\r\n order_page_version = settings.CC_PROCESSOR['CyberSource'].get('ORDERPAGE_VERSION', '7')\r\n serial_number = settings.CC_PROCESSOR['CyberSource'].get('SERIAL_NUMBER', '')\r\n\r\n params['merchantID'] = merchant_id\r\n params['orderPage_timestamp'] = int(time.time() * 1000)\r\n params['orderPage_version'] = order_page_version\r\n params['orderPage_serialNumber'] = serial_number\r\n fields = u\",\".join(params.keys())\r\n values = u\",\".join([u\"{0}={1}\".format(i, params[i]) for i in params.keys()])\r\n fields_sig = processor_hash(fields)\r\n values += u\",signedFieldsPublicSignature=\" + fields_sig\r\n params[full_sig_key] = processor_hash(values)\r\n params[signed_fields_key] = fields\r\n\r\n return params",
"def gen_smg_url(urlInput,accessKey, secretKey):\n if \"?\" in urlInput:\n url = urlInput + \"&accesskey=\" + accessKey;\n else:\n url = urlInput + \"?accesskey=\" + accessKey;\n\n url = url + \"×tamp=\" + str(int(time.time())) + \"&authversion=1\" + \"&nonce=\" +str(random.randint(0,100000000000));\n\n sig = gen_sig(secretKey, url)\n return url + \"&signature=\" +urllib.parse.quote(sig);",
"def _sign(self, data, salt):\r\n strBuffer = \"\"\r\n # print data.keys()\r\n for k in sorted(data.iterkeys()):\r\n\r\n # Handle the BOOL special case\r\n v = data[k]\r\n if type(v) == bool:\r\n if v:\r\n v = 1\r\n else:\r\n v = 0\r\n data[k] = v\r\n\r\n # Update buffer\r\n strBuffer += \"%s=%s\\n\" % (str(k).lower(), vmcp.myquote(str(v)))\r\n\r\n # Append salt\r\n strBuffer += salt\r\n return strBuffer",
"def build_jspay_params(paysign_key, appid, prepay_id):\n _params = {\n \"nonceStr\": sign.random_nonce_str(32),\n \"timeStamp\": int(time.time()),\n \"package\": 'prepay_id={}'.format(prepay_id),\n \"signType\": \"MD5\",\n \"appId\": appid\n }\n _params['paySign'] = sign.sign_for_pay(paysign_key, **_params)\n return _params",
"def generate_signature(payload):\n gemini_api_secret = get_secret_key()\n t = datetime.now()\n payload[\"nonce\"] = str(int(mktime(t.timetuple())*1000) + get_nonce())\n encoded_payload = dumps(payload).encode()\n b64 = b64encode(encoded_payload)\n signature = new(gemini_api_secret, b64, sha384).hexdigest()\n update_session(\"X-GEMINI-PAYLOAD\", b64)\n update_session(\"X-GEMINI-SIGNATURE\", signature)\n increment_nonce()",
"def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))",
"def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret",
"def gmap_secret_key():\r\n try:\r\n from django.conf import settings\r\n except ImportError:\r\n return ''\r\n return settings.GMAP_SECRET_KEY",
"def gen_site_secret(self, request, site_id=None, salt='', **kw):\n if site_id is None:\n site_id = self.gen_site_id(request)\n if site_id is None:\n return ''\n \"\"\" Generate site + uid specific secret \"\"\"\n secret_base = site_id + salt\n return sha1(secret_base).hexdigest()",
"def presigned_url():\n return 'https://presignedurl.test.com'",
"def get_fernet_key(app: Sanic, passphrase: str) -> bytes:\n salted = (passphrase + app.secret_key).encode()\n key = hashlib.sha256(salted).digest()[:32]\n return base64.urlsafe_b64encode(key)",
"def getUrlSign(self, var1, var2, var3, var4):\n var1.reset()\n var1.update(self.b)\n return var1.digest(var2, var3, var4)",
"def client_secret(self) -> str:",
"def login(\n access_key_id: str, secret_access_key: str, region: str = AWS_DEFAULT_REGION\n) -> None:\n os.environ[\"AWS_ACCESS_KEY_ID\"] = access_key_id\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = secret_access_key\n os.environ[\"AWS_DEFAULT_REGION\"] = region",
"def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()",
"def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\""
]
| [
"0.62953484",
"0.5759323",
"0.5612654",
"0.5488564",
"0.5365722",
"0.53199714",
"0.527087",
"0.5191166",
"0.5183228",
"0.5169085",
"0.5157569",
"0.51418686",
"0.5059225",
"0.500007",
"0.49755123",
"0.4961267",
"0.49414334",
"0.49344224",
"0.49244606",
"0.49146283",
"0.49065232",
"0.4882469",
"0.48581496",
"0.48476943",
"0.48438534",
"0.48154494",
"0.47875318",
"0.47792345",
"0.47539514",
"0.47503495"
]
| 0.6941905 | 0 |
Gets the signing key, creating it if necessary. | def _get_key(self):
if not self.session:
key = self.key
else:
key = self.session.get("_signature_key")
if key is None:
key = str(uuid.uuid1())
self.session["_signature_key"] = key
return key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_synapse_signing_key(self):\n if not path.exists(self.synapse_signing_key_file):\n key_id = \"a_\" + self.random_string(4)\n key_content = generate_signing_key(key_id)\n with open(self.synapse_signing_key_file, \"w+\") as key_file:\n write_signing_keys(key_file, (key_content,))\n return self.synapse_signing_key_file",
"def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n )\n return key",
"def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)",
"def public_signing_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"public_signing_key\")",
"def generate_key_and_cert():\n signing_key = rsa.generate_private_key(backend=crypto_default_backend(), public_exponent=65537, key_size=2048)\n subject = issuer = x509.Name(\n [\n x509.NameAttribute(NameOID.COUNTRY_NAME, 'NO'),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'OSLO'),\n x509.NameAttribute(NameOID.LOCALITY_NAME, 'OSLO'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'Intility AS'),\n x509.NameAttribute(NameOID.COMMON_NAME, 'intility.no'),\n ]\n )\n signing_cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(issuer)\n .public_key(signing_key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(datetime.utcnow())\n .not_valid_after(\n # Our certificate will be valid for 10 days\n datetime.utcnow()\n + timedelta(days=10)\n # Sign our certificate with our private key\n )\n .sign(signing_key, hashes.SHA256(), crypto_default_backend())\n .public_bytes(crypto_serialization.Encoding.DER)\n )\n return signing_key, signing_cert",
"def generate_key(self, **options):\n\n return security_utils_services.generate_rsa_key(**options)",
"def get_public_key(self):\n return self.private_key.get_verifying_key()",
"def _get_key():\n conn = boto.connect_s3()\n bucket = conn.create_bucket(settings.MESSY_BUCKET)\n key = Key(bucket)\n key.key = settings.MESSY_KEY\n return key",
"def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile",
"def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )",
"def create_key ():",
"def _get_encryption_key(self, **options):\n\n return self._public_key",
"def _gen_key(self):\n\n input_data = self._gpg.gen_key_input(key_type=\"RSA\",\n key_length=self.key_length, name_real=self.name,\n name_comment=self.comment, name_email=self.email)\n\n log.info(\"Generating key: (%s)\" % input_data)\n\n self.key = self._gpg.gen_key(input_data)",
"def get_key():\n try:\n return settings.get('backend')['secret_key']\n except AttributeError:\n raise AuthTokenGenerationException()",
"def gen_key(self):\n\n if not self.private_key:\n self._gen_key()\n else:\n raise CryptoError(\"Private Key already existing\")",
"def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))",
"def get_private_key(self) -> str:\n raise NotImplementedError(\"Please implement your own get_public_key() method\")",
"def generate_key():\n return get_token_generator().generate_token()",
"def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()",
"def get_or_create_key_name(self, gen_key=True):\n keyname = self.extract_key_name()\n if keyname:\n self.report(f'found keyname: {keyname}')\n elif gen_key:\n keyname = self.generate_key()\n self.report(f'generated key: {keyname}')\n else:\n print(f'gpg key for debrepo was not found for user {self.user}. '\n 'please use $0 generate_key, then try this command again')\n self.report('no keyname')\n keyname = None\n return keyname",
"def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key",
"def create_key() -> RSA.RsaKey:\n\n return RSA.generate(1024, Crypto.Random.new().read)",
"def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")",
"def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None",
"def gen_key(self, key):\n b_key = self._hash_digest(key)\n return self._hash_val(b_key, lambda x: x)",
"def get_key(self):\r\n return self.__encryption_key",
"def build(self, signing_private_key):\n\n is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)\n if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:\n raise TypeError(_pretty_message(\n '''\n signing_private_key must be an instance of\n asn1crypto.keys.PrivateKeyInfo or\n oscrypto.asymmetric.PrivateKey, not %s\n ''',\n _type_name(signing_private_key)\n ))\n\n if self._self_signed is not True and self._issuer is None:\n raise ValueError(_pretty_message(\n '''\n Certificate must be self-signed, or an issuer must be specified\n '''\n ))\n\n if self._self_signed:\n self._issuer = self._subject\n\n if self._serial_number is None:\n time_part = int_to_bytes(int(time.time()))\n random_part = util.rand_bytes(4)\n self._serial_number = int_from_bytes(time_part + random_part)\n\n if self._begin_date is None:\n self._begin_date = datetime.now(timezone.utc)\n\n if self._end_date is None:\n self._end_date = self._begin_date + timedelta(365)\n\n if not self.ca:\n for ca_only_extension in set(['policy_mappings', 'policy_constraints', 'inhibit_any_policy']):\n if ca_only_extension in self._other_extensions:\n raise ValueError(_pretty_message(\n '''\n Extension %s is only valid for CA certificates\n ''',\n ca_only_extension\n ))\n\n signature_algo = signing_private_key.algorithm\n if signature_algo == 'ec':\n signature_algo = 'ecdsa'\n\n signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)\n\n # RFC 3280 4.1.2.5\n def _make_validity_time(dt):\n if dt < datetime(2050, 1, 1, tzinfo=timezone.utc):\n value = x509.Time(name='utc_time', value=dt)\n else:\n value = x509.Time(name='general_time', value=dt)\n\n return value\n\n def _make_extension(name, value):\n return {\n 'extn_id': name,\n 'critical': self._determine_critical(name),\n 'extn_value': value\n }\n\n extensions = []\n for name in sorted(self._special_extensions):\n value = getattr(self, '_%s' % name)\n if name == 'ocsp_no_check':\n value = core.Null() if value else None\n if value is not None:\n extensions.append(_make_extension(name, value))\n\n for name in sorted(self._other_extensions.keys()):\n extensions.append(_make_extension(name, self._other_extensions[name]))\n\n tbs_cert = x509.TbsCertificate({\n 'version': 'v3',\n 'serial_number': self._serial_number,\n 'signature': {\n 'algorithm': signature_algorithm_id\n },\n 'issuer': self._issuer,\n 'validity': {\n 'not_before': _make_validity_time(self._begin_date),\n 'not_after': _make_validity_time(self._end_date),\n },\n 'subject': self._subject,\n 'subject_public_key_info': self._subject_public_key,\n 'extensions': extensions\n })\n\n if signing_private_key.algorithm == 'rsa':\n sign_func = asymmetric.rsa_pkcs1v15_sign\n elif signing_private_key.algorithm == 'dsa':\n sign_func = asymmetric.dsa_sign\n elif signing_private_key.algorithm == 'ec':\n sign_func = asymmetric.ecdsa_sign\n\n if not is_oscrypto:\n signing_private_key = asymmetric.load_private_key(signing_private_key)\n signature = sign_func(signing_private_key, tbs_cert.dump(), self._hash_algo)\n\n return x509.Certificate({\n 'tbs_certificate': tbs_cert,\n 'signature_algorithm': {\n 'algorithm': signature_algorithm_id\n },\n 'signature_value': signature\n })",
"def _generate_s3_key(self, prefix):\r\n conn = S3Connection(\r\n settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"AWS_ACCESS_KEY\"],\r\n settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"AWS_SECRET_KEY\"]\r\n )\r\n bucket = conn.get_bucket(settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"S3_BUCKET\"])\r\n\r\n key = Key(bucket)\r\n key.key = \"{}/{}\".format(prefix, self.receipt_id)\r\n\r\n return key",
"def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature",
"def generate_key(self):\n cmd = self.generate_key_cmd()\n self.show(cmd)\n if self.dryrun:\n return None\n s, _, _ = self.as_user(cmd)\n assert s == 0, ('failed to generate key', cmd)\n keyname = self.extract_key_name()\n return keyname"
]
| [
"0.6825752",
"0.6528289",
"0.65215135",
"0.6436934",
"0.6363638",
"0.631877",
"0.62843007",
"0.6236405",
"0.6234332",
"0.6222451",
"0.61797965",
"0.6166999",
"0.61510545",
"0.6143897",
"0.6137727",
"0.6134392",
"0.60274917",
"0.59892124",
"0.5976278",
"0.59609246",
"0.59529155",
"0.5930827",
"0.5916447",
"0.5912388",
"0.58853996",
"0.58749884",
"0.58730674",
"0.5871472",
"0.5866205",
"0.58557457"
]
| 0.66602325 | 1 |
Signs a URL, adding to vars (the variables of the URL) a signature. | def sign_vars(self, url, vars):
vars["_signature"] = self._sign(url, vars) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign_url(path, expiration, account_email, keytext,\n verb='GET', content_type='', content_md5=''):\n private_key = RSA.importKey(keytext)\n if not path.startswith('/'):\n path = '/'+path\n base_url = '%s%s' % (GCS_API_ENDPOINT, path)\n string_to_sign = SIGNATURE_STRING.format(verb=verb,\n content_md5=content_md5,\n content_type=content_type,\n expiration=expiration,\n resource=path)\n print(\"string to sign:\", string_to_sign)\n signature_signed = base64sign(string_to_sign, private_key)\n query_params = {'GoogleAccessId': account_email,\n 'Expires': str(expiration),\n 'Signature': signature_signed}\n return base_url+'?'+urllib.parse.urlencode(query_params)",
"def sign_url(self, url, expiration=None):\n if not expiration:\n expiration = self._s3_presigned_url_expiration\n\n bucket, key = self.split_url(url)\n url = self.client.generate_presigned_url(\n 'get_object',\n ExpiresIn=int(expiration),\n Params={\n 'Bucket': bucket,\n 'Key': key\n }\n )\n\n return url",
"def _sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):\n url = self._uri\n return url, {'params': {'test_param': \"authenticated_test_user\"}}",
"def _sign_request(secret, method, url, timestamp, content_hash=None):\n message = f'{timestamp}{url}{method}{content_hash}'\n\n return hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha512).hexdigest()",
"def create_signed_url(self, url, keypair_id,\r\n expire_time=None, valid_after_time=None,\r\n ip_address=None, policy_url=None,\r\n private_key_file=None, private_key_string=None):\r\n # Get the required parameters\r\n params = self._create_signing_params(\r\n url=url, keypair_id=keypair_id, expire_time=expire_time,\r\n valid_after_time=valid_after_time, ip_address=ip_address,\r\n policy_url=policy_url, private_key_file=private_key_file,\r\n private_key_string=private_key_string)\r\n\r\n #combine these into a full url\r\n if \"?\" in url:\r\n sep = \"&\"\r\n else:\r\n sep = \"?\"\r\n signed_url_params = []\r\n for key in [\"Expires\", \"Policy\", \"Signature\", \"Key-Pair-Id\"]:\r\n if key in params:\r\n param = \"%s=%s\" % (key, params[key])\r\n signed_url_params.append(param)\r\n signed_url = url + sep + \"&\".join(signed_url_params)\r\n return signed_url",
"def _sign(self, oauth_payload, request):\n\t\t# merge params\n\t\t# use oauth_payload to update request params might avoid \n\t\t# some oauth params's accidental overriding\n\t\tpayload = dict( request.params )\n\t\tpayload.update( oauth_payload )\n\n\t\t# here I assume that all keys contain only 'a-zA-Z_.-'\n\t\t# thus there is no necessity to percent-encode them\n\t\t# will now sort them according to their original value\n\n\t\tkeylist = sorted( payload.keys() )\n\t\trawlist = []\n\t\tfor k in keylist:\n\t\t\tencoded_value = percent_encode( payload[k] )\n\t\t\trawlist.append( \"%s=%s\" % (k, encoded_value) )\n\n\t\t# craft base string\n\t\tbase_string = request.method.upper()\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode(request.base_url)\n\t\tbase_string += '&'\n\t\tbase_string += percent_encode( '&'.join( rawlist ) )\n\n\t\tself._print( \"Base string:\\n\" + base_string )\n\t\t# craft signing key\n\t\tif self.has_user():\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.a_secret) )\n\t\telse:\n\t\t\tsigning_key = \"%s&%s\" % ( percent_encode(self.secret), percent_encode(self.token_secret) )\n\n\t\t# sign base_string\n\t\thashed = hmac.new(signing_key, base_string, hashlib.sha1)\n\t\tsignature = binascii.b2a_base64(hashed.digest())[:-1]\n\t\t\n\t\t# append signature field\n\t\toauth_payload[\"oauth_signature\"] = signature\n\n\t\t# prepare relevant oauth values\n\t\toauth_entry = []\n\t\tfor k in oauth_payload.keys():\n\t\t\tencoded_value = percent_encode( oauth_payload[k] )\n\t\t\toauth_entry.append( '%s=\"%s\"' % (k, encoded_value) )\n\n\t\toauth_str = 'OAuth ' + ','.join(oauth_entry)\n\t\tself._print( \"OAuth header:\\n\" + oauth_str )\n\t\t# field crafted\n\t\treturn { \"Authorization\" : oauth_str }",
"def sign_request(self, host, endpoint, params, headers, method, payload=\"\", time=time):\n\n request_date = time.strftime('%Y%m%dT%H%M%SZ', time.gmtime())\n\n signature = self.calculate_signature(request_date, host, endpoint, params, headers, method, payload, time)\n\n canonical_query = [\n aws_quote(param) + '=' + aws_quote(params[param])\n for param in sorted(params.keys())\n ]\n canonical_query = '&'.join(canonical_query)\n\n return 'http://{host}{endpoint}?{query}&X-Amz-Signature={signature}'.format(\n host=host, endpoint=endpoint, query=canonical_query, signature=aws_quote(signature))",
"def signed_request(params):\n has_signature = False\n keys = params.keys()\n if \"signature\" in keys:\n has_signature = True\n keys.remove(\"signature\")\n keys.sort()\n if has_signature:\n keys.append(\"signature\")\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") for key in keys)\n return query_string",
"def url(self, url):\n return self.presigned_url(url)",
"def getUrlSign(self, var1, var2, var3, var4):\n var1.reset()\n var1.update(self.b)\n return var1.digest(var2, var3, var4)",
"def _sign(self, path, nonce, data):\n url = '/{0}/{1}'.format(self._version, path)\n urlencoded_data = urllib.urlencode(data)\n msg = url + hashlib.sha256(str(nonce) + urlencoded_data).digest()\n signature = hmac.new(base64.b64decode(self._secret), msg,\n hashlib.sha512)\n return base64.b64encode(signature.digest())",
"def gen_smg_url(urlInput,accessKey, secretKey):\n if \"?\" in urlInput:\n url = urlInput + \"&accesskey=\" + accessKey;\n else:\n url = urlInput + \"?accesskey=\" + accessKey;\n\n url = url + \"×tamp=\" + str(int(time.time())) + \"&authversion=1\" + \"&nonce=\" +str(random.randint(0,100000000000));\n\n sig = gen_sig(secretKey, url)\n return url + \"&signature=\" +urllib.parse.quote(sig);",
"def __sign_POST(self, api_url, params, timeout):\r\n sign_str = ''\r\n for key in sorted(params.keys()):\r\n _ = '&' + key + '=' + str(params[key])\r\n sign_str += _\r\n payload_str = 'POST' + '&' + api_url + sign_str\r\n signature = hmac.new(bytes(self.secret, encoding='utf-8'), bytes(payload_str, encoding='utf-8'), digestmod=hashlib.sha256).hexdigest()\r\n params['sign'] = signature\r\n url = self.__base_url + api_url\r\n try:\r\n r = requests.post(url,data=params, timeout=timeout)\r\n r.raise_for_status()\r\n except ReadTimeout:\r\n print(\"post timeout\")\r\n return\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n return\r\n if r.status_code == 200:\r\n return r.json()",
"def _oauth_sign(self, url, body, content_type=u'application/x-www-form-urlencoded', method=u'POST'):\r\n client_key = self.server.config.get('client_key', self.DEFAULT_CLIENT_KEY)\r\n client_secret = self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET)\r\n client = oauthlib.oauth1.Client(\r\n client_key=unicode(client_key),\r\n client_secret=unicode(client_secret)\r\n )\r\n headers = {\r\n # This is needed for body encoding:\r\n 'Content-Type': content_type,\r\n }\r\n\r\n # Calculate and encode body hash. See http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html\r\n sha1 = hashlib.sha1()\r\n sha1.update(body)\r\n oauth_body_hash = unicode(base64.b64encode(sha1.digest())) # pylint: disable=too-many-function-args\r\n params = client.get_oauth_params()\r\n params.append((u'oauth_body_hash', oauth_body_hash))\r\n mock_request = mock.Mock(\r\n uri=unicode(urllib.unquote(url)),\r\n headers=headers,\r\n body=u\"\",\r\n decoded_body=u\"\",\r\n oauth_params=params,\r\n http_method=unicode(method),\r\n )\r\n sig = client.get_oauth_signature(mock_request)\r\n mock_request.oauth_params.append((u'oauth_signature', sig))\r\n new_headers = parameters.prepare_headers(mock_request.oauth_params, headers, realm=None)\r\n return new_headers['Authorization']",
"def get_secure_link(url, key, expire=60, t=None):\n if \"?\" in url:\n url += \"&\"\n else:\n url += \"?\"\n if t is None:\n t = int(time.time())\n\n expire += t\n url += \"e=\" + str(expire)\n s = hmac.new(key.encode(), url.encode(), hashlib.sha256).digest()\n return url + \"&s=\" + base64.b64encode(s, b\"-_\").decode().rstrip(\"=\")",
"def __sign(self, request_type, endpoint, content=None):\n\t\trequest = request_type + \"\\n\" + endpoint + \"\\n\" + content\n\t\tmac = hmac.new(\n\t\t\tself.api_secret.encode('utf-8'),\n\t\t\trequest.encode('utf-8'),\n\t\t\tdigestmod=hashlib.sha256\n\t\t).hexdigest()\n\t\treturn base64.b64encode(mac.encode('utf-8'))",
"def build_signature(method, url, oauth_params, params={}):\n\t# Copy params to prevent modification from original params\n\tall_params = copy.deepcopy(oauth_params)\n\t# Combine OAuth parameters and original parameters\n\tall_params.update(params)\n\t# Sort, stringify, and encode all parameters\n\tkeys = sorted(all_params.keys())\n\tencoded_params = ''\n\tfor key in keys:\n\t\tencoded_params += key+'='+percent_encode(str(all_params[key]))+'&'\n\tencoded_params = encoded_params[:-1]\n\tbase_string = method.upper()+'&'+percent_encode(url)+'&'+percent_encode(encoded_params)\n\t# Request crypt calculation to the server and return caluculated value\n\tcalc_url = 'https://www.ryotosaito.com/shielld/calc_signature.php'\n\toauth_token_secret = users[user_name]['oauth_token_secret'] if user_name in users else ''\n\tparams = {'base_string' : base_string, 'oauth_token_secret' : oauth_token_secret}\n\trequest = requests.post(calc_url, params);\n\treturn request.text",
"def signWithSecret(self, secret):\r\n self.link(secret)\r\n self.sign()",
"def sign(self, params: Dict[str, Any]) -> str:\n\n assert self.secret is not None, \"A client secret is required to sign requests.\"\n\n query = urlencode(params)\n signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)\n\n return signature.hexdigest()",
"def sign(self, payload):\n raise NotImplementedError",
"def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'):\r\n merchant_id = settings.CC_PROCESSOR['CyberSource'].get('MERCHANT_ID', '')\r\n order_page_version = settings.CC_PROCESSOR['CyberSource'].get('ORDERPAGE_VERSION', '7')\r\n serial_number = settings.CC_PROCESSOR['CyberSource'].get('SERIAL_NUMBER', '')\r\n\r\n params['merchantID'] = merchant_id\r\n params['orderPage_timestamp'] = int(time.time() * 1000)\r\n params['orderPage_version'] = order_page_version\r\n params['orderPage_serialNumber'] = serial_number\r\n fields = u\",\".join(params.keys())\r\n values = u\",\".join([u\"{0}={1}\".format(i, params[i]) for i in params.keys()])\r\n fields_sig = processor_hash(fields)\r\n values += u\",signedFieldsPublicSignature=\" + fields_sig\r\n params[full_sig_key] = processor_hash(values)\r\n params[signed_fields_key] = fields\r\n\r\n return params",
"def _create_signing_params(self, url, keypair_id,\r\n expire_time=None, valid_after_time=None,\r\n ip_address=None, policy_url=None,\r\n private_key_file=None, private_key_string=None):\r\n params = {}\r\n # Check if we can use a canned policy\r\n if expire_time and not valid_after_time and not ip_address and not policy_url:\r\n # we manually construct this policy string to ensure formatting\r\n # matches signature\r\n policy = self._canned_policy(url, expire_time)\r\n params[\"Expires\"] = str(expire_time)\r\n else:\r\n # If no policy_url is specified, default to the full url.\r\n if policy_url is None:\r\n policy_url = url\r\n # Can't use canned policy\r\n policy = self._custom_policy(policy_url, expires=None,\r\n valid_after=None,\r\n ip_address=None)\r\n encoded_policy = self._url_base64_encode(policy)\r\n params[\"Policy\"] = encoded_policy\r\n #sign the policy\r\n signature = self._sign_string(policy, private_key_file, private_key_string)\r\n #now base64 encode the signature (URL safe as well)\r\n encoded_signature = self._url_base64_encode(signature)\r\n params[\"Signature\"] = encoded_signature\r\n params[\"Key-Pair-Id\"] = keypair_id\r\n return params",
"def gcs_url(keys, path, verb='GET', expiration_secs=1000, content_type=''):\n expiration = int(time.time() + expiration_secs)\n signed_url = sign_url(path, verb=verb, expiration = expiration,\n content_type=content_type,\n account_email=keys['client_email'],\n keytext=keys['private_key']\n )\n return signed_url",
"def sign(filename: str) -> str:\n fs, relative_path = url_to_fs(filename)\n try:\n return cast(str, fs.sign(relative_path))\n except NotImplementedError:\n return filename",
"def generate_signature(secret, verb, url, nonce, data):\n # Parse the url so we can remove the base and extract just the path.\n parsedURL = urllib.parse.urlparse(url)\n path = parsedURL.path\n if parsedURL.query:\n path = path + '?' + parsedURL.query\n\n # print \"Computing HMAC: %s\" % verb + path + str(nonce) + data\n message = (verb + path + str(nonce) + data).encode('utf-8')\n\n signature = hmac.new(secret.encode('utf-8'), message,\n digestmod=hashlib.sha256).hexdigest()\n return signature",
"def sign(self, body, external_aad, private_key):",
"def _generate_signed_headers(url: str,\n *,\n username: str = None,\n api_key: bytes = None,\n headers: dict = None\n ) -> dict:\n\n if url is None:\n raise ValueError(\"url is required.\")\n if headers is None:\n headers = dict(SYNAPSE_DEFAULT_HTTP_HEADERS)\n\n headers = _enforce_user_agent(headers)\n\n if username is None or api_key is None:\n return headers\n\n sig_timestamp = time.strftime(ISO_FORMAT, time.gmtime())\n url = urllib_parse.urlparse(url).path\n sig_data = username + url + sig_timestamp\n signature = base64.b64encode(hmac.new(api_key,\n sig_data.encode('utf-8'),\n hashlib.sha1).digest())\n headers.update({SYNAPSE_USER_ID_HEADER: username,\n SYNAPSE_SIGNATURE_TIMESTAMP_HEADER: sig_timestamp,\n SYNAPSE_SIGNATURE_HEADER: signature})\n return headers",
"def signing_base(self, request, consumer, token):\r\n sig = (\r\n escape(request.method),\r\n escape(OAuthHook.get_normalized_url(request.url)),\r\n escape(OAuthHook.get_normalized_parameters(request)),\r\n )\r\n\r\n key = '%s&' % escape(consumer.secret)\r\n if token is not None:\r\n key += escape(token.secret)\r\n raw = '&'.join(sig)\r\n return key, raw",
"def signrequest(self, signrequest):\n\n self._signrequest = signrequest",
"def generate_signature(cls, secret, verb, url, nonce, data):\n # Parse the url so we can remove the base and extract just the path.\n parsedURL = urlparse(url)\n path = parsedURL.path\n if parsedURL.query:\n path = path + '?' + parsedURL.query\n\n # print \"Computing HMAC: %s\" % verb + path + str(nonce) + data\n message = verb + path + str(nonce) + data\n\n signature = hmac.new(bytes(secret, 'utf8'), bytes(message, 'utf8'), digestmod=hashlib.sha256).hexdigest()\n return signature"
]
| [
"0.6500728",
"0.64656204",
"0.635007",
"0.63115203",
"0.5918391",
"0.59115523",
"0.58970284",
"0.58378965",
"0.57895446",
"0.57237625",
"0.56381124",
"0.56150156",
"0.56130207",
"0.55921096",
"0.55717814",
"0.5525691",
"0.54901725",
"0.5430866",
"0.54270196",
"0.5400615",
"0.53806776",
"0.53787166",
"0.53633213",
"0.5355734",
"0.53245836",
"0.5320151",
"0.5308291",
"0.5303122",
"0.52981305",
"0.52860457"
]
| 0.8386482 | 0 |
Mark given class as the entity for User. | def register_user(self, cls):
return self.register_entity('user', cls) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_user(self, user):\n self._user = user",
"def set_user(self, user):\r\n self.user = user",
"def user_cls(self):\n return self.get_entity_cls('user')",
"def user(self, user):\n self.user_id = user.get_id()",
"def set_user(self, user: User):\n self.__user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def set_as_type_user(self):\n self.type = MessageTypes.USER",
"def set(self, **kwargs: Any) -> None: # nosec\n attributes = {}\n user_id: int = int(kwargs[\"user_id\"])\n user = self.first(id_int=user_id)\n\n for k, v in kwargs.items():\n if k in user.__attr_searchable__:\n attributes[k] = v\n\n if kwargs.get(\"email\", None):\n user.email = kwargs[\"email\"]\n elif kwargs.get(\"role\", None):\n user.role = kwargs[\"role\"]\n elif kwargs.get(\"name\", None):\n user.name = kwargs[\"name\"]\n elif kwargs.get(\"budget\", None):\n user.budget = kwargs[\"budget\"]\n elif kwargs.get(\"website\", None):\n user.website = kwargs[\"website\"]\n elif kwargs.get(\"institution\", None):\n user.institution = kwargs[\"institution\"]\n else:\n raise Exception\n\n attributes[\"__blob__\"] = _serialize(user, to_bytes=True)\n\n self.update_one(query={\"id_int\": user_id}, values=attributes)",
"def save_user(self):\n db.session.add(self)\n db.session.commit()",
"def save_user(self):\n\n User.user_list.append(self)",
"def save_user(self):\n User.user_list.append(self)",
"def save_user(self):\n User.user_list.append(self)",
"def save_user(self):\n\n User.user_list.append(self)",
"def set_user(self, user_model):\n\n self.user_model = user_model\n return self",
"def setUser(self, value):\n return self._set(user=value)",
"def entity(self, entity):\n\n self._entity = entity",
"def _class(self, _class):\n\n self.__class = _class",
"def _class(self, _class):\n\n self.__class = _class",
"def set_user_attribute(self, key, val):\n self._user_attributes[key] = val",
"def setCurrentUser(self, provider):\n pass",
"def id_user(self, id_user):\n\n self._id_user = id_user",
"def set(isamAppliance, name, user_name, type='embedded_ldap', check_mode=False, force=False):\n new_user = True\n ret_obj = ibmsecurity.isam.base.management_authorization.role.get(isamAppliance, name)\n\n if (ret_obj['data']['users'] == None):\n ret_obj['data']['users'] = []\n else:\n for usr in ret_obj['data']['users']:\n if usr['name'] == user_name:\n if usr['type'] == type:\n if force is False:\n return isamAppliance.create_return_object()\n new_user = False\n else: # Replace user with new type\n ret_obj['data']['users'].remove(usr)\n break\n\n if new_user is True:\n ret_obj['data']['users'].append({'name': user_name, 'type': type})\n\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put(\n \"Add user to management authorization role\",\n \"/authorization/roles/{0}/v1\".format(name), ret_obj['data'])"
]
| [
"0.6509538",
"0.6496744",
"0.6126291",
"0.6119982",
"0.61141914",
"0.60054684",
"0.60054684",
"0.60054684",
"0.60054684",
"0.60054684",
"0.60054684",
"0.60054684",
"0.60054684",
"0.60054684",
"0.5991271",
"0.59332556",
"0.5928648",
"0.5897934",
"0.58634055",
"0.58634055",
"0.5802914",
"0.57995355",
"0.57876736",
"0.57670957",
"0.56959563",
"0.56959563",
"0.56914127",
"0.560517",
"0.5603847",
"0.5586908"
]
| 0.74358714 | 0 |
Mark given class as the entity for Permission. | def register_permission(self, cls):
return self.register_entity('permission', cls) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def permission_cls(self):\n return self.get_entity_cls('permission')",
"def permission(self, permission):\n\n self._permission = permission",
"def permission(self, permission):\n\n self._permission = permission",
"def set_permission(\n permission: Permission,\n user_or_group: UserOrGroup,\n obj: models.Model,\n):\n # First perform basic checks on the object itself.\n if not model_has_permissions(obj):\n raise RuntimeError(\n f\"There is no support for permissions on object of type {obj._meta.label}.\"\n )\n obj.set_permission(permission, user_or_group)",
"def set_permission(sender, instance, created, **kwargs):\n if created:\n assign_perm(\n \"website.can_see\",\n instance.author,\n instance,\n )",
"def update_object_permissions(self, agent, Field, Set, Mask, Override = False):\n\n self.send_ObjectPermissions(agent, agent.agent_id, agent.session_id, Field, Set, Mask, Override)",
"def apply_perm(permission_name: Optional[str], entity: UserOrGroup):\n try:\n permission = Permission.from_name(permission_name or \"none\")\n except KeyError:\n raise exceptions.ParseError(f\"Unknown permission: {permission_name}\")\n\n obj.set_permission(permission, entity)",
"def assign_perm(self, permission, user, obj, ctype=None):\n if getattr(obj, 'pk', None) is None:\n raise ObjectNotPersisted(\"Object %s needs to be persisted first\" % obj)\n\n if not ctype:\n ctype = ContentType.objects.get_for_model(obj)\n\n if not isinstance(permission, Permission):\n permission = Permission.objects.get(content_type=ctype, codename=permission)\n\n obj_perm, created = self.get_or_create(\n content_type=ctype,\n permission=permission,\n object_pk=obj.pk,\n user=user)\n return obj_perm",
"def manage_setPermissionMapping(self,\n permission_names=[],\n class_permissions=[],\n REQUEST=None):\n wrapper = getattr(self, '_permissionMapper', None)\n if wrapper is None:\n wrapper = PM()\n\n perms = self.possible_permissions()\n for i in range(len(permission_names)):\n name = permission_names[i]\n p = class_permissions[i]\n if p and (p not in perms):\n __traceback_info__ = perms, p, i\n raise ValueError(\n \"\"\"Attempted to map a permission to a permission, %s,\n that is not valid. This should never happen. (Waaa).\n \"\"\" % escape(p))\n setPermissionMapping(name, wrapper, p)\n\n self._permissionMapper = wrapper\n\n if REQUEST is not None:\n return self.manage_access(\n REQUEST,\n manage_tabs_message='The permission mapping has been updated')",
"def entity(self, entity):\n\n self._entity = entity",
"def _class(self, _class):\n\n self.__class = _class",
"def _class(self, _class):\n\n self.__class = _class",
"def set_permission(StackId=None, IamUserArn=None, AllowSsh=None, AllowSudo=None, Level=None):\n pass",
"def assign_permissions(sender, instance, created, **kwargs):\n if created:\n assign_perm('view_strand', instance.owner.group, instance)\n assign_perm('change_strand', instance.saver, instance)\n assign_perm('delete_strand', instance.saver, instance)\n assign_perm('view_strand', instance.saver, instance)",
"def add_permission(self, permission: str):\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])",
"def google_account_permission(self, google_account_permission):\n\n self._google_account_permission = google_account_permission",
"def has_change_permission(self, request, obj=None):\n has_class_permission = super(EntryAdmin, self).has_change_permission(request, obj)\n if not has_class_permission:\n return False\n if obj is not None and not request.user.is_superuser and request.user.id != obj.author.id:\n return False\n return True",
"def changePermissions(self, event):\n pass",
"def save(self, *args, **kwargs):\n self.entity_type = \"Person\"\n super().save(*args, **kwargs)",
"def update(self, sid, permission, **kwargs):\n kwargs['permission'] = permission\n return self.update_instance(sid, kwargs)",
"def grant_permission(self, role, permission):\n return permissions.utils.grant_permission(self, role, permission)",
"def register_user(self, cls):\n return self.register_entity('user', cls)",
"def update(self, permission, **kwargs):\n kwargs['permission'] = permission\n return self.update_instance(**kwargs)",
"def test_change_permission_with_any(self):\n permission_logic = self.permission_logic_class(\n any_permission=True\n )\n add_permission_logic(Article, permission_logic)\n self._auto_test_permission('change')\n self._auto_test_permission('change', obj=True)\n remove_permission_logic(Article, permission_logic)",
"def set_role(self, group, role):\n self.permissions[group] = roles[role]",
"def permits(identity, obj, permission):\n return False",
"def on_permission_after_insert(\n self, mapper: Mapper, connection: Connection, target: Permission\n ) -> None:",
"def register_class(self, entity_class):\n key = entity_class.__collection_name__\n\n if key not in self._registered_types:\n self._registered_types[key] = entity_class",
"def setpermission(self, context=None, componentid=None, app=None, permissions={}):\n return jsoncall.do_call(\"setpermission\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context': context,\\\n 'componentid': componentid,\\\n 'app': app,\\\n 'permissions': permissions},\n self.connection)",
"def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")"
]
| [
"0.6191563",
"0.5956788",
"0.5956788",
"0.59558314",
"0.58906794",
"0.5868287",
"0.5775181",
"0.5672249",
"0.5669219",
"0.5610798",
"0.55715984",
"0.55715984",
"0.54696345",
"0.53542596",
"0.52936137",
"0.528369",
"0.5257457",
"0.5256102",
"0.524391",
"0.51908153",
"0.5171996",
"0.5161644",
"0.5161633",
"0.51515794",
"0.5124615",
"0.51226735",
"0.5116945",
"0.51034147",
"0.50935733",
"0.5092053"
]
| 0.7145781 | 0 |
Mark given class as the entity for Bundle. | def register_bundle(self, cls):
return self.register_entity('bundle', cls) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bundle(class_: Type[T]) -> Type[T]:\n namespace = OrderedDict()\n for attr in dir(class_):\n if not attr.startswith(\"_\") and attr != \"metadata\":\n attr_object = getattr(class_, attr)\n namespace[attr] = attr_object\n return BundleMeta(class_.__name__, (), namespace) # noqa",
"def register_class(self, entity_class):\n key = entity_class.__collection_name__\n\n if key not in self._registered_types:\n self._registered_types[key] = entity_class",
"def entity(self, entity):\n\n self._entity = entity",
"def bundle_cls(self):\n return self.get_entity_cls('bundle')",
"def _class(self, _class):\n\n self.__class = _class",
"def _class(self, _class):\n\n self.__class = _class",
"def set_entity_class_registry(self, entity_class_registry):\n self.entity_class_registry = entity_class_registry",
"def register(cls, class_):\n cls._registered[class_.tag()] = class_",
"def save(self, *args, **kwargs):\n self.entity_type = \"Person\"\n super().save(*args, **kwargs)",
"def extend_entity(cls):\n # Generate the corresponding tag entity\n tag_entity = f\"{cls.__name__}Tag\"\n plural = f\"{cls.__name__.lower()}s\"\n fields = {\n \"_table\": tag_entity,\n plural: Set(cls.__name__),\n \"objects\": property(lambda t: getattr(t, plural, [])),\n }\n #fields['_indexes_'] = [Index(fields['first_name'],fields['last_name'],is_pk=False,is_unique=False)]\n entity = type(tag_entity, (Tag, ), fields)\n cls.db_tags = Set(tag_entity)\n cls.tags = lazy_property(_get_tag_handler)",
"def bundle_id(self, bundle_id):\n\n self._bundle_id = bundle_id",
"def qb_class(self, qb_class):\n\n self._qb_class = qb_class",
"def bundle_identifier(self, bundle_identifier):\n\n self._bundle_identifier = bundle_identifier",
"def entity_type(self, entity_type):\n self._entity_type = entity_type",
"def hydrate(self, bundle):\n bundle.data['user'] = \"/api/v1/user/%d/\" % bundle.request.user.id\n return bundle",
"def SetTarget(self, entity):\n\t\tself.target = entity",
"def entity_type(self, entity_type):\n\n self._entity_type = entity_type",
"def class_id(self, class_id):\n\n self._class_id = class_id",
"def add_etl_class(self, etl_class):\n\n if etl_class.identifier in self.etl_classes:\n raise DuplicateConfigException(\n \"Builder already has etl_class with identifier : {}\".format(\n etl_class.identifier\n )\n )\n\n self.etl_classes[etl_class.identifier] = etl_class",
"def __init__(__self__, *,\n bundle_id: str):\n pulumi.set(__self__, \"bundle_id\", bundle_id)",
"def hydrate(self, bundle):\n \n #Update the fabric\n if \"fabric\" in bundle.data and bundle.request.user.has_perm('acknowledgements.change_fabric'):\n try:\n fabric = Fabric.objects.get(pk=bundle.data[\"fabric\"][\"id\"])\n bundle.obj.fabric = fabric\n logger.info(\"{0} changed fabric to {1}\".format(bundle.obj.description,\n fabric.description))\n except KeyError:\n raise ValueError(\"Missing fabric ID.\")\n except Fabric.DoesNotExist:\n raise\n \n #Update the unit price\n if \"unit_price\" in bundle.data:\n if bundle.data[\"unit_price\"] != bundle.obj.unit_price:\n if bundle.request.user.has_perm('acknowledgements.change_item_price'):\n bundle.obj.unit_price = bundle.data['unit_price']\n bundle.obj.total = bundle.obj.unit_price * bundle.obj.quantity\n else:\n bundle.data['unit_price'] = bundle.obj.unit_price\n \n return bundle",
"def set_entity(cls, entity):\n # Preparing auto increment\n entity_count = cls.get_entity_count()\n new_key = \"entity:\" + str(entity_count + 1)\n\n # Set key to Entity\n entity.entity_key = new_key\n\n # Execute HMSET for assigning hash structure\n result = cls.db.hmset(new_key, entity.extract())\n\n # If success, increase key\n if result:\n cls.set_entity_count(entity_count + 1)\n return result",
"def register(self, cls):\r\n\r\n # Do all checks and complain before changing any state.\r\n if len(cls.tags) == 0:\r\n raise ValueError(\"No tags specified for class {0}\".format(cls.__name__))\r\n\r\n for t in cls.tags:\r\n if t in self._mapping:\r\n other_cls = self._mapping[t]\r\n if cls == other_cls:\r\n # registering the same class multiple times seems silly, but ok\r\n continue\r\n raise ValueError(\"Tag {0} already registered by class {1}.\"\r\n \" Can't register for class {2}\"\r\n .format(t, other_cls.__name__, cls.__name__))\r\n\r\n # Ok, should be good to change state now.\r\n for t in cls.tags:\r\n self._mapping[t] = cls\r\n\r\n # Returning the cls means we can use this as a decorator.\r\n return cls",
"def back_entities_embedding(self, entity):\n self.ent_embs.ent_embs.weight.data[entity] = self.source_entity",
"def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1",
"def register(self, system_class):\n temp = system_class(self.evt, self.list, self)\n self.systems.append(temp)",
"def register_user(self, cls):\n return self.register_entity('user', cls)",
"def create_ent(self, classname: str, **kargs: ValidKVs) -> 'Entity':\n kargs['classname'] = classname\n ent = Entity(self, keys=kargs)\n self.add_ent(ent)\n return ent",
"def obj_update(self, bundle, request=None, **kwargs):\n request = request or bundle.request\n bundle = self.check_read_create(bundle)\n\n try:\n # use grandparent rather than parent\n bundle = super(MTResource, self).obj_update(\n bundle, **kwargs)\n\n # update the cc_version\n bundle.obj.cc_version = self.model.objects.get(\n id=bundle.obj.id).cc_version\n\n # specify the user\n bundle.obj.save(user=request.user)\n\n except Exception: # pragma: no cover\n logger.exception(\"error updating %s\", bundle) # pragma: no cover\n raise # pragma: no cover\n\n return bundle",
"async def set_async(self, type_name, entity):\n return await self.call_async(\"Set\", type_name=type_name, entity=entity)"
]
| [
"0.6200419",
"0.6171212",
"0.60723436",
"0.59452605",
"0.58904743",
"0.58904743",
"0.5647311",
"0.5567563",
"0.5504061",
"0.54242414",
"0.53449535",
"0.5311498",
"0.5121058",
"0.5120031",
"0.51176834",
"0.510102",
"0.50701016",
"0.50628865",
"0.5060153",
"0.5053349",
"0.5012933",
"0.49671283",
"0.49617547",
"0.49340248",
"0.49187714",
"0.48870435",
"0.48666722",
"0.4857983",
"0.48462307",
"0.48395202"
]
| 0.74612033 | 0 |
Mark given class as the entity for Group. | def register_group(self, cls):
return self.register_entity('group', cls) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_group(self, address, group):\n self.groups[address] = group",
"def group(self, group):\n self._group = group",
"def group(self, group):\n\n self._group = group",
"def group(self, group):\n\n self._group = group",
"def group(self, group):\n\n self._group = group",
"def set_group(self, group):\n self._group = group",
"def decorator(zha_ent: _ZhaGroupEntityT) -> _ZhaGroupEntityT:\n self._group_registry[component] = zha_ent\n return zha_ent",
"def group_cls(self):\n return self.get_entity_cls('group')",
"def save_object(self, data):\n return GroupAttribute(**data)",
"def group(self, val):\n self.set_property(\"Group\", val)",
"def set_group(self, group: str) -> None:\n self.group = group",
"def add_to_group(self, org, contact, group):\n pass",
"def group(cls):\n return relationship.many_to_one(cls, 'group')",
"def setKind(self, *args):\n return _libsbml.Group_setKind(self, *args)",
"def set_group(self, id_: str, player: str, group: list):\n self._groups[id_] = {\n 'player': player,\n 'group': group\n }",
"def grp(self, grpNode):\n\t\tself._grp = grpNode",
"def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self",
"def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group",
"def setUpClass(cls):\n super().setUpClass()\n cls.group = Group.objects.create(\n title=TEST_GROUP_NAME,\n slug=TEST_GROUP_SLUG\n )",
"def set_definition(self, definition):\n return self.client._perform_json(\n \"PUT\", \"/admin/groups/%s\" % self.name,\n body = definition)",
"def persist_test_group(self, obj: object, group: str) -> str:",
"def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)",
"def group(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.group(m.collection, *args, **kwargs)",
"def setGroup(self, group):\n\t\tself.config.GROUP = group",
"def _class(self, _class):\n\n self.__class = _class",
"def _class(self, _class):\n\n self.__class = _class",
"def new_class(self, grp, name, class_type=\"NXcollection\"):\n sub = grp.require_group(name)\n sub.attrs[\"NX_class\"] = numpy.string_(class_type)\n return sub",
"def group(self, group):\n self.proxy_group = group\n return self",
"def group(self):\n raise NotImplementedError()",
"def group_id(self, group_id):\n\n self._group_id = group_id"
]
| [
"0.66240406",
"0.64159083",
"0.6386166",
"0.6386166",
"0.6386166",
"0.6299137",
"0.62925017",
"0.62784845",
"0.60574126",
"0.604557",
"0.5984747",
"0.59824955",
"0.59489924",
"0.587001",
"0.5849864",
"0.5833509",
"0.5803911",
"0.57778096",
"0.5695118",
"0.5686916",
"0.5671374",
"0.5643026",
"0.56214374",
"0.56147206",
"0.5600889",
"0.5600889",
"0.55406874",
"0.55400866",
"0.55389106",
"0.5536032"
]
| 0.7534639 | 0 |
Mark given class as the entity for Attempt. | def register_attempt(self, cls):
return self.register_entity('attempt', cls) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def attempt(self, attempt):\n\n self._attempt = attempt",
"def attempt_cls(self):\n return self.get_entity_cls('attempt')",
"def _class(self, _class):\n\n self.__class = _class",
"def _class(self, _class):\n\n self.__class = _class",
"def entity(self, entity):\n\n self._entity = entity",
"def attempt_id(self, attempt_id):\n\n self._attempt_id = attempt_id",
"def attempt_id(self, attempt_id):\n\n self._attempt_id = attempt_id",
"async def set_async(self, type_name, entity):\n return await self.call_async(\"Set\", type_name=type_name, entity=entity)",
"def class_id(self, class_id):\n\n self._class_id = class_id",
"def register_user(self, cls):\n return self.register_entity('user', cls)",
"def register_class(self, entity_class):\n key = entity_class.__collection_name__\n\n if key not in self._registered_types:\n self._registered_types[key] = entity_class",
"def changeClass(self, newClass):\n\t\turl = \"https://habitica.com/api/v3/user/change-class?class=\" + newClass\n\t\treturn(postUrl(url, self.credentials))",
"def assign_mark(entry: StudentEntry):\n pass",
"def mark_retry(self, eta=None, delay=None, trace=None):\n if delay is not None:\n eta = timezone.now() + delay\n self.eta = eta\n self.status = self.RETRY\n self.traceback = trace\n self.save(update_fields={'eta', 'status', 'traceback', 'retries', 'updated_at'})",
"def _handle_attempt(self):\n pass",
"def set_entity(cls, entity):\n # Preparing auto increment\n entity_count = cls.get_entity_count()\n new_key = \"entity:\" + str(entity_count + 1)\n\n # Set key to Entity\n entity.entity_key = new_key\n\n # Execute HMSET for assigning hash structure\n result = cls.db.hmset(new_key, entity.extract())\n\n # If success, increase key\n if result:\n cls.set_entity_count(entity_count + 1)\n return result",
"def test_sucesso_set_status_in_class_rs_set_status_in_class(self):\n\n data = SetStatusInClassRQ(\n lms_student_id=15,\n lms_class_id=10,\n status=False\n )\n\n res = self.api.set_status_in_class(data)\n\n if isinstance(res, ExceptionRS):\n raise unittest.SkipTest(res.msg)\n\n self.assertEqual(res.has_error, False)",
"def setIdentity(self) -> None:\n ...",
"def mark(self, mark):\n\n self._mark = mark",
"def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})",
"def claim(self, job, owner):\n raise NotImplementedError()",
"def class_ref(self, class_ref):\n\n self._class_ref = class_ref",
"def post_save_access_attempt(self, instance, **kwargs):",
"def test_resposta_error_parametro_class_id_set_status_in_class(self):\n\n data = SetStatusInClassRQ(\n lms_student_id=15,\n lms_class_id=None,\n status=True\n )\n\n res = self.api.set_status_in_class(data)\n\n if isinstance(res, ConnectionExceptionRS):\n raise unittest.SkipTest(res.msg)\n\n self.assertIsInstance(res, ErrorRS)\n\n self.assertEqual(\n res.has_error,\n True\n )\n\n self.assertEqual(\n res.msg,\n (\n u\"Deve ser informado a chave do registro \"\n u\"no sistema legado ou do LMS.\"\n )\n )",
"def set_key_class(self, key_class):\r\n self.key_class = key_class",
"def save(self, *args, **kwargs):\n self.entity_type = \"Person\"\n super().save(*args, **kwargs)",
"def OnAttempt(self, event):\n pass",
"def __setstate__(self, state):\n # Restore instance attributes\n try: \n obj = Thing.ID_dict[state['id']] # is this obj already in dict?\n dbg.debug(\"Note: %s already in Thing.ID_dict, maps to %s\" % (state['id'], obj))\n except KeyError: # Not already in dict\n Thing.ID_dict[state['id']] = self\n if 'has_beat' in state:\n Thing.game.register_heartbeat(self)\n self.__dict__.update(state)",
"def SetTarget(self, entity):\n\t\tself.target = entity",
"def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1"
]
| [
"0.6293439",
"0.6087253",
"0.5680625",
"0.5680625",
"0.5511365",
"0.53011936",
"0.53011936",
"0.52569115",
"0.5208276",
"0.51021844",
"0.50543135",
"0.49860868",
"0.49604225",
"0.49561048",
"0.49194008",
"0.48939493",
"0.4889586",
"0.4883102",
"0.48630318",
"0.4858264",
"0.47985506",
"0.47617832",
"0.47578263",
"0.47507674",
"0.47399932",
"0.47014022",
"0.46787277",
"0.46746016",
"0.46715719",
"0.46709603"
]
| 0.7433578 | 0 |
Return the entity registered for Bundle. | def bundle_cls(self):
return self.get_entity_cls('bundle') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetEntity(self):\n return self.__entity",
"def entity(self):\n return self._entity",
"def entity(self):\n return self._entity",
"def GetEntity(self):\n\t\treturn self.acad.ActiveDocument.Utility.GetEntity()",
"def register_bundle(self, cls):\n return self.register_entity('bundle', cls)",
"def getHandle(self):\n return entity",
"def getHandle(self):\n return entity",
"async def get_entity(self):\n if not self.entity and await self.get_input_entity():\n try:\n self._entity =\\\n await self._client.get_entity(self._input_entity)\n except ValueError:\n pass\n\n return self._entity",
"def bundle(self):\n return self._bundle",
"def get(self, entity):\n\t\treturn entity.get_component(self.component_type)",
"def current_entity(self):\n return self.entities[len(self.device_data[CONF_ENTITIES])]",
"def get_entity(self):\n if self.override_entity and not self.override_entity.abstract_entity:\n return self.override_entity\n elif self.get_role():\n return self.get_role().entity\n return None",
"def get_bundle():\n if should_save_generator_bundle():\n return None\n bundle_file = get_bundle_file()\n if bundle_file is None:\n return None\n return sequence_generator_bundle.read_bundle_file(bundle_file)",
"def get_entity_by_name(self, entity_name):\n return Artifact.get_by_name(entity_name)",
"def entity(self) -> _InternalEntityType[_T]:\n self.parent._check_configure()\n return self.entity",
"def get_model(self, name):\n bundle_name, model_name = name.split(\".\")\n bundle = self.bundles[bundle_name]\n model = bundle.models[name]\n return model",
"def entity_for(self, shape):\n return self._shape_to_ent[shape]",
"def get_entity(endpoint):\n _entity, _id = parser_endpoint(endpoint)\n\n return _entity",
"def get_entity_manager(self):\n return self.game.entity_manager",
"def entity():\n return Entity(\n u'Dummy', IDummy, 'icemac.addressbook.tests.test_entities.Dummy')",
"def entity(self, elem):\n return data.Entity(self, elem)",
"def me_class(self):\n return self._entity_class",
"def bundle_id(self):\n return self._bundle_id",
"def get_entity_by_key(cls, key):\n db_key = \"entity:\" + str(key)\n result = cls.db.hgetall(db_key)\n return (Entity.build(result) if type(result) is dict else None)",
"def get_entity_type(self):\n return self.entity_type",
"def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname",
"def _load_entity(client, entity_type, entity_id, parent_key=None):\n\n key = _load_key(client, entity_type, entity_id, parent_key)\n entity = client.get(key)\n log('retrieved entity: ' + entity_type + ' for ID: ' + str(entity_id))\n return entity",
"def resolve_model(root: Entity, *args) -> str:\n return 'entities.entity'",
"def get_bundle(bundle_uuid):\n assert isinstance(bundle_uuid, UUID)\n try:\n data = api_request('get', api_url('bundles', str(bundle_uuid)))\n except NotFound:\n raise BundleNotFound(f\"Bundle {bundle_uuid} does not exist.\") # lint-amnesty, pylint: disable=raise-missing-from\n return _bundle_from_response(data)",
"def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)"
]
| [
"0.6807404",
"0.6516922",
"0.6516922",
"0.62681806",
"0.6195618",
"0.6172222",
"0.6172222",
"0.6164801",
"0.6090738",
"0.6072496",
"0.60393006",
"0.5977017",
"0.59181803",
"0.5861657",
"0.5843958",
"0.58304024",
"0.5593018",
"0.5547911",
"0.5488324",
"0.54595906",
"0.54330087",
"0.54291713",
"0.5375346",
"0.53746784",
"0.52718556",
"0.5265775",
"0.525904",
"0.52122384",
"0.5168815",
"0.51401484"
]
| 0.67808473 | 1 |
Return the entity registered for Attempt. | def attempt_cls(self):
return self.get_entity_cls('attempt') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetEntity(self):\n return self.__entity",
"async def get_entity(self):\n if not self.entity and await self.get_input_entity():\n try:\n self._entity =\\\n await self._client.get_entity(self._input_entity)\n except ValueError:\n pass\n\n return self._entity",
"def register_attempt(self, cls):\n return self.register_entity('attempt', cls)",
"def entity(self):\n return self._entity",
"def entity(self):\n return self._entity",
"def entity(self) -> _InternalEntityType[_T]:\n self.parent._check_configure()\n return self.entity",
"def get_entity(self):\n if self.override_entity and not self.override_entity.abstract_entity:\n return self.override_entity\n elif self.get_role():\n return self.get_role().entity\n return None",
"def provider_entity(provider):\n try:\n return NUTEntity.objects.get(id=provider.first_access().target.id)\n except:\n return None",
"def GetEntity(self):\n\t\treturn self.acad.ActiveDocument.Utility.GetEntity()",
"def me_class(self):\n return self._entity_class",
"def get_entity(endpoint):\n _entity, _id = parser_endpoint(endpoint)\n\n return _entity",
"def getHandle(self):\n return entity",
"def getHandle(self):\n return entity",
"def current_entity(self):\n return self.entities[len(self.device_data[CONF_ENTITIES])]",
"def get(self):\n entity = self._model.get_or_insert(self._key_name)\n credential = getattr(entity, self._property_name)\n if credential and hasattr(credential, 'set_store'):\n credential.set_store(self.put)\n return credential",
"def get_object(self):\n account = Account.get_account_with_admins(account.id)\n\n return account[0] if account else None",
"def _resolve_user(self, data: dict):\n user_email = data.get('eml')\n if not user_email:\n raise OBDControllerError('User email not found')\n\n user: User = self.db_session.query(User).filter(User.email == user_email).first()\n if not user:\n raise OBDControllerError('User does not exist')\n\n return user",
"def entity():\n return Entity(\n u'Dummy', IDummy, 'icemac.addressbook.tests.test_entities.Dummy')",
"def user(self):\r\n return resource.User(self)",
"async def get_input_entity(self):\n # We don't actually have an API call we can make yet\n # to get more info, but keep this method for consistency.\n return self.input_entity",
"def unknownEntity():\n return Entity(u'', IEntities, 'icemac.addressbook.entities.Entities')",
"def _get_helper(cls, database, id_field, entity_id):\n entity = database.get(entity_id)\n if not entity:\n return validation.get_not_found_error(id_field, entity_id)\n return entity",
"def get_target_entity(self):\n return None",
"def entity_for(self, shape):\n return self._shape_to_ent[shape]",
"def get_entity_by_name(self, entity_name):\n return Artifact.get_by_name(entity_name)",
"async def me(self) -> types.User:\n if not hasattr(self, '_me'):\n setattr(self, '_me', await self.get_me())\n return getattr(self, '_me')",
"def me(self):\n return User(self, ResourcePath(\"me\", None))",
"def user(self):\r\n return resources.User(self)",
"async def fetch_self(self) -> User:\n return await self.app.rest.fetch_user(user=self.id)",
"def blocking_entity(self) -> Optional[Entity]:\n return self.engine.game_map.get_blocking_entity_at_location(*self.dest_xy)"
]
| [
"0.6523218",
"0.6447264",
"0.63593775",
"0.6327726",
"0.6327726",
"0.60202014",
"0.5867586",
"0.5776936",
"0.57724816",
"0.56494397",
"0.5592657",
"0.5584119",
"0.5584119",
"0.55728316",
"0.55628717",
"0.5502534",
"0.5401744",
"0.53906035",
"0.5357345",
"0.53007805",
"0.5283975",
"0.5272664",
"0.5260652",
"0.522319",
"0.5196916",
"0.5177333",
"0.51593494",
"0.5151558",
"0.51228774",
"0.5121491"
]
| 0.705092 | 0 |
Return True if N is a square number. | def is_square(N):
if N < 0:
print("N is negative number @is_square in ModulesFactorization.")
sys.exit()
sqrt_N=round(math.sqrt(N))
if N == sqrt_N*sqrt_N:
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_square(N):\n return N == round(N**(0.5))**2",
"def is_square(n):\r\n m = int(sqrt(n))\r\n return m * m == n",
"def is_square(n):\n if type(n) is not int:\n raise ValueError(\"Wrong given type, should be integer instead\")\n return n > -1 and math.sqrt(n) == int(math.sqrt(n))",
"def perfect_square(num: int) -> bool:\n return math.sqrt(num) * math.sqrt(num) == num",
"def isPerfectSquare(self, num: int) -> bool:\n for i in range(num + 1):\n square = i * i\n if square == num:\n return True\n if square > num:\n return False",
"def is_hilbert_square(n):\n return ((-1 + math.sqrt(n)) / 4).is_integer()",
"def is_square(apositiveint):\n x = apositiveint // 2\n seen = set([x])\n while x * x != apositiveint:\n x = (x + (apositiveint // x)) // 2\n if x in seen: return False\n seen.add(x)\n return True",
"def winnerSquareGame(self, n: int) -> bool:\n # Solution 1 - 1964 ms\n\n # Solution 2 - 184 ms\n @functools.lru_cache(None)\n def dp(k):\n if k == 0:\n return -1\n for i in range(int(math.sqrt(k)), 0, -1):\n if dp(k - i * i) < 0:\n return 1\n return -1\n\n return dp(n) > 0",
"def is_perfect_square():",
"def is_square(number): \n s = number * number\n return is_palindrome(s)",
"def is_square(x):\n\n if x < 0:\n return False\n if math.pow(int(math.sqrt(x)), 2) == x:\n return True",
"def isSquareOfPalindrome(n):\n s = int(sqrt(n) + 0.5)\n return s * s == n and isPalindrome(s)",
"def is_square(q_1: Qs) -> bool:\n\n return math.sqrt(q_1.dim).is_integer()",
"def snt(n):\r\n f = True\r\n for j in range(2, n):\r\n if n % j == 0:\r\n f = False\r\n break\r\n return f",
"def _can_do_sum_of_squares(n, k):\n if k < 1:\n return False\n if n < 0:\n return False\n if n == 0:\n return True\n if k == 1:\n return is_square(n)\n if k == 2:\n if n in (1, 2):\n return True\n if isprime(n):\n if n % 4 == 1:\n return 1 # signal that it was prime\n return False\n else:\n f = factorint(n)\n for p, m in f.items():\n # we can proceed iff no prime factor in the form 4*k + 3\n # has an odd multiplicity\n if (p % 4 == 3) and m % 2:\n return False\n return True\n if k == 3:\n if (n//4**multiplicity(4, n)) % 8 == 7:\n return False\n # every number can be written as a sum of 4 squares; for k > 4 partitions\n # can be 0\n return True",
"def square(n: int) -> int:\n return int(n ** 2)",
"def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)",
"def is_valid(square: tuple, n: int):\n square_i, square_j = square\n if (square_i < n and square_i >= 0 and square_j < n and square_j >= 0):\n return True\n return False",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def square(n):\r\n try:\r\n assert(type(n) is int)\r\n if n == 1:\r\n return 1\r\n s = square(n - 1) + 2*(n - 1) + 1\r\n return s\r\n except:\r\n return None",
"def is_square(self):\n return self.shape[0] == self.shape[1]",
"def is_square(self):\n return self.shape[0] == self.shape[1]",
"def isSquare(P):\n return P.ndim == 2 and P.shape[0] == P.shape[1]",
"def isPerfectSquare(x):\n return (x**0.5) % int(x**0.5) == 0",
"def square(n):\r\n squared = n ** 2\r\n print (\"%d squared is %d.\" % (n, squared)) ## გიო: შეცდომას აგდებდა სანამ ფრჩხილებში არ ჩავსვი\r\n return squared",
"def is_hilbert_squarefree_number(n):\n ubound = math.ceil(n / 2)\n for a in range(5, ubound + 1):\n if is_hilbert_square(a) and n % a == 0:\n return False\n return True",
"def make_magic_square(N): # part a\n if N % 2 == 0:\n print('N must be odd.')\n my_magic_square = np.zeros((N, N))\n i = 0\n j = np.ceil(N / 2.).astype(int)\n n = 1\n while n <= N**2:\n my_magic_square[i, j] = n\n n += 1\n i_next =\n j_next =\n if my_magic_square[i_next, j_next] > 0:\n i =\n else:\n i =\n j =\n return my_magic_square",
"def is_square(self):\n lines, columns = self.get_size()\n return lines == columns",
"def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared)\n return squared",
"def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared)\n return squared"
]
| [
"0.88921505",
"0.8663732",
"0.8025654",
"0.7796842",
"0.75990134",
"0.7459169",
"0.7402554",
"0.7398521",
"0.7383941",
"0.73806375",
"0.73228544",
"0.7117085",
"0.70663697",
"0.70554364",
"0.69903105",
"0.6930154",
"0.68873316",
"0.6887043",
"0.6846944",
"0.6833844",
"0.6817891",
"0.6817891",
"0.6785697",
"0.6737471",
"0.6737361",
"0.66590774",
"0.6632748",
"0.6614035",
"0.6610084",
"0.6610084"
]
| 0.8697051 | 1 |
Return the positive integer N parsed from string N_str. | def PositiveInt(N_str):
try:
N = int(N_str)
except ValueError:
print("整数を入力してください。")
sys.exit()
if N <= 0:
print("0以下の整数です。1以上の自然数を入力してください。")
sys.exit()
return N | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def string_times(str, n):\n if n <= 0:\n return('n has to be non-negative')\n else:\n return(str * n)",
"def my_dp_O_N_FAILED(self, s):\n if s[0] == '0':\n return 0\n if len(s) == 1:\n return 1\n \n \"\"\" Following len(s) >= 2 \"\"\"\n n = len(s)\n dp = [0] * n\n \n dp[0] = 1\n # dp[1] = \n dp[1] = 1 if (int(s[0:2]) > 26 or s[1] == '0') else 2\n if 1 <= int(s[0:2]) <= 26:\n dp[1] = 2\n else:\n if s[1] == '0':\n dp[1] = 0\n else:\n dp[1] = 1\n \n for i in range(2, len(s)+1):\n if s[i] != '0':\n dp[i] += dp[i-1]\n if 10 <= int(s[i-1 : i+1]) <= 26:\n dp[i] += dp[i-2]\n return dp[n-1]",
"def getLength(string):\n return (0)",
"def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str",
"def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)",
"def return_num(strnum):\r\n if strnum != strnum or strnum == ' ':\r\n return -1\r\n else:\r\n strnum = re.sub('[^1-9]', '', str(strnum))\r\n return int(strnum)",
"def repeatedString(s, n):\n\n count = 0\n s_count_a = s.count('a')\n\n count += math.floor(n / len(s)) * s_count_a\n for _ in range(n % len(s)):\n if s[_] == 'a':\n count += 1\n\n return count",
"def InfIntToStr(s, i, n):\n if i == len(s):\n return \"\"\n elif i == 0:\n return str(int(s[i])) + InfIntToStr(s, i + 1, n)\n else:\n return str(int(s[i])).zfill(n) + InfIntToStr(s, i + 1, n)",
"def get_n_out_min(ss):\n n = ss.split(\"_\")[0]\n if n == \"full\":\n n = 1\n else:\n n = int(n)\n return n",
"def getNumFromString(self, string):\n \n m = re.search(r'\\d+$', string)\n if m is not None:\n return int(m.group())\n else:\n return 0",
"def f(n):\n\tnstr = ''\n\tfor i in range(1, n + 1):\n\t\tnstr = nstr + str(i)\n\treturn nstr",
"def find_nth(self,string, substring, n) -> int:\n if n == 1:\n return string.find(substring)\n else:\n return string.find(substring, self.find_nth(string, substring, n - 1) + 1)",
"def fo_shizzle_my_nizzle(n): \n if n < 0:\n n = \"fo\"\n elif n >= 1 and n < 50: \n n = \"shizzle\"\n elif n >= 50 and n <= 100:\n n = \"my\"\n elif n % 2 == 0 and n % 3 == 0 and n > 100:\n n = \"nizzle\"\n else:\n n = \"\"\n return n",
"def stringConstruction(s):\n p = ''\n i = 0\n mincost = 0\n while p != s:\n if s[i] in p:\n p = p + s[i]\n # no cost since it is substring of p\n else:\n p = p + s[i]\n mincost += 1\n i += 1\n return mincost",
"def int(s):\n if s is None or s == \"\":\n i = -maxsize\n else:\n i = int(s)\n\n return i",
"def line(n, str):\n\n return_value = ''\n for _ in range(n):\n return_value += str\n return return_value",
"def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1",
"def h_ascii(key, N):\n if type(key) == str:\n if type(N) == int:\n s = 0\n for i in range(len(key)):\n s += ord(key[i])\n return s % N\n else:\n raise ValueError\n else:\n raise ValueError",
"def _str_to_int(in_str):\n if in_str == '':\n return 0\n return int(in_str, 10)",
"def make_str(n, len):\r\n\ts=\"\"\r\n\tfor i in xrange(0,n):\r\n\t\ts+=\"1\"\r\n\tfor i in xrange(n,len):\r\n\t\ts+=\"0\"\r\n\treturn s",
"def Itinerary(s, n):\n\titinerary = ''\n\tfor i in range(n):\n\t\ts = retmap(s)\n\t\tif s>sCritical:\n\t\t\titinerary = itinerary + '1'\n\t\telif s<sCritical:\n\t\t\titinerary = itinerary + '0'\n\treturn itinerary",
"def multiply_string(message, n):\r\n return message*n",
"def sum_string_digits(my_str):\n\n return 0 # this is a placeholder. remove it.",
"def find_string(n, c_length, start=None):\n \n c = range(c_length)\n if start is None:\n i = get_minimum(n)\n else:\n i = start\n\n strings = [e for e in generate_strings(n, c)]\n while True:\n for x, s in enumerate(generate_strings(i, c)):\n if check_string(s, strings):\n return s\n\n if x % 1000000 == 0:\n print x\n i += 1\n print \"processing %s\" % i",
"def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n \r\n\r\n\r\n if pos_1 <10:\r\n pos_1=\"00\"+str(pos_1)\r\n elif pos_1<100:\r\n pos_1=\"0\"+str(pos_1)\r\n\r\n\r\n\r\n\r\n #pos\r\n c=\"\"\r\n\r\n c=str(pos_0)+str(pos_1)+str(n)\r\n #print(\"c\",c)\r\n return c",
"def solution(s):\n # check the input string is a valid length\n min_length = 1\n max_length = 101\n\n if len(s) < min_length or len(s) > max_length:\n return ''\n\n # need to handle integers between 1 and 10^100, so up to 101 decimal places of precision\n # The Python default is 28 significant figures\n # https://www.geeksforgeeks.org/setting-precision-in-python-using-decimal-module/\n getcontext().prec = max_length\n\n n = int(s)\n alpha = Decimal(2).sqrt()\n sequence = beatty_sequence(alpha, n)\n\n return str(int(sequence))",
"def LPSubsequenceLength(str):\n return len(LPSubsequence(str))",
"def GenZeroStr(n):\n\n return \"\".join([\"0\"] * n)",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def esrever2(n, s):\n if n == 0:\n return s\n else:\n result = esrever2(n // 10, s * 10 + n % 10)\n return result"
]
| [
"0.7054025",
"0.62198836",
"0.61006796",
"0.6047664",
"0.59960014",
"0.597458",
"0.5920257",
"0.5876303",
"0.5868132",
"0.58672506",
"0.58189476",
"0.5807939",
"0.57942134",
"0.5793706",
"0.5792252",
"0.5786809",
"0.57753885",
"0.57610023",
"0.5738962",
"0.5732349",
"0.5719402",
"0.57128286",
"0.5690612",
"0.5690602",
"0.5689457",
"0.5684443",
"0.56795555",
"0.5673449",
"0.5672702",
"0.56656206"
]
| 0.727527 | 0 |
Check if the user was mentioned in the given message. | def mentioned(self, message: "Message") -> bool:
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mentioned_in(self, message: Message) -> bool:\n if message.guild is None or message.guild.id != self.guild.id:\n return False\n\n if self._user.mentioned_in(message):\n return True\n\n return any(self._roles.has(role.id) for role in message.role_mentions)",
"def mention(cls, user, message, mentioned):\r\n pass",
"def mention(cls, user, message, mentioned):\n pass",
"def is_mention(self, bot):\n for um in self.original.entities['user_mentions']:\n if um['screen_name'] == bot.screen_name:\n return True\n return False",
"def msg_found(self, msg, message):\n if msg in message:\n return True\n else:\n return False",
"def said(self, text):\n for message in self.messages:\n if text in message:\n return True\n return False",
"def contains_message(response, message):\n if len(response.context['messages']) != 1:\n return False\n\n full_message = str(list(response.context['messages'])[0])\n\n return message in full_message",
"def is_user_message(message):\n return (message.get('message') and\n message['message'].get('text') and\n not message['message'].get(\"is_echo\"))",
"def author_is_me(message: discord.Message) -> bool:\n return message.author == config.bot.user",
"def has_mention(tweet):\n\n pattern = r'^\\@[\\w\\d]+'\n return re.match(pattern, tweet)",
"def mention(bot, msg):\n\n if msg.command != \"PRIVMSG\":\n return\n\n message = msg.args[1]\n\n if bot.nickname.lower() in message.lower():\n bot.privmsg(msg.sendername, \"You called?\")",
"def listening_for(message):\n\n if Utilities.isNotEmpty(message['text']):\n cmds = ['!whois', '!geoloc', '!ping']\n return message['text'].split()[0] in cmds",
"def is_explicit_mention(self, bot):\n for um in self.original.entities['user_mentions']:\n if um['screen_name'] == bot.screen_name:\n this_is_an_xm = um['indices'][0] >= self.original.display_text_range[0]\n this_is_an_xm &= um['indices'][1] <= self.original.display_text_range[1]\n if this_is_an_xm:\n return True\n return False",
"def mention_exists(self, mention):\n return self._entity_symbols.alias_exists(mention)",
"def check_message(check):\n words_of_message = speech_text.split()\n if set(check).issubset(set(words_of_message)):\n return True\n else:\n return False",
"async def check_message(self, message: discord.Message):\n\n if message.channel not in self.meow_chats:\n return\n if message.author.bot:\n return\n if message.author.id in self.bot.owner_ids:\n return\n content = self.EMOJI_REGEX.sub(\"\", message.content.lower())\n if any([i in content for i in self.VALID_KEYWORDS]):\n return\n try:\n await message.delete()\n expiry_time, _ = self.meow_disable_tasks.get(message.channel.id, (None, None))\n if message.channel.permissions_for(message.author).manage_messages:\n text = f\"{message.author.mention}, your message needs to have a 'meow' in it (to disable, run the `meow off` command).\"\n else:\n text = f\"{message.author.mention}, your message needs to have a 'meow' in it :<\"\n if expiry_time:\n text = text.replace(\"in it\", f\"in it until meow chat expires {discord.utils.format_dt(expiry_time, 'R')}\")\n await message.channel.send(text, delete_after=3)\n except discord.HTTPException:\n pass",
"def has_message(self, character):\n messages = get_messages(character)\n messages = [ message[MESSAGE].id for message in messages ]\n if self.message.id in messages:\n return True\n else:\n return False",
"def is_special_message(self):\n if not self.is_valid():\n return False\n \n # TODO: what if the author is wrong? then these don't match at all!\n for nickname in AUTHOR_TO_NICKNAME[self.author]:\n \n if self.content == f\"{nickname} changed the chat theme.\":\n return True\n \n if self.content == f\"{nickname} joined the video chat.\":\n return True\n \n if self.content == f\"{nickname} joined the call.\":\n return True\n \n if self.content.startswith(f\"{nickname} named the group\"):\n return True\n \n if self.content == f\"{nickname} removed the group name.\":\n return True\n \n if self.content == f\"{nickname} sent a link.\":\n return True\n \n if self.content == f\"{nickname} sent an attachment.\":\n return True\n \n if self.content.startswith(f\"{nickname} set the emoji to\"):\n return True\n \n if self.content == f\"{nickname} changed the group photo.\":\n return True\n \n if is_add_remove_member(self.content, nickname):\n return True\n\n if is_set_nickname(self.content, nickname):\n return True\n \n if is_clear_nickname(self.content, nickname):\n return True\n \n if is_create_group(self.content, nickname):\n return True\n if self.content == f\"{nickname} started a video chat.\":\n return True\n \n if self.content == f\"{nickname} left the group.\":\n return True\n \n if is_poll_message(self.content, nickname):\n return True\n return False",
"def is_me(self, m):\n return m.author == self.client.user",
"def _check_has_message(data):\r\n return re.match(r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'PRIVMSG #[a-zA-Z0-9_]+ :.+$', data)",
"def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False",
"def check_if_help_message(message):\n return \"The commands are\" in message",
"async def mention(self, ctx):\n settings = await self.fetch_settings(ctx)\n if settings['require_mention']:\n settings['require_mention'] = False\n await ctx.send('🐱 Okay, I no longer need to be @mentioned for me to respond to messages!')\n else:\n settings['require_mention'] = True\n await ctx.send('🐱 Okay, I will now only respond to messages if they @mention me!')\n await self.db.set(ctx.guild.id, settings)",
"def exactMatch(self, mention):\n w1 = self.allWords()\n w2 = mention.allWords()\n if len(w1) == len(w2) and w1 == w2:\n return True\n else:\n return False",
"def is_for_me(event):\n # check if not my own event\n\n type = event.get('type')\n\n if type and type == 'message' and not(event.get('user') == VALET_SLACK_ID):\n\n if is_private(event):\n return True\n text = event.get('text')\n # channel = event.get('channel')\n if type and type == 'message' and text.startswith(\"@td \"):\n return True\n if type and type == 'message' and text.startswith(\"@t\"):\n return True\n if type and type == 'message' and text.startswith(\"@cl\"):\n return True\n if valet_slack_mention in text.strip().split():\n return True",
"def check_message(msg):\n words_of_message = msg.split()\n find = False\n for key in gc_words:\n if words_of_message in gc_words[key]['groups']:\n getattr(neuron.general_conversations, key)()\n find = True\n break\n for key in fc_words:\n if words_of_message in fc_words[key]['groups']:\n getattr(neuron.forecast, key)()\n find = True\n break\n for key in twitter_words:\n if words_of_message in twitter_words[key]['groups']:\n getattr(neuron.twitter, key)()\n find = True\n break\n for key in pipo_words:\n if words_of_message in pipo_words[key]['groups']:\n getattr(neuron.pipotron, key)()\n find = True\n break\n if not find:\n neuron.general_conversations.undefined()",
"async def check_for_spam(self, message: discord.Message):\n user = message.author\n guild = message.guild\n\n similarity_threshold = await self.config.guild(guild).similarity_threshold()\n\n last_message = await self.config.member(user).last_message()\n current_message = message.content\n\n if last_message is None:\n await self.config.member(user).last_message.set(current_message)\n return False\n\n last_message_time = await self.config.member(user).last_message_time()\n\n if last_message_time is None:\n await self.config.member(user).last_message_time.set(\n message.created_at.timestamp()\n )\n return False\n\n current_message_time = message.created_at.timestamp()\n time_difference_in_seconds = current_message_time - last_message_time\n\n await self.config.member(user).last_message.set(current_message)\n await self.config.member(user).last_message_time.set(current_message_time)\n\n if time_difference_in_seconds < 1800:\n similarity = self.similarity(last_message, message.content)\n\n if similarity > similarity_threshold:\n spam_count = await self.config.member(user).spam_count()\n spam_count = spam_count + 1\n\n spam_threshold = await self.config.guild(guild).spam_threshold()\n\n if spam_count > spam_threshold:\n punish = self.bot.get_cog(\"Punish\")\n punish_hours = await self.config.guild(guild).spam_punish_hours()\n async with punish.config.member(user)() as current:\n now = time.time()\n duration = now + 3600 * punish_hours\n punish_role = await punish.get_role(guild, user, quiet=True)\n\n if punish_role is None:\n return\n\n current[\"start\"] = (\n current[\"start\"] or now\n ) # don't override start time if updating\n current[\"until\"] = duration\n current[\"by\"] = (\n current[\"by\"] or guild.me.id\n ) # don't override original moderator\n current[\"reason\"] = \"Spamming messages\"\n current[\"unmute\"] = False\n current[\"caseno\"] = None\n\n await user.add_roles(punish_role)\n\n await punish.schedule_unpunish(duration, user)\n await message.channel.send(\n \"%s has been muted for 12 hours for Spamming Messages\"\n % user.name\n )\n\n # Reset spam counter since we punished\n await self.config.member(user).spam_count.set(0)\n else:\n await self.config.member(user).spam_count.set(spam_count)\n\n # We delete the message in any case\n await asyncio.sleep(0.5)\n await message.delete()\n\n return True\n\n return False",
"def check_message(self, message):\n for word in self.bad_words:\n if word in message:\n return -1\n for word in self.filter_words:\n if word in message:\n return 0\n return 1",
"def check_for_greeting(sentence):\n for word in sentence.words:\n if word.lower() in greetings:\n return True",
"async def on_message(self, message: discord.Message) -> None:\n\n if message.author == self.user:\n # Skip any messages sent by ourselves so that we don't get stuck in any loops\n return\n\n # Check to see if bot has been mentioned\n has_mentioned = False\n for mention in message.mentions:\n if str(mention) == self.user.name+\"#\"+self.user.discriminator:\n has_mentioned = True\n break\n\n # Only respond randomly (or when mentioned), not to every message\n if random.random() > float(self.response_chance) and has_mentioned == False:\n return\n\n processed_input = self.process_input(message.content)\n\n response = \"\"\n with message.channel.typing():\n response = self.chat_ai.get_bot_response(self.model_name, message.author.nick, processed_input)\n\n await message.channel.send(response)"
]
| [
"0.7721908",
"0.72947556",
"0.7270287",
"0.7094305",
"0.70847654",
"0.70275617",
"0.68945",
"0.6781079",
"0.6633923",
"0.651744",
"0.6505316",
"0.64962983",
"0.63860756",
"0.6355281",
"0.62822205",
"0.62443316",
"0.6185116",
"0.6181461",
"0.61404777",
"0.61131454",
"0.6073126",
"0.6040652",
"0.6007715",
"0.5988043",
"0.5969367",
"0.59126896",
"0.5894685",
"0.5894044",
"0.58113116",
"0.57945365"
]
| 0.80363095 | 0 |
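The `mentioned` stub above has no body in the source. A minimal sketch of one way to implement it for a discord.py-style `Message` — the `mentions`/`role_mentions` attributes and the `self.id`/`self.roles` shape are assumptions, not part of the original entry:

```python
def mentioned(self, message: "Message") -> bool:
    # Direct user mention (discord.py exposes mentioned users on the message).
    if any(user.id == self.id for user in message.mentions):
        return True
    # A mention of any role this user holds also counts (assumption).
    return any(role in getattr(self, "roles", ()) for role in message.role_mentions)
```

The top-ranked negative above (`mentioned_in`) shows the same idea with an extra guild check, which is worth adding when cross-guild messages are possible.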
Check if the given string is an action. | def is_action_str(string: str) -> bool: | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_action(self) -> bool:\n return self.is_action_str(self.content)",
"def is_string_action(func: CLIActionType) -> bool:\n return check_function_type(func, [HammerDriver, Callable[[str], None]], Optional[str]) is None",
"def isActionKey(event_string):\n\n actionKeys = [ \"Return\", \"Escape\", \"Tab\", \"BackSpace\", \"Delete\",\n \"Page_Up\", \"Page_Down\", \"Home\", \"End\" ]\n\n reply = event_string in actionKeys\n debug.println(debug.LEVEL_FINEST,\n \"orca.isActionKey: returning: %s\" % reply)\n return reply",
"def str_to_action(str):\n raise NotImplementedError",
"def is_valid_action(self, action):\n if self.board[action[0]][action[1]] == None:\n return True\n \n return False",
"def match_action(pattern, action):\n if pattern == '*' or pattern == '*:*' or pattern == action:\n return True\n\n # build a pattern for the action\n re_pattern = '^{}$'.format(pattern.replace('*', ASTERISK_RE_REPLACE))\n return re.match(re_pattern, action)",
"def is_valid_git_action(action):\n\n return action in GIT_ACTIONS",
"def define_action(char):\n if char == \"checks\":\n return ActionType.CHECK\n elif char == \"folds\":\n return ActionType.FOLD\n elif char == \"bets\":\n return ActionType.BET\n elif char == \"raises\":\n return ActionType.RAISE\n elif char == \"calls\":\n return ActionType.CALL\n else:\n return ActionType.UNDEFINED",
"def match_action(self, action):\n\n return hasattr(self, self._action_handler_name(action))",
"def parse_action(element: Element) -> ActionType:\n # edit is a special type of action for strings\n if \"edit\" in element.attributes:\n if element.attributes[\"edit\"] == \"append\":\n return edit_append\n raise error_at(element)(f'invalid edit=\"{element.attributes[\"edit\"]}\"')\n # default action is replace\n action = element.attributes.get(\"action\", \"replace\")\n if action == \"replace\":\n return replace\n if action == \"append\":\n return append\n if action == \"delete\":\n return delete\n if action == \"merge\":\n return merge\n raise error_at(element)('invalid action=\"{:s}\".'.format(action))",
"def act(self, action: str) -> bool:\n if action in utils.ROTATIONS:\n if self._rotate(self.rotation_table[action]):\n self.last_move = action\n return True\n elif action in utils.MOVEMENT:\n if self._move(action):\n self.last_move = action\n return True\n else:\n raise ValueError('Invalid move \\'{}\\''.format(action))\n\n return False",
"def test_unknown_action(self):\n exit_string = actions.main(['foo'])\n self.assertEqual('Action \"foo\" undefined', exit_string)",
"def test_unknown_action(self):\n exit_string = actions.main([\"foo\"])\n self.assertEqual(\"Action foo undefined\", exit_string)",
"def is_event(event: Any) -> bool:\n return isinstance(event, MenuAction) or str(type(event)) == \"<class 'pygame_menu.events.MenuAction'>\"",
"def choose_action(self, valid_list):\n \n action_str = input(\"Choose action: \").lower()\n print()\n \n if action_str in valid_list:\n return action_str\n \n else:\n print(\"Invalid action!\")\n return False",
"def as_action_str(string: str) -> str:",
"def check_CLIActionType_type(func: CLIActionType) -> None:\n config_check = check_function_type(func, [HammerDriver, Callable[[str], None]], Optional[dict])\n if config_check is None:\n return\n\n string_check = check_function_type(func, [HammerDriver, Callable[[str], None]], Optional[str])\n if string_check is None:\n return\n\n raise TypeError(\n \"func does not appear to be a CLIActionType. Check for config returned {config}; check for string returned {string}\".format(\n config=config_check, string=string_check))",
"def check_type(self):\n if self.action < 0 or self.action >= len(_action_args_dict):\n raise GameActionError('Invalid action type ({0})'.format(self.action))",
"def is_selector_str(cls, s):\n\n # assert type(s) is str\n assert isinstance(s, basestring)\n try:\n cls.parse(s)\n except:\n return False\n else:\n return True",
"def parseInputLine(self, action):\r\n output = None\r\n if action is not None and action is not '':\r\n func = getattr(self, 'cmd_' + str(action[0]).upper(), None)\r\n if func is not None:\r\n output = func()\r\n else:\r\n return fail(InvalidRequest(\"No such action\"))\r\n return succeed(output)",
"def get_action():\n print(\"What do you do next?\")\n print(\" m) move\")\n print(\" a) fire an arrow\")\n action = input(\"> \")\n if action == \"m\" or action == \"a\":\n return action\n else:\n print(action + \"?\")\n print(\"That's not an action that I know about\")\n return False",
"def get_action(command):\n return command.split(\" \")[0]",
"def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action",
"def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action",
"def _get_action_from_name(self, name):\n\n container = self._action\n if name is None:\n return None\n\n for action in container:\n if \"/\".join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action",
"def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")",
"def _decode_action(self, action_id):\n legal_actions = self.game.get_legal_actions()\n if self.actions[action_id] not in legal_actions:\n if \"check\" in legal_actions:\n return \"check\"\n else:\n return \"fold\"\n return self.actions[action_id]",
"def action_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_type\")",
"def _is_string(arg):\n return isinstance(arg, types.StringTypes)",
"def validate_action(self, message=\"This action may delete data from the database. This action cannot be undone.\\nDo you wish to continue? (Y/N): \"):\n \n while True:\n print('\\n\\n')\n inp = input(message)\n \n if (inp.upper() == 'Y'):\n return True\n elif (inp.upper() == 'N'):\n return False\n \n print(\"Invalid input. Try again\")"
]
| [
"0.7728049",
"0.73375154",
"0.687277",
"0.68027675",
"0.66827106",
"0.65902036",
"0.6505975",
"0.64217037",
"0.6407963",
"0.6218089",
"0.61802155",
"0.6137043",
"0.612445",
"0.6097758",
"0.60162944",
"0.59167874",
"0.5903043",
"0.58280855",
"0.5803196",
"0.57699203",
"0.57142556",
"0.5692802",
"0.5667979",
"0.5667979",
"0.56502044",
"0.5574742",
"0.5572021",
"0.55640167",
"0.549673",
"0.5490973"
]
| 0.8675571 | 0 |
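The positive document here is only a signature; the source gives no body and does not say what counts as an "action". One widely used convention is the IRC CTCP ACTION wrapper produced by `/me` (`\x01ACTION …\x01`); assuming that convention, a sketch:

```python
ACTION_PREFIX = "\x01ACTION "
ACTION_SUFFIX = "\x01"

def is_action_str(string: str) -> bool:
    # True when the string is wrapped in CTCP ACTION markers (the /me format).
    return string.startswith(ACTION_PREFIX) and string.endswith(ACTION_SUFFIX)
```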
Returns the given string as an action. | def as_action_str(string: str) -> str: | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def str_to_action(str):\n raise NotImplementedError",
"def get_action(command):\n return command.split(\" \")[0]",
"def is_action_str(string: str) -> bool:",
"def define_action(char):\n if char == \"checks\":\n return ActionType.CHECK\n elif char == \"folds\":\n return ActionType.FOLD\n elif char == \"bets\":\n return ActionType.BET\n elif char == \"raises\":\n return ActionType.RAISE\n elif char == \"calls\":\n return ActionType.CALL\n else:\n return ActionType.UNDEFINED",
"def actionString(self,action):\n return str(self._mdp.A[action])",
"def as_action(self) -> str:\n return self.as_action_str(self.content)",
"def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> str:\n return pulumi.get(self, \"action\")",
"def label2action(label):\n items = label.split('-')\n if len(items) == 1:\n action = (items[0], None, None)\n elif len(items) == 3:\n action = tuple(items)\n else:\n raise ValueError(\"Unrecognized label: {}\".format(label))\n return action",
"def parse_action(element: Element) -> ActionType:\n # edit is a special type of action for strings\n if \"edit\" in element.attributes:\n if element.attributes[\"edit\"] == \"append\":\n return edit_append\n raise error_at(element)(f'invalid edit=\"{element.attributes[\"edit\"]}\"')\n # default action is replace\n action = element.attributes.get(\"action\", \"replace\")\n if action == \"replace\":\n return replace\n if action == \"append\":\n return append\n if action == \"delete\":\n return delete\n if action == \"merge\":\n return merge\n raise error_at(element)('invalid action=\"{:s}\".'.format(action))",
"def strip_action_str(string: str) -> str:",
"def perform_action(self, action):\n method_name = action.text().lower()\n method_name = method_name + \"_action\"\n action_method = getattr(self, method_name)\n action_method()",
"def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")",
"def _GetAction(self, action, text):\n # TODO: replace \"x in y\" checks by startswith if possible.\n if 'airportdProcessDLILEvent' in action:\n network_interface = text.split()[0]\n return 'Interface {0:s} turn up.'.format(network_interface)\n\n if 'doAutoJoin' in action:\n match = self._CONNECTED_RE.match(text)\n if match:\n ssid = match.group(1)[1:-1]\n else:\n ssid = 'Unknown'\n return 'Wi-Fi connected to SSID: {0:s}'.format(ssid)\n\n if 'processSystemPSKAssoc' in action:\n wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)\n if wifi_parameters:\n ssid = wifi_parameters.group(1)\n bssid = wifi_parameters.group(2)\n security = wifi_parameters.group(3)\n if not ssid:\n ssid = 'Unknown'\n if not bssid:\n bssid = 'Unknown'\n if not security:\n security = 'Unknown'\n\n return (\n 'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '\n 'Security: {2:s}.').format(bssid, ssid, security)\n\n return text",
"def action_name(self) -> \"str\":\n return self._attrs.get(\"actionName\")",
"def parseInputLine(self, action):\r\n output = None\r\n if action is not None and action is not '':\r\n func = getattr(self, 'cmd_' + str(action[0]).upper(), None)\r\n if func is not None:\r\n output = func()\r\n else:\r\n return fail(InvalidRequest(\"No such action\"))\r\n return succeed(output)",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(func):\n ACTIONS[func.__name__.rstrip('_')] = func\n return func",
"def get_action(self, s, eval=False):\n if eval:\n with torch.no_grad():\n action = self.actor.get_best_action(s[None, ...].to(self.device))\n else:\n if self.step_count < 20000:\n action = self.action_space[np.random.randint(self.action_space_len)]\n else:\n with torch.no_grad():\n action, _, _ = self.actor.sample_action(s[None, ...].to(self.device))\n action = action.item()\n return action",
"def get_action(self, action):\n actions = {\n self.GO_ACTION: self.go,\n self.CLICK_ACTION: self.click,\n self.CHECK_ACTION: self.check,\n self.WAIT_ACTION: self.wait,\n self.FILL_FORM_ACTION: self.fill,\n self.SELECT_FORM_ACTION: self.select\n }\n try:\n return actions[action]\n except KeyError:\n raise Exception('{0} is not a valid action, the valid actions are: {1}'.format(action,\n \", \".join(actions.keys())))",
"def _get_action(self):\n return self.__action",
"def rule_action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule_action\")",
"def _str_eval_ob(eval, act, ctxt, *obs) :\n if len(obs) == 1 :\n topic = obs[0]\n text = obs[0]\n else :\n topic = obs[0]\n text = obs[1]\n return [make_action_link(text[0], \"ask Irving Q. Tep about \"+topic[0])]",
"def key_to_action(key):\n if key == \"shutdown\":\n return \"STOP\"\n if key == \"delete\":\n return \"DESTROY\"\n if key in ARBITRARY_ACTIONS:\n return None\n else:\n return key.upper()",
"def getAction(self, nameOrAction):\n\t\tif isinstance(nameOrAction, Action):\n\t\t\treturn nameOrAction\n\t\telse:\n\t\t\treturn self.actions[nameOrAction]",
"def act(self, x):\n return self.action",
"def action(self):\n return self._get_field(\"action\")",
"def action(self, observation):\n return self.actor.get_action(observation)"
]
| [
"0.8616799",
"0.6958503",
"0.69501036",
"0.6784521",
"0.6666197",
"0.66375476",
"0.6600629",
"0.64534956",
"0.6441584",
"0.6435479",
"0.6420219",
"0.6363733",
"0.6176962",
"0.614364",
"0.60980374",
"0.6083132",
"0.60809344",
"0.60809344",
"0.60809344",
"0.60620207",
"0.60587096",
"0.60080385",
"0.59964496",
"0.5973172",
"0.59501517",
"0.59487754",
"0.5930646",
"0.59101725",
"0.59095484",
"0.5892322"
]
| 0.79646474 | 1 |
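Under the same assumed CTCP ACTION convention (see the sketch above), formatting a string as an action is just wrapping it in the markers, taking care not to double-wrap:

```python
def as_action_str(string: str) -> str:
    # Wrap plain text in CTCP ACTION markers; leave already-wrapped strings alone.
    if string.startswith("\x01ACTION ") and string.endswith("\x01"):
        return string
    return "\x01ACTION " + string + "\x01"
```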
Strip the action formatting from the given string. | def strip_action_str(string: str) -> str: | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strip_action(self) -> str:\n return self.strip_action_str(self.content)",
"def stripFormatting(self, s):\n # stripColor has to go first because of some strings, check the tests.\n s = stripColor(s)\n s = stripBold(s)\n s = stripReverse(s)\n s = stripUnderline(s)\n return s.replace('\\x0f', '').replace('\\x0F', '')",
"def as_action_str(string: str) -> str:",
"def remove_unused_char(action_list):\r\n clean_action_list = []\r\n for action in action_list:\r\n effect_element = action['action']\r\n clearnedstr = (effect_element[effect_element.index(\"effect\")\r\n + len(\"effect\"):])\r\n clean_action_list.append(clearnedstr[:-1])\r\n return clean_action_list",
"def stripFeat(s):\n if \" Featuring\" in s:\n return s[: s.index(\" Featuring\")]\n elif \" x \" in s:\n return s[: s.index(\" x \")]\n else:\n return s",
"def _StripWS(s):\r\n return re.sub('\\s+', '', s)",
"def _StripWS(s):\r\n return re.sub('\\s+', '', s)",
"def strip(s):\n parser = StrippingParser( )\n parser.feed(s)\n parser.close( )\n parser.cleanup( )\n return ''.join(parser.result)",
"def stripColor(self, s):\n return _stripColorRe.sub('', s)",
"def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})",
"def strip_other_charcter():\n pass",
"def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()",
"def get_strip_string(self, i_str):\n return ''.join(e for e in i_str if e.isalnum())",
"def _strip_ansi(s):\n if isinstance(s, str):\n return _ansi_codes.sub(r\"\\4\", s)\n else: # a bytestring\n return _ansi_codes_bytes.sub(r\"\\4\", s)",
"def strip_ansi(string, parser=ANSI_PARSER):\n string = string or \"\"\n return parser.parse_ansi(string, strip_ansi=True)",
"def strip_string(input):\n return input.lower().replace(\" \", \"\")",
"def strip_raw_codes(self, string):\n return self.ansi_regex.sub(\"\", string)",
"def strip_raw_ansi(string, parser=ANSI_PARSER):\n string = string or \"\"\n return parser.strip_raw_codes(string)",
"def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()",
"def _clean_magic(self, magic):\n if magic.lower() == 'o':\n return ''\n elif magic[:2].lower() == 'o:':\n return magic[2:]\n return magic",
"def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )",
"def _strip(obj):\n return obj.translate(None, STRIP_CHARS)",
"def ws_strip_txt(request) -> Dict[str, str]:\n return request.param",
"def strip_unsafe_tokens(self, string):\n return self.unsafe_tokens.sub(\"\", string)",
"def str_to_action(str):\n raise NotImplementedError",
"def clean_str(data, remove=''):\n return data.translate(None, remove)",
"def unscorize(s):\n return s.replace(\" \", \"_\")",
"def clean_feature(f):\n\n if f.startswith(\"a \"):\n f = f[2:]\n\n if f.startswith(\"an \"):\n f = f[3:]\n\n return str(f)",
"def stripReverse(self, s):\n return s.replace('\\x16', '')",
"def _sanitize(label):\n return re.sub(r'(\\W+| )', '', label)"
]
| [
"0.7534198",
"0.66088766",
"0.6474476",
"0.61960036",
"0.6176254",
"0.6136924",
"0.6136924",
"0.6093519",
"0.60658264",
"0.6020465",
"0.599568",
"0.5958416",
"0.59549266",
"0.5893231",
"0.5802946",
"0.5727048",
"0.56940913",
"0.5693054",
"0.568232",
"0.5680192",
"0.56799215",
"0.5659633",
"0.5655782",
"0.56502056",
"0.563731",
"0.5592339",
"0.55836546",
"0.55568033",
"0.5555221",
"0.5534458"
]
| 0.89726824 | 0 |
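And the inverse, again under the assumed convention — strip the markers if present, otherwise return the string unchanged:

```python
def strip_action_str(string: str) -> str:
    # Remove CTCP ACTION markers, returning the bare action text.
    prefix, suffix = "\x01ACTION ", "\x01"
    if string.startswith(prefix) and string.endswith(suffix):
        return string[len(prefix):-len(suffix)]
    return string
```

The three message-level entries that follow (`is_action`, `as_action`, `strip_action`) simply delegate to these string helpers on `self.content`.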
Check whether the message is an action. | def is_action(self) -> bool:
return self.is_action_str(self.content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid_action(self, action):\n if self.board[action[0]][action[1]] == None:\n return True\n \n return False",
"def match_action(self, action):\n\n return hasattr(self, self._action_handler_name(action))",
"def check_type(self):\n if self.action < 0 or self.action >= len(_action_args_dict):\n raise GameActionError('Invalid action type ({0})'.format(self.action))",
"def is_action_str(string: str) -> bool:",
"def act(self, action: str) -> bool:\n if action in utils.ROTATIONS:\n if self._rotate(self.rotation_table[action]):\n self.last_move = action\n return True\n elif action in utils.MOVEMENT:\n if self._move(action):\n self.last_move = action\n return True\n else:\n raise ValueError('Invalid move \\'{}\\''.format(action))\n\n return False",
"def is_event(event: Any) -> bool:\n return isinstance(event, MenuAction) or str(type(event)) == \"<class 'pygame_menu.events.MenuAction'>\"",
"def is_valid_git_action(action):\n\n return action in GIT_ACTIONS",
"def __action_is_valid(self, action):\n\t\tis_valid = action in self.valid_actions\n\n\t\tif is_valid:\n\t\t\tif self.__column_height(action) >= (self.height - 1):\n\t\t\t\tself.valid_actions.remove(action)\n\t\treturn is_valid",
"def isActionKey(event_string):\n\n actionKeys = [ \"Return\", \"Escape\", \"Tab\", \"BackSpace\", \"Delete\",\n \"Page_Up\", \"Page_Down\", \"Home\", \"End\" ]\n\n reply = event_string in actionKeys\n debug.println(debug.LEVEL_FINEST,\n \"orca.isActionKey: returning: %s\" % reply)\n return reply",
"def is_monoid_action(self) :\n return True",
"def __bool__(self):\n return bool(self._actions)",
"def action_type(self) -> int:\n raise Exception(\"Attempted to get action type of an anonymous Action\")",
"def can_make_action(self) -> bool:\n return not(self.has_pending_action or self.is_dead())",
"def _confirm_action(self, action):\n\t\treturn True",
"def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"",
"def authorize(self, action, author_id=None):\n if action not in CHANGE_TYPES:\n return False\n return True",
"def _msg_is_command(self, msg):\n return isinstance(msg, dict)",
"def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")",
"def has(self, ActionClass):\n for action in self.h:\n if isinstance(action, ActionClass):\n return True\n return False",
"def has_action(action: 'LoggingActions', action_bits: int) -> bool:\n\n return bool(action.value & action_bits)",
"def action_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action_type\")",
"def _get_action(self):\n return self.__action",
"def test_unknown_action(self):\n exit_string = actions.main([\"foo\"])\n self.assertEqual(\"Action foo undefined\", exit_string)",
"def reaction_check(self, payload):\n if payload.message_id != self.message.id:\n return False\n if payload.user_id not in (*self.bot.owner_ids, self._author_id):\n return False\n return payload.emoji in self.buttons",
"def take_action(self, action):\n if action is None:\n return False\n \n try:\n job = self.queue[action.job_index]\n if self.nodes[action.node_index].schedule(job):\n # only remove from queue if scheduled successfully\n self.queue.pop(action.job_index)\n return True\n return False\n except:\n return False # either job index out of range or node index out of range",
"def test_unknown_action(self):\n exit_string = actions.main(['foo'])\n self.assertEqual('Action \"foo\" undefined', exit_string)",
"def match_action(pattern, action):\n if pattern == '*' or pattern == '*:*' or pattern == action:\n return True\n\n # build a pattern for the action\n re_pattern = '^{}$'.format(pattern.replace('*', ASTERISK_RE_REPLACE))\n return re.match(re_pattern, action)",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")"
]
| [
"0.7165937",
"0.70354944",
"0.6937857",
"0.64320385",
"0.63170636",
"0.62816095",
"0.6277914",
"0.6259409",
"0.62416196",
"0.6177585",
"0.6160382",
"0.6158344",
"0.6118185",
"0.6103044",
"0.6081431",
"0.6062426",
"0.5996191",
"0.5986556",
"0.5905781",
"0.59051764",
"0.59048086",
"0.58443385",
"0.584361",
"0.58420247",
"0.58398724",
"0.58289474",
"0.5824399",
"0.57495254",
"0.57495254",
"0.57495254"
]
| 0.8249713 | 0 |
Returns the message as an action. | def as_action(self) -> str:
return self.as_action_str(self.content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_action(self):\n return self.__action",
"def get_action(self):\n return self.__action",
"def create_action_msg(self, action):\n raise NotImplementedError(\"Don't know how to translate the action to a msg\")",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def obtain_action(self):\r\n\t\treturn",
"def get_action(self):\n raise NotImplementedError",
"def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> str:\n return pulumi.get(self, \"action\")",
"def action(self):\n return self._get_field(\"action\")",
"def action(self, observation):\n return self.actor.get_action(observation)",
"def get_action(self):\n return self.current_action",
"def actionString(self,action):\n return str(self._mdp.A[action])",
"def str_to_action(str):\n raise NotImplementedError",
"def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")",
"def get_action(self, context):\n pass",
"def act(self, x):\n return self.action",
"def action(self) -> CommitterActions:\n return self._action",
"def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)",
"def action(self) -> pulumi.Output['outputs.JobStepActionResponse']:\n return pulumi.get(self, \"action\")",
"def action(self, action_id):\r\n return Action(self, action_id)",
"def action(self, action_id):\r\n return Action(self, action_id)",
"def get_action(line, _test_myid=None):\n try:\n line = line.strip()\n if not check(line):\n logging.debug('checksum failure')\n return\n\n line = line[:-8] # discard the checksum and retain the message\n tmp = json.loads(line)\n #print tmp['from']\n #print tmp['to']\n if _test_myid is None:\n _test_myid = gethostname()\n if not tmp.get('to',None) == _test_myid:\n # message not intended for me\n logging.debug('message not intended for this node')\n print('message not intended for {}'.format(_test_myid))\n return\n\n if 'action' not in tmp['payload']:\n # \"action\" is mandatory\n logging.debug('no action defined')\n return\n \n d = {}\n d['action'] = tmp['payload']['action']\n\n '''# \"m\" (sample count) is obsolete\n # ... time for Google Protocol Buffer?\n try:\n d['multi_sample'] = max(1,tmp['payload']['m'])\n except:\n pass'''\n\n d['from'] = tmp.get('from',None)\n return d\n except:\n logging.debug(traceback.format_exc())\n return None"
]
| [
"0.67650163",
"0.67649853",
"0.6624183",
"0.65623546",
"0.65623546",
"0.65623546",
"0.65623546",
"0.65623546",
"0.65623546",
"0.65537053",
"0.6530178",
"0.64753294",
"0.64666075",
"0.6461064",
"0.64377016",
"0.6340056",
"0.6218753",
"0.61965334",
"0.61937577",
"0.6175406",
"0.6175406",
"0.6175406",
"0.61715263",
"0.61673063",
"0.6108501",
"0.6098222",
"0.60774094",
"0.607213",
"0.607213",
"0.6069434"
]
| 0.7495476 | 0 |
Strip the action formatting from the message. | def strip_action(self) -> str:
return self.strip_action_str(self.content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strip_action_str(string: str) -> str:",
"def remove_unused_char(action_list):\r\n clean_action_list = []\r\n for action in action_list:\r\n effect_element = action['action']\r\n clearnedstr = (effect_element[effect_element.index(\"effect\")\r\n + len(\"effect\"):])\r\n clean_action_list.append(clearnedstr[:-1])\r\n return clean_action_list",
"def strip_command(self, slack_message: str) -> str:\n\n _, message = self.split_message(slack_message)\n return message",
"def stripFormatting(self, s):\n # stripColor has to go first because of some strings, check the tests.\n s = stripColor(s)\n s = stripBold(s)\n s = stripReverse(s)\n s = stripUnderline(s)\n return s.replace('\\x0f', '').replace('\\x0F', '')",
"def strip_symbol_from_msgs(oChecker):\n\n dNewMsgs = {}\n for sKey, tData in oChecker.msgs.items():\n dNewMsgs[sKey] = (tData[0], tData[2])\n # Monkey patch the checker\n oChecker.msgs = dNewMsgs",
"def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts",
"def ws_strip_txt(request) -> Dict[str, str]:\n return request.param",
"def _preprocess_action(self, action: np.ndarray) -> np.ndarray:\n if self.use_raw_actions:\n return action\n else:\n return super()._preprocess_action(action)",
"def as_action(self) -> str:\n return self.as_action_str(self.content)",
"def action_to_pretty_str(action) :\n raise NotImplementedError",
"def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()",
"def remove_formatting(formatted_text):\n return ''.join([formatted_tuple[1] for formatted_tuple in formatted_text]) # pylint: disable=not-an-iterable",
"def action_to_str(action):\n raise NotImplementedError",
"def _clean_magic(self, magic):\n if magic.lower() == 'o':\n return ''\n elif magic[:2].lower() == 'o:':\n return magic[2:]\n return magic",
"def clean_content(self) -> str:",
"def get_plain_message(self, message: str | None = None) -> str:\n if message is None:\n message = self.get_message()\n message_plain = re.sub(r\"[\\t\\n\\r\\f\\v]\", \"\", message)\n message_plain = re.sub(\n \"<style.*?>.+</style>\", \"\", message_plain\n ) # Special case for style tag\n message_plain = message_plain.replace(\"</p>\", \"\\n\")\n message_plain = message_plain.replace(\"</h1>\", \"\\n\\n\")\n message_plain = bleach.clean(message_plain, strip=True)\n return message_plain.strip()",
"def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line",
"def test_stripFormatting(self):\n self.assertEqual(\n irc.stripFormatting(\n irc.assembleFormattedText(\n A.bold[\n A.underline[\n A.reverseVideo[A.fg.red[A.bg.green[\"hello\"]]], \" world\"\n ]\n ]\n )\n ),\n \"hello world\",\n )",
"def _clean_message(comment):\n message = comment['message']\n # Remove comments with linked persons (they mostly contain only emojis)\n if 'message_tags' in comment:\n for tag in comment['message_tags']:\n if 'type' in tag and tag['type'] == 'user':\n message = message.replace(tag['name'], '')\n # Remove links\n message = re.sub(r'http\\S+', '', message)\n return message.strip()",
"def as_action_str(string: str) -> str:",
"def strip_content(content):\n return ' ' + content.upper().\\\n replace('+','').\\\n replace('\"','').\\\n replace('@','').\\\n replace('-','').\\\n replace('?','').\\\n replace('*',''). \\\n replace('.', '') + ' '",
"def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})",
"def stripBold(self, s):\n return s.replace('\\x02', '')",
"def strip_other_charcter():\n pass",
"def _format_action_invocation(self, action):\n txt = super(ColoredHelpFormatter, self)._format_action_invocation(\n action\n )\n if action.option_strings:\n return txt\n\n return colored(txt, 'green')",
"def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])",
"def clean_message_md(self):\n message_md = self.cleaned_data[\"message_md\"]\n lines = filter(None, message_md.splitlines())\n message_md = \" \".join(lines)\n return message_md",
"def _get_action(verb):\n aux_verbs = \"\"\n for child in verb.children:\n if child.dep_ == \"aux\" or child.dep_ == \"neg\":\n aux_verbs += str(child)\n return SpacyEventExtractor._remove_extra_whitespaces(str(aux_verbs) + ' ' + str(verb))",
"def clean_unnecessary_whitespaces(self, tweet):\n tweet = ' '.join(tweet.split())\n\n return tweet",
"def normalize(self, what):\n txt = strippedtxt(what, [\"\\002\", \"\\003\"])\n txt = re.sub(\"\\s+\", \" \", what)\n txt = stripcolor(txt)\n txt = txt.replace(\"\\002\", \"*\")\n txt = txt.replace(\"<b>\", \"*\")\n txt = txt.replace(\"</b>\", \"*\")\n txt = txt.replace(\"<i>\", \"\")\n txt = txt.replace(\"</i>\", \"\")\n txt = txt.replace(\"<b>\", \"*\")\n txt = txt.replace(\"</b>\", \"*\")\n txt = txt.replace(\"<i>\", \"\")\n txt = txt.replace(\"</i>\", \"\")\n return txt"
]
| [
"0.721275",
"0.62852895",
"0.6171277",
"0.5884014",
"0.5840245",
"0.5785069",
"0.56816316",
"0.56810516",
"0.5679864",
"0.55925167",
"0.54373753",
"0.54164183",
"0.5408169",
"0.53954107",
"0.53867286",
"0.53814524",
"0.5342451",
"0.5337432",
"0.52963245",
"0.5288288",
"0.52397287",
"0.5226114",
"0.5187748",
"0.5177057",
"0.517304",
"0.51714635",
"0.516279",
"0.51421136",
"0.5120348",
"0.5102"
]
| 0.8087658 | 0 |
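A quick usage sketch tying the three message-level helpers together, still under the assumed CTCP convention; the `Message` constructor shown here is hypothetical:

```python
msg = Message(content="\x01ACTION waves\x01")  # hypothetical Message with a .content attribute
assert msg.is_action()
assert msg.strip_action() == "waves"
assert Message(content="waves").as_action() == "\x01ACTION waves\x01"
```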
Get all available stream names of a module | def getAllStreams(name):
global index
module = index.get_module(name)
if not module:
return None
streams = set()
for s in module.get_all_streams():
streams.add(s.get_stream_name())
return list(streams) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GetStreamNames(self):\n if self._zipfile:\n for stream_name in self._zipfile.namelist():\n yield stream_name",
"def list_streams(self)->List[str]:\n stream_path = self._get_storage_path()\n stream_names = []\n all_streams = self._ls_dir()\n for strm in all_streams:\n stream_names.append(strm.replace(stream_path,\"\").replace(\"stream=\",\"\").replace(\"study=\"+self.study_name, \"\"))\n return stream_names",
"def search_stream(self, stream_name)->List[str]:\n stream_path = self._get_storage_path()\n all_streams = self._ls_dir()\n stream_names = []\n for strm in all_streams:\n if stream_name in strm:\n stream_names.append(strm.replace(stream_path,\"\").replace(\"stream=\",\"\"))\n return stream_names",
"def getDepStreams(stream):\n dep = stream.get_dependencies()[0]\n allDeps = []\n for m in dep.get_runtime_modules():\n deps = dep.get_runtime_streams(m)\n if deps:\n allDeps.append((m, deps[0]))\n return allDeps",
"def source_list(self):\n return list(self._client.group.streams_by_name().keys())",
"def getAllContexts(name, stream):\n global index\n module = index.get_module(name)\n if not module:\n return []\n\n allStreams = module.get_all_streams()\n allContexts = []\n for s in allStreams:\n if s.get_stream_name() == stream:\n allContexts.append(s)\n\n return allContexts",
"def source_list(self):\n return list(self._group.streams_by_name().keys())",
"def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names",
"def _list_streams(cls, limit, exclusive_start_stream_name):\n stream_names = []\n request_kwargs = {'Limit': limit}\n if exclusive_start_stream_name:\n request_kwargs['ExclusiveStartStreamNam'] = exclusive_start_stream_name\n response = client.list_streams(**request_kwargs)\n stream_names += response['StreamNames']\n if response['HasMoreStreams']:\n stream_names += cls._list_streams(\n limit=limit,\n exclusive_start_stream_name=stream_names[-1]\n )\n\n return stream_names",
"def modules(self):\n return self._modules.keys()",
"def realtimestreaming_streams(self, **kwargs):\n url_path = 'realtimestreaming/streams'\n self.logger.debug(\"Get list of stream names\")\n return self._common_get(url_path, parameters=kwargs)",
"def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()",
"def info(self):\n unparsed = [x for x in self.run_command('info') if x != '|']\n try:\n streams = [x.split(' ')[2] for x in [x for x in unparsed if x[0] == '+'][:-1]]\n except:\n raise ParseError(\"Could not get streams.\")\n out_list = []\n start = 1\n for stream in streams:\n cur_stream = {'Stream': stream}\n first_char = '|'\n while first_char == '|':\n cur_stream[unparsed[start].split(': ')[0][2:]] = ''.join(unparsed[start].split(': ')[1:])\n start += 1\n first_char = unparsed[start][0]\n start += 1\n out_list.append(cur_stream)\n return out_list",
"def getDeps(stream):\n return stream.get_dependencies()[0].get_runtime_modules()",
"def __get_public_names_and_types_of_module(module_obj):\n if isinstance(module_obj, type_inference_proxy_copy.TypeInferenceProxy):\n return filter(lambda name: not name.startswith(\"__\"), dir(module_obj.get_python_entity()))\n else:\n return module_obj.get_public_names_and_types()",
"def modules():",
"def names() -> Tuple[str, ...]:\n return plugins.list_all(package_name=__name__)",
"def test_get_ch_names(dummy_streamers, stream_inlets):\n for _, device, source_id, subscriptions in dummy_streamers:\n inlets = stream_inlets[source_id]\n for stream_type in subscriptions:\n ch_names = acquire.get_ch_names(inlets[source_id][stream_type]\n .info())\n assert (device.PARAMS['streams']['ch_names'][stream_type]\n == tuple(ch_names))",
"def _find_gitmodules(p4, stream_name):\n parent = p4gf_util.first_dict(p4.run('stream', '-ov', stream_name))\n for line in parent['View']:\n if '.gitmodules' in line:\n # return everything up to the ' .gitmodules' at the end of the line\n return line[:-12]\n return None",
"def get_module_names(config):\n lambdas_path = config['lambdas_path']\n return [f.strip('.py') for f in os.listdir(lambdas_path) if f.endswith('py') and not f.startswith(\"__\")]",
"def fetch_account_streamers(account:str):\n for config in accounts:\n if account in config['streamers']:\n return config['streamers']\n return",
"def getVisitableNodesNamed(self):\n\n return ((\"module\", self.subnode_module),)",
"def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__",
"def encoders(self):\n return self.rpc.call(MsfRpcMethod.ModuleEncoders)['modules']",
"def get_stream_versions(self, stream_name: str) -> list:\n stream_path = self._get_storage_path(stream_name=stream_name)\n stream_versions = []\n if self.is_stream(stream_name):\n all_streams = self._ls_dir(stream_name=stream_name)\n for strm in all_streams:\n stream_versions.append(strm.replace(stream_path,\"\").replace(\"version=\",\"\"))\n return stream_versions\n else:\n raise Exception(stream_name+\" does not exist\")",
"def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1",
"def usage(self):\n names = self.sources.keys()\n return sorted([(n.replace('__', '.'), self._resolve(n)) for n in names],\n key=lambda el: el[0])",
"def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield desc.module\n else:\n continue",
"def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval",
"def get_streams(namespace_url):\n streams = requests.get(namespace_url + '/Streams', headers=headers)\n return streams.json()"
]
| [
"0.69426733",
"0.64697057",
"0.6339101",
"0.6268361",
"0.622487",
"0.6212241",
"0.6140346",
"0.60362804",
"0.60118866",
"0.599059",
"0.5970591",
"0.59448946",
"0.5888642",
"0.5825326",
"0.58059126",
"0.57862926",
"0.5755366",
"0.57007676",
"0.56770694",
"0.5670549",
"0.56057024",
"0.5600605",
"0.5589742",
"0.55580854",
"0.55540186",
"0.55346966",
"0.5525735",
"0.55251074",
"0.55181843",
"0.55010694"
]
| 0.78462434 | 0 |
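`getAllStreams` and the related helpers below query a module-level `index`. A sketch of how such an index is typically built with the libmodulemd GObject bindings — the YAML file name is a placeholder, and error handling is elided:

```python
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd

# Build the global ModuleIndex that the helpers above and below consult.
index = Modulemd.ModuleIndex.new()
index.update_from_file("modules.yaml", False)  # False = non-strict parsing

print(getAllStreams("nodejs"))  # e.g. ['10', '12', '14']
```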
Get the default stream name of a module | def getDefaultStream(name):
global index
module = index.get_module(name)
if not module:
raise ValueError("Module '{}' not found".format(name))
defaults = module.get_defaults()
if defaults:
return defaults.get_default_stream()
return module.get_all_streams()[0].get_stream_name() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stream_name(self):\n return self._stream_name",
"def stream_name(self):\n return self._stream_name",
"def log_stream_name(self) -> str:\n ...",
"def get_stream_alias(self) -> str:",
"def log_stream_name(self) -> typing.Optional[str]:\n return self._values.get('log_stream_name')",
"def log_stream_name(self) -> typing.Optional[str]:\n return self._values.get('log_stream_name')",
"def log_stream_name(self) -> str:\n return jsii.get(self, \"logStreamName\")",
"def log_stream_name(self) -> str:\n return jsii.get(self, \"logStreamName\")",
"def _create_name(self) -> str:\n return self.stream.__class__.__name__",
"def module_name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__\n return None",
"def module_name(self):\n return self.name()",
"def module_name(self):\n return self.lib.get_module_name()",
"def GetCurrentStreamName( msg ):\n # First, try to get the info from the RecFlags\n try:\n from RecExConfig.RecFlags import rec\n msg.debug(\"Got the stream name from the RecFlags: %s\" % rec.mergingStreamName())\n streamName = rec.mergingStreamName()\n if streamName == \"\":\n streamName = \"unknownStream\"\n return streamName\n except ImportError:\n msg.info(\"Couldn't get input stream name from the RecFlags... trying AthFile directly.\")\n\n from PyUtils.MetaReader import read_metadata\n from AthenaCommon.AppMgr import ServiceMgr as svcMgr\n input_file = svcMgr.EventSelector.InputCollections[0]\n metadata = read_metadata(input_file)\n metadata = metadata[input_file] # promote all keys one level up\n\n for class_name, name in metadata['metadata_items'].items():\n if name == 'EventStreamInfo':\n return class_name\n return 'unknownStream'",
"def name(cls):\n return MODULE_NAME",
"def get_pipename(self):\n return getattr(self, 'pipename', d6tflow.cache.pipe_default_name)",
"def module_name(self):\n return self.name",
"def get_module_output(self, name):\n if name in self._modules:\n return self._modules[name].outputs['default']\n elif '_' in name:\n module, name = name.rsplit('_', 1)\n if module in self._modules:\n m = self._modules[module]\n if name in m.outputs:\n return m.outputs[name]\n raise KeyError('Could not find module output \"%s\"' % name)",
"def get_module_name(self):\n return self.module_name",
"def name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__.replace('_', '-')\n return None",
"async def stream_source(self) -> str:\n if not self._stream_enabled:\n return None\n return self._stream_source",
"def module_name(self):\n return self.config_section",
"def name(self):\n return f\"{DEFAULT_NAME}_{BINARY_SENSOR}\"",
"def get_stream_type(self) -> str:",
"def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))",
"def module_name(cls):\n return __name__.split(\".\")[0]",
"def get_stream_alias(self) -> str:\n return self.alias",
"def module_name(self) -> str:\n return to_snake_case(self.name.split('/')[-1][:-len('.proto')])",
"def log_stream_prefix(self):\n if not self._log_stream_prefix:\n if self._private_dns_name:\n self._log_stream_prefix = self._private_dns_name\n elif self._node_type:\n if self._head_node:\n self._log_stream_prefix = self._head_node.private_dns_name_short\n else:\n raise FiltersParserError(\"HeadNode instance not available. Node Type filter cannot be used.\")\n return self._log_stream_prefix",
"def get_stream_id(self) -> str:",
"def getDefaultName(self): # real signature unknown; restored from __doc__\n pass"
]
| [
"0.6737794",
"0.6737794",
"0.6623961",
"0.653456",
"0.65149754",
"0.65149754",
"0.64970565",
"0.64970565",
"0.63577265",
"0.6322237",
"0.61933964",
"0.6186538",
"0.61661714",
"0.6154561",
"0.6144581",
"0.6121341",
"0.6100625",
"0.60407394",
"0.60001075",
"0.5991742",
"0.5937913",
"0.59075177",
"0.5907458",
"0.5880111",
"0.5847362",
"0.5846186",
"0.5844139",
"0.58337563",
"0.58174807",
"0.5741768"
]
| 0.80794257 | 0 |
Get the enabled stream name of a module, or the default stream if not enabled | def getEnabledOrDefault(name):
if name == 'platform':
return 'el8'
if name not in enabledStreams:
return getDefaultStream(name)
enabled = enabledStreams[name]
return enabled.get_stream_name() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDefaultStream(name):\n global index\n module = index.get_module(name)\n if not module:\n raise ValueError(\"Module '{}' not found\".format(name))\n defaults = module.get_defaults()\n\n if defaults:\n return defaults.get_default_stream()\n\n return module.get_all_streams()[0].get_stream_name()",
"async def stream_source(self) -> str:\n if not self._stream_enabled:\n return None\n return self._stream_source",
"def stream_name(self):\n return self._stream_name",
"def stream_name(self):\n return self._stream_name",
"def log_stream_name(self) -> typing.Optional[str]:\n return self._values.get('log_stream_name')",
"def log_stream_name(self) -> typing.Optional[str]:\n return self._values.get('log_stream_name')",
"def get_stream_alias(self) -> str:",
"def get_stream_type(self) -> str:",
"def getStream(self,name):\n if (name in self._streams):\n return self._streams[name]\n return None",
"def get_stream_type(self) -> str:\n return self.stream_type",
"def log_stream_name(self) -> str:\n ...",
"def get_module_output(self, name):\n if name in self._modules:\n return self._modules[name].outputs['default']\n elif '_' in name:\n module, name = name.rsplit('_', 1)\n if module in self._modules:\n m = self._modules[module]\n if name in m.outputs:\n return m.outputs[name]\n raise KeyError('Could not find module output \"%s\"' % name)",
"def log_stream_name(self) -> str:\n return jsii.get(self, \"logStreamName\")",
"def log_stream_name(self) -> str:\n return jsii.get(self, \"logStreamName\")",
"def GetCurrentStreamName( msg ):\n # First, try to get the info from the RecFlags\n try:\n from RecExConfig.RecFlags import rec\n msg.debug(\"Got the stream name from the RecFlags: %s\" % rec.mergingStreamName())\n streamName = rec.mergingStreamName()\n if streamName == \"\":\n streamName = \"unknownStream\"\n return streamName\n except ImportError:\n msg.info(\"Couldn't get input stream name from the RecFlags... trying AthFile directly.\")\n\n from PyUtils.MetaReader import read_metadata\n from AthenaCommon.AppMgr import ServiceMgr as svcMgr\n input_file = svcMgr.EventSelector.InputCollections[0]\n metadata = read_metadata(input_file)\n metadata = metadata[input_file] # promote all keys one level up\n\n for class_name, name in metadata['metadata_items'].items():\n if name == 'EventStreamInfo':\n return class_name\n return 'unknownStream'",
"def disable(name):\n if name in enabledStreams:\n stream = enabledStreams[name]\n del enabledStreams[name]\n print(\"Disabled {} ({})\".format(name, stream.get_NSVCA()))",
"def is_stream(self):\r\n return self.stream",
"async def stream_source(self):\n return self._stream_source",
"def XMLOutputStream_getLibraryName():\n return _libsbml.XMLOutputStream_getLibraryName()",
"def get_stream_alias(self) -> str:\n return self.alias",
"def source(self):\n return self._group.stream",
"def module_name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__\n return None",
"def getAllStreams(name):\n global index\n module = index.get_module(name)\n if not module:\n return None\n streams = set()\n for s in module.get_all_streams():\n streams.add(s.get_stream_name())\n return list(streams)",
"def module_name(self):\n return self.config_section",
"def log_stream_prefix(self):\n if not self._log_stream_prefix:\n if self._private_dns_name:\n self._log_stream_prefix = self._private_dns_name\n elif self._node_type:\n if self._head_node:\n self._log_stream_prefix = self._head_node.private_dns_name_short\n else:\n raise FiltersParserError(\"HeadNode instance not available. Node Type filter cannot be used.\")\n return self._log_stream_prefix",
"def event_streaming_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"event_streaming_type\")",
"def streaming_policy_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"streaming_policy_name\")",
"def source(self):\n return self._client.group.stream",
"def get_bitstream_extension(self, mode=\"sram\"):\n if self._bitstream_ext is None:\n return None\n elif type(self._bitstream_ext) == dict:\n return self._bitstream_ext[mode]\n else:\n return self._bitstream_ext",
"def stream(self):\n\t\tdata = self._client.get(\"streams\", self.name)['stream']\n\t\tif data is not None:\n\t\t\tdata.pop('channel', None)\n\t\treturn data"
]
| [
"0.71392494",
"0.64522684",
"0.61080575",
"0.61080575",
"0.60635865",
"0.60635865",
"0.58935094",
"0.5878183",
"0.58776337",
"0.57697684",
"0.57667184",
"0.56502503",
"0.5636581",
"0.5636581",
"0.5593862",
"0.554713",
"0.5498939",
"0.54252493",
"0.5367109",
"0.533411",
"0.53076166",
"0.53014714",
"0.5267774",
"0.5255117",
"0.525322",
"0.5248483",
"0.52125263",
"0.5205975",
"0.5203512",
"0.5190345"
]
| 0.7416444 | 0 |
Get module names that a stream depends on | def getDeps(stream):
return stream.get_dependencies()[0].get_runtime_modules() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDepStreams(stream):\n dep = stream.get_dependencies()[0]\n allDeps = []\n for m in dep.get_runtime_modules():\n deps = dep.get_runtime_streams(m)\n if deps:\n allDeps.append((m, deps[0]))\n return allDeps",
"def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__",
"def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names",
"def get_module_names(config):\n lambdas_path = config['lambdas_path']\n return [f.strip('.py') for f in os.listdir(lambdas_path) if f.endswith('py') and not f.startswith(\"__\")]",
"def get_required_module_descriptors(self):\r\n return []",
"def get_dependencies(self):\n return [[\"uuid\", \"ossp-uuid\"]]",
"def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()",
"def modules():",
"def modules(self):\n return self._modules.keys()",
"def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]",
"def modules(self):\n return sorted([module for module in self._registry.values()],\n key=lambda scomp: (scomp.order, scomp.label))",
"def get_required_module_descriptors(self):\r\n\r\n # If use_for_single_location is True, this is linked to an open ended problem.\r\n if self.use_for_single_location:\r\n # Try to load the linked module.\r\n # If we can't load it, return empty list to avoid exceptions on progress page.\r\n try:\r\n linked_module = self.system.load_item(self.link_to_location)\r\n return [linked_module]\r\n except (NoPathToItem, ItemNotFoundError):\r\n error_message = (\"Cannot find the combined open ended module \"\r\n \"at location {0} being linked to from peer \"\r\n \"grading module {1}\").format(self.link_to_location, self.location)\r\n log.error(error_message)\r\n return []\r\n else:\r\n return []",
"def get_dependencies(self, alias):\n dependencies = {\"Ensembl2Reactome_All_Levels\": ['ReactomePathways'],\n \"ReactomePathways\": list(),\n \"reactome.homo_sapiens.interactions.tab-delimited\": list(),\n \"ReactomePathwaysRelation\": ['ReactomePathways']}\n return dependencies[alias]",
"def plugin_get_dependency():\n return []",
"def output_modules(self, modules): \n return set([\n names.add(n)\n for m in modules\n for n in self.graph.successors(m.name)\n ])",
"def python_modules(self) -> Sequence[Tuple[str, str]]:\n self_reference = self.meta.address.python_import\n\n answer = {\n t.ident.python_import\n for m in self.all_messages.values()\n # Quick check: We do make sure that we are not trying to have\n # a module import itself.\n for t in m.field_types if t.ident.python_import != self_reference\n }\n\n # Done; return the sorted sequence.\n return tuple(sorted(answer))",
"def _list_modules():\r\n return [\r\n desc.module_class\r\n for desc\r\n in _list_descriptors()\r\n ]",
"def get_names(dep):\n res = [dep.name]\n return res",
"def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield desc.module\n else:\n continue",
"def _find_gitmodules(p4, stream_name):\n parent = p4gf_util.first_dict(p4.run('stream', '-ov', stream_name))\n for line in parent['View']:\n if '.gitmodules' in line:\n # return everything up to the ' .gitmodules' at the end of the line\n return line[:-12]\n return None",
"def extract_dependencies(package, dependency_type):\n for dependency_list in package.candidate.get_dependencies(dependency_type):\n for dependency in dependency_list.or_dependencies:\n yield dependency.name",
"def find_with_deps(self, package_names):",
"def get_test_modules_names() -> typing.List[str]:\n\n from services.meter.tests.unit import constants_for_tests\n return constants_for_tests.TESTS_MODULES",
"def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]",
"def __get_public_names_and_types_of_module(module_obj):\n if isinstance(module_obj, type_inference_proxy_copy.TypeInferenceProxy):\n return filter(lambda name: not name.startswith(\"__\"), dir(module_obj.get_python_entity()))\n else:\n return module_obj.get_public_names_and_types()",
"def get_required_module_descriptors(self):\r\n descriptors = []\r\n for location in self.sources_list:\r\n try:\r\n descriptor = self.system.load_item(location)\r\n descriptors.append(descriptor)\r\n except ItemNotFoundError:\r\n msg = \"Invalid module by location.\"\r\n log.exception(msg)\r\n self.system.error_tracker(msg)\r\n\r\n return descriptors",
"def read_deps():\n with open(\"./dependencies.txt\", 'r') as deps:\n return [d for d in re.split(r'\\s', ''.join(deps)) if d]",
"def initial_dependencies(self) -> List[str]:\n return self.options[\"general\"][\"dependencies\"]",
"def find_modules(x):\n return Path(x).rglob('*.py')",
"def get_required_mods(self):\r\n mods = []\r\n unknowntags = []\r\n for key, value in self.dependencies.items():\r\n if value.required_by:\r\n if value.provided_by:\r\n mods.append(list(value.provided_by)[0]) #Pick random'ish if more than one.\r\n else:\r\n unknowntags.append((key, value))\r\n return {\"mods\":sorted(mods, key= lambda x: x.mod.name), \"unknown\": unknowntags}"
]
| [
"0.7177386",
"0.6437847",
"0.6343517",
"0.6296538",
"0.6271704",
"0.61826694",
"0.61645204",
"0.6148593",
"0.60953635",
"0.60798955",
"0.60089207",
"0.5954224",
"0.5952785",
"0.59372693",
"0.5895971",
"0.58944315",
"0.5886773",
"0.58806723",
"0.58730894",
"0.5867702",
"0.5834544",
"0.5825454",
"0.5814835",
"0.57908225",
"0.57628554",
"0.5759117",
"0.57041365",
"0.5695419",
"0.56621367",
"0.56613624"
]
| 0.77787983 | 0 |
Get all available contexts for a module and a stream name | def getAllContexts(name, stream):
global index
module = index.get_module(name)
if not module:
return []
allStreams = module.get_all_streams()
allContexts = []
for s in allStreams:
if s.get_stream_name() == stream:
allContexts.append(s)
return allContexts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xontrib_context(name):\n spec = find_xontrib(name)\n if spec is None:\n return None\n m = importlib.import_module(spec.name)\n pubnames = getattr(m, \"__all__\", None)\n if pubnames is not None:\n ctx = {k: getattr(m, k) for k in pubnames}\n else:\n ctx = {k: getattr(m, k) for k in dir(m) if not k.startswith(\"_\")}\n return ctx",
"def get_contexts(self):\n return self.__contexts",
"def get_contexts(self):\n return tuple(getattr(self, name) for name in self.__argnames__)",
"def getContext(namespace):",
"def get_context(self, class_name):\n\t\treturn self._get_context_all(class_name)",
"def get_contexts(self):\n return ['Hi!', '']",
"def get_contexts(self):\n return self._contexts",
"def compute_context(modules: List[ModuleHandle]) -> Dict[str, ArtifactDescriptor]:\n context: Dict[str, ArtifactDescriptor] = {}\n for m in modules:\n context = m.provenance.get_database_state(context)\n return context",
"def _context_list(self):\r\n url = \"{}/contexts/\".format(self._org_url)\r\n contexts = self._request(url)\r\n if not contexts:\r\n LOGGER.warning(\"No contexts available\")\r\n return contexts",
"def getAllStreams(name):\n global index\n module = index.get_module(name)\n if not module:\n return None\n streams = set()\n for s in module.get_all_streams():\n streams.add(s.get_stream_name())\n return list(streams)",
"def get_context(devnum=None):\n return _runtime.get_or_create_context(devnum)",
"def iter_context_objects(self):\n use_gevent = is_gevent_enabled()\n use_context = is_context_enabled()\n\n if use_context:\n tid = context_get_ident()\n elif use_gevent:\n tid = greenlet_get_ident()\n else:\n tid = thread_get_ident()\n\n objects = self._cache.get(tid)\n if objects is None:\n if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:\n self._cache.clear()\n objects = self._global[:]\n objects.extend(getattr(self._thread_context, \"stack\", ()))\n\n if use_gevent:\n objects.extend(getattr(self._greenlet_context, \"stack\", ()))\n\n if use_context:\n objects.extend(self._context_stack.get([]))\n\n objects.sort(reverse=True)\n objects = [x[1] for x in objects]\n self._cache[tid] = objects\n return iter(objects)",
"def _list_contexts(self):\r\n return sorted(list(self._bbreader.cache.keys()))",
"def init_contexts(self, shared=None) -> None:\n pass",
"def fuse_get_context():\n ctxp = _libfuse.fuse_get_context()\n ctx = ctxp.contents\n return ctx.uid, ctx.gid, ctx.pid",
"def context():\n return dict()",
"def get_context(self, publish=False):\n context = self.project.DEFAULT_CONTEXT\n try:\n file = self.project.CONTEXT_SOURCE_FILE\n # CSV\n if re.search(r'(csv|CSV)$', file):\n context.update(self.get_context_from_csv())\n # Excel\n if re.search(r'(xlsx|XLSX|xls|XLS)$', file):\n context.update(self.get_context_from_xlsx())\n except AttributeError:\n context.update(self.get_context_from_gdoc())\n\n return context",
"def context_get():\n global __context\n if __context is None:\n __context = Context()\n return __context",
"def parameter_contexts(self):\n return self._parameter_contexts",
"def get_context_used(observatory=None):\n observatory = (observatory or 'jwst').lower()\n _connected, final_context = heavy_client.get_processing_mode(observatory)\n return final_context",
"def get_context(self):\n symbol_chain = self.split_completion_object(self.get_word_before())\n current_rs_object = self.module\n\n for symbol in symbol_chain:\n try:\n current_rs_object = current_rs_object.get_object(symbol)\n logging.info(f\"New context found: {current_rs_object.name}\")\n except:\n logging.error(f\"{type(current_rs_object)} has no method get_object yet.\")\n return current_rs_object\n\n return current_rs_object",
"def get_contexts(data):\n df = __title_context_df(data)\n return df['context'].to_list()",
"def filectxs(self):\n mf = self.manifest()\n m = mf.keys()\n m.sort()\n for f in m:\n yield self.filectx(f, fileid=mf[f])",
"def pickStream(name, stream):\n if isEnabled(name):\n return\n\n allDeps = set()\n allContexts = getAllContexts(name, stream)\n for c in allContexts:\n allDeps = allDeps.union(getDeps(c))\n\n enabledDeps = []\n for d in allDeps:\n enabledDeps.append((d ,getEnabledOrDefault(d)))\n\n for ctx in allContexts:\n currDeps = getDepStreams(ctx)\n if all(i in enabledDeps for i in currDeps):\n for dstream in currDeps:\n pickStream(dstream[0], dstream[1])\n\n enable(ctx)\n return\n raise Exception(\"Not all of the dependencies of {} could be resolved using defaults.\".format(name))",
"def get_module_info_list(self):\n self._get_module_info_list = pa_module_info_cb_t(self._module_info_cb)\n pa_context_get_module_info_list(self._context,\n self._get_module_info_list,\n None)",
"def get_contexts(config, vary_fast_math=False):\n\n class CtxCreator:\n def __init__(self, api, pnum, dnum, fast_math=None):\n platform = api.get_platforms()[pnum]\n device = platform.get_devices()[dnum]\n\n fm_suffix = {True:\",fm\", False:\",nofm\", None:\"\"}[fast_math]\n self.device_id = api.API_ID + \",\" + str(pnum) + \",\" + str(dnum)\n self.platform_name = platform.name\n self.device_name = device.name\n self.id = self.device_id + fm_suffix\n\n kwds = dict(device=device)\n if fast_math is not None:\n kwds['fast_math'] = fast_math\n\n self.create = lambda: api.Context.create(**kwds)\n\n def __call__(self):\n return self.create()\n\n def __str__(self):\n return self.id\n\n apis, _ = get_apis(config)\n\n if vary_fast_math:\n fm = config.option.fast_math\n fms = dict(both=[False, True], no=[False], yes=[True])[fm]\n else:\n fms = [None]\n\n include_devices = config.option.device_include_mask\n exclude_devices = config.option.device_exclude_mask\n include_platforms = config.option.platform_include_mask\n exclude_platforms = config.option.platform_exclude_mask\n\n def name_matches_masks(name, includes, excludes):\n if len(includes) > 0:\n for include in includes:\n if re.search(include, name):\n break\n else:\n return False\n\n if len(excludes) > 0:\n for exclude in excludes:\n if re.search(exclude, name):\n return False\n\n return True\n\n ccs = []\n seen_devices = set()\n for api in apis:\n for pnum, platform in enumerate(api.get_platforms()):\n\n seen_devices.clear()\n\n if not name_matches_masks(platform.name, include_platforms, exclude_platforms):\n continue\n\n for dnum, device in enumerate(platform.get_devices()):\n if not name_matches_masks(device.name, include_devices, exclude_devices):\n continue\n\n if (not config.option.include_duplicate_devices and\n device.name in seen_devices):\n continue\n\n seen_devices.add(device.name)\n\n for fm in fms:\n ccs.append(CtxCreator(api, pnum, dnum, fast_math=fm))\n\n return ccs, [str(cc) for cc in ccs]",
"def modules():",
"def get_context(self):\n return self.context.generate()",
"def get_context():\n context = {}\n cfg = load_service_config(\"lighttpd\")\n ip = \"127.0.0.1\"\n enable_caching = False\n try:\n mconfig = load_service_mconfig_as_json('lighttpd')\n enable_caching = mconfig.enable_caching\n except LoadConfigError:\n logging.info(\"Using default values for service 'lighttpd'\")\n\n if enable_caching:\n ip = get_ip_from_if(cfg['interface'])\n\n context['interface_ip'] = ip\n context['store_root'] = cfg['store_root']\n\n return context",
"def context(self) -> _C_out:\n return self._context"
]
| [
"0.6219044",
"0.62187123",
"0.59566826",
"0.5806043",
"0.5780251",
"0.5706612",
"0.5636643",
"0.5569738",
"0.5518517",
"0.54909873",
"0.53959274",
"0.53687716",
"0.53404284",
"0.53111374",
"0.5305881",
"0.52993786",
"0.5292798",
"0.52851295",
"0.5276902",
"0.52571344",
"0.5251149",
"0.52443916",
"0.52355075",
"0.5153039",
"0.5126505",
"0.51264155",
"0.5115895",
"0.5105152",
"0.5061741",
"0.5050096"
]
| 0.80076534 | 0 |
Recursively enable a stream and its dependencies with their default streams, unless already enabled with a different stream. | def pickStream(name, stream):
if isEnabled(name):
return
allDeps = set()
allContexts = getAllContexts(name, stream)
for c in allContexts:
allDeps = allDeps.union(getDeps(c))
enabledDeps = []
for d in allDeps:
        enabledDeps.append((d, getEnabledOrDefault(d)))
for ctx in allContexts:
currDeps = getDepStreams(ctx)
if all(i in enabledDeps for i in currDeps):
for dstream in currDeps:
pickStream(dstream[0], dstream[1])
enable(ctx)
return
raise Exception("Not all of the dependencies of {} could be resolved using defaults.".format(name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_or_continue(S,cfg,bands,confluence_fp='default'):\n if confluence_fp=='default':\n return start_confluence_log_file(S,cfg,bands)\n continue_config(S,cfg,bands,confluence_fp)",
"def enable(\n verbose=False,\n silent=False,\n full_signature=True,\n copy_ok=True,\n calculate_memory=False,\n):\n\n auto_enable(verbose, silent, full_signature, copy_ok, calculate_memory)\n yield\n auto_disable()",
"def chain(*streams):\n return Stream(itertools.chain(*streams))",
"def _init_streams(self):\n @self.streams_wrapper(\"networkx\")\n def get_nx_stream(extractor_context, graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return nx_xtrct.nx_stream(extractor_context, graph)\n\n @self.streams_wrapper(\"neo4j\")\n def get_neo4j_stream(extractor_context, graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return neo4j_xtrct.neo4j_stream(extractor_context, graph)\n\n @self.streams_wrapper(\"edgelist\")\n def get_edgelist_stream(extractor_context, graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return edgelist_xtrct.edgelist_stream(extractor_context, graph)",
"def enable_logging(self, use_json=False, level='warning', log_format=DEFAULT_MESSAGE_FORMAT, stream=None,\n other_logger_has_first_line=False):\n if self._is_enabled:\n raise ValueError('re-enabling logging. Not sure whether that is ok...')\n\n if stream is None:\n self.target_stream = sys.stdout\n else:\n self.target_stream = stream\n\n if self.target_stream == sys.stdout:\n ensure_stdout_handles_unicode()\n\n log_level = LOG_LEVELS[level]\n logging.basicConfig(level=log_level, format=log_format,\n stream=self.target_stream)\n self._is_enabled = True\n\n self._use_json = use_json\n sys.excepthook = self._get_except_hook(sys.excepthook)\n\n # since there could be loggers already created we go through all of them\n # and set their levels to 0 so they will use the root logger's level\n for name in self._all_names:\n logger = self.get_or_create_silent_logger(name)\n self._set_logger_level(logger, logging.NOTSET)\n\n # add a JSON formatter to the root logger, which will be used by every logger\n if self._use_json:\n _root_logger_wrapper.set_formatter(JsonFormatter(other_logger_has_first_line))\n print('[', file=self.target_stream)",
"def stream(self, enabled=True):\n self._stream = enabled\n return self",
"def extend_module_for_streaming(module_path, use_auth_token: Optional[Union[str, bool]] = None):\n\n module = importlib.import_module(module_path)\n # open files in a streaming fashion\n if use_auth_token:\n patch_submodule(module, \"open\", partial(xopen, use_auth_token=use_auth_token)).start()\n else:\n patch_submodule(module, \"open\", xopen).start()\n # allow to navigate in remote zip files\n patch_submodule(module, \"os.path.join\", xjoin).start()",
"def _init_streams(self) -> None:\n assert self._is_root\n assert torch.cuda.is_available()\n # Stream for all-gathering parameters.\n self._streams[\"all_gather\"] = torch.cuda.Stream()\n # Stream for overlapping grad reduction with the backward pass.\n self._streams[\"post_backward\"] = torch.cuda.Stream()\n # Stream for pre-all-gather copies (e.g. H2D or precision cast).\n self._streams[\"pre_all_gather\"] = torch.cuda.Stream()",
"def reopen_streams(self):\n sys.stdin = os.fdopen(0, 'r', 0)\n sys.stdout = os.fdopen(1, 'w', 0)\n sys.stderr = os.fdopen(2, 'w', 0)",
"def getDepStreams(stream):\n dep = stream.get_dependencies()[0]\n allDeps = []\n for m in dep.get_runtime_modules():\n deps = dep.get_runtime_streams(m)\n if deps:\n allDeps.append((m, deps[0]))\n return allDeps",
"def enable_logging(\n debug=False,\n http_debug=False,\n path=None,\n stream=None,\n format_stream=False,\n format_template='%(asctime)s %(levelname)s: %(name)s %(message)s',\n handlers=None,\n):\n if not stream and not path:\n stream = sys.stderr\n\n if http_debug:\n debug = True\n if debug:\n level = logging.DEBUG\n else:\n level = logging.INFO\n\n formatter = logging.Formatter(format_template)\n\n if handlers:\n for handler in handlers:\n handler.setFormatter(formatter)\n\n else:\n handlers = []\n\n if stream is not None:\n console = logging.StreamHandler(stream)\n if format_stream:\n console.setFormatter(formatter)\n handlers.append(console)\n\n if path is not None:\n file_handler = logging.FileHandler(path)\n file_handler.setFormatter(formatter)\n handlers.append(file_handler)\n\n setup_logging('openstack', handlers=handlers, level=level)\n setup_logging('keystoneauth', handlers=handlers, level=level)\n\n # Turn off logging on these so that if loggers higher in the tree\n # are more verbose we only get what we want out of the SDK. This is\n # particularly useful when combined with tools like ansible which set\n # debug logging level at the logging root.\n # If more complex logging is desired including stevedore debug logging,\n # enable_logging should not be used and instead python logging should\n # be configured directly.\n setup_logging(\n 'urllib3', handlers=[logging.NullHandler()], level=logging.INFO\n )\n setup_logging(\n 'stevedore', handlers=[logging.NullHandler()], level=logging.INFO\n )\n # Suppress warning about keystoneauth loggers\n setup_logging('keystoneauth.discovery')\n setup_logging('keystoneauth.identity.base')\n setup_logging('keystoneauth.identity.generic.base')",
"def initialize():\n for pin in sorted(OUTPUT_PINS.values()):\n _enable_pin(pin, OUT)\n\n for pin in sorted(INPUT_PINS.values()):\n _enable_pin(pin, IN)",
"def enable_streams(self, keywords):\n for keyword in keywords:\n self.crawler.start_stream(keyword, self.mongo_controller)",
"def disable_stream_handler(func: Callable):\n\n def _wrapper(*args, **kwargs):\n for logger in LOGGER_TABLE.values():\n logger.removeHandler(STREAM_HANDLER)\n ret = func(*args, **kwargs)\n for logger in LOGGER_TABLE.values():\n logger.addHandler(STREAM_HANDLER)\n return ret\n\n return _wrapper",
"def __ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n lambda loader, node: object_pairs_hook(loader.construct_pairs(node)))\n return yaml.load(stream, OrderedLoader)",
"def inject_or(\n self,\n base_cls: Type[InjectType],\n settings: Mapping[str, object] = None,\n default: Optional[InjectType] = None,\n ) -> Optional[InjectType]:\n return self._context.inject_or(base_cls, settings, default)",
"def start_stream(self, callback, done, mode='blocking'):\n if mode == 'blocking':\n self.start_blocking_stream(callback, done)\n if mode == 'callback':\n self.start_callback_stream(callback, done)",
"def set_partner_specific_stream_false_if_no_streams(sender, instance, **kwargs):\n\n partner = instance.partner\n if partner.specific_stream and Stream.objects.filter(partner=partner).count() == 0:\n partner.specific_stream = False\n partner.save()",
"def test_multi_source_explicit(self):\n with Graph('g') as graph:\n graph.source | Node('a') | graph.sink\n graph.source * 'out2' | Node('b') | 'in2' * graph.sink",
"def populate_dirent_stream(self, stream):\n for dirent in stream:\n LOG.info(\"%s\", dirent.get_full_path())\n\n # If this directory was deleted, we cannot populate it as the\n # dirent stream it points to is not guaranteed. Once the directory\n # is deleted, the dirent stream it points to may be overwritten.\n if dirent.is_directory() and \\\n not dirent.is_deleted():\n\n chain_map = self.get_cluster_chain(dirent.first_cluster)\n\n if self.debug_log_enabled:\n LOG.debug(\"Reading directory: %s\", dirent.get_full_path())\n LOG.debug(\"Directory First Cluster: %08x\",\n dirent.first_cluster)\n LOG.debug(\"Chain Map: %s\", chain_map)\n\n for cluster in chain_map:\n LOG.debug(\"Reading Cluster: %08x\", cluster)\n\n dirent_stream = self.read_directory_stream(\n self.cluster_to_physical_offset(cluster))\n\n dirent.add_dirent_stream_to_this_directory(dirent_stream)\n # TODO: populate_children()\n self.populate_dirent_stream(dirent_stream)",
"def merge(ss: List[Stream[Any]], topics: List[Any] = None) -> Stream[Any]:\n\n def g(deps, this, src, value):\n if topics is not None:\n return (topics[ss.index(src)], value)\n return value\n\n return combine(g, ss)",
"def select(self, stream):\n\n if stream not in self._selectedStreams:\n self._selectedStreams.append(stream)",
"def enable(self):\n self.enabled = True\n for child in self.children:\n child.enable()",
"def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=AttrDict):\n class Ordered_Loader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n Ordered_Loader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, Ordered_Loader)",
"def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)",
"def yaml_ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n\n return yaml.load(stream, OrderedLoader)",
"def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)\n return yaml.load(stream, Loader=OrderedLoader)",
"def add_statestream(self, base_topic=None):\n config = {}\n if base_topic:\n config['base_topic'] = base_topic\n return setup_component(self.hass, statestream.DOMAIN, {\n statestream.DOMAIN: config})",
"def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n m = depot_re.match(depot_path)\n if m:\n depot = m.group(1)\n if depot in self.stream_depots:\n stream = '//{}/{}'.format(m.group(1), m.group(2))\n human_msg = (\n _(\"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = stream\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)",
"def merge(self, obj, **kwargs):\r\n raise NotImplementedError\r\n # if type(obj) == StreamFork:\r\n # node = obj.node\r\n # else:\r\n # node = obj\r\n #\r\n # self.stream.append(node)\r\n #\r\n # merge = MergeNode(**kwargs)\r\n # self.stream.append(merge)\r\n # self.stream.connect()\r"
]
| [
"0.48670077",
"0.483356",
"0.48210898",
"0.4794903",
"0.4747588",
"0.4735464",
"0.47152197",
"0.4679096",
"0.46170568",
"0.452739",
"0.45159292",
"0.44828388",
"0.44643793",
"0.44415295",
"0.44070897",
"0.44012928",
"0.4388118",
"0.43651462",
"0.4342453",
"0.43344104",
"0.4318421",
"0.429162",
"0.42902493",
"0.4261819",
"0.42535755",
"0.42484668",
"0.4225762",
"0.42252758",
"0.42225096",
"0.4200318"
]
| 0.6946649 | 0 |
Get a list of RPMs to blacklist | def getRpmBlacklist():
global index
enabledRpms = set()
for stream in enabledStreams.values():
enabledRpms = enabledRpms.union(stream.get_rpm_artifacts())
allRpms = set()
for name in index.get_module_names():
module = index.get_module(name)
for stream in module.get_all_streams():
allRpms = allRpms.union(stream.get_rpm_artifacts())
return list(allRpms.difference(enabledRpms)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def blacklist(self) -> List[str]:\n return self.raw_config.get(\"blacklist\", [])",
"def blacklist_remove():\n db = unitdata.kv()\n blacklist = db.get(BLACKLIST_KEY, [])\n for device in get_devices():\n try:\n blacklist.remove(device)\n except ValueError:\n raise Error('{}: Device not in blacklist.'.format(device))\n db.set(BLACKLIST_KEY, blacklist)\n db.flush()",
"def getBlackList(filename):\n #filename = \"filelist/blacklist_%s.txt\"%dataset.lstrip('/').replace('/','__')\n blacklist = [ ]\n if os.path.exists(filename):\n with open(filename,'r') as file:\n for line in file:\n line = line.rstrip('\\n')\n if line and '#' not in line:\n blacklist.append(line)\n return blacklist",
"def remove_blacklist(remit, source, commands):\n blacklisted_commands = list()\n for config in spec.BLACKLIST:\n command = make_command(remit=remit,\n source=source,\n writer=config['writer'],\n pandoc_options=config['pandoc_options'],\n extension=config['extension'])\n blacklisted_commands.append(command)\n commands = [command for command in commands\n if command not in blacklisted_commands]\n return commands",
"def pkg_service_blacklist(klass, pkg):\n return klass._pkg_service_blacklist.get(pkg.name, [])",
"async def get_blacklisted_users() -> list:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n async with db.execute(\n \"SELECT user_id, strftime('%s', created_at) FROM blacklist\"\n ) as cursor:\n result = await cursor.fetchall()\n return result",
"def get_whitelist(con):\n\n k, v = con.kv.get(\"service/rebootmgr/ignore_failed_checks\")\n if v and \"Value\" in v.keys() and v[\"Value\"]:\n return json.loads(v[\"Value\"].decode())\n return []",
"def short_whitelist(whitelist):\n for x in [\"guid-4\", \"guid-5\"]:\n whitelist.remove(x)\n return whitelist",
"def filter_buses(list_of_buses):\n for bus in list_of_buses:\n return bus",
"def get_emperor_blacklist(self, details=u'', system=None):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity)\n hosts = runner.get_inventory(group=system)\n self.json = []\n for host in hosts:\n res = self.__get_stats(host)\n blacklist = res.pop(u'blacklist') \n if self.format == u'json':\n self.json.append(blacklist)\n elif self.format == u'text':\n self.text.append(u'\\n%s' % (host))\n for inst in blacklist:\n self.text.append(u'- %s' % (inst.pop(u'id')\\\n .replace(u'.ini', u'')\\\n .replace(u'/etc/uwsgi/vassals/', u'')))\n for k,v in inst.items():\n self.text.append(u' - %-15s : %s' % (k,v))",
"def blacklist_ips(self):\r\n if self.blacklist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.blacklist.split(',')]) # pylint: disable=no-member\r",
"def exclude(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if not regex.search(s.name)]",
"def get_all_blacklisted_as_list(self):\n try:\n result = self.table.select().execute()\n if result.rowcount >= 1:\n return [x.values() for x in result]\n elif result.rowcount == 0:\n return []\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible",
"def blacklist_add():\n db = unitdata.kv()\n blacklist = db.get(BLACKLIST_KEY, [])\n for device in get_devices():\n if not os.path.exists(device):\n raise Error('{}: No such file or directory.'.format(device))\n if device not in blacklist:\n blacklist.append(device)\n db.set(BLACKLIST_KEY, blacklist)\n db.flush()",
"def get_blocked_usernames_list():\n return []",
"def badass_buys_by_key(self, key):\n badbuys = []\n for badbuy in self.badassbuy:\n if badbuy.mindkey == key:\n badbuys.append(badbuy)\n return badbuys",
"def generate_blacklist(self, target_edge):\n def contains_target(std_abbrevs):\n return target_edge in std_abbrevs\n\n def is_target(edge):\n return edge == target_edge\n\n blacklist = []\n\n # Get degree feature blacklists\n target = [e for e in self.metagraph.get_edges() if e.get_abbrev() == target_edge][0]\n reverse_target = target.inverse.get_abbrev()\n\n blacklist.append('degree_'+target_edge)\n blacklist.append('degree_'+reverse_target)\n\n # Get the metapaths features to be blacklisted\n for mp, info in self.metapaths.items():\n num_target = sum([is_target(e) for e in info['standard_edge_abbreviations']])\n num_self_refs = sum([self.is_self_referential(e) for e in info['edges']])\n\n # Remove edges with overuse of target edge.\n if num_target > 1:\n blacklist.append('dwpc_' + mp)\n\n # Remove those with 2 self-referential edges and a target edge\n elif num_self_refs > 1 and num_target > 0:\n blacklist.append('dwpc_' + mp)\n\n # Remove those with a self-reverntal edge, travel across the same edge, and a target:\n elif self.contains_self_referential(info['edges']) \\\n and contains_target(info['standard_edge_abbreviations']) \\\n and self.duplicated_edge_source_or_target(info['edges']):\n blacklist.append('dwpc_' + mp)\n\n return blacklist",
"def filter_viable_offers(want, have, offers) -> List:\n return [x for x in offers if is_offer_viable(want, have, x) is True]",
"def _more_properties_blacklist(self) -> List[str]:\n return []",
"def get_flavors_black_list(self):\n return self._sanitize(CONF.powervc.flavor_black_list)",
"def blacklistSource(self, source):\n log.info(\"blacklisting \" + source)\n if source not in GameConsole.blacklistedSources:\n GameConsole.blacklistedSources.append(source)",
"def _not_matching(values, sieve):\n return [val for val in values if val not in sieve]",
"def _parse_blacklist(path):\n if path is None:\n return []\n with open(path, 'rt') as f:\n return [line.strip() for line in f]",
"def _get_blacklist(self):\n blacklist = {}\n for b in TransifexBlacklist.objects.filter(domain=self.app.domain, app_id=self.app.id).all():\n blacklist.setdefault(b.domain, {})\n blacklist[b.domain].setdefault(b.app_id, {})\n blacklist[b.domain][b.app_id].setdefault(b.module_id, {})\n blacklist[b.domain][b.app_id][b.module_id].setdefault(b.field_type, {})\n blacklist[b.domain][b.app_id][b.module_id][b.field_type].setdefault(b.field_name, {})\n blacklist[b.domain][b.app_id][b.module_id][b.field_type][b.field_name][b.display_text] = True\n return blacklist",
"def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))",
"def exclude_from_scan(self, regex, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/excludeFromScan/', {'regex': regex, 'apikey': apikey})))",
"def list_blacklist_by_type(event, context):\n blacklist_type = str(event['pathParameters']['id'])\n\n table = dynamodb.Table(os.environ['BLACKLIST_TABLE'])\n result = table.scan(\n FilterExpression=Attr('type').eq(blacklist_type)\n )\n error_type = None\n error_msg = \"\"\n response = {\n 'statusCode': 200,\n 'body': json.dumps({\n 'items': [r['text_pattern'] for r in result['Items']],\n 'num_items': len(result['Items']),\n 'message': error_msg,\n 'error_type': error_type\n })\n }\n log.debug(response)\n\n return response",
"async def get_blacklist(self, limit=1000):\n\n url = f'https://{self.__api}/v3/blacklist'\n params = {f\"filter[clientid]\": self.clientid, \"limit\": limit}\n async with aiohttp.ClientSession() as session:\n response = await self.fetch(session, url, params=params)\n return response",
"def remove_less_than_treshold(potential_blockings, logger, dashboard_log):\n treshold = get_treshold(logger, dashboard_log)\n # create a new list containing all hosts with security value higher than treshold\n # inspired by https://stackoverflow.com/a/1207461\n return [host for host in potential_blockings if not get_average_security_value(host, logger) < treshold]",
"def filter_out_bans(mappings, bans):\n new_mappings = []\n for mapping in mappings:\n for ban in bans:\n if fnmatch.fnmatch(mapping.pattern, ban):\n break\n else:\n new_mappings.append(mapping)\n return new_mappings"
]
| [
"0.65307343",
"0.63255924",
"0.62569094",
"0.616089",
"0.60532945",
"0.5996806",
"0.5904636",
"0.58727056",
"0.58684236",
"0.58534366",
"0.58209705",
"0.57830745",
"0.5761726",
"0.5739948",
"0.57301134",
"0.5706825",
"0.569636",
"0.5680605",
"0.56762797",
"0.5670396",
"0.5670088",
"0.56510127",
"0.56421816",
"0.5639521",
"0.5623696",
"0.56127363",
"0.558352",
"0.5546732",
"0.55461776",
"0.5544256"
]
| 0.81101996 | 0 |
Get all modular RPMs in the repository | def getAllPackages(metadataPaths):
global index
index = createModuleIndex(metadataPaths)
allRpms = []
for name in index.get_module_names():
module = index.get_module(name)
for stream in module.get_all_streams():
allRpms.extend(stream.get_rpm_artifacts())
return allRpms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]",
"def getAllModules(self):\n\n modules = cmds.ls(type=\"network\")\n returnMods = []\n for module in modules:\n attrs = cmds.listAttr(module)\n if \"parent\" in attrs:\n returnMods.append(module)\n\n return returnMods",
"def get_modules(self):\n return self._modules.values()",
"def modules(self):\n return self._modules",
"def get_packages():\n packages = []\n for repo in repositories:\n packages.extend(repo.get_packages())\n return packages",
"def getModules(self):\n\n modules = cmds.ls(type=\"network\")\n for module in modules:\n attrs = cmds.listAttr(module)\n if \"rigModules\" in attrs:\n return module",
"def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)",
"def modules(self):\n return ModuleManager(self)",
"def listRepositories(self):\n return self.mini_catalog.listRepositories()",
"def all_registered_modules():\n yield from iterchain(modules.values() for modules in Registry.monomers.values())",
"def get_repos():\n\n return __do_get_repos()",
"def _get_submodules():\n import sh\n git = sh.git.bake(_tty_out=False)\n submodules = git.submodule().strip().split(\"\\n\")\n return [\n line.strip()[1:].split()[1]\n for line in submodules\n ]",
"def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data",
"def list_modules(self) -> Optional[List[str]]:\n module_list: List[str] = []\n for forge_module in self._forge_modules:\n module_list.append(forge_module.name)\n for git_module in self._git_modules:\n module_list.append(git_module.name)\n return module_list",
"def modules():",
"def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield desc.module\n else:\n continue",
"def get_repo_packages() -> List['Package']:\n return Package.get_packages_from_expac(\"-S\", [], PossibleTypes.REPO_PACKAGE)",
"def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)",
"def modules(self):\n return self.rpc.call(MsfRpcMethod.SessionCompatibleModules, [self.sid])['modules']",
"def __gitSubmodulesList(self):\n self.vcs.gitSubmoduleList(self.project.getProjectPath())",
"def Modules():\n mod = ida_idd.modinfo_t()\n result = ida_dbg.get_first_module(mod)\n while result:\n # Note: can't simply return `mod` here, since callers might\n # collect all modules in a list, and they would all re-use\n # the underlying C++ object.\n yield ida_idaapi.object_t(name=mod.name, size=mod.size, base=mod.base, rebase_to=mod.rebase_to)\n result = ida_dbg.get_next_module(mod)",
"def get_repositories():\n\n repos = json.loads(\n common.run_ffx_command(cmd=('--machine', 'json', 'repository', 'list'),\n check=True,\n capture_output=True).stdout.strip())\n to_prune = set()\n sdk_root_abspath = os.path.abspath(os.path.dirname(common.SDK_ROOT))\n for repo in repos:\n # Confirm the path actually exists. If not, prune list.\n # Also assert the product-bundle repository is for the current repo\n # (IE within the same directory).\n if not os.path.exists(repo['spec']['path']):\n to_prune.add(repo['name'])\n\n if not repo['spec']['path'].startswith(sdk_root_abspath):\n to_prune.add(repo['name'])\n\n repos = [repo for repo in repos if repo['name'] not in to_prune]\n\n remove_repositories(to_prune)\n return repos",
"def modules_registered(self) -> list[Module]:\n return [cmds[0].module for cmds in self._registry[\"by_module\"].values()]",
"def modules(self):\n return self._modules.keys()",
"def all_gene_modules():\n\n\tmodules_result = db.get_engine(current_app, 'methylation_data').execute(\"SELECT DISTINCT(module) FROM gene_modules\").fetchall()\n\tmodules = [{'module': module['module']} for module in modules_result]\n\n\treturn modules",
"def rpms(self, **kwargs):\n return self.session.listRPMs(**kwargs)",
"def avail(self,pattern=str):\t\n import re\n\n availmods = []\n avail_out = self._modulecmd(\"\"\"%s python avail %s\"\"\" % (self.modulecmd, pattern)).decode('utf-8')\n if avail_out.strip() == '':\n return availmods\n alines = [str(x) for x in avail_out.strip().splitlines()]\n repo = None\n top_insert = 0 # keep track of the head based on each time repo changes\n for aline in alines:\n if aline.strip() == '':\n repo = None\n continue\n try:\n repo = re.match(r'^-+\\s*([^-]+)\\s*-+\\s*$', aline).group(1)\n top_insert = len(availmods)\n continue\n except AttributeError:\n pass \n if repo:\n for tmpmod in aline.split():\n fullpath = os.path.join(repo, tmpmod)\n if tmpmod.lower().endswith(\"(default)\"):\n tmpmod = re.sub(r'(?i)\\(default\\)$', '', tmpmod)\n availmods.insert(\n top_insert, (\n tmpmod,\n fullpath\n )\n )\n else:\n availmods.append((tmpmod, fullpath))\n return availmods",
"def get_modules(self):\n return self._module_loader.filelist",
"def query_repositories():\n return buildapi.query_repositories()",
"def getModules(self):\n\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n\n self.locations = None\n self.modules = None\n\n self.good = True\n self.seen = set()\n\n self.getMain()\n self.getRefs()\n self.getStandard()\n\n version = self.version\n good = self.good\n app = self.app\n\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n\n mModules = []\n if mLocations:\n mModules.append(version or \"\")\n\n locations = self.locationsArg\n modules = self.modulesArg\n\n givenLocations = (\n []\n if locations is None\n else [expandDir(app, x.strip()) for x in itemize(locations, \"\\n\")]\n if type(locations) is str\n else [str(x) for x in locations]\n )\n givenModules = (\n []\n if modules is None\n else [normpath(x.strip()) for x in itemize(modules, \"\\n\")]\n if type(modules) is str\n else [normpath(str(x)) for x in modules]\n )\n\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules"
]
| [
"0.6801965",
"0.67250276",
"0.6678874",
"0.656387",
"0.655304",
"0.64945525",
"0.646505",
"0.644537",
"0.6433724",
"0.640705",
"0.6401202",
"0.63888985",
"0.63857746",
"0.6370172",
"0.6369746",
"0.63690275",
"0.63672113",
"0.6319444",
"0.6249335",
"0.6230579",
"0.6226255",
"0.62232053",
"0.621896",
"0.6211385",
"0.6194278",
"0.6185944",
"0.6176776",
"0.61730826",
"0.61618483",
"0.6143501"
]
| 0.70210683 | 0 |
Insert data into an empty linked list and check whether the head node's next pointer is None | def test_insert_head_empty_list_2(test_linkedlist):
test_linkedlist.insert_head('A')
assert test_linkedlist.head.next is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_head(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n node.next = self.head\n self.head.prev = node\n self.head = node",
"def add_first(self, data):\n # define the head as the new Node\n self.head = Node(data, next=self.head)\n # if list was empty define th tail as the head\n if self.tail is None:\n self.tail = self.head\n # set the skip back pointer if needed\n if self.head.next is not None:\n if self.head.next.next is not None:\n self.head.next.next.skip_back = self.head",
"def test_insert_head_empty_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.data == 'A'",
"def insert_end(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n temp.next = ListNode(data)",
"def insert_start(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n self.head = ListNode(data)\n self.head.next = temp",
"def insert(self, data):\n if self.head == None:\n self.head = Node(data)\n else:\n curr = self.head\n while curr.link != None:\n curr = curr.link\n curr.link = Node(data)",
"def add_first(self, data):\n\n node = self.Node(data, self.head)\n self.head = node\n\n self.list_size += 1",
"def test_insert_end_for_empty_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n assert test_linkedlist.head.data == 'A'",
"def is_empty(self):\n if self.head == None:\n return True\n else:\n return False",
"def append(self, data):\n if self.head is None:\n self.head = ListNode(data, None)\n else:\n itr = self.head\n while itr:\n if itr.next is None:\n itr.next = ListNode(data, None)\n return\n itr = itr.next",
"def insert_append(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n cur = self.head\n while cur.next != None:\n cur = cur.next\n cur.next = node\n node.prev = cur",
"def is_empty(self):\n if self.head is None:\n return True\n else:\n return False",
"def is_empty(self):\n\n if self.head == None:\n return True\n else:\n return False",
"def is_empty(self):\n\n if self.head == None:\n return True\n else:\n return False",
"def isEmpty(self):\n \"\"\"\n :type None\n :rtype Boolean\n \"\"\"\n return self.head == None",
"def empty(self):\n if self.head is None:\n return True\n return False",
"def is_empty(self):\n return self.head is None",
"def push_head(self, data):\n node = Node(data)\n\n if self.is_empty() or self._size == 1:\n self.__init_ll(node)\n else:\n current = self._head\n current._previ = node\n node._next = current\n self._head = node\n\n\n self._size += 1",
"def test_insert_head_one_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.data == 'B'",
"def isEmpty(self):\n if self.head.next == None:\n return True\n return False",
"def empty(self) -> bool:\n return True if self.head is None else False",
"def empty(self):\n return self.head == None",
"def test_insert_head_one_element_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.next.data == 'A'",
"def is_empty(self):\n # This is worse style: \"return self._head == None\"\n return self._head is None",
"def prepend(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n return\n new_node.next = self.head\n self.head = new_node",
"def prepend(self, data):\n self.head = SinglyLLNode(data=data , next=self.head)",
"def is_empty(self):\n\n return self.head is None",
"def insertTail(head, data):\n # Using the iterative solution. Recursive also exists, but I don't\n # think it offers any benifit in space/time complexity\n if head is None: # First the initial/null case:\n return ListNode(val=data)\n node = head # Then the general case - scroll tot the end of the list\n while node.next is not None:\n node = node.next\n node.next = ListNode(val=data) # tack on the new value\n return head",
"def append(self, data):\n if self.head is None: # checking a corner case of linked list being empty\n self.head = ListNode(data)\n else: # a normal traversal and append to the end of the tail node\n temp_node = self.head\n new_node = ListNode(data)\n while temp_node.next is not None:\n temp_node = temp_node.next\n temp_node.next = new_node",
"def prepend(self, data):\n new_node = SingleNode(data)\n new_node.next = self.head\n self.head = new_node"
]
| [
"0.69598556",
"0.6849278",
"0.67394346",
"0.6602789",
"0.6595135",
"0.64504087",
"0.6411734",
"0.6335657",
"0.63337535",
"0.63198644",
"0.6302688",
"0.62967426",
"0.62671685",
"0.62671685",
"0.62614995",
"0.6217475",
"0.6202579",
"0.61525685",
"0.6090097",
"0.6051709",
"0.6039739",
"0.6029747",
"0.6007958",
"0.6001246",
"0.5992485",
"0.5981956",
"0.5980074",
"0.59277284",
"0.5921825",
"0.59214616"
]
| 0.6989526 | 0 |
Insert a head element into a single-element linked list and check whether the head node has the expected data. | def test_insert_head_one_element_list_1(test_linkedlist):
test_linkedlist.insert_head('A')
test_linkedlist.insert_head('B')
assert test_linkedlist.head.data == 'B' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_insert_head_empty_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.data == 'A'",
"def test_insert_head_empty_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.next is None",
"def test_insert_head_one_element_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.next.data == 'A'",
"def insertBeforeHeader(self, e):\n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._start = self._insert_between(e, self._start._prev, self._start)\n except:\n print(\"insertion unsuccessful...\")\n return",
"def test_insert_head_for_two_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n test_linkedlist.insert_head('C')\n assert test_linkedlist.head.data == 'C'",
"def insert_before_item(self, x, data):\n if self.head is None:\n raise ValueError(\"No elements in list\")\n if x == self.head.data:\n new_node = SingleNode(data)\n new_node.next = self.head\n self.head = new_node\n return\n n = self.head\n while n.next is not None:\n if n.next.data == x:\n break\n n = n.next\n if n.next is None:\n print(\"Item is not in the list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node",
"def insert_first(self, e):\n self._insert_between(e, self._head, self._head._next)",
"def insert_first(self, e):\n self._insert_between(e, self._header, self._header._next)",
"def insert_head(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n node.next = self.head\n self.head.prev = node\n self.head = node",
"def test_linked_list_insertion_on_single_val_successful(empty_list):\n empty_list.insert(42)\n assert empty_list.head.val == 42\n assert len(empty_list) == 1",
"def add_first(self, e):\n self._insert_between(e, self._head, self._head._next)",
"def add_first(self, data):\n # define the head as the new Node\n self.head = Node(data, next=self.head)\n # if list was empty define th tail as the head\n if self.tail is None:\n self.tail = self.head\n # set the skip back pointer if needed\n if self.head.next is not None:\n if self.head.next.next is not None:\n self.head.next.next.skip_back = self.head",
"def add_first(self, data):\n\n node = self.Node(data, self.head)\n self.head = node\n\n self.list_size += 1",
"def insert_start(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n self.head = ListNode(data)\n self.head.next = temp",
"def add_first(self, elem):\n if self.is_empty():\n self.head = self.tail = self.Node(elem, None, None)\n else:\n self.head.prev = self.Node(elem, None, self.head)\n self.head = self.head.prev\n \n self.size += 1",
"def add_first(self, e):\n return self._insert_between(e, self._header, self._header._next)",
"def insert_node(self, head, node):\n prev, curr = None, head\n while curr.val < node.val:\n prev, curr = curr, curr.next\n if not prev:\n head = node\n else:\n prev.next = node\n node.next = curr\n return head",
"def addFirst(self, element):\n if element is None:\n raise TypeError('The input element is NoneType')\n\n newNode = Node(element)\n if self.__nelems == 0:\n self.__head = self.__tail = newNode\n\n else:\n newNode.setNext(self.__head)\n self.__head = newNode\n\n self.__nelems += 1",
"def _first_insert(self, e):\n self._start = self._Node(e, None, None)\n self._start._prev = self._start\n self._start._next = self._start\n self._size += 1",
"def test_head_after_append_in_list(new_dll):\n new_dll.append(6)\n assert new_dll.head.value == 3",
"def _insert_first(self, item: Any) -> None:\n if self.is_empty():\n raise IndexError\n return self.insert(0, item)",
"def test_append_to_full_deque_check_head(full_deque):\n full_deque.append(\"Denny Way\")\n assert full_deque._deque.head.value == 3",
"def insert_first(self, e):\n print(self._insert_between(e, self._header, self._header._next)) # después del encabezado",
"def test_insert_end_for_empty_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n assert test_linkedlist.head.data == 'A'",
"def head(self, head):\n\n self._head = head",
"def insert_first(self, item):\n if self.is_empty():\n self._first = item\n self._rest = LinkedListRec([None])\n else:\n new_list = LinkedListRec([self._first])\n new_list._rest = self._rest\n self._first = item\n self._rest = new_list",
"def test_append_to_empty_deque_check_head(empty_deque):\n empty_deque.append(\"Denny Way\")\n assert empty_deque._deque.head.value == \"Denny Way\"",
"def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node",
"def assert_has_valid_head(self, response, expected):\r\n assert 'head' in response\r\n head = response['head']\r\n assert isinstance(head, str)\r\n assert head == expected",
"def test_insert_end_for_one_element_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n test_linkedlist.insert_end('B')\n assert test_linkedlist.head.data == 'A'"
]
| [
"0.7000382",
"0.6920265",
"0.68926394",
"0.6658777",
"0.664612",
"0.6491131",
"0.6382923",
"0.63249904",
"0.6275315",
"0.62428296",
"0.61293507",
"0.6121938",
"0.6120845",
"0.6103657",
"0.60847497",
"0.5946656",
"0.5900113",
"0.5896753",
"0.5894562",
"0.5886943",
"0.5839833",
"0.5827674",
"0.58150625",
"0.58038926",
"0.5801836",
"0.5800011",
"0.5799279",
"0.578763",
"0.57581455",
"0.5716475"
]
| 0.7153454 | 0 |
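The negatives in the row above all implement head insertion (prepend). For reference, a minimal self-contained sketch of that pattern, with Node and LinkedList as illustrative names rather than names taken from any row:

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

class LinkedList:
    def __init__(self):
        self.head = None

    def insert_head(self, data):
        # Prepend in O(1): the new node points at the old head.
        self.head = Node(data, next=self.head)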
Insert a head element into a single-element linked list and check whether head.next points to the second element | def test_insert_head_one_element_list_2(test_linkedlist):
test_linkedlist.insert_head('A')
test_linkedlist.insert_head('B')
assert test_linkedlist.head.next.data == 'A' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_insert_head_empty_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.next is None",
"def test_insert_head_one_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.data == 'B'",
"def test_insert_head_for_two_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n test_linkedlist.insert_head('C')\n assert test_linkedlist.head.data == 'C'",
"def insert_node(self, head, node):\n prev, curr = None, head\n while curr.val < node.val:\n prev, curr = curr, curr.next\n if not prev:\n head = node\n else:\n prev.next = node\n node.next = curr\n return head",
"def test_insert_head_empty_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.data == 'A'",
"def insert_before_item(self, x, data):\n if self.head is None:\n raise ValueError(\"No elements in list\")\n if x == self.head.data:\n new_node = SingleNode(data)\n new_node.next = self.head\n self.head = new_node\n return\n n = self.head\n while n.next is not None:\n if n.next.data == x:\n break\n n = n.next\n if n.next is None:\n print(\"Item is not in the list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node",
"def insertBeforeHeader(self, e):\n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._start = self._insert_between(e, self._start._prev, self._start)\n except:\n print(\"insertion unsuccessful...\")\n return",
"def insert_beg(self,node):\n if self.head == node:\n raise CircularReference(\n 'Head and next can not be the same ref'\n )\n\n node.next = self.head\n self.head = node",
"def addAtHead(self, val):\n cur = linkNode(val)\n cur.next = self.head.next\n cur.prev = self.head\n\n self.head.next = cur\n if cur.next:\n cur.next.prev = cur\n\n if cur.next == None: # first node\n self.tail = cur\n # self.printList()",
"def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node",
"def insert_first(self, e):\n self._insert_between(e, self._head, self._head._next)",
"def test_linked_list_insertion_on_single_val_successful(empty_list):\n empty_list.insert(42)\n assert empty_list.head.val == 42\n assert len(empty_list) == 1",
"def insert(self, index, item):\n if 1 <= index <= self.count + 1:\n if self.count == 0: #laat item naar zichzelf wijzen, zodat als er een tweede item komt er een circulaire ketting gevormd kan worden\n self.head.next = item\n item.next = item\n if index == 1: #speciaal geval waar de pointer van de head naar het nieuwe item moet wijzen\n firstItem = self.head.next\n item.next = firstItem.next\n firstItem.next = item\n self.head.next = item #head moet naar het nieuwe item wijzen\n else:\n prev = self.head\n for teller in range(1, index): #zoek de node die net op plaats index-1 staat\n prev = prev.next\n item.next = prev.next\n prev.next = item\n self.count += 1\n return True\n else:\n return False",
"def add_first(self, data):\n # define the head as the new Node\n self.head = Node(data, next=self.head)\n # if list was empty define th tail as the head\n if self.tail is None:\n self.tail = self.head\n # set the skip back pointer if needed\n if self.head.next is not None:\n if self.head.next.next is not None:\n self.head.next.next.skip_back = self.head",
"def split_head(line, is_head=lambda line: line.startswith('>')):\n if is_head(line):\n return True\n else:\n return False",
"def test_insert_end_for_one_element_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n test_linkedlist.insert_end('B')\n assert test_linkedlist.head.data == 'A'",
"def add_first(self, e):\n self._insert_between(e, self._head, self._head._next)",
"def add_first(self, node_to_add):\n node_to_add.next = self.head\n self.head = node_to_add",
"def insert_after_item(self, x, data):\n n = self.head\n while n is not None:\n if n.data == x:\n break\n n = n.next\n if n is None:\n raise Exception(\"Item not in list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node",
"def insert_head(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n node.next = self.head\n self.head.prev = node\n self.head = node",
"def add_before(self, pointer, member=None, index=None, clone=True):\n if not(type(pointer)==Member or type(pointer)==dict):\n return False\n else:\n if type(pointer)==dict:\n temp = Member()\n temp.setMember(pointer)\n pointer = temp\n else:\n if clone:\n temp = Member()\n temp.setMember(dict([(x,y) for x,y in pointer.getMember().items()]))\n pointer=temp\n \n if member and type(member)==Member:\n start = self.head\n if start==member:\n pointer.setLink(start)\n self.head=pointer\n return True\n while start:\n if start.getLink()==member:\n temp = start.getLink()\n start.setLink(pointer)\n pointer.setLink(temp)\n if pointer.getLink()==None:\n self.tail=pointer\n return True\n start = start.getLink()\n return False\n \n if type(index)==int:\n if index==0:\n pointer.setLink(self.head)\n self.head=pointer\n return True\n count=0\n start=self.head\n while count<index-1:\n count+=1\n start=start.getLink()\n if not start:\n return False\n else:\n temp = start.getLink()\n start.setLink(pointer)\n pointer.setLink(temp)\n if pointer.getLink()==None:\n self.tail=pointer\n return True\n return False",
"def add_first(self, elem):\n if self.is_empty():\n self.head = self.tail = self.Node(elem, None, None)\n else:\n self.head.prev = self.Node(elem, None, self.head)\n self.head = self.head.prev\n \n self.size += 1",
"def insert_start(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n self.head = ListNode(data)\n self.head.next = temp",
"def insert_first(self, item):\n if self.is_empty():\n self._first = item\n self._rest = LinkedListRec([None])\n else:\n new_list = LinkedListRec([self._first])\n new_list._rest = self._rest\n self._first = item\n self._rest = new_list",
"def insertAtEnd(self, e):\n # self.insertBeforeHeader(e) # this is not true since the head will be changed\n \n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._insert_between(e, self._start._prev, self._start) # even if the method return a node, the node should not be the head\n except:\n print(\"insertion unsuccessful...\")\n return",
"def insert(self, new_element, position):\n current = self.head\n count = 1\n if position > 1:\n while ((current)and (count < position)):\n if(count == position-1):\n\n new_element.next=current.next\n current.next = new_element\n break\n #print(\"count\",count)\n current = current.next\n count = count + 1\n elif position == 1:\n new_element.next = self.head\n self.head = new_element\n\n pass",
"def addAtHead(self, val):\n node = ListNode(val)\n node.next = self.head.next\n self.head.next = node\n if self.head is self.tail:\n self.tail = node\n self.len += 1",
"def _first_insert(self, e):\n self._start = self._Node(e, None, None)\n self._start._prev = self._start\n self._start._next = self._start\n self._size += 1",
"def insert_before(self, node_to_insert_before, data):\n node = Node(data)\n temp = self.head\n if temp.data == node_to_insert_before:\n node.next = temp\n self.head = node\n return\n while temp.next:\n if temp.next.data == node_to_insert_before:\n break\n temp = temp.next\n if not temp.next:\n print('Item doesn\\t exist')\n return\n node.next = temp.next\n temp.next = node",
"def addAtHead(self, val):\n new_node = ListNode(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1"
]
| [
"0.68290764",
"0.6784454",
"0.67691284",
"0.6407408",
"0.6398689",
"0.634001",
"0.6122741",
"0.6031908",
"0.6015013",
"0.5961826",
"0.5920558",
"0.5915399",
"0.59067553",
"0.5889279",
"0.5888695",
"0.58771574",
"0.5876606",
"0.5869111",
"0.58359754",
"0.5826269",
"0.5800353",
"0.57949495",
"0.57763875",
"0.5752479",
"0.5714373",
"0.5692297",
"0.568827",
"0.56802565",
"0.5660864",
"0.5660229"
]
| 0.6998045 | 0 |
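The test above depends on a test_linkedlist fixture that the row does not show. A plausible pytest sketch, assuming the LinkedList class from the aside above:

import pytest

@pytest.fixture
def test_linkedlist():
    # One fresh, empty list per test.
    return LinkedList()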
Insert a head element into a two-element linked list and check whether the head node has the expected data. | def test_insert_head_for_two_element_list_1(test_linkedlist):
test_linkedlist.insert_head('A')
test_linkedlist.insert_head('B')
test_linkedlist.insert_head('C')
assert test_linkedlist.head.data == 'C' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_insert_head_one_element_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.next.data == 'A'",
"def test_insert_head_empty_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.next is None",
"def test_insert_head_one_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.data == 'B'",
"def test_insert_head_empty_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.data == 'A'",
"def insertBeforeHeader(self, e):\n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._start = self._insert_between(e, self._start._prev, self._start)\n except:\n print(\"insertion unsuccessful...\")\n return",
"def insert_head(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n node.next = self.head\n self.head.prev = node\n self.head = node",
"def insert_before_item(self, x, data):\n if self.head is None:\n raise ValueError(\"No elements in list\")\n if x == self.head.data:\n new_node = SingleNode(data)\n new_node.next = self.head\n self.head = new_node\n return\n n = self.head\n while n.next is not None:\n if n.next.data == x:\n break\n n = n.next\n if n.next is None:\n print(\"Item is not in the list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node",
"def insert_start(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n self.head = ListNode(data)\n self.head.next = temp",
"def insert_node(self, head, node):\n prev, curr = None, head\n while curr.val < node.val:\n prev, curr = curr, curr.next\n if not prev:\n head = node\n else:\n prev.next = node\n node.next = curr\n return head",
"def test_insert_end_for_empty_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n assert test_linkedlist.head.data == 'A'",
"def test_insert_end_for_one_element_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n test_linkedlist.insert_end('B')\n assert test_linkedlist.head.data == 'A'",
"def add_first(self, data):\n # define the head as the new Node\n self.head = Node(data, next=self.head)\n # if list was empty define th tail as the head\n if self.tail is None:\n self.tail = self.head\n # set the skip back pointer if needed\n if self.head.next is not None:\n if self.head.next.next is not None:\n self.head.next.next.skip_back = self.head",
"def test_head_after_append_in_list(new_dll):\n new_dll.append(6)\n assert new_dll.head.value == 3",
"def test_linked_list_insertion_on_single_val_successful(empty_list):\n empty_list.insert(42)\n assert empty_list.head.val == 42\n assert len(empty_list) == 1",
"def insert_before(self, node_to_insert_before, data):\n node = Node(data)\n temp = self.head\n if temp.data == node_to_insert_before:\n node.next = temp\n self.head = node\n return\n while temp.next:\n if temp.next.data == node_to_insert_before:\n break\n temp = temp.next\n if not temp.next:\n print('Item doesn\\t exist')\n return\n node.next = temp.next\n temp.next = node",
"def insert_beg(self,node):\n if self.head == node:\n raise CircularReference(\n 'Head and next can not be the same ref'\n )\n\n node.next = self.head\n self.head = node",
"def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node",
"def insert_first(self, e):\n self._insert_between(e, self._header, self._header._next)",
"def assert_has_valid_head(self, response, expected):\r\n assert 'head' in response\r\n head = response['head']\r\n assert isinstance(head, str)\r\n assert head == expected",
"def test_append_left_head_is_new_node(dq_1):\n dq_1.append_left('threve')\n assert dq_1._dll.head.data == 'threve'",
"def insert_first(self, e):\n self._insert_between(e, self._head, self._head._next)",
"def split_head(line, is_head=lambda line: line.startswith('>')):\n if is_head(line):\n return True\n else:\n return False",
"def addAtHead(self, val):\n cur = linkNode(val)\n cur.next = self.head.next\n cur.prev = self.head\n\n self.head.next = cur\n if cur.next:\n cur.next.prev = cur\n\n if cur.next == None: # first node\n self.tail = cur\n # self.printList()",
"def insertBefore(self,p,e):\r\n \r\n if p == self.head: #if p is the head node\r\n e.next = p #link e to p\r\n p.prev = e #link p to e\r\n self.head = e #set e to be the 'new' head node\r\n \r\n else: \r\n e.prev = p.prev #link e to prev node of p\r\n e.next = p #link e to p\r\n (p.prev).next = e #link prev node of p to e\r\n p.prev = e #link prev node to e\r\n \r\n self.size +=1 #increase length of linked list by 1\r",
"def insertTail(head, data):\n # Using the iterative solution. Recursive also exists, but I don't\n # think it offers any benifit in space/time complexity\n if head is None: # First the initial/null case:\n return ListNode(val=data)\n node = head # Then the general case - scroll tot the end of the list\n while node.next is not None:\n node = node.next\n node.next = ListNode(val=data) # tack on the new value\n return head",
"def add_before(self, data, new_node):\n if not self.head:\n raise Exception(\"List is empty\")\n if self.head.data == data:\n return self.add_first(new_node)\n prev_node = self.head\n for node in self:\n if node.data == data:\n prev_node.next = new_node\n new_node.next = node\n return\n prev_node = node\n raise Exception(\"Node with data '{}' not found\".format(data))",
"def add_first(self, data):\n\n node = self.Node(data, self.head)\n self.head = node\n\n self.list_size += 1",
"def insertAtEnd(self, e):\n # self.insertBeforeHeader(e) # this is not true since the head will be changed\n \n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._insert_between(e, self._start._prev, self._start) # even if the method return a node, the node should not be the head\n except:\n print(\"insertion unsuccessful...\")\n return",
"def insert_after_item(self, x, data):\n n = self.head\n while n is not None:\n if n.data == x:\n break\n n = n.next\n if n is None:\n raise Exception(\"Item not in list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node",
"def insert_end(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n temp.next = ListNode(data)"
]
| [
"0.71093637",
"0.6993354",
"0.69920653",
"0.6874157",
"0.6395523",
"0.6193493",
"0.6157132",
"0.610049",
"0.59965223",
"0.5888415",
"0.58738816",
"0.5845826",
"0.58392084",
"0.5838037",
"0.57361645",
"0.5719376",
"0.5718986",
"0.56913036",
"0.5687391",
"0.56646043",
"0.564347",
"0.5631183",
"0.5594286",
"0.5578476",
"0.55579066",
"0.5539175",
"0.55363655",
"0.55306137",
"0.55204606",
"0.55083525"
]
| 0.71451944 | 0 |
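Ordering assertions like the one above are easier to state with a whole-list traversal helper; to_list below is a hypothetical name, not part of any row:

def to_list(linked):
    # Collect node values front to back.
    out, node = [], linked.head
    while node is not None:
        out.append(node.data)
        node = node.next
    return out

# After insert_head('A'), insert_head('B'), insert_head('C'),
# to_list(ll) == ['C', 'B', 'A'].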
Insert a last element into an empty linked list and check whether the head node has the expected data | def test_insert_end_for_empty_list(test_linkedlist):
test_linkedlist.insert_end('A')
assert test_linkedlist.head.data == 'A' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insertAtEnd(self, e):\n # self.insertBeforeHeader(e) # this is not true since the head will be changed\n \n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._insert_between(e, self._start._prev, self._start) # even if the method return a node, the node should not be the head\n except:\n print(\"insertion unsuccessful...\")\n return",
"def test_insert_end_for_one_element_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n test_linkedlist.insert_end('B')\n assert test_linkedlist.head.data == 'A'",
"def insert_end(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n temp.next = ListNode(data)",
"def test_insert_head_empty_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.next is None",
"def test_insert_head_empty_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.data == 'A'",
"def add_last(self, data):\n node = self.Node(data, None)\n\n if self.is_empty():\n self.head = node\n else:\n tail = self.getNode(self.list_size - 1)\n tail.next_node = node\n\n self.list_size += 1",
"def test_insert_end(self):\n sll = SinglyLinkedList()\n a = Node('a')\n b = Node('b')\n c = Node('c')\n sll.insert_beg(a)\n sll.insert_end(b)\n sll.insert_beg(c)\n actual = [i.data for i in sll][-1]\n expected = 'b'\n assert(actual==expected)",
"def insertLast(self, value):\n if not self.isFull():\n self._data.append(value)\n return True\n else:\n return False",
"def add_last(self, elem):\n if self.is_empty():\n self.head = self.tail = self.Node(elem, None, None)\n else:\n self.tail.nxt = self.Node(elem, self.tail, None)\n self.tail = self.tail.nxt\n\n self.size += 1",
"def test_last_when_empty(self):\n l_list = DoubleLinkedList()\n with self.assertRaises(Exception) as context:\n l_list.last()\n self.assertTrue('Empty list' in str(context.exception))",
"def add_last(self, data):\n # if list empty set head and tail as the new Node\n if self.head is None:\n self.tail = Node(data, next=None)\n self.head = self.tail\n # else set new tail\n else:\n self.tail.next = Node(data, next=None)\n # set the skip back pointer if needed\n if self.head != self.tail:\n if self.tail.skip_back is None:\n self.tail.next.skip_back = self.head\n else:\n self.tail.next.skip_back = self.tail.skip_back.next\n # set the tail to the new one\n self.tail = self.tail.next",
"def test_linked_list_insertion_on_single_val_successful(empty_list):\n empty_list.insert(42)\n assert empty_list.head.val == 42\n assert len(empty_list) == 1",
"def test_append_to_empty_deque_check_head(empty_deque):\n empty_deque.append(\"Denny Way\")\n assert empty_deque._deque.head.value == \"Denny Way\"",
"def test_deque_append_one_head_is_tail(dq):\n dq.append(8)\n assert dq._dll.head == dq._dll.tail",
"def is_empty(self):\n if self.head == None:\n return True\n else:\n return False",
"def test_deque_append_one_node(dq):\n dq.append(4)\n assert dq._dll.head.data == 4",
"def insert_after(self, node_to_insert_after, data):\n node = Node(data)\n temp = self.head\n while temp:\n if temp.data == node_to_insert_after:\n break\n temp = temp.next\n if not temp:\n print('Item does not exist')\n return\n node.next = temp.next\n temp.next = node",
"def test_insert_head_one_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.data == 'B'",
"def insert_append(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n cur = self.head\n while cur.next != None:\n cur = cur.next\n cur.next = node\n node.prev = cur",
"def is_empty(self):\n if self.head is None:\n return True\n else:\n return False",
"def is_empty(self):\n\n if self.head == None:\n return True\n else:\n return False",
"def is_empty(self):\n\n if self.head == None:\n return True\n else:\n return False",
"def test_last(self):\n l_list = DoubleLinkedList()\n l_list.push(1234)\n l_list.push(12)\n self.assertEqual(l_list.get_list()[-1], l_list.last())",
"def is_empty(self):\n return self.head is None",
"def test_append_to_full_deque_check_head(full_deque):\n full_deque.append(\"Denny Way\")\n assert full_deque._deque.head.value == 3",
"def test_head_after_append_in_list(new_dll):\n new_dll.append(6)\n assert new_dll.head.value == 3",
"def test_initialization_with_empty_list_last_node_check():\n queue = Queue([])\n assert queue._queue.last_node is None",
"def test_last_element(self):\n self.assertEqual(functions.last_element([1, 2, 3]), 3)\n self.assertEqual(functions.last_element([]), None)",
"def test_insert_head_one_element_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.next.data == 'A'",
"def add_first(self, data):\n # define the head as the new Node\n self.head = Node(data, next=self.head)\n # if list was empty define th tail as the head\n if self.tail is None:\n self.tail = self.head\n # set the skip back pointer if needed\n if self.head.next is not None:\n if self.head.next.next is not None:\n self.head.next.next.skip_back = self.head"
]
| [
"0.6620652",
"0.65475994",
"0.6387041",
"0.63748395",
"0.63285327",
"0.63142574",
"0.6290541",
"0.625654",
"0.6144856",
"0.6062653",
"0.6061434",
"0.6054086",
"0.60341364",
"0.5983253",
"0.5946131",
"0.59438145",
"0.5936144",
"0.592606",
"0.5887565",
"0.58757454",
"0.587368",
"0.587368",
"0.5858656",
"0.5844233",
"0.5844069",
"0.5833204",
"0.5832949",
"0.58112264",
"0.5789089",
"0.5775752"
]
| 0.7035622 | 0 |
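An insert_end consistent with the assertions above walks to the last node before linking in the new one. A minimal sketch, reusing the Node class assumed earlier:

def insert_end(self, data):
    # Append in O(n): walk to the last node, then link the new one on.
    node = Node(data)
    if self.head is None:
        self.head = node
        return
    tail = self.head
    while tail.next is not None:
        tail = tail.next
    tail.next = node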
Insert a last element into a single-element linked list and check that the head node keeps the expected data | def test_insert_end_for_one_element_list(test_linkedlist):
test_linkedlist.insert_end('A')
test_linkedlist.insert_end('B')
assert test_linkedlist.head.data == 'A' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_insert_end_for_empty_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n assert test_linkedlist.head.data == 'A'",
"def test_insert_end(self):\n sll = SinglyLinkedList()\n a = Node('a')\n b = Node('b')\n c = Node('c')\n sll.insert_beg(a)\n sll.insert_end(b)\n sll.insert_beg(c)\n actual = [i.data for i in sll][-1]\n expected = 'b'\n assert(actual==expected)",
"def test_last(self):\n l_list = DoubleLinkedList()\n l_list.push(1234)\n l_list.push(12)\n self.assertEqual(l_list.get_list()[-1], l_list.last())",
"def insertLast(self, value):\n if not self.isFull():\n self._data.append(value)\n return True\n else:\n return False",
"def test_linked_list_insertion_on_single_val_successful(empty_list):\n empty_list.insert(42)\n assert empty_list.head.val == 42\n assert len(empty_list) == 1",
"def add_last(self, elem):\n if self.is_empty():\n self.head = self.tail = self.Node(elem, None, None)\n else:\n self.tail.nxt = self.Node(elem, self.tail, None)\n self.tail = self.tail.nxt\n\n self.size += 1",
"def add_last(self, data):\n node = self.Node(data, None)\n\n if self.is_empty():\n self.head = node\n else:\n tail = self.getNode(self.list_size - 1)\n tail.next_node = node\n\n self.list_size += 1",
"def add_last(self, data):\n # if list empty set head and tail as the new Node\n if self.head is None:\n self.tail = Node(data, next=None)\n self.head = self.tail\n # else set new tail\n else:\n self.tail.next = Node(data, next=None)\n # set the skip back pointer if needed\n if self.head != self.tail:\n if self.tail.skip_back is None:\n self.tail.next.skip_back = self.head\n else:\n self.tail.next.skip_back = self.tail.skip_back.next\n # set the tail to the new one\n self.tail = self.tail.next",
"def insert_end(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n temp.next = ListNode(data)",
"def _add_last(cls, node, value):\n # Check if element is the last element\n if node.next_ is None:\n node.next_ = Node(value)\n return\n\n # Recursively go to next node\n cls._add_last(node.next_, value)",
"def insertLast(self, value: int) -> bool:\n if not self.isFull():\n # 后端插入始终是先移动后插入,self.rear始终指向后端最后插入的元素\n self.rear = self.move_forward(self.rear)\n self.q[self.rear] = value\n return True\n else:\n return False",
"def insert_after_item(self, x, data):\n n = self.head\n while n is not None:\n if n.data == x:\n break\n n = n.next\n if n is None:\n raise Exception(\"Item not in list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node",
"def insert_after(self, node_to_insert_after, data):\n node = Node(data)\n temp = self.head\n while temp:\n if temp.data == node_to_insert_after:\n break\n temp = temp.next\n if not temp:\n print('Item does not exist')\n return\n node.next = temp.next\n temp.next = node",
"def test_last_when_empty(self):\n l_list = DoubleLinkedList()\n with self.assertRaises(Exception) as context:\n l_list.last()\n self.assertTrue('Empty list' in str(context.exception))",
"def test_insert_head_one_element_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.data == 'B'",
"def add_last(self, node_to_add):\n if self.head == None:\n self.head = node_to_add\n return\n node = self.head\n # while node.next is not None:*\n while node.next is not None:\n node = node.next\n node.next = node_to_add",
"def addLast(self, element):\n if element is None:\n raise TypeError('The input element is NoneType')\n\n newNode = Node(element)\n if self.__nelems == 0:\n self.__head = self.__tail = newNode\n\n else:\n self.__tail.setNext(newNode)\n self.__tail = newNode\n\n self.__nelems += 1",
"def test_insert_head_empty_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.next is None",
"def test_insert_head_one_element_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.next.data == 'A'",
"def add_last(self, e):\n newest = self._Node(e, self._tail, None) # node will be new tail node, prev point to old tail\n if self.is_empty():\n self._head = newest # special case: previously empty\n else:\n self._tail._next = newest\n self._tail = newest # update reference to tail node\n self._size += 1",
"def test_tail_after_append_in_list(new_dll):\n new_dll.append(6)\n assert new_dll.tail.value == 6",
"def test_insert_head_empty_list_1(test_linkedlist):\n test_linkedlist.insert_head('A')\n assert test_linkedlist.head.data == 'A'",
"def insert_last(self, e):\n self._insert_between(e, self._tail._prev, self._tail)",
"def insertLast(self, value: int) -> bool:\n if self.isFull():\n return False\n\n self.arr[self.rear] = value\n self.rear = (self.rear + 1) % self.capacity\n return True",
"def add_after(self, data, new_node):\n if not self.head:\n raise Exception(\"List is empty\")\n for node in self:\n if node.data == data:\n new_node.next = node.next\n node.next = new_node\n return\n raise Exception(\"Node with data '{}' not found\".format(data))",
"def insert_last(self, value: int) -> bool:\r\n if self.size != self.capacity:\r\n self.deque[self.lastIndex] = value\r\n self.size += 1\r\n if self.lastIndex == self.capacity - 1:\r\n self.lastIndex = 0\r\n else:\r\n self.lastIndex += 1\r\n return True\r\n return False",
"def test_last_element(self):\n self.assertEqual(functions.last_element([1, 2, 3]), 3)\n self.assertEqual(functions.last_element([]), None)",
"def test_deque_append_one_head_is_tail(dq):\n dq.append(8)\n assert dq._dll.head == dq._dll.tail",
"def addAfter(self, newItem, afterItem):\n \"\"\"\n start from front\n \"\"\"\n # if self.rear == None:\n # return False\n # curr = self.rear.next # curr at first\n # prev = self.rear\n # while True:\n # if curr.data == afterItem:\n # newNode = Node(newItem, curr.next)\n # curr.next = newNode\n # if curr == self.rear:\n # # newNode becomes new last\n # self.rear = newNode\n # return True\n # prev = curr\n # curr = curr.next\n # if prev == self.rear:\n # break\n # return False\n\n \"\"\"\n start from rear\n \"\"\"\n if self.rear == None:\n return False\n curr = self.rear\n while True:\n if curr.data == afterItem:\n newNode = Node(newItem, curr.next)\n curr.next = newNode\n if curr == self.rear:\n # newNode becomes new last\n self.rear = newNode\n return True\n curr = curr.next\n if curr == self.rear:\n break\n return False",
"def insertAtEnd(self, e):\n # self.insertBeforeHeader(e) # this is not true since the head will be changed\n \n if self.is_empty(): # if this is the first insert method call,\n print(\"As you do not have any start node yet, the given element will be a start node.\")\n self._first_insert(e)\n return\n \n try: # if there is more than one nodes,\n self._insert_between(e, self._start._prev, self._start) # even if the method return a node, the node should not be the head\n except:\n print(\"insertion unsuccessful...\")\n return"
]
| [
"0.72178334",
"0.69149065",
"0.69074094",
"0.6875248",
"0.6817494",
"0.6682206",
"0.65892625",
"0.6489145",
"0.6454365",
"0.64060044",
"0.6381768",
"0.6363875",
"0.6361612",
"0.6344563",
"0.6337286",
"0.63229567",
"0.63185537",
"0.6296967",
"0.62793654",
"0.62726855",
"0.62327504",
"0.6228232",
"0.6220316",
"0.6212585",
"0.61862653",
"0.6180825",
"0.61728007",
"0.61670965",
"0.6150303",
"0.60907716"
]
| 0.7321098 | 0 |
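Several negatives above keep a tail pointer, which turns the O(n) append into O(1). A sketch of that variant under the same Node assumption:

class LinkedListWithTail:
    def __init__(self):
        self.head = None
        self.tail = None

    def insert_end(self, data):
        # O(1) append: the tail pointer removes the O(n) walk.
        node = Node(data)
        if self.head is None:
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node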
Create a 3-element linked list with a cycle and test whether the cycle_present() function detects it | def test_cycle_present_true(test_linkedlist):
test_linkedlist.insert_end('A')
test_linkedlist.insert_end('B')
test_linkedlist.insert_end('C')
# create a cycle - connection from 3rd to 2nd element
test_linkedlist.head.next.next.next = test_linkedlist.head.next
# assert cycle_present() function returns True
assert test_linkedlist.cycle_present() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_cycle(head):\n\n # We specify that an empty list won't have a cycle.\n if head is None:\n return False\n\n assert isinstance(head, Node)\n\n pass",
"def has_cycle(link):\r\n # collect_list = [link]\r\n # while not link == Link.empty:\r\n # collect_list.append(link.first)\r\n # link = link.rest\r\n # if link.rest in collect_list:\r\n # return True\r\n # return False\r\n s = link\r\n while not link == Link.empty:\r\n if link.rest == s:\r\n return True\r\n else:\r\n link = link.rest\r\n return False",
"def contains_cycle(head: ListNode) -> bool:\n p1 = head # p1 be at a distance = d from the head (by advancing by one)\n p2 = head # p2 be at a distance = 2*d from the head (by advancing by two)\n\n while p1 and p2 and p2.next: # If p2 is at last node (and its next is None) then no cycle detected\n if p1 == p2: # Cycle detected if they meet\n return True\n p1 = p1.next\n p2 = p2.next.next\n\n return False # No cycle detected",
"def has_cycle(link):\n\n ###############\n # My Solution #\n ###############\n \n def tracker(link, seen = []):\n if link in seen:\n print('True')\n return\n if link.rest == Link.empty:\n print('False')\n return\n seen.append(link)\n tracker(link.rest)\n\n\n return tracker(link)",
"def check_cycle(cycle):\n try:\n if cycle[0][0] == cycle[-1][1]:\n return True\n else:\n return False\n except:\n return False",
"def hasCycle(self, head: ListNode) -> bool:\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow: return True\n return False",
"def test_cycle(self):\n g = Graph(3)\n g.add_edge(0, 1)\n g.add_edge(0, 2)\n # g.add_edge(0, 0)\n assert g.contains_cycle() is False\n g.add_edge(1, 2)\n assert g.contains_cycle() is True",
"def cycle_check(node):\n node_marker1 = node_marker2 = node\n\n while node_marker2 and node_marker2.nextnode:\n node_marker1 = node_marker1.nextnode # node_marker1 runs 1 node at each step\n node_marker2 = node_marker2.nextnode.nextnode # node_marker2 runs 2 node at each step\n if node_marker1 == node_marker2:\n return True # meaning that they are meeting each other at some point because of linked list is cycle.\n # End of while\n return False",
"def has_cycle_constant(link):\n \n ###############\n # My Solution #\n ###############\n\n one = link\n two = link\n while (two.rest != Link.empty and two.rest.rest != Link.empty):\n one = one.rest\n two = two.rest.rest\n if(one == two):\n return True\n\n return False",
"def is_cyclic(linked_list):\n # if linked-list is empty\n if not linked_list.head or not linked_list.head.next:\n return False\n\n slow = fast =linked_list.head\n \n while fast and fast.next:\n slow=slow.next #slow takes one step at a time\n fast= fast.next.next #fast takes two step at a time\n \n #if slow meets with fast that means linked-list is cyclic\n if fast==slow:\n return True\n return False",
"def detectCycle(self, head: ListNode) -> ListNode:\n fast = slow = head \n while fast and fast.next:\n fast, slow = fast.next.next, slow.next\n if fast == slow: \n fast = head \n while fast != slow: fast, slow = fast.next, slow.next\n return fast\n return None",
"def has_cycle(self):\n traversed = dict()\n for i in self.adj_list:\n traversed[i] = False\n\n for i in self.adj_list:\n if not traversed[i]:\n if self.has_cycle_helper(i, traversed, -1):\n return True\n return False",
"def has_cycle(head):\n\n def cycle_len(end):\n \"\"\" Detect the length of cycle \"\"\"\n start, step = end, 0\n while True:\n step += 1\n start = start.next\n if start is end:\n return step\n\n fast = slow = head\n\n # None appears => list has an end => no cycle\n while fast and fast.next and fast.next.next:\n slow, fast = slow.next, fast.next.next\n if slow is fast:\n print(slow.data, fast.data)\n # set cycle_len_advanced_iter to head\n cycle_len_advanced_iter = head\n \n print(cycle_len(slow))\n # advance cycle_len_advanced_iter to the current point of slow\n for _ in range(cycle_len(slow)):\n cycle_len_advanced_iter = cycle_len_advanced_iter.next\n\n print(cycle_len_advanced_iter.data)\n # both iterator advance in tandom\n it = head\n while it is not cycle_len_advanced_iter:\n print(\"it: {} cycle: {}\".format(it.data, cycle_len_advanced_iter.data))\n it = it.next\n cycle_len_advanced_iter = cycle_len_advanced_iter.next\n \n print(\"it: {} cycle: {}\".format(it.data, cycle.data))\n\n return it # iter is the start of the cycle\n\n return None # No cycle",
"def has_cycle(G,start):\n if G == None:\n return False\n else:\n for vertex in G:\n vertex.setColor(\"white\")\n vertex.setPredecessor(None)\n vertex.setDistance(sys.maxint)\n vertex.setEntryTime(0)\n vertex.setExitTime(0)\n \n dfsTraversal = []\n stack = []\n time = 0\n start.setColor(\"gray\")\n start.setPredecessor(None)\n time += 1\n start.setEntryTime(time)\n stack.append(start)\n while stack:\n currNode = stack.pop()\n dfsTraversal.append(currNode.getId())\n for node in currNode.getConnections():\n if node.getColor() == \"gray\":\n return True\n if node.getColor() == \"white\":\n time += 1\n node.setColor(\"gray\")\n node.setPredecessor(currNode)\n node.setEntryTime(time)\n stack.append(node)\n currNode.setColor(\"black\")\n time += 1\n currNode.setExitTime(time)\n \n return False",
"def detectCycle(self, head):\n node_dict = {}\n current_node = head\n index = 0\n while current_node:\n if current_node in node_dict:\n return current_node\n node_dict[current_node] = index\n current_node = current_node.next\n index += 1\n return None",
"def creates_cycle(connections, test):\n node_in, node_out = test\n\n if node_in == node_out:\n # Self-loop\n return True\n\n visited = {node_out}\n while True:\n num_added = 0\n for a, b in connections:\n if a in visited and b not in visited:\n if b == node_in:\n return True\n\n visited.add(b)\n num_added += 1\n\n if num_added == 0:\n return False",
"def is_cyclic(linked_list):\n # if linked-list is empty\n if not linked_list.head or not linked_list.head.next:\n return False\n\n visited=set()\n current=linked_list.head\n\n while current:\n #if the node is already in visited set that mean linked-list is cyclic\n if current.data in visited:\n return True\n #if the node is not visited set that means it appears 1st time so add it on the visited set\n else:\n visited.add(current.data)\n #incement the loop counter \n current=current.next \n\n return False",
"def find_cycle(self):\n # from guido's blog :\n # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html\n worklist = set(self.successors)\n while worklist:\n stack = [worklist.pop()]\n while stack:\n top = stack[-1]\n for node in self.successors.get(top, ()):\n try:\n # raises ValueError if node is not in stack.\n cycle = stack[stack.index(node) :]\n succs = dict(\n (source, [cycle[(i + 1) % len(cycle)]])\n for i, source in enumerate(cycle)\n )\n return Digraph(succs, self.get_score, self.get_label)\n except ValueError:\n pass\n if node in worklist:\n stack.append(node)\n worklist.remove(node)\n break\n else:\n stack.pop()\n return None",
"def test_cycles():\n graph = Graph()\n for one, two in [(1, 2), (2, 3), (3, 1)]:\n graph.add_edge(one, two)\n cycles = list(graph.find_cycles())\n eq_(len(cycles), 1)\n eq_(cycles[0], [1, 2, 3])",
"def solution(head: ListNode) -> ListNode:\n p1 = head # p1 be at a distance = d from the head (by advancing by one)\n p2 = head # p2 be at a distance = 2*d from the head (by advancing by two)\n\n while p1 and p2 and p2.next:\n p1 = p1.next\n p2 = p2.next.next\n if p1 == p2: # Cycle detected if they meet\n break\n if p2 is None or p2.next is None:\n return None # No cycle\n\n # Cycle is detected and now we must find the starting point of the cycle, this is where the maths needs to be proven\n # Move either pointer to the head. Each pointer is k steps away from the loop's start. If they move at the same\n # pace, they MUST meet at the start of the loop.\n p1 = head\n while p1 != p2:\n p1 = p1.next\n p2 = p2.next\n\n # Both pointers point to the start of the cycle\n return p1\n\n return False # No cycle detected",
"def _check_cycle(self, node):\n # run a DFS for each child node until we find\n # a) a leaf (then backtrack)\n # b) node (cycle)\n outgoing = self._outgoing\n if node in outgoing:\n iter_ = iter\n stack = [(node, iter_(outgoing[node]).next)]\n exhausted, push, pop = StopIteration, stack.append, stack.pop\n\n while stack:\n try:\n child = stack[-1][1]()\n except exhausted:\n pop()\n else:\n if child == node:\n raise DependencyCycle(map(_op.itemgetter(0), stack))\n elif child in outgoing:\n push((child, iter_(outgoing[child]).next))",
"def detect_loop(self):\n tortoise = self.head\n hare = self.head\n while hare:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return True\n return False",
"def is_cycle(self, state, visited):\n substate = (state.getPacmanPosition(),\n state.getGhostPositions(),\n state.getFood())\n\n if substate in visited:\n return True\n else:\n visited.append(substate)\n\n return False",
"def cyclic(n):\n if seen[n]: return seen[n] == -1\n seen[n] = -1 #GRAY\n if any(cyclic(nn) for nn in digraph.get(n, []) if seen[nn] != 1): return True\n seen[n] = 1 #BLACK\n ans.append(n)\n return False",
"def make_circular(self):\n if self.head is None:\n raise EmptyListError(\"please add items to the list before trying again\")\n \n if self.head.next is None:\n self.head.next = self.head\n return None\n \n curr_node = self.head\n while curr_node.next is not None:\n curr_node = curr_node.next\n curr_node.next = self.head\n return None",
"def has_cycle(graph):\n ds = DisjointSet()\n\n # creates a set of all graph nodes\n node_set = set()\n for edge in graph:\n node_set.add(edge.node1)\n node_set.add(edge.node2)\n\n for item in node_set:\n ds.make_set(item)\n\n for edge in graph:\n same_set = ds.union(edge.node1, edge.node2)\n if same_set:\n return True\n\n return False",
"def solution_alternate(head: ListNode) -> ListNode:\n DUMMY = ListNode(-1)\n curr = head\n while curr:\n if curr.next == DUMMY: # If the node is already pointing to the dummy, then it indicates start of cycle\n return curr\n\n next_copy = curr.next # Save link to the next node that is about to be re-pointed to the dummy\n curr.next = DUMMY # Re-point the next node to the dummy node\n curr = next_copy # Using the saved link, update curr\n return None",
"def has_cycle(graph):\n stack = deque()\n visited_vertices = set()\n\n random_vertex = next(iter(graph.get_vertices().values()))\n prev_vertex = random_vertex\n vertex = random_vertex\n\n # prev_vertex let us to track \"parent\" vertex in undirected graph\n stack.append((prev_vertex, vertex))\n\n while len(stack) > 0:\n prev, vertex = stack.pop()\n\n visited_vertices.add(vertex)\n\n for adjacent_edge in vertex.get_outbound_edges():\n # this graph is undirected, so end_vertex may be start_vertex and vice versa\n if vertex != adjacent_edge.get_end_vertex():\n adjacent_vertex = adjacent_edge.get_end_vertex()\n else:\n adjacent_vertex = adjacent_edge.get_start_vertex()\n\n # If there is an adjacent vertex that has been already visited\n # and this vertex is not previous(parent) then there is a cycle in graph.\n if adjacent_vertex in visited_vertices and adjacent_vertex != prev:\n return True\n\n if adjacent_vertex not in visited_vertices:\n stack.append((vertex, adjacent_vertex))\n\n return False",
"def detectCycle(self):\n slowP = self.numberList[-1]\n fastP = self.numberList[-1]\n while True:\n # step fast twice and slow once\n fastP = self.numberList[fastP - 1]\n fastP = self.numberList[fastP - 1]\n slowP = self.numberList[slowP - 1]\n print fastP, slowP\n if slowP == fastP:\n cycleVal = slowP\n break\n count = 0\n while True:\n count += 1\n slowP = self.numberList[slowP - 1]\n if slowP == cycleVal:\n return count",
"def create_cycle_list(self):\r\n self.cycle_list = None\r\n self.list_label = (\"Enter a list of cycle times (in integer PLC) \"\r\n \"separated by commas (e.g., 1, 5, 7). Must \"\r\n \"have same number of points as current list or be \"\r\n \"empty. An empty list will autocopy the cycle time.\"\r\n )\r\n self.window_title = \"Cycle Intervals\"\r\n if self.spd_type_index == 2 and self.connected:\r\n self.cycle_list = self.list_box.getText(self, self.window_title,\r\n self.list_label)[0]\r\n self.cycle_list_float =list(\r\n map(float, re.sub(',', '', self.cycle_list).split())\r\n )\r\n\r\n self.cycle_list_time = [i * 16.667e-3 for i\r\n in self.cycle_list_float]\r\n\r\n if self.cycle_list:\r\n self.cmd = \", \".join(str(e) for e in\r\n self.cycle_list_time)\r\n print(self.cmd)\r\n self.I_source.write(\"SOUR:LIST:DEL \" + self.cmd)\r\n self.SweepPulseDeltaCycle.setValue(self.cycle_list_float[0])\r\n\r\n #print(self.I_source.query(\"SOUR:LIST:COMP:POIN?\"))\r\n\r\n else:\r\n self.cmd = ', '.join(str(e) for e in\r\n [self.SweepPulseDeltaCycle.value()*16.667e-3\r\n for x in range(self.spd_points)])\r\n self.cycle_list_float = [self.SweepPulseDeltaCycle.value()\r\n for x in range(self.spd_points)]\r\n self.cycle_list = ' '.join(str(e) for e in\r\n self.cycle_list_float)\r\n self.I_source.write(\"SOUR:LIST:DEL \" + self.cmd)\r\n #print(self.I_source.query(\"SOUR:LIST:DEL?\"))\r\n self.update_spd_sweep_type()"
]
| [
"0.6907678",
"0.687814",
"0.67647743",
"0.671243",
"0.6531942",
"0.6443912",
"0.6440989",
"0.64005125",
"0.63992447",
"0.6299232",
"0.62967825",
"0.62917066",
"0.61344707",
"0.6031512",
"0.6025081",
"0.5982313",
"0.59708685",
"0.5873659",
"0.58675855",
"0.57296795",
"0.56469685",
"0.5572512",
"0.5565117",
"0.553364",
"0.55174565",
"0.5463104",
"0.5412818",
"0.5407886",
"0.5406727",
"0.5389542"
]
| 0.8052806 | 0 |
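The row does not show the cycle_present implementation the test exercises; the standard choice, and the one most negatives above use, is Floyd's tortoise-and-hare. A sketch under that assumption:

def cycle_present(self):
    # A fast pointer advancing two steps per iteration meets a slow
    # pointer advancing one step if and only if the list has a cycle.
    slow = fast = self.head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return True
    return False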
Class constructor. Creates one of seven basic shapes. | def __init__(self, shape_num):
self.shape_num = shape_num
if shape_num == 1:
self.width = 4
self.height = 4
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[0][2] = 1
self.grid[1][2] = 1
self.grid[2][2] = 1
self.grid[3][2] = 1
self.color = Color.SilverPink
elif shape_num == 2:
self.width = 3
self.height = 3
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[0][1] = 1
self.grid[0][2] = 1
self.grid[1][2] = 1
self.grid[2][2] = 1
self.color = Color.TuftsBlue
elif shape_num == 3:
self.width = 3
self.height = 3
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[2][1] = 1
self.grid[0][2] = 1
self.grid[1][2] = 1
self.grid[2][2] = 1
self.color = Color.ChromeYellow
elif shape_num == 4:
self.width = 2
self.height = 2
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[0][0] = 1
self.grid[0][1] = 1
self.grid[1][0] = 1
self.grid[1][1] = 1
self.color = Color.Independence
elif shape_num == 5:
self.width = 3
self.height = 3
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[1][0] = 1
self.grid[2][0] = 1
self.grid[0][1] = 1
self.grid[1][1] = 1
self.color = Color.ForestGreen
elif shape_num == 6:
self.width = 3
self.height = 3
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[1][1] = 1
self.grid[0][2] = 1
self.grid[1][2] = 1
self.grid[2][2] = 1
self.color = Color.Byzantine
elif shape_num == 7:
self.width = 3
self.height = 3
self.grid = [[0 for x in range(self.height)] for y in range(self.width)]
self.grid[0][0] = 1
self.grid[1][0] = 1
self.grid[1][1] = 1
self.grid[2][1] = 1
self.color = Color.Coquelicot
self.top_space = self.get_top_space()
self.bottom_space = self.get_bottom_space()
self.x = int((12 - self.width) / 2)
self.y = 1 - self.top_space
self.last_drop_time = perf_counter() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, shape):\n\n self.shape = shape",
"def __init__(self, shape_num):\n\n\t\tself.shape_num = shape_num\n\t\tself.shape = shapes[shape_num].copy()\n\t\tself.width = len(self.shape[0])\n\t\tself.height = len(self.shape)",
"def __init__(self, shape):\n self.eyes = [(), ()]\n self.shape = shape\n self.state = 0\n self.new_frame()",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def shape(self) -> Shape:",
"def __init__(self, shape: Tuple[int, int] = (3, 3)):\r\n\r\n if shape[0] <= 2 or shape[1] <= 2:\r\n raise exc.MeshException(\r\n \"The rectangular pixelization must be at least dimensions 3x3\"\r\n )\r\n\r\n self.shape = (int(shape[0]), int(shape[1]))\r\n self.pixels = self.shape[0] * self.shape[1]\r\n super().__init__()\r\n\r\n self.run_time_dict = {}",
"def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h",
"def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h",
"def __init__(self):\n self.vertices = ((0, 0, 0),(1, 0, 0),(0, 1, 0),(0, 0, 1))\n self.edges=(0,1),(0,2),(0,3)",
"def __init__(self):\n self.superelevations = []\n self.shapes = []",
"def __init__(self, shape, data):\n self.shape = shape\n self.data = data",
"def shape(self):",
"def shape(self):",
"def __init__ (self, shape) :\r\n self._shape=[int(s) for s in shape]\r\n offset=[1]\r\n for i,incr in enumerate(self._shape[:-1]) :\r\n offset.append(offset[i]*incr)\r\n self._offset=offset",
"def NewShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_NewShape(self, *args)",
"def setup(self):\n self.poly2 = Polygon([(145, 60), (201, 69), (265, 46), (333, 61), (352, 99), (370, 129), (474, 138), (474, 178), (396, 225), (351, 275), (376, 312), (382, 356), (338, 368), (287, 302), (224, 304), (128, 338), (110, 316), (129, 270), (83, 231), (65, 51), (83, 163), (103, 201), (90, 74), (126, 162)])\n self.poly2.set_direction(\"E\")\n self.poly1 = Polygon([(905, 328),(877, 367),(944, 413),(1004, 384),(1019, 307),(953, 248),(880, 250),(865, 278),(883, 325)])\n self.poly1.set_direction(\"SW\")\n self.poly3 = Polygon([(900, 600), (950,650), (1000, 500)])\n self.poly3.set_direction(\"N\")\n self.p1 = Point(485, 138)\n self.p1.set_direction(\"SE\")\n self.p2 = Point(self.width/2, self.height/2)\n self.p2.set_direction(\"NW\")\n self.p3 = Point(86,163)\n self.p3.set_direction(\"SE\")\n #a separate list for each different type of shape for collision purposes.\n self.polys = [self.poly1, self.poly2, self.poly3]\n self.points = [self.p1, self.p2, self.p3]",
"def __init__( self, name, shape = None, **attributes):\n if shape is None: shape = defaultShape\n ElementContainer.__init__( self, name, shape = shape, **attributes)\n return",
"def __init__(self, the_input_shape, num_classes):\n self.the_input_shape = the_input_shape\n self.num_classes = num_classes",
"def __init__(self, myShape, ignore_orientation=False):\n self.myShape = myShape\n self.ignore_orientation = ignore_orientation\n\n # the topoFactory dicts maps topology types and functions that can\n # create this topology\n self.topoFactory = {\n TopAbs_VERTEX: topods.Vertex,\n TopAbs_EDGE: topods.Edge,\n TopAbs_FACE: topods.Face,\n TopAbs_WIRE: topods.Wire,\n TopAbs_SHELL: topods.Shell,\n TopAbs_SOLID: topods.Solid,\n TopAbs_COMPOUND: topods.Compound,\n TopAbs_COMPSOLID: topods.CompSolid\n }\n self.topExp = TopExp_Explorer()",
"def __init__(self, shape: Tuple[int, int], spacing: float, asymmetric_grid: bool):\n cols, rows = shape\n super().__init__(\n CalibrationTargetType.CircleGrid,\n rows,\n cols,\n spacing=spacing,\n asymmetric_grid=asymmetric_grid,\n )",
"def __init__(self, posn : Point, w : float, h: float):\n self.corner = posn\n self.width = w\n self.height = h",
"def __init__(self, posn, w, h, c):\r\n self.corner = posn\r\n self.width = w\r\n self.height = h\r\n self.collision = c",
"def __init__( self , wingspan = 1.0 ):\n OGLDrawable.__init__( self ) # ------------------------- Parent class init\n \n # 1. Calc and set geo\n length = wingspan / 2.0\n fuseLen = length / 2.0\n depth = fuseLen / 2.0\n \n fuseHalf = fuseLen / 2.0\n dpthHalf = depth / 2.0\n wingHalf = wingspan / 2.0\n \n front = [ fuseHalf , 0.0 , 0.0 ]\n bottom = [ 0.0 , 0.0 , -dpthHalf ]\n back = [ -fuseHalf , 0.0 , 0.0 ]\n top = [ 0.0 , 0.0 , dpthHalf ]\n rghtWTip = [ -length+fuseHalf , -wingHalf , 0.0 ]\n leftWTip = [ -length+fuseHalf , wingHalf , 0.0 ]\n \n self.set_verts( [ front , bottom , back , top , rghtWTip , leftWTip ] )\n # 0 , 1 , 2 , 3 , 4 , 5\n self.triangles = (\n 3 , 0 , 5 , # Top Front Left\n 2 , 3 , 5 , # Top Back Left\n 0 , 1 , 5 , # Bottom Front Left\n 1 , 2 , 5 , # Bottom Back Left\n \n 0 , 3 , 4 , # Top Front Right\n 3 , 2 , 4 , # Top Back Right\n 1 , 0 , 4 , # Bottom Front Right\n 2 , 1 , 4 , # Bottom Back Right\n )\n \n # 2. Set color\n # FIXME",
"def __init__(self,x_size=32,y_size=32):\n\n self._shape = (x_size,y_size)\n self._offset = (0,0)\n self._chain_offset = 0 \n self._transform_function = None",
"def __init__(self, *args):\n _ShapeBuild.ShapeBuild_ReShape_swiginit(self,_ShapeBuild.new_ShapeBuild_ReShape(*args))",
"def __init__(self, shape, r=2, d=-1):\n self.radius = r\n if d == -1:\n self.stride = 2*r+1\n else:\n self.stride = d\n self.image_shape = shape\n self.patch_shape = ( r*2+1, 2*r+1 )",
"def __init__(self, pen, square_side_size, squares=None):\n self.border_color = (128, 101, 23)\n self.square_dark = (188, 100, 75)\n self.square_light = (255, 255, 255)\n self.not_select_color = (0, 0, 0)\n self.select_color = (0, 0, 255)\n self.pen = pen\n self.next_square = square_side_size + 1\n self.board_side = square_side_size * 8 + 7\n self.board_top_y = self.next_square * 4\n self.board_lft_x = self.next_square * -4\n self.square_side_size = square_side_size\n self.border_size = square_side_size * 1.2\n if squares is not None:\n self.squares = squares\n else:\n self.squares = [[None for _ in range(8)] for _ in range(8)]",
"def get_shape(self, name):\n\n if name == \"circle\":\n return Circle(random.randint(1, 10))\n\n elif name == \"square\":\n return Square(random.randint(1, 10))\n\n elif name == \"rectangle\":\n return Rectangle(random.randint(1, 10), random.randint(1, 10))",
"def __init_accessors (self):\n self.colors = ay.utils.Colors\n self.layout = Layout(self.seed)\n self.shapes = Shapes",
"def __init__(self, rings=False, branches=False):\n self.rings = rings\n self.branches = branches"
]
| [
"0.6991661",
"0.6827439",
"0.6623674",
"0.6510729",
"0.6309907",
"0.6284515",
"0.6269464",
"0.6269464",
"0.62600565",
"0.6234398",
"0.61847866",
"0.6112943",
"0.6112943",
"0.61017305",
"0.6051707",
"0.60218245",
"0.599573",
"0.5987703",
"0.59692216",
"0.59690464",
"0.5945936",
"0.59449863",
"0.5944288",
"0.59369975",
"0.5932635",
"0.5926892",
"0.5922044",
"0.59102863",
"0.58913004",
"0.58861405"
]
| 0.6911856 | 1 |
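The seven if/elif branches in the constructor above differ only in data. A hypothetical data-driven refactor (SHAPES and build_grid are illustrative names; the Color values are the ones the constructor already uses) might look like:

# Table keyed by shape_num -> (width, height, filled (x, y) cells, color).
SHAPES = {
    1: (4, 4, [(0, 2), (1, 2), (2, 2), (3, 2)], Color.SilverPink),    # I piece
    4: (2, 2, [(0, 0), (0, 1), (1, 0), (1, 1)], Color.Independence),  # O piece
    # ... the remaining five entries follow the same pattern
}

def build_grid(shape_num):
    width, height, cells, color = SHAPES[shape_num]
    grid = [[0] * height for _ in range(width)]  # indexed grid[x][y], as above
    for x, y in cells:
        grid[x][y] = 1
    return width, height, grid, color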
A high-level description of the cell with global identifier gid, for example the morphology, synapses, and ion channels required to build a multi-compartment neuron. | def cell_description(self, gid):
tree = arbor.segment_tree()
tree.append(
arbor.mnpos,
arbor.mpoint(0, 0, 0, self.radius),
arbor.mpoint(self.length, 0, 0, self.radius),
tag=1,
)
labels = arbor.label_dict({"cable": "(tag 1)", "start": "(location 0 0)"})
decor = (
arbor.decor()
.set_property(Vm=self.Vm, cm=self.cm, rL=self.rL)
.paint('"cable"', arbor.density(f"pas/e={self.Vm}", g=self.g))
.place(
'"start"',
arbor.iclamp(
self.stimulus_start, self.stimulus_duration, self.stimulus_amplitude
),
"iclamp",
)
)
policy = arbor.cv_policy_max_extent(self.cv_policy_max_extent)
decor.discretization(policy)
return arbor.cable_cell(tree, decor, labels) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self):\n\n return \"\\n<Grid ID: {} Name: {}>\".format(self.grid_id,\n self.grid_name)",
"def __repr__(self):\n\n return \"\\n Glitz ID: {} Title: {}\\n\".format(self.glitz_id,\n self.title)",
"def gid(self):\n return safeInt(self.tag(\"gid\"))",
"def global_replication_group_description(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"global_replication_group_description\")",
"def description(self):\n retstr = \"GNP with \" + str(self.n) + \" nodes and p = \" + str(self.p)\n return retstr",
"def _get_grid_representations(self):\n\n representation = '-----------Loading Sequence----------------------------------------------------------------\\n'\n for row in self.grid:\n for col in row:\n if col == -1:\n representation += 'X\\t'\n elif col == 0:\n representation += '-\\t'\n else:\n representation += str(int(col)) + '\\t'\n representation += '\\n\\n'\n\n representation += '-----------VehicleType--------------------------------------------------------------------\\n'\n for row in self.grid_vehicle_type:\n for col in row:\n if col == -2:\n representation += 'X\\t'\n elif col == -1:\n representation += '-\\t'\n else:\n representation += str(int(col)) + '\\t'\n representation += '\\n\\n'\n\n representation += '-----------Destination--------------------------------------------------------------------\\n'\n for row in self.grid_destination:\n for col in row:\n if col == -1:\n representation += 'X\\t'\n elif col == 0:\n representation += '-\\t'\n else:\n representation += str(int(col)) + '\\t'\n representation += '\\n\\n'\n\n return representation",
"def showId(self):\n #Here I'm supposing that the name of the table, and the extent polygon gives a unique mapping.\n try:\n extent = self.geometry.extent\n name = self.grid_name\n res = self.dArea\n string = \"%s:%s:%s:%s\" %(self.parent_id,name,extent,res)\n return string\n except:\n logger.error(\"[biospatial.gbif.taxonomy.GriddedTaxonomy] \\n The total geometry area has not been defined. Try running mergeGeometries first\")\n raise Exception(\"Geometry Extent has not been instantiated\")\n return None",
"def label(self):\n G = self.__f.group()\n if is_Gamma0(G):\n group = ''\n elif is_Gamma1(G):\n group = 'G1'\n elif is_GammaH(G):\n group = 'GH[' + ','.join([str(z) for z in G._generators_for_H()]) + ']'\n return '%s%s%s'%(self.level(), cremona_letter_code(self.factor_number()), group)",
"def __str__(self):\n return str(self._grid)",
"def _repr_html_(self):\n return (\n f'<b>GalaxyCluster:</b> {self.unique_id} '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'<br>> <b>with columns:</b> {self._str_colnames()}'\n f'<br>> {len(self.galcat)} source galaxies'\n f'<br>{self.galcat._html_table()}'\n )",
"def global_replication_group_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"global_replication_group_description\")",
"def global_replication_group_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"global_replication_group_description\")",
"def getGraphicalObjectId(self):\n return _libsbml.TextGlyph_getGraphicalObjectId(self)",
"def __repr__(self):\n return (\n f'GalaxyCluster {self.unique_id}: '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self.galcat)} source galaxies'\n )",
"def __str__(self):\r\n out = (\r\n ' *** Grid dimensions ***\\n'\r\n ' Origin: ( {0.ox:f}, {0.oy:f}, {0.oz:f})\\n'\r\n ' Delta: ( {0.dx:f}, {0.dy:f}, {0.dz:f})\\n'\r\n ' Size: ( {0.lx:f}, {0.ly:f}, {0.lz:f})\\n'\r\n ' N: ( {0.nx:d}, {0.ny:d}, {0.nz:d})\\n'\r\n ' type: {0.gtype}\\n'\r\n ' points: {0.points}\\n'\r\n ' cells: {0.cells}\\n'\r\n ' name: {0.gname}\\n'\r\n ).format(self)\r\n\r\n return out",
"def showId(self):\n #Here I'm supposing that the name of the table, and the extent polygon gives a unique mapping.\n try:\n extent = self.biomeGeometry.extent\n name = \"tax\"\n res = self.biomeGeometry.area\n string = \"%s-%s:%s:%s\" %(name,self.gid,extent,res)\n return string\n except:\n logger.error(\"[biospatial.gbif.taxonomy.GriddedTaxonomy] \\n The total geometry area has not been defined. Try running mergeGeometries first\")\n raise Exception(\"Geometry Extent has not been instantiated\")\n return None",
"def __repr__(self, ):\n return '{}({})'.format(self.__class__.__name__, self.atlasID)",
"def __str__(self):\n return (str(self.chromosome_id) + '. Chromosome: Genes: ' + str(\n self.genes) + '; Fitness: ' + str(self.fitness_value))",
"def get_CG_id(gid, conn):\n\n get_CG = ('SELECT DISTINCT dx.accession '\n 'FROM feature f, feature_dbxref fd, db, dbxref dx '\n 'WHERE f.feature_id = fd.feature_id AND fd.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'FlyBase Annotation IDs\\' AND '\n 'dx.accession NOT LIKE \\'%%-%%\\' AND fd.is_current = \\'t\\' AND f.uniquename = %s')\n CG_id = connect(get_CG,gid,conn)\n return(CG_id)",
"def board_game_geek_id(title):\n pass",
"def __repr__(self):\n return str(self.group)",
"def get_id(self):\n if self.mlat:\n return f'm{-self.mlat}_{self.mlng}'\n else:\n return f'{-self.clat}_{self.clng}'",
"def __str__(self):\n return str(self._cells)",
"def _cell_dimensions_string(self):\n return_str = ''\n return_str += '0.0 {:2.6f} xlo xhi\\n0.0 {:2.6f} ylo yhi\\n0.0 {:2.6f} zlo zhi\\n\\n'.format(*self.cell_lengths)\n return_str += '{:2.5f} {:2.5f} {:2.5f} xy xz yz \\n\\n'.format(*self.tilt_factors)\n \n return return_str",
"def G0(self):\n if self.g0_ref is None:\n return self.g0\n return self.g0_ref.nid",
"def __str__(self):\n s = 'Gene: ' + self._dna[:6] + '...' + self._dna[-6:] + \\\n ', length=%d' % len(self._dna)\n if self._exons is not None:\n s += ', %d exon regions' % len(self._exons)\n return s",
"def G(self):\n return self._properties['G']",
"def __str__(self):\r\n string_rep_of_grid=\"\"\r\n row=\"\"\r\n for dummy_j in range(self._height):\r\n for dummy_i in range(self._width):\r\n row=row+str(self._grid[dummy_j][dummy_i])+\" \" \r\n string_rep_of_grid=string_rep_of_grid+\"row number \"+str(dummy_j)+\": \"+row\r\n row=\"\"\r\n return string_rep_of_grid",
"def render(self):\n print(self._get_grid_representations())",
"def __str__(self):\r\n \r\n #return \"The 2048 board is \" + str(self._cells)\r\n string = \"\"\r\n for row in range(self._grid_height):\r\n for column in range(self._grid_width):\r\n if column == self._grid_width -1:\r\n string += str(self._cells[row][column]) + \"\\n\"\r\n else:\r\n string += str(self._cells[row][column]) +\", \"\r\n return \"The 2048 board is \"+ str(self._grid_height) + \"x\" + str(self._grid_width) + \" and contains: \" + \"\\n\" + string"
]
| [
"0.62961143",
"0.59455574",
"0.59399664",
"0.58813816",
"0.58670366",
"0.5848497",
"0.58048916",
"0.57531345",
"0.5731641",
"0.5647178",
"0.55872154",
"0.55872154",
"0.55754495",
"0.5572215",
"0.55627483",
"0.55473393",
"0.553182",
"0.55220306",
"0.54978687",
"0.5445748",
"0.54125583",
"0.5396554",
"0.5387145",
"0.538394",
"0.5379921",
"0.5376174",
"0.5369299",
"0.5360084",
"0.5346781",
"0.53221035"
]
| 0.7032956 | 0 |
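For context, a cell_description() like the one above is normally implemented as one callback of an arbor.recipe subclass, alongside num_cells() and cell_kind(). A minimal single-cell recipe wrapping it might look like the following sketch (class and attribute names are hypothetical, not taken from the source):

    import arbor

    class SingleCellRecipe(arbor.recipe):
        # Hypothetical minimal recipe: serves exactly one cable cell
        # produced by a cell_description() such as the one above.
        def __init__(self, cell):
            arbor.recipe.__init__(self)  # base-class init is required
            self.the_cell = cell
            self.the_props = arbor.neuron_cable_properties()

        def num_cells(self):
            return 1

        def cell_kind(self, gid):
            return arbor.cell_kind.cable

        def cell_description(self, gid):
            return self.the_cell

        def global_properties(self, kind):
            return self.the_props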
Verify that LMBiSeNet.post_process() produces the same output as the Bilinear-then-Softmax post-process sequence. | def test_lm_bisenet_post_process():
tf.InteractiveSession()
image_size = [96, 64]
batch_size = 2
classes = Camvid.classes
data_format = "NHWC"
model = LMBiSeNet(
image_size=image_size,
batch_size=batch_size,
classes=classes,
data_format=data_format,
)
post_process = Sequence([
Bilinear(
size=image_size,
data_format=data_format,
compatible_tensorflow_v1=True,
),
Softmax()
])
shape = (batch_size, image_size[0]//8, image_size[1]//8, len(classes))
np_output = np.random.uniform(-10., 10., size=shape).astype(np.float32)
output = tf.constant(np_output)
output = model.post_process(output)
expected = post_process(outputs=np_output)["outputs"]
assert np.allclose(output.eval(), expected, atol=1e-5, rtol=1e-5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_processing(conf_thresh, nms_thresh, output):\n # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]\n # num_anchors = 9\n # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n # strides = [8, 16, 32]\n # anchor_step = len(anchors) // num_anchors\n\n # [batch, num, 1, 4]\n box_array = output[0]\n # [batch, num, num_classes]\n confs = output[1]\n\n if type(box_array).__name__ != \"ndarray\":\n box_array = box_array.cpu().detach().numpy()\n confs = confs.cpu().detach().numpy()\n\n num_classes = confs.shape[2]\n\n # [batch, num, 4]\n box_array = box_array[:, :, 0]\n\n # [batch, num, num_classes] --> [batch, num]\n max_conf = np.max(confs, axis=2)\n max_id = np.argmax(confs, axis=2)\n\n bboxes_batch = []\n for batch in range(box_array.shape[0]):\n\n argwhere = max_conf[batch] > conf_thresh\n l_box_array = box_array[batch, argwhere, :]\n l_max_conf = max_conf[batch, argwhere]\n l_max_id = max_id[batch, argwhere]\n\n bboxes = []\n # nms for each class\n for cls_id in range(num_classes):\n\n cls_argwhere = l_max_id == cls_id\n ll_box_array = l_box_array[cls_argwhere, :]\n ll_max_conf = l_max_conf[cls_argwhere]\n ll_max_id = l_max_id[cls_argwhere]\n\n keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)\n\n if keep.size > 0:\n ll_box_array = ll_box_array[keep, :]\n ll_max_conf = ll_max_conf[keep]\n ll_max_id = ll_max_id[keep]\n\n for box in range(ll_box_array.shape[0]):\n bboxes.append(\n [\n ll_box_array[box, 0],\n ll_box_array[box, 1],\n ll_box_array[box, 2],\n ll_box_array[box, 3],\n ll_max_conf[box],\n ll_max_conf[box],\n ll_max_id[box],\n ]\n )\n\n bboxes_batch.append(bboxes)\n\n return bboxes_batch",
"def postprocess(self, frame, outs):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n classIds = []\n confidences = []\n boxes = []\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\n # your code here\n # loop over each of the layer output (I guess the outs is the number of anchor boxes)\n for output in outs:\n # loop over each of the detection\n for detection in output:\n # extract the class ID and confidence of the current object detection\n # the detection is an array of [bx, by, bw, bh, Pc, c1, c2, ..., c80]\n # Pc is the probability that there is an object\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n \n if confidence > self.confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n \n classIds.append(classID)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n \n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences.\n # your code here\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)\n \n # get the bounding bxoes after performing non maximum suppression\n # your code here\n output_boxes = []\n if len(idxs) > 0:\n for i in idxs.flatten(): # idxs = [[1],[2],[5],...], idxs.flatten() = [1,2,5,...]\n output_boxes.append(boxes[i])\n left = boxes[i][0]\n top = boxes[i][1]\n width = boxes[i][2]\n height = boxes[i][3]\n right = left + width\n bottom = top + height\n frame = self.drawPred(frame, classIds[i], confidences[i], left, top, right, bottom)\n \n output_image = frame\n return output_image, output_boxes",
"def get_optimal_postprocess(loaders=None, runner=None, logdir: str = \"\"):\n loaders[\"infer\"] = loaders[\"valid\"]\n\n runner.infer(\n model=runner.model,\n loaders=loaders,\n callbacks=[\n CheckpointCallback(resume=f\"{logdir}/checkpoints/best.pth\"),\n InferCallback(),\n ],\n )\n valid_masks = []\n probabilities = np.zeros((2220, 350, 525))\n for i, (batch, output) in enumerate(\n zip(loaders[\"infer\"].dataset, runner.callbacks[0].predictions[\"logits\"])\n ):\n image, mask = batch\n for m in mask:\n if m.shape != (350, 525):\n m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n valid_masks.append(m)\n\n for j, probability in enumerate(output):\n if probability.shape != (350, 525):\n probability = cv2.resize(\n probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR\n )\n probabilities[i * 4 + j, :, :] = probability\n\n class_params = {}\n for class_id in range(4):\n print(class_id)\n attempts = []\n for t in range(0, 100, 10):\n t /= 100\n for ms in [\n 0,\n 100,\n 1000,\n 5000,\n 10000,\n 11000,\n 14000,\n 15000,\n 16000,\n 18000,\n 19000,\n 20000,\n 21000,\n 23000,\n 25000,\n 27000,\n 30000,\n 50000,\n ]:\n masks = []\n for i in range(class_id, len(probabilities), 4):\n probability = probabilities[i]\n predict, num_predict = post_process(sigmoid(probability), t, ms)\n masks.append(predict)\n\n d = []\n for i, j in zip(masks, valid_masks[class_id::4]):\n if (i.sum() == 0) & (j.sum() == 0):\n d.append(1)\n else:\n d.append(dice(i, j))\n\n attempts.append((t, ms, np.mean(d)))\n\n attempts_df = pd.DataFrame(attempts, columns=[\"threshold\", \"size\", \"dice\"])\n\n attempts_df = attempts_df.sort_values(\"dice\", ascending=False)\n print(attempts_df.head())\n best_threshold = attempts_df[\"threshold\"].values[0]\n best_size = attempts_df[\"size\"].values[0]\n\n class_params[class_id] = (best_threshold, int(best_size))\n\n print(class_params)\n return class_params",
"def post_processing(\n cfg: CfgNode, y: torch.Tensor, orig_img_size: torch.Tensor, transformed_labels: torch.Tensor\n) -> Tuple[Tuple[List[np.array], List[np.array]], float]:\n post_processing_start_time = time.time()\n pruned_preds_batch = post_process_prediction(y, orig_img_size, cfg)\n post_processing_end_time = time.time()\n processed_labels_batch = post_process_labels(transformed_labels, orig_img_size, cfg)\n\n return (pruned_preds_batch, processed_labels_batch), (post_processing_end_time - post_processing_start_time)",
"def test_reverse_sigmoid_beta(self):\n (_, _), (x_test, _) = self.mnist\n classifier = get_image_classifier_kr_tf()\n preds = classifier.predict(x_test[0:1])\n postprocessor = ReverseSigmoid(beta=0.75, gamma=0.1)\n post_preds = postprocessor(preds=preds)\n\n classifier_prediction_expected = np.asarray(\n [\n [\n 0.12109935,\n 0.0498215,\n 0.0993958,\n 0.06410096,\n 0.11366928,\n 0.04645343,\n 0.06419807,\n 0.30685693,\n 0.07616714,\n 0.05823757,\n ]\n ],\n dtype=np.float32,\n )\n post_classifier_prediction_expected = np.asarray(\n [\n [\n 0.1097239,\n 0.07264659,\n 0.09752058,\n 0.07914664,\n 0.10549247,\n 0.07124537,\n 0.07919333,\n 0.22350204,\n 0.08514594,\n 0.07638316,\n ]\n ],\n dtype=np.float32,\n )\n\n np.testing.assert_array_almost_equal(preds, classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(post_preds, post_classifier_prediction_expected, decimal=4)",
"def post_process(self, outputs, source_image_shape, conf_thres=0.5, image=None):\n scaled = []\n grids = []\n for out in outputs:\n out = self.sigmoid_v(out)\n _, _, width, height, _ = out.shape\n grid = self.make_grid(width, height)\n grids.append(grid)\n scaled.append(out)\n z = []\n for out, grid, stride, anchor in zip(scaled, grids, self.strides, self.anchor_grid):\n _, _, width, height, _ = out.shape\n out[..., 0:2] = (out[..., 0:2] * 2. - 0.5 + grid) * stride\n out[..., 2:4] = (out[..., 2:4] * 2) ** 2 * anchor\n out = out.reshape((1, -1, self.feature_count))\n z.append(out)\n pred = np.concatenate(z, 1)\n xc = pred[..., 4] > conf_thres\n pred = pred[xc]\n boxes, scores, cids = self.nms(pred)\n\n # Normalise box coordinates to be in the range (0, 1]\n h, w = source_image_shape[:2]\n h1, w1 = h, w\n if self.keep_ratio and h != w:\n # Padding was used during pre-process to make the source image square\n h1 = w1 = max(h, w)\n\n y_scale = h1 / float(self.input_size) / h\n x_scale = w1 / float(self.input_size) / w\n boxes[:, 0] *= x_scale\n boxes[:, 1] *= y_scale\n boxes[:, 2] *= x_scale\n boxes[:, 3] *= y_scale\n boxes = np.clip(boxes, 0, 1)\n\n if image is not None:\n self.draw_cv2(image, boxes, scores, cids)\n\n return (boxes, scores, cids), image",
"def test_reverse_sigmoid_beta_binary(self):\n (_, _), (x_test, _) = self.mnist\n classifier = get_image_classifier_kr_tf_binary()\n preds = classifier.predict(x_test[0:1])\n postprocessor = ReverseSigmoid(beta=0.75, gamma=0.1)\n post_preds = postprocessor(preds=preds)\n\n classifier_prediction_expected = np.asarray([[0.5301345]], dtype=np.float32)\n post_classifier_prediction_expected = np.asarray([[0.5278717]], dtype=np.float32)\n\n np.testing.assert_array_almost_equal(preds, classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(post_preds, post_classifier_prediction_expected, decimal=4)",
"def _postprocess(img):\n img = _scale_to_zero_one(img)\n img = img.reshape(1, -1) # to avoid a scikit-learn deprecation warning later\n return img",
"def _postprocess(self, M, postprocess):\n reshaped = False\n if self.labels_.ndim == 1:\n reshaped = True\n # promote it to a matrix with 1 row\n self.labels_.shape = (1, self.labels_.shape[0])\n start_partition = 0\n else:\n # duplicate the 1st row (create the \"0\"-partition that will\n # not be postprocessed):\n self.labels_ = np.vstack((self.labels_[0,:], self.labels_))\n start_partition = 1 # do not postprocess the \"0\"-partition\n\n self.is_noise_ = (self.labels_[0,:] < 0)\n\n # postprocess labels, if requested to do so\n if M == 1 or postprocess == \"none\":\n pass\n elif postprocess == \"boundary\":\n for i in range(start_partition, self.labels_.shape[0]):\n self.labels_[i,:] = internal.merge_boundary_points(\n self._mst_ind_, self.labels_[i,:],\n self._nn_ind_, M)\n elif postprocess == \"all\":\n for i in range(start_partition, self.labels_.shape[0]):\n self.labels_[i,:] = internal.merge_noise_points(\n self._mst_ind_, self.labels_[i,:])\n\n if reshaped:\n self.labels_.shape = (self.labels_.shape[1],)",
"def test_reverse_sigmoid_binary(self):\n (_, _), (x_test, _) = self.mnist\n classifier = get_image_classifier_kr_tf_binary()\n preds = classifier.predict(x_test[0:1])\n postprocessor = ReverseSigmoid(beta=1.0, gamma=0.1)\n post_preds = postprocessor(preds=preds)\n\n classifier_prediction_expected = np.asarray([[0.5301345]], dtype=np.float32)\n post_classifier_prediction_expected = np.asarray([[0.52711743]], dtype=np.float32)\n\n np.testing.assert_array_almost_equal(preds, classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(post_preds, post_classifier_prediction_expected, decimal=4)",
"def validate(net, val_data, ctx, eval_metric, size, args):\n clipper = gcv.nn.bbox.BBoxClipToImage()\n eval_metric.reset()\n # set nms threshold and topk constraint\n net.set_nms(nms_thresh=0.45, nms_topk=400)\n mx.nd.waitall()\n net.hybridize()\n\n with tqdm(total=size) as pbar:\n for ib, batch in enumerate(val_data):\n # if(ib >= 200):\n # break\n batch = split_and_load(batch, ctx_list=ctx)\n det_bboxes = []\n det_ids = []\n det_scores = []\n det_coefs = []\n det_infos = []\n\n for x, im_info in zip(*batch):\n # get prediction results\n t1 = time.time()\n ids, scores, bboxes, coefs = net(x)\n t2 = time.time()\n det_bboxes.append(clipper(bboxes, x))\n det_ids.append(ids)\n det_scores.append(scores)\n det_coefs.append(coefs)\n det_infos.append(im_info)\n\n # update metric\n for det_bbox, det_id, det_score, def_coef, det_info in zip(det_bboxes, det_ids, det_scores, det_coefs, det_infos):\n for i in range(det_info.shape[0]):\n # numpy everything\n det_bbox = det_bbox[i].asnumpy()\n det_id = det_id[i].asnumpy()\n det_score = det_score[i].asnumpy()\n def_coef = def_coef[i].asnumpy()\n det_info = det_info[i].asnumpy()\n # filter by conf threshold\n im_height, im_width = det_info\n valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0]\n det_id = det_id[valid]\n det_score = det_score[valid]\n # To bbox of original img size\n det_bbox = det_bbox[valid]\n det_bbox[:, 0] *= (im_width / 416.0)\n det_bbox[:, 2] *= (im_width / 416.0)\n det_bbox[:, 1] *= (im_height / 416.0)\n det_bbox[:, 3] *= (im_height / 416.0)\n\n def_coef = def_coef[valid]\n eval_metric.update(det_bbox, det_id, det_score, def_coef, int(im_height), int(im_width))\n\n pbar.update(len(ctx))\n\n return eval_metric.get()",
"def postprocess(self, predicted_output, original_input=None, stats=None,\n **kwargs):\n pass",
"def postprocess(frame, outs, save_image=False):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # non maximum suppression to eliminate redundant overlapping boxes with lower confidences\n indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\n for i in indices:\n i = i[0]\n # Skip classes that aren't people\n if classIds[i] != 0:\n continue\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n if save_image:\n # Save cropped image of detected object\n class_name = classes[classIds[i]]\n dimensions = (top, top + height, left, left + width)\n utils.write_image(frame, \"output/yolo\", class_name, dimensions)\n drawPred(classIds[i], confidences[i], left, top, left + width, top + height)",
"def postprocess(m):\n logger.info(\"Postprocessing the model...\")\n while len(m.graph.value_info) > 0:\n m.graph.value_info.pop()\n m = other.polish_model(m)\n eliminating.eliminate_single_input_Concat(m.graph)\n eliminating.eliminate_nop_Maxpool_and_AveragePool(m.graph)\n eliminating.eliminate_trivial_elementwise_calculation(m.graph)\n m = other.polish_model(m)\n\n replacing.replace_depthwise_1x1_with_bn(m.graph)\n m = other.polish_model(m)\n\n # removing transpose\n m = removing_transpose.eliminate_transposes(m)\n m = other.polish_model(m)\n removing_transpose.remove_trivial_transpose(m.graph)\n removing_transpose.fuse_Transpose_into_Gemm_weight(m.graph)\n\n # fuse some nodes\n fusing.fuse_mul_and_add_into_bn(m.graph)\n m = other.polish_model(m)\n fusing.fuse_mul_and_add_into_gemm(m.graph)\n m = other.polish_model(m)\n fusing.fuse_conv_and_add_into_conv(m.graph)\n m = other.polish_model(m)\n replacing.replace_mul_to_bn(m.graph)\n replacing.replace_div_to_bn(m.graph)\n replacing.replace_add_to_bn(m.graph)\n replacing.replace_sub_to_bn(m.graph)\n replacing.replace_sub_with_bn_and_add(m.graph)\n m = other.polish_model(m)\n\n other.add_output_to_value_info(m.graph)\n m = optimizer.optimize(m, [\"eliminate_deadend\"])\n m.producer_name = \"kneron_formatter\"\n return m",
"def postprocess_img(img):\n img = img.transpose((1, 2, 0))\n img += 1.0\n img = (img * 128.0).astype(np.uint8)\n return img",
"def PostProcess(EMWRaw,muList):\n KeepIt = np.ones(NREP,dtype=bool)\n for res_b in EMWRaw:\n for (i,res_rep) in enumerate(res_b):\n if np.isnan(res_rep).any():\n KeepIt[i] = False\n\n dxdparam_2 = (muList[1:,:,:,0] - muList[:-1,:,:,0]) / np.diff(paramgrid).reshape(len(paramgrid)-1,1,1)\n\n\n EMW = np.zeros((len(paramgrid)-1,NCOMP))\n EdWdxEdxdparam = np.zeros((len(paramgrid)-1,NCOMP))\n Cov = np.zeros((len(paramgrid)-1,NCOMP))\n Cov_2 = np.zeros((len(paramgrid)-1))\n std_dWdx = np.zeros((len(paramgrid)-1))\n std_dxdparam = np.zeros((len(paramgrid)-1))\n for (i,res_b) in enumerate(EMWRaw):\n u = muList[i][KeepIt,:,1]\n IIdWdx = res_b[KeepIt,:-1,:]\n dxdparam = res_b[KeepIt,-1,:]\n dxdparam_2i = dxdparam_2[i,KeepIt,:]\n tmp = np.ones((dxdparam.shape[0],NCOMP,dxdparam.shape[1])) # don't multiply the insurance and incentives terms against dxdparam\n tmp[:,2:,:] = np.tile(np.reshape(dxdparam,(dxdparam.shape[0],1,dxdparam.shape[1])),(1,NCOMP-2,1))\n EMW[i,:] = (IIdWdx * tmp * beta**np.arange(Tsim)).sum(axis=2).sum(axis=0)/ KeepIt.sum()\n EdWdxEdxdparam[i,:] = (IIdWdx.sum(axis=0)/ KeepIt.sum() * tmp.sum(axis=0)/ KeepIt.sum() * beta**np.arange(Tsim)).sum(axis=1)\n for t in range(Tsim):\n std_dWdx[i] += beta**t * np.std(IIdWdx[:,2:,t].sum(axis=1))\n std_dxdparam[i] += beta**t * np.std(dxdparam[:,t])\n for j in range(2,NCOMP):\n Cov[i,j] += beta**t * np.cov(IIdWdx[:,j,t].flatten(),dxdparam[:,t].flatten())[0,1]\n Cov_2 += beta**t * np.cov(IIdWdx[:,2:,t].sum(axis=1), dxdparam_2i[:,t].flatten() )[0,1]\n\n return EMW, EdWdxEdxdparam, Cov, std_dWdx, std_dxdparam, Cov_2",
"def post_process(dtw_threshold, fidx, TESTSET_NAME):\n\n data_dir = str(Path(os.getcwd()).parent) + '/data/{}/'.format(TESTSET_NAME)\n X_test = np.load(data_dir+'/processed_dataset/scaled_ppgs.npy')\n y_seg_trues = np.load(data_dir+'/processed_dataset/seg_labels.npy')\n\n working_dir = 'results/{}/{}/dtw_thresh_{}/'.format(TESTSET_NAME, fidx, dtw_threshold)\n\n check_mkdir('results/{}'.format(TESTSET_NAME))\n check_mkdir('results/{}/{}/'.format(TESTSET_NAME, fidx))\n check_mkdir(working_dir)\n\n pool_args = []\n for row in X_test:\n pool_args.append([row, dtw_threshold, fidx])\n pool = Pool(processes=8)\n y_seg_preds = pool.starmap(make_predictions, pool_args)\n pool.terminate()\n\n y_seg_preds = np.asarray(y_seg_preds)\n np.save(working_dir+'/y_pred_{}.npy'.format(dtw_threshold), y_seg_preds)\n np.save(working_dir+'/y_true_{}.npy'.format(dtw_threshold), y_seg_trues)",
"def post_process_prediction(y: torch.Tensor, orig_img_sizes: torch.Tensor, cfg: CfgNode) -> List[np.array]:\n output = torch.cat(y, axis=1).float()\n predictions = nms(output, cfg.inference.iou_threshold, cfg.inference.class_conf_threshold)\n scaled_preds = []\n for i, pred in enumerate(predictions):\n if pred is None:\n continue\n pred = pred.detach().numpy()\n pred = xyxy_to_xywh(pred)\n orig_img_size = orig_img_sizes[i]\n scaled_pred = scale_boxes_to_orig(pred, orig_img_size[1], orig_img_size[0], cfg.model.image_size)\n scaled_preds.append(scaled_pred)\n return scaled_preds",
"def postprocess(image: np.ndarray, results_list: list, threshold_confidence: float, threshold_nms: float) -> list:\n frameHeight = image.shape[0]\n frameWidth = image.shape[1]\n\n # Scan through all the bounding boxes output from the network and..\n # 1. keep only the ones with high confidence scores.\n # 2. assign the box class label as the class with the highest score.\n # 3. construct a list of bounding boxes, class labels and confidence scores\n\n classIds = []\n confidences = []\n boxes = []\n for result in results_list:\n for detection in result:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > threshold_confidence:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = max(0, int(center_x - width / 2))\n top = max(0, int(center_y - height / 2))\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences\n list_of_tuples = []\n\n indices = cv2.dnn.NMSBoxes(boxes, confidences, threshold_confidence, threshold_nms)\n for i in indices:\n i = i[0]\n list_of_tuples.append((classIds[i], confidences[i], boxes[i]))\n # return post processed lists of classIds, confidences and bounding boxes\n return list_of_tuples",
"def pnet_process(self, image, height, width):\n image = cv2.resize(image, (width, height)).astype(np.float32)\n image[:, :, 0] -= self.mean[0]\n image[:, :, 1] -= self.mean[1]\n image[:, :, 2] -= self.mean[2]\n image *= self.scale_factor\n image = np.transpose(image, (2, 0, 1))\n image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n return image.copy()",
"def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):\n keep_by_nms = batched_nms(\n boxes=mask_boxes.float(),\n scores=iou_scores,\n idxs=torch.zeros(mask_boxes.shape[0]),\n iou_threshold=amg_crops_nms_thresh,\n )\n\n iou_scores = iou_scores[keep_by_nms]\n rle_masks = [rle_masks[i] for i in keep_by_nms]\n mask_boxes = mask_boxes[keep_by_nms]\n masks = [_rle_to_mask(rle) for rle in rle_masks]\n\n return masks, iou_scores, rle_masks, mask_boxes",
"def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)",
"def do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture,sigma_min,sigma_max, downsample_value):\n if np.ndim(img)==3:\n features = extract_features(\n img,\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n else:\n features = extract_features(\n np.dstack((img,img,img)),\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n\n if mask is None:\n raise ValueError(\"If no classifier clf is passed, you must specify a mask.\")\n training_data = features[:, mask > 0].T\n\n training_data = memmap_feats(training_data)\n\n training_labels = mask[mask > 0].ravel()\n\n training_data = training_data[::downsample_value]\n training_labels = training_labels[::downsample_value]\n\n lim_samples = 100000 #200000\n\n if training_data.shape[0]>lim_samples:\n logging.info('Number of samples exceeds %i'% lim_samples)\n ind = np.round(np.linspace(0,training_data.shape[0]-1,lim_samples)).astype('int')\n training_data = training_data[ind,:]\n training_labels = training_labels[ind]\n logging.info('Samples have been subsampled')\n logging.info('Number of samples in training data: %i' % (training_data.shape[0]))\n print(training_data.shape)\n\n clf = make_pipeline(\n StandardScaler(),\n MLPClassifier(\n solver='adam', alpha=1, random_state=1, max_iter=2000,\n early_stopping=True, hidden_layer_sizes=[100, 60],\n ))\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Initializing MLP model')\n\n clf.fit(training_data, training_labels)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('MLP model fit to data')\n\n del training_data, training_labels\n\n logging.info('Create and memory map model input data')\n\n data = features[:, mask == 0].T\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n data = memmap_feats(data)\n logging.info('Memory mapped model input data')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n labels = clf.predict(data)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Model used on data to estimate labels')\n\n if mask is None:\n result = labels.reshape(img.shape[:2])\n result2 = result.copy()\n else:\n result = np.copy(mask)#+1\n result[mask == 0] = labels\n del labels, mask\n result2 = result.copy()\n del result\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF feature extraction and model fitting complete')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2",
"def _post_process(self, X, y, check_input=False):\n if check_input:\n X, y = check_X_y(X, y)\n\n n_samples = X.shape[0]\n y_matrix = np.empty((n_samples, self.n_classes_, self.n_estimators))\n original_y = y\n\n for i in xrange(self.n_estimators):\n y_matrix[:,:,i] = self._tree_predict_proba(i, X)\n\n self.coef_ = np.empty((self.n_estimators, self.n_classes_))\n self.intercept_ = np.empty((self.n_classes_,))\n \n self.post_model.set_params(alpha=self.alpha)\n for k in xrange(self.n_classes_):\n if self.n_classes_ > 2:\n y = np.array(original_y == k, dtype=np.float64)\n self.post_model.fit(y_matrix[:,k,:], y)\n self.coef_[:,k] = self.post_model.coef_\n self.intercept_[k] = self.post_model.intercept_\n\n self.coef_[np.abs(self.coef_) < EPS] = 0.0\n\n return self",
"def post_process_stage1(self, pred):\n return (pred>0)*1",
"def postprocess(self, images):\n if not isinstance(images, np.ndarray):\n raise ValueError(f'Images should be with type `numpy.ndarray`!')\n\n if images.ndim != 4 or images.shape[1] != self.image_channels:\n raise ValueError(f'Input should be with shape [batch_size, channel, '\n f'height, width], where channel equals to '\n f'{self.image_channels}!\\n'\n f'But {images.shape} is received!')\n images = (images - self.min_val) * 255 / (self.max_val - self.min_val)\n images = np.clip(images + 0.5, 0, 255).astype(np.uint8)\n images = images.transpose(0, 2, 3, 1)\n if self.image_channels == 3 and self.channel_order == 'BGR':\n images = images[:, :, :, ::-1]\n\n return images",
"def det_post_process(params: Dict[Any, Any], cls_outputs: Dict[int, tf.Tensor],\n box_outputs: Dict[int, tf.Tensor], scales: List[float]):\n if params.get('combined_nms', None):\n # Use combined version for dynamic batch size.\n nms_boxes, nms_scores, nms_classes, _ = postprocess.postprocess_combined(\n params, cls_outputs, box_outputs, scales)\n else:\n nms_boxes, nms_scores, nms_classes, _ = postprocess.postprocess_global(\n params, cls_outputs, box_outputs, scales)\n\n batch_size = tf.shape(cls_outputs[params['min_level']])[0]\n img_ids = tf.expand_dims(\n tf.cast(tf.range(0, batch_size), nms_scores.dtype), -1)\n detections = [\n img_ids * tf.ones_like(nms_scores),\n nms_boxes[:, :, 0],\n nms_boxes[:, :, 1],\n nms_boxes[:, :, 2],\n nms_boxes[:, :, 3],\n nms_scores,\n nms_classes,\n ]\n return tf.stack(detections, axis=-1, name='detections')",
"def postprocess(pb, A, mat, solutions, results, primaldual):\n tim = Timer(name='postprocessing')\n print('\\npostprocessing')\n matrices = {}\n for pp in pb.postprocess:\n if pp['kind'] in ['GaNi', 'gani']:\n order_name = ''\n Nname = ''\n if A.name is not 'A_GaNi':\n A = mat.get_A_GaNi(pb.solve['N'], primaldual)\n\n elif pp['kind'] in ['Ga', 'ga']:\n if 'order' in pp:\n Nbarpp = 2*pb.solve['N'] - 1\n if pp['order'] is None:\n Nname = ''\n order_name = ''\n A = mat.get_A_Ga(Nbar=Nbarpp, primaldual=primaldual,\n order=pp['order'])\n else:\n order_name = '_o' + str(pp['order'])\n Nname = '_P%d' % np.mean(pp['P'])\n A = mat.get_A_Ga(Nbar=Nbarpp, primaldual=primaldual,\n order=pp['order'], P=pp['P'])\n else:\n order_name = ''\n Nname = ''\n else:\n ValueError()\n\n name = 'AH_%s%s%s_%s' % (pp['kind'], order_name, Nname, primaldual)\n print('calculated: ' + name)\n\n AH = assembly_matrix(A, solutions)\n\n if primaldual is 'primal':\n matrices[name] = AH\n else:\n matrices[name] = np.linalg.inv(AH)\n tim.measure()\n\n pb.output.update({'sol_' + primaldual: solutions,\n 'res_' + primaldual: results,\n 'mat_' + primaldual: matrices})",
"def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):\n keep_by_nms = tf.image.combined_non_max_suppression(\n boxes=mask_boxes.float(),\n scores=iou_scores,\n idxs=torch.zeros(mask_boxes.shape[0]),\n iou_threshold=amg_crops_nms_thresh,\n )\n\n iou_scores = iou_scores[keep_by_nms]\n rle_masks = [rle_masks[i] for i in keep_by_nms]\n mask_boxes = mask_boxes[keep_by_nms]\n masks = [_rle_to_mask(rle) for rle in rle_masks]\n\n return masks, iou_scores, rle_masks, mask_boxes",
"def validate(net, val_data, val_items, val_shapes, ctx, size, classes):\n clipper = gcv.nn.bbox.BBoxClipToImage()\n net.hybridize(static_alloc=True)\n print(\"---Detect Total {:d} Image Start.---\".format(len(val_items))) \n\n result_dict = {}\n for ib, (batch, item) in enumerate(zip(val_data, val_items)):\n batch = split_and_load(batch, ctx_list=ctx)\n for x, y, im_scale in zip(*batch):\n ids, scores, bboxes = net(x)\n bboxes = clipper(bboxes, x)\n im_scale = im_scale.reshape((-1)).asscalar()\n bboxes *= im_scale\n inds = nd.argsort(nd.squeeze(ids, axis=(0, 2)), is_ascend=False)\n ids = nd.squeeze(ids, axis=(0, 2)).asnumpy().astype(np.int8).tolist()\n valid_ids = [id for id in ids if id is not -1]\n valid_len = len(valid_ids)\n if valid_len > 0: # valid_len must > 0\n inds = nd.slice_axis(inds, begin=0, end=valid_len, axis=0)\n scores = nd.take(scores, inds, axis=1)\n bboxes = nd.take(bboxes, inds, axis=1)\n scores = scores.asnumpy()\n bboxes = bboxes.asnumpy()\n for i, id in enumerate(valid_ids):\n score = scores[:, i, 0][0]\n xmin, ymin, xmax, ymax = bboxes[:, i, 0][0], bboxes[:, i, 1][0], bboxes[:, i, 2][0], bboxes[:, i, 3][0] \n result_dict[id] = result_dict.get(id, []) + [[item, score, xmin, ymin, xmax, ymax]]\n print(\"Detect Image {:s} Done.\".format(item))\n print(\"---Detect Total {:d} Image Done.---\".format(len(val_items)))\n return result_dict"
]
| [
"0.5815606",
"0.5808166",
"0.58040047",
"0.575359",
"0.57129437",
"0.5690851",
"0.56780434",
"0.5647363",
"0.5629743",
"0.5564466",
"0.55191976",
"0.5496618",
"0.54881227",
"0.5456672",
"0.54489857",
"0.544301",
"0.53556955",
"0.5354755",
"0.5352226",
"0.53475475",
"0.5321695",
"0.5311544",
"0.53053826",
"0.52777636",
"0.52657163",
"0.5252113",
"0.5249137",
"0.52437574",
"0.52427346",
"0.52342564"
]
| 0.77813905 | 0 |
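The Softmax half of the reference post-process in the test above is an ordinary softmax over the class axis of NHWC tensors. As a point of reference, a numerically stable NumPy sketch of that step (a sketch of the math, not Blueoil's actual Softmax implementation):

    import numpy as np

    def softmax(x, axis=-1):
        # Subtract the per-pixel max first for numerical stability,
        # then normalize over the class (last) axis.
        e = np.exp(x - np.max(x, axis=axis, keepdims=True))
        return e / np.sum(e, axis=axis, keepdims=True)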
Custom Modules Menu for help | def menu_help(cls, **attr):
menu_help = MM("Help", c="default", f="help", **attr)(
MM("Contact us", f="contact"),
MM("About", f="about"),
MM(current.T("Ask MSW"), c="org", f="ask_msw"),
MM("spiegel.de", c="org", f="spiegel"),
)
return menu_help | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def module_help(self):\n print(\"Module \" + str(self))\n print(\"Available commands:\")\n for k in self.commands.values():\n n = k.get_name()\n if len(n) >= CONST_BINDCTL_HELP_INDENT_WIDTH:\n print(\" %s\" % n)\n print(textwrap.fill(k.get_desc(),\n initial_indent=\" \",\n subsequent_indent=\" \" +\n \" \" * CONST_BINDCTL_HELP_INDENT_WIDTH,\n width=70))\n else:\n print(textwrap.fill(\"%s%s%s\" %\n (k.get_name(),\n \" \"*(CONST_BINDCTL_HELP_INDENT_WIDTH - len(k.get_name())),\n k.get_desc()),\n initial_indent=\" \",\n subsequent_indent=\" \" +\n \" \" * CONST_BINDCTL_HELP_INDENT_WIDTH,\n width=70))",
"def help(self):",
"def help(self):",
"def menu_python_library(self, event=None):\n self.parentPanel.python_help('lib')",
"def help(self):\n\t\treturn",
"def _help_menu(self):\n # Get a list of all method names\n names = self.get_names()\n\n # Remove any command names which are explicitly excluded from the help menu\n for name in self.exclude_from_help:\n names.remove(name)\n\n cmds_doc = []\n cmds_undoc = []\n help_dict = {}\n for name in names:\n if name[:5] == 'help_':\n help_dict[name[5:]] = 1\n names.sort()\n # There can be duplicates if routines overridden\n prevname = ''\n for name in names:\n if name[:3] == 'do_':\n if name == prevname:\n continue\n prevname = name\n command = name[3:]\n if command in help_dict:\n cmds_doc.append(command)\n del help_dict[command]\n elif getattr(self, name).__doc__:\n cmds_doc.append(command)\n else:\n cmds_undoc.append(command)\n self.poutput(\"%s\\n\" % str(self.doc_leader))\n self.print_topics(self.doc_header, cmds_doc, 15, 80)\n self.print_topics(self.misc_header, list(help_dict.keys()), 15, 80)\n self.print_topics(self.undoc_header, cmds_undoc, 15, 80)",
"def _helpmenu_about():\n self.helpindex = Toplevel(self.master)\n self.helpindex.title(\"About\")\n self.helpindex.geometry(\"500x300\")\n self.helpindex.label()",
"def help(self):\n pass",
"def help(self):\n pass",
"def get_menus():\n\n pass",
"def help():\n print(UI.HELP)",
"def __admin_menu(self):\n log.debug(\"Displaying __admin_menu\")\n self.menu = TelegramMenu(\"config/comunda_admin_menu.bpmn\", self, \"MenuStart\")\n self.menu.admin_menu(\"MenuStart\", \"menu_admin_main_txt\")\n return",
"def modulesHelp(module):\n\tscripts = os.listdir('syncity/{}/'.format(module))\n\toutput = []\n\t\n\tfor s in scripts:\n\t\tif s[:2] != '__' and s != 'template.py' and s[-3:] != 'pyc':\n\t\t\toutput.append('\\t{}:'.format(s[:-3]))\n\t\t\t\n\t\t\ttry:\n\t\t\t\timport_script = __import__('syncity.{}.{}'.format(module,s[:-3]), fromlist=['syncity.{}'.format(module)])\n\t\t\t\thl = import_script.help()\n\t\t\t\thl = re.sub(r'^', '\\t\\t', hl).replace('\\n', '\\n\\t')\n\t\t\t\toutput.append(hl)\n\t\t\texcept:\n\t\t\t\toutput.append('\\tNo description')\n\t\n\treturn '\\n'.join(output)",
"def create_menus( self ):",
"def help():\n \n pass",
"def help(self):\n self.logger.debug(\"module.Module.help()\")\n return os.linesep.join([\"{}:\".format(self.name),\n self.helptext])",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def main_menu_toolbar():\n\n pass",
"def init_helpmenu(self):\n self.menubar[\"helpmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"helpmenu\"].add_command(label=\"Help Index\", command=todo)\n self.menubar[\"helpmenu\"].add_command(label=\"About...\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Help\", menu=self.menubar[\"helpmenu\"])",
"def create_menu():",
"def habHelp(self):\n rf = os.path.join('docs','helpButtons','prefsHabitat.html')\n self.showHelpFile( rf )",
"def help(ctx):\n click.echo(ctx.parent.get_help())",
"def admin(frame): # pylint: disable=too-many-branches\n\n ctx, msg, modconf = frame.ctx, frame.msg, frame.value\n action, _, modname = frame.text.partition(' ')\n\n hidden = set(modconf.get('hidden', '').split())\n\n if action == 'hide':\n if modname in hidden:\n msg.add('<code>%s</code> is already hidden!', modname)\n else:\n msg.add('<code>%s</code> is now hidden.', modname)\n hidden.add(modname)\n elif action == 'unhide':\n if modname not in hidden:\n msg.add('<code>%s</code> is not hidden!', modname)\n else:\n msg.add('<code>%s</code> is now visible.', modname)\n hidden.remove(modname)\n\n if hidden:\n modconf['hidden'] = ' '.join(sorted(hidden))\n else:\n modconf.pop('hidden')\n\n modules = collections.defaultdict(lambda: collections.defaultdict(set))\n for modname, module in ctx.bot.multibot.modules.items():\n modhelp = getattr(module, 'modhelp', None)\n if modhelp:\n modhelp(ctx, ctx.bot.config['issue37'][modname], modules[modname])\n\n msg.action = 'Select a module'\n for modname, sections in sorted(modules.items()):\n label = '%s (%s)' % (modname, ' \\u2022 '.join(sorted(sections['commands'])))\n if modname in hidden:\n msg.button('Show ' + label, 'unhide ' + modname)\n elif sections:\n msg.button('Hide ' + label, 'hide ' + modname)",
"def help():",
"def menu_python_reference(self, event=None):\n self.parentPanel.python_help('ref')",
"def generalHelp(self):\n rf = os.path.join('docs','helpButtons','prefsGeneral.html')\n self.showHelpFile( rf )",
"def topLevelMenu(c):\n global thePluginController \n thePluginController.showManagerDialog(c)",
"def Help(self, event):\n Help(self)",
"def _helpmenu_helpindex():\n self.helpindex = Toplevel(self.master)\n self.helpindex.title(\"Help Index\")\n self.helpindex.geometry(\"300x500\")",
"def _help(self):\n self.onecmd('help')"
]
| [
"0.69515866",
"0.6950947",
"0.6950947",
"0.6891426",
"0.6883034",
"0.68470794",
"0.68117195",
"0.6777707",
"0.6777707",
"0.67304593",
"0.67135394",
"0.6699524",
"0.6642193",
"0.66408026",
"0.66307276",
"0.6624621",
"0.6574334",
"0.65732974",
"0.65549225",
"0.65472573",
"0.65390813",
"0.65201443",
"0.6519893",
"0.6514035",
"0.6511004",
"0.6474987",
"0.6435543",
"0.63989186",
"0.6389247",
"0.6372532"
]
| 0.71073353 | 0 |
ORG / Organization Registry | def org():
settings = current.deployment_settings
ADMIN = current.session.s3.system_roles.ADMIN
SECTORS = "Clusters" if settings.get_ui_label_cluster() \
else "Sectors"
stats = lambda i: settings.has_module("stats")
return M(c="org")(
M("Organizations MSW", f="organisation")(
M("Create", m="create"),
M("Import", m="import"),
M("TestSpiegel", c="org",f="spiegel")
),
M("Offices", f="office")(
M("Create", m="create"),
M("Map", m="map"),
M("Import", m="import")
),
M("Facilities", f="facility")(
M("Create", m="create"),
M("Import", m="import"),
),
M("Resources", f="resource", m="summary",
check=stats)(
M("Create", m="create"),
M("Import", m="import")
),
M("Organization Types", f="organisation_type",
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Office Types", f="office_type",
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Facility Types", f="facility_type",
restrict=[ADMIN])(
M("Create", m="create"),
),
M(SECTORS, f="sector", restrict=[ADMIN])(
M("Create", m="create"),
),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atlas_organizations():\n pass",
"def organizations(self):\n self.elements('organizations')",
"def test_get_organization(self):\n pass",
"def test_retrieve_l_organization(self):\n pass",
"def org():\n\n sysroles = current.auth.get_system_roles()\n\n ADMIN = sysroles.ADMIN\n ORG_GROUP_ADMIN = sysroles.ORG_GROUP_ADMIN\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Hierarchy\", m=\"hierarchy\"),\n M(\"Create\", m=\"create\", restrict=(ADMIN, ORG_GROUP_ADMIN)),\n ),\n M(\"Facilities\", f=\"facility\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Administration\", restrict=(ADMIN, ORG_GROUP_ADMIN))(\n M(\"Facility Types\", f=\"facility_type\"),\n M(\"Organization Types\", f=\"organisation_type\"),\n M(\"Sectors\", f=\"sector\"),\n )\n )",
"def org():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n SECTORS = \"Clusters\" if current.deployment_settings.get_ui_label_cluster() \\\n else \"Sectors\"\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Service Types\", f=\"service\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )",
"def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))",
"def org_urn(self):\n return f\"psc:org:{self.credentials.org_key}\"",
"def get_organization(self):\n return self.reference[REF_ORGANIZATION][REF_VALUE]",
"def get_organization_details(self):\n\n # Returns 1) OU Name to OU ID mapping (dict)\n # key: OU Name (in the manifest); value: OU ID (at root level)\n # 2) all OU IDs under root (dict)\n org = Organizations(self.logger)\n all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)\n\n # Returns 1) active accounts (list) under an OU.\n # use case: used to validate accounts in the manifest file\n # 2) Accounts for each OU at the root level.\n # use case: map OU Name to account IDs\n # key: OU ID (str); value: Active accounts (list)\n accounts_in_all_ous, ou_id_to_account_map = \\\n self._get_accounts_in_ou(org, all_ou_ids)\n\n # Returns account name in manifest to account id mapping.\n # key: account name; value: account id\n name_to_account_map = self.get_account_for_name(org)\n\n return accounts_in_all_ous, ou_id_to_account_map, \\\n ou_name_to_id_map, name_to_account_map",
"def find_organization(self):\n if self.org_id is not None:\n ItopapiPrototype.get_itop_class('Organization').find(self.org_id)\n return None",
"def test_addOrganization(self):\r\n #fetch the object form the datastore\r\n org_obj = db.GqlQuery(\"SELECT * FROM Organization\")\r\n organization = addOrganization(org_obj.run().next())\r\n #view it as a dict\r\n organization_d = importer.etree_to_dict(organization)\r\n assert [{'name': u'Test Organization'},\r\n {'kind': u'TestOrgKind'},\r\n {'description': u'TestOrgDescription'},\r\n {'location': [{'city': u'Organization City'}, {'country': u'USA'}]},\r\n {'images': [\r\n {'image': [\r\n {'source': u'http://www.testimage.com'},\r\n {'description': u'Description of TestImage'}]}]},\r\n {'maps': [\r\n {'map': [{'source': u'http://maps.google.com'}, {'description': u'Map Description'}]}]},\r\n {'videos': [{u'youtube': u'r_8om4dsEmw'}]},\r\n {'social': [{u'twitter': u'@billgates'}]},\r\n {'citations': [\r\n {'citation': [\r\n {'source': u'http://maps.google.com'},\r\n {'description': u'Map Description'}]}]},\r\n {'external-links': [\r\n {'external-link': [\r\n {'source': u'http://www.google.com'},\r\n {'description': u'Google'}]}]}] in organization_d.values()",
"def test_retrieve_l_organizations(self):\n pass",
"def _get_org(self, context, org):\r\n try:\r\n rtn = {'context': context,\r\n 'org': org,\r\n 'space': self._bbreader.cache[context][org]['space'],\r\n 'org_config': self._bbreader.cache[context][org]['org'],\r\n }\r\n except KeyError:\r\n raise RequestError('No such context/org: {}/{}'.format(context, org))\r\n\r\n return rtn",
"def test_organizations_read(self):\n pass",
"def _get_org(self, org_name):\n org = SpokeOrg()\n result = org.get(org_name)\n if result == []:\n msg = \"Can't find org %s\" % org_name\n self.log.error(msg)\n raise error.NotFound(msg) \n return result",
"def test_getorgs(self):\n pass",
"async def get_organization(request: Request, org: str):\n\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n return {org: organizations_obj[org]}",
"def organization(self):\r\n return Organization(self)",
"def organization(self):\r\n return Organization(self)",
"def organization(self):\r\n return Organization(self)",
"def build_org(self, doc, entity):\n match = self.org_re.match(entity)\n if match and validations.validate_org_name(match.group(self.ORG_NAME_GROUP)):\n name = match.group(self.ORG_NAME_GROUP).strip()\n email = match.group(self.ORG_EMAIL_GROUP)\n if (email is not None) and (len(email) != 0):\n return creationinfo.Organization(name=name, email=email.strip())\n else:\n return creationinfo.Organization(name=name, email=None)\n else:\n raise SPDXValueError('Failed to extract Organization name')",
"def addOrganisation(SID, name):\n return call(\"addOrganisation\", SID, name)",
"def importOrg ( c, orgInstance ):\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n assert str(type(orgInstance)) == \"<type 'instance'>\"\n orgID = orgInstance.attrib[\"organizationIdent\"]\n\n #Gets location sub elements in list. Inserts into CrisisLocation table by indexing list\n allLocations = orgInstance.findall(\"Location\") \n if len(allLocations) != 0:\n for instance in allLocations:\n sqlQuery ( c, \"insert into OrganizationLocations values ( '\"+orgID+\"', '\"+instance[0].text+\"', '\"+instance[1].text+\"', '\"+instance[2].text+\"');\")\n\n #Get all resources. Checks for Citation because it's the only one not ending in 'URL'. Get index of URL for others to splice off. Add to table\n externalResources = orgInstance.find(\"ExternalResources\")\n if len(externalResources) != 0:\n for instance in externalResources:\n sqlQuery ( c, \"insert into OrganizationExternalResources values ( '\"+orgID+\"', '\"+instance.tag+\"', '\"+instance.text+\"');\")\n\n #Get all values for Organizations table and insert to DB\n postalAddress = orgInstance.find(\".//\" + \"PostalAddress\")\n name = orgInstance.find(\"Name\").text\n kind = orgInstance.find(\"Kind\").attrib[\"organizationKindIdent\"]\n history = orgInstance.find(\"History\").text\n phone = str(orgInstance.find(\".//\" + \"Telephone\").text)\n fax = str(orgInstance.find(\".//\" + \"Fax\").text)\n email = orgInstance.find(\".//\" + \"Email\").text\n address = postalAddress[0].text\n locality = postalAddress[1].text\n region = postalAddress[2].text\n postalCode = postalAddress[3].text\n country = postalAddress[4].text\n sqlQuery (c , \"insert into Organizations values ( '\"+orgID+\"', '\"+name+\"', '\"+kind+\"', '\"+history+\"', \"+phone+\", \"+fax+\", '\"+email+\"', '\"+address+\"', '\"+locality+\"', '\"+region+\"', '\"+postalCode+\"', '\"+country+\"');\")",
"def org(self):\r\n raise NotImplementedError()",
"def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")",
"def get_org(self):\n return Org.deserialize(self._get_single('org', {}, from_results=False))",
"def test_add_organization(self):\n pass",
"def get_org(self, name: str):\n org = self._get_org(name)\n if org.keychain:\n assert org.keychain is self\n else:\n org.keychain = self\n return org",
"def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text"
]
| [
"0.67622626",
"0.64280254",
"0.6281099",
"0.6248272",
"0.61873627",
"0.6182633",
"0.6163306",
"0.60936064",
"0.60586184",
"0.60250324",
"0.5994845",
"0.5978774",
"0.5963733",
"0.5891216",
"0.5847455",
"0.5847007",
"0.58193946",
"0.57460207",
"0.5737204",
"0.5737204",
"0.5737204",
"0.5729079",
"0.5699641",
"0.569251",
"0.56890833",
"0.5678236",
"0.56685627",
"0.5659311",
"0.56480914",
"0.5607926"
]
| 0.6491232 | 1 |
Return a new naturally sorted list from the items in iterable. The returned list is in natural sort order. The string is ordered lexicographically (using the Unicode code point number to order individual characters), except that multi-digit numbers are ordered as a single character. Has two optional arguments which must be specified as keyword arguments. key specifies a function of one argument that is used to extract a comparison key from each element in iterable. reverse is a boolean value; if set to True, the list elements are sorted as if each comparison were reversed. | def natural_sorted(iterable, key=None, reverse=False):
    import re
    prog = re.compile(r"(\d+)")
    def alphanum_key(element):
        """Split the comparison key into a list of strings and integers."""
        # Apply the user-supplied key first (if any), then split digit runs
        # so that e.g. "item10" compares as ["item", 10].
        value = key(element) if key is not None else element
        return [int(c) if c.isdigit() else c for c in prog.split(value)]
    return sorted(iterable, key=alphanum_key, reverse=reverse) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sorted_nicely(l, key):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda item: [ convert(c) for c in re.split('([0-9]+)', key(item)) ]\n return sorted(l, key = alphanum_key)",
"def sorted_nicely(ls, key, rev=False):\n def convert(text):\n return int(text) if text.isdigit() else text\n\n def alphanum_key(item):\n return [convert(c) for c in re.split('([0-9]+)', key(item))]\n\n return sorted(ls, key=alphanum_key, reverse=rev)",
"def natural_sort( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )\n return l",
"def sort_nicely(alist, dict_key=None):\n convert = lambda text: int(text) if text.isdigit() else text\n if dict_key is None:\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n else:\n alphanum_key = operator.itemgetter(dict_key)\n alist.sort(key=alphanum_key)",
"def _alphanumeric_sort(iterable):\n convert = lambda text: int(text) if text.isdigit() else text\n sort_key = lambda k: [convert(c) for c in re.split('([0-9]+)', k)]\n return sorted(iterable, key=sort_key)",
"def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l",
"def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l",
"def _natural_key_sort(string_to_sort):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_to_sort)]",
"def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)",
"def sort_nicely(l):\n l.sort(key=alphanum_key)",
"def sort_nicely(l):\n l.sort(key=alphanum_key)",
"def sorted_nicely( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)",
"def natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)",
"def naturalSortKey(s):\n return [(str, int)[k](\"\".join(v)) for k, v in groupby(s, str.isdigit)]",
"def sorted_nicely( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key = alphanum_key)",
"def natural_order(key):\n return [number_else_string(c) for c in re.split(r'(\\d+)', key)]",
"def sort_mixed(iterable):\n return sorted(iterable, key=lambda x: split_string_at_numbers(x))",
"def sorted_nicely(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)",
"def sort_nicely(l): \n import re\n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key=alphanum_key)",
"def _sorted_nicely(self, l):\n\n import re\n\n convert = lambda text: int(text) if text.isdigit() else \"\"\n\n alphanum_key = lambda key: [\n convert(c) for c in re.split(\n '([0-9]+)', key)]\n\n return sorted(l, key=alphanum_key)",
"def natsorted_icase(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key_icase)",
"def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))",
"def NaturalSort(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(l, key = alphanum_key)",
"def natsorted(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key)",
"def human_sort(l):\n l.sort(key=alphanum_key)\n return l",
"def human_sort( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n alphanum_key = None\n try:\n l.sort( key=alphanum_key )\n except TypeError:\n l.sort()\n return l",
"def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )",
"def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)",
"def sort_nicely(l):\n\n def tryint(s):\n try:\n return int(s)\n except:\n return s\n\n def alphanum_key(s):\n \"\"\" Turn a string into a list of string and number chunks.\n \"z23a\" -> [\"z\", 23, \"a\"]\n \"\"\"\n return [tryint(c) for c in re.split('([0-9]+)', s)]\n\n l.sort(key=alphanum_key)\n return l",
"def _natural_sort_key(value):\n return map(try_int_cast, re.findall(r'(\\d+|\\D+)', value))"
]
| [
"0.76012194",
"0.72604644",
"0.72291124",
"0.7211663",
"0.71234494",
"0.70481193",
"0.7033714",
"0.69929475",
"0.6972046",
"0.6964402",
"0.6964402",
"0.6928824",
"0.6904572",
"0.69022584",
"0.6884324",
"0.6873669",
"0.6862452",
"0.67815965",
"0.6772535",
"0.67579895",
"0.6745621",
"0.6744543",
"0.67374885",
"0.6697848",
"0.66268045",
"0.6620528",
"0.65956193",
"0.6575616",
"0.655502",
"0.65545285"
]
| 0.7791482 | 0 |
Get the selection method. | def selection(self) -> str:
return self._selection | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSelection(self):\n return self.selection",
"def _getAsSelection(self):\n return self._asSelection",
"def GetSelection(self):\r\n\r\n return self.selection",
"def get_selection(self, selection_name, format=None):",
"def get_method_name(self) -> Optional[str]:\n current_mode = self.get_mode()\n # Check that 'Solvent' program is enabled.\n # Retreiving the remaining time without\n # this programm being selected first would trigger\n # a key error when unpacking the device reply.\n if current_mode != 'Method':\n self.logger.warning(\"Can't retreive selected method of the 'Method' \"\n \"program since this program is not currently \"\n f\"selected (selected program is '{current_mode}'). \"\n \"Select 'Method' program first.\")\n return None\n else:\n return self.send(self.cmd.GET_METHOD_NAME)",
"def GetSelection(self):\n \n return self.selected",
"def GetSelection(self):\r\n \r\n return self._curpage",
"def method(self):\n return self._method",
"def method(self):\n return self._method",
"def getSelector(self, node):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.getSelector(node)",
"def GetSelection(self):\r\n\r\n return self._current",
"def dbt_selector_method(self):\n if self.formatter:\n self.formatter.dispatch_compilation_header(\n \"dbt templater\", \"Compiling dbt project...\"\n )\n\n if \"0.17\" in self.dbt_version:\n from dbt.graph.selector import PathSelector\n\n self.dbt_selector_method = PathSelector(self.dbt_manifest)\n else:\n from dbt.graph.selector_methods import (\n MethodManager as DbtSelectorMethodManager,\n MethodName as DbtMethodName,\n )\n\n selector_methods_manager = DbtSelectorMethodManager(\n self.dbt_manifest, previous_state=None\n )\n self.dbt_selector_method = selector_methods_manager.get_method(\n DbtMethodName.Path, method_arguments=[]\n )\n\n if self.formatter:\n self.formatter.dispatch_compilation_header(\n \"dbt templater\", \"Project Compiled.\"\n )\n\n return self.dbt_selector_method",
"def method(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"method\")",
"def selectMethod(self):\n\n try:\n self.methPrior = getattr(self, self.namePrior)\n except:\n self.methPrior = self.uninformative\n\n ### Consider reorganizing this!\n if self.namePrior.find('ixed') > -1:\n self.findMixedMethods()",
"def selector(self):\n return self._selector",
"def select_search_method():\n st.sidebar.markdown('### Search method:')\n search_method = st.sidebar.selectbox('', ['Individual', 'Department'], index=0)\n return search_method",
"def getMethod(self):\n return self.__get('method')",
"def GetSelection(self):\n return self.__selected_item",
"def get_selected(self):\n return self.selected",
"def Method(self, default=None):\n return self.data.get('method', default)",
"def getSelMode(self):\n if self.rbTreeMode.isChecked():\n return 'treeMode'\n elif self.rbStepMode.isChecked():\n return 'stepMode'",
"def selector(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"selector\")",
"def selector(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"selector\")",
"def selector(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"selector\")",
"def select(self):\n return",
"def getcurrentmethod(self):\n if self._methodname == None:\n print(\"No method defined.\")\n else:\n return self._methodname",
"def matching_method(self) -> str:\n return pulumi.get(self, \"matching_method\")",
"def _select_implementation(self):\n return",
"def _select_implementation(self):\n return",
"def get_selection(self, name):\n print 'hi being selected in plotdata'\n return self.selections.get(name, None)"
]
| [
"0.69442207",
"0.688181",
"0.6810091",
"0.6737498",
"0.67364687",
"0.6490553",
"0.6357488",
"0.63299996",
"0.63299996",
"0.6322587",
"0.6310337",
"0.6286217",
"0.62577385",
"0.62555695",
"0.6244326",
"0.6195419",
"0.61379856",
"0.61312324",
"0.6102224",
"0.6090719",
"0.6085502",
"0.60417163",
"0.5998784",
"0.5998784",
"0.598553",
"0.5976049",
"0.59489757",
"0.59427416",
"0.59427416",
"0.5896725"
]
| 0.69850576 | 0 |
Fill the internal buffer with domains. | def fill_buffer(self, num_domains: int):
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def domains(self, domains):\n\n self._domains = domains",
"def domain(self, domain):",
"def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)",
"def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return",
"def par_domain(self):",
"def __init__(self, cfg):\n self.domains = []\n\n # process domains in order\n for i in range(1, len(cfg)+1):\n this_dom = cfg[str(i)]\n par_dom = self.domains[this_dom['parent_id']-1] if 'parent_id' in this_dom else None\n self.domains.append(WPSDomainLCC(i, this_dom, par_dom))",
"def _iterate_domains(self):\n\n class DomainIter:\n # Indices refer to positions between the nucleotides, as usual for \n # slices in python.\n\n def __init__(self, domain, cursor, rel_start, rel_end):\n self.domain = domain\n self.start = cursor\n self.rel_start = rel_start\n self.rel_end = rel_end\n\n def __repr__(self):\n return ('DomainIter('\n 'domain={0.domain!r}, '\n 'start={0.start}, '\n 'rel_start={0.rel_start}, '\n 'rel_end={0.rel_end})'.format(self))\n @property\n def len(self):\n return self.rel_end - self.rel_start\n\n @property\n def end(self):\n return self.start + self.len\n\n def rel_index(self, index):\n return index - self.start + self.rel_start\n\n def abs_index(self, rel_index):\n return self.start + rel_index - self.rel_start\n\n domain_cursor = 0\n index_cursor = 0\n \n while domain_cursor < len(self._domains):\n domain = self._domains[domain_cursor]\n\n # If this domain doesn't have anything attached to it, then we can \n # just yield the whole thing right away.\n\n if domain not in self._attachments:\n yield DomainIter(domain, index_cursor, 0, len(domain))\n index_cursor += len(domain)\n\n # If this domain does have something attached to it, then we need \n # to carefully yield only the parts of it that aren't covered by \n # the attachment.\n\n else:\n attachment = self._attachments[domain]\n\n # Yield whatever fraction of this domain comes before the \n # attachment.\n\n yield DomainIter(domain,\n index_cursor, 0, attachment.start_index)\n index_cursor += attachment.start_index\n\n # Yield the domains in the attachment itself by recursively \n # calling this method.\n\n for domain_iter in attachment.construct._iterate_domains():\n domain_iter.start += index_cursor\n yield domain_iter\n index_cursor += len(attachment.construct)\n\n # Skip domains until we reach the one where the attachment \n # ends.\n\n while domain is not attachment.end_domain:\n domain_cursor += 1\n domain = self._domains[domain_cursor]\n\n # Yield whatever fraction of that domain comes after the \n # attachment.\n\n yield DomainIter(domain,\n index_cursor, attachment.end_index, len(domain))\n index_cursor += len(domain) - attachment.end_index\n\n domain_cursor += 1",
"def __init__(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()",
"def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain",
"def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain",
"def submit_domain(self, name, data, byte_start=0):\n data = numpy.array(data, dtype=numpy.float32)\n byte_count = min(\n ArrayDatatype.arrayByteCount(data),\n self.buffer_configuration[name]['byte_count'])\n\n vbo = self.buffer_configuration[name]['vbo'].get(0)\n vbo.glBufferSubData(byte_start, byte_count, data)",
"def support_pruning(self):\r\n if self.curr_domains is None:\r\n self.curr_domains = {v: list(self.domains[v]) for v in self.variables}",
"def _infer_domain(self, name, domain, elements):\n if '*' not in domain:\n return domain\n debug('guessing a better domain for {}: {}'.format(name, domain))\n\n # Domain as a list of references to Variables in the File/xr.Dataset\n domain_ = [self[d] for d in domain]\n\n for i, d in enumerate(domain_): # Iterate over dimensions\n e = set(elements[i])\n if d.name != '*' or len(e) == 0: # pragma: no cover\n assert set(d.values).issuperset(e)\n continue # The stated domain matches the data; or no data\n # '*' is given\n if (self._state[name]['attrs']['type_code'] == gdxcc.GMS_DT_PAR and\n self._implicit):\n d = '_{}_{}'.format(name, i)\n debug(('Constructing implicit set {} for dimension {} of {}\\n'\n ' {} instead of {} elements')\n .format(d, name, i, len(e), len(self['*'])))\n self.coords[d] = elements[i]\n d = self[d]\n else:\n # try to find a smaller domain for this dimension\n # Iterate over every Set/Coordinate\n for s in self.coords.values():\n if s.ndim == 1 and set(s.values).issuperset(e) and \\\n len(s) < len(d):\n d = s # Found a smaller Set; use this instead\n domain_[i] = d\n\n # Convert the references to names\n inferred = [d.name for d in domain_]\n\n if domain != inferred:\n # Store the result\n self._state[name]['attrs']['domain_inferred'] = inferred\n debug('…inferred {}.'.format(inferred))\n else:\n debug('…failed.')\n\n return inferred",
"def resolveOriginalDomains():\n print('[+] Populating Domain Name Resolution for later check ')\n\n try:\n for domain in domains:\n response = dns.resolver.query(domain)\n d = Domain_Poison_Check(domain)\n print('[+] Domain: %s' % domain)\n for record in response:\n print(' |____> maps to %s.' % (record.address))\n d.pushAddr(record)\n check_domain_poison_results.append(d)\n return time.time()\n except Exception as err:\n print('[+] Exception: %s' % err)\n traceback.print_exc()\n return time.time()",
"def __init__(self, var1):\n self.url = var1\n self.b = [ord(i) for i in var1]\n var2 = self.domain_head\n var3 = self.domain_head\n self.domain_tail = self.domain_head\n var4 = False\n var5 = False\n var6 = 0\n while var6 < len(self.b):\n if self.b[var6] == 46:\n var5 = True\n else:\n if self.b[var6] == 47:\n break\n if self.b[var6] == 58:\n if var6 + 2 < len(self.b) and self.b[var6 + 1] == 47 and self.b[var6 + 2] == 47:\n var6 = var6 + 2\n self.host_head = var6\n self.domain_head = var6\n var2 = var6\n var3 = var6\n self.domain_tail = var6\n var6 = var6 + 1\n continue\n if not var4:\n var5 = True\n var4 = True\n if var5:\n var2 = self.domain_head\n self.domain_head = var3\n var3 = self.domain_tail\n self.domain_tail = var6\n var5 = False\n var6 = var6 + 1\n self.host_tail = var6\n if not var4:\n var2 = self.domain_head\n self.domain_head = var3\n var3 = self.domain_tail\n self.domain_tail = var6\n if self.in_second_domain_set(self.b, var3 - self.domain_head - 1, self.domain_head + 1) > 0 and self.in_top_domain_set(self.b, self.domain_tail - var3 - 1, var3 + 1) == 0:\n self.domain_head = var2\n self.domain_head = self.domain_head + 1\n self.host_head = self.host_head + 1",
"def _combine_domains(self, axes):\n def stack_dvalues(prev, x):\n if 0:\n print 'stack_dvalues ...'\n print 'prev:', prev\n print 'x:', x\n res = prev + [list(np.tile(x, len(prev) == 0 or len(prev[-1])))]\n if 0:\n print 'result:', res\n print ''\n return res\n return reduce(stack_dvalues, [self.axes_domains[a] for a in axes], [])",
"def _identify_domains(self):\n\n domains = [FEMDomain(TR3, MeshPart(self.mesh, labels=(0,)), self.media, self.labels)]\n return domains",
"def fillBuffer():\n buff[bufferCounter].next = dataIn",
"def initializeDomainBorder(self):\n #Read image of the structure\n if (self.PictureExistance == \"'yes'\"):\n self.__processImage()\n #re-define the domain size with the layers of boundaries and ghost points\n self.ny, self.nx = self.effectiveDomain.shape\n print('Now the size of domain is %g and %g' %(self.ny, self.nx))\n else:\n self.isDomain = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isSolid = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isDomain, self.isSolid = defineGeometry(self.nx, self.ny)\n if (self.PictureExistance == \"'yes'\"):\n self.originalXdim = self.nx\n self.isDomain = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isSolid = sp.empty([self.ny, self.nx], dtype = np.bool)\n #define the boundary position\n if (self.isCycles == \"'yes'\"):\n self.isBoundaryFluid2 = sp.empty([self.ny, self.nx], \\\n dtype = np.bool)\n# self.isFluidBoundary = sp.empty([self.ny, self.nx], dtype = np.bool)\n self.isDomain[:, :] = 1; self.isSolid[:, :] = 0\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (self.effectiveDomain[i, j] == 0.0):\n self.isDomain[i, j] = 0\n self.isSolid[i, j] = 1\n self.voidSpace = np.count_nonzero(self.isDomain)\n print('The number of vexls in void space is %g.' % self.voidSpace)\n print('The porosity of the layout is %f.' % (self.voidSpace / (self.isDomain.size)))",
"def set_all_domains(self, domains_dict) :\n if not set(domains_dict.keys()) <= set(self.variables):\n invalid_vars = filter(lambda v: v not in self.variables, domains_dict.keys())\n raise KeyError(str(invalid_vars) + \" are not variables in this problem.\")\n self.domains = deepcopy(domains_dict)\n return self",
"def resolve(self,\n ns_servers: List[Dict[str, str]] = [{'IPv4 address': '8.8.8.8', 'MAC address': '01:23:45:67:89:0a'}],\n domain: str = 'google.com',\n subdomains_list: List[str] = ['www', 'mail', 'ns', 'test'],\n subdomains_file: Union[None, str] = None,\n subdomains_brute: bool = False,\n max_threats_count: int = 10,\n udp_destination_port: int = 53,\n timeout: int = 30) -> List[Dict[str, str]]:\n\n try:\n\n # region Clear results list\n self.index_of_dns_query = 0\n self.results.clear()\n self.uniq_hosts.clear()\n # endregion\n\n # region Set target domain\n assert not (domain == ''), \\\n 'Target domain is empty, please set target domain in this parameter: ' + self.base.info_text('domain')\n self.domain = domain\n # endregion\n\n # region Subdomains list\n if len(subdomains_list) > 0:\n self.subdomains = subdomains_list\n # endregion\n\n # region Subdomains file\n if subdomains_file is not None:\n assert isfile(subdomains_file), \\\n 'File with subdomain list:' + self.base.error_text(subdomains_file) + ' not found!'\n with open(subdomains_file) as subdomains_file_descriptor:\n for subdomain in subdomains_file_descriptor.read().splitlines():\n self.subdomains.append(subdomain)\n # endregion\n\n # region Subdomains brute\n if subdomains_brute:\n\n if not self.quiet:\n self.base.print_info('Make subdomains list for brute .... ')\n\n for character1 in RawDnsResolver.available_characters:\n self.subdomains.append(character1)\n for character2 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2)\n for character3 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2 + character3)\n # endregion\n\n # region Check length of subdomains list\n assert len(self.subdomains) != 0, \\\n 'List containing subdomains is empty, please set any of this parameters: ' \\\n + self.base.info_text('subdomain_list') + ' or ' \\\n + self.base.info_text('subdomain_file') + ' or ' \\\n + self.base.info_text('subdomain_brute')\n # endregion\n\n # region Create raw socket\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n # endregion\n\n # region Truncate temporary results file\n temporary_results_file = open(RawDnsResolver.temporary_results_filename, 'r+')\n temporary_results_file.truncate()\n temporary_results_file.close()\n # endregion\n\n # region Sniff DNS answers\n if not self.quiet:\n self.base.print_info('Start DNS answers sniffer for domain: ', self.domain)\n\n threats: ThreadManager = ThreadManager(max_threats_count)\n self._sniff_start(self.your_mac_address, self.your_ipv4_address,\n self.your_ipv6_address, udp_destination_port)\n threats.add_task(self._sniff_check)\n # endregion\n\n # region Send DNS queries\n if not self.quiet:\n self.base.print_info('Start sending DNS queries, time: ', str(datetime.now()))\n\n self._send_queries(send_socket=raw_socket,\n source_mac_address=self.your_mac_address,\n source_ipv4_address=self.your_ipv4_address,\n source_ipv6_address=self.your_ipv6_address,\n domain=domain,\n ns_servers=ns_servers,\n destination_port=udp_destination_port,\n max_threats_count=int(max_threats_count) - 1,\n subdomains=self.subdomains)\n # endregion\n\n # region Timeout\n if not self.quiet:\n self.base.print_info('Wait timeout: ', str(timeout) + ' sec')\n sleep(timeout)\n # endregion\n\n # region Return results\n self._sniff_stop()\n if not self.quiet:\n if len(self.results) > 0:\n self.base.print_success('Found ', str(len(self.results)),\n ' subdomains and addresses for 
domain: ', self.domain)\n else:\n self.base.print_error('Not found subdomains in domain: ', self.domain)\n return self.results\n # endregion\n\n except AssertionError as Error:\n self.base.print_error(Error.args[0])\n exit(1)",
"def __learn__(self):\n\n return domains # including 3 values, (begin, end, key)",
"def fixDomains(self, domainMin, domainMax, fixToDomain):\n\n return 0",
"def domainRouterSet(self, domain, body):\n pass",
"def extract_domains(self, resp):\n return",
"def domain(self, domain):\n self._domain = domain",
"def domain(self, domain):\n self._domain = domain",
"def impute_domain(handles, domain_num, overlap=50):\n out = get_domain_region(handles, domain_num)\n (lat_range, lon_range, min_lat, max_lat, min_lon,\n max_lon, n_lats, n_lons) = out\n regridder = Regridder(handles[domain_num].XLAT[0],\n handles[domain_num].XLONG[0],\n min_lat, max_lat, min_lon, max_lon, n_lats, n_lons)\n handles[domain_num] = regridder.regrid_data(handles[domain_num])\n for field in handles[0]:\n if field not in ['Times']:\n arr1 = handles[0][field].loc[dict(south_north=lat_range,\n west_east=lon_range)]\n arr2 = handles[domain_num][field]\n out = blend_domains(arr1, arr2, overlap=overlap)\n handles[0][field].loc[dict(south_north=lat_range,\n west_east=lon_range)] = out\n return handles",
"def domain(self, value: ArrayLike):\n\n value = as_float_array(value, self.dtype)\n\n if not np.all(np.isfinite(value)):\n runtime_warning(\n f'\"{self.name}\" new \"domain\" variable is not finite: {value}, '\n f\"unpredictable results may occur!\"\n )\n else:\n attest(\n np.all(value[:-1] <= value[1:]),\n \"The new domain value is not monotonic! \",\n )\n\n if value.size != self._range.size:\n self._range = np.resize(self._range, value.shape)\n\n self._domain = value\n self._function = None # Invalidate the underlying continuous function.",
"def SetDomainsList(self, domainsList) :\n\t\t..."
]
| [
"0.58577764",
"0.5815134",
"0.56737477",
"0.5397528",
"0.5369211",
"0.5332131",
"0.5293622",
"0.52866274",
"0.52802956",
"0.52799034",
"0.52597743",
"0.5250782",
"0.52413416",
"0.5233103",
"0.5192772",
"0.51702565",
"0.5167267",
"0.5160602",
"0.51385796",
"0.5080049",
"0.5040997",
"0.5009076",
"0.5008406",
"0.49996537",
"0.49911863",
"0.49773318",
"0.49773318",
"0.49646237",
"0.49645755",
"0.49637443"
]
| 0.68794465 | 0 |
Go through the environment chain and remove all wrappers of type `DomainRandWrapper` (and subclasses). | def remove_all_dr_wrappers(env: Env, verbose: bool = False):
while any(isinstance(subenv, DomainRandWrapper) for subenv in all_envs(env)):
if verbose:
with completion_context(
f"Found domain randomization wrapper of type {type(env).__name__}. Removing it now",
color="y",
bright=True,
):
env = remove_env(env, DomainRandWrapper)
else:
env = remove_env(env, DomainRandWrapper)
return env | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def env_cleanup(self):\n pass",
"def CleanUpEnvironment(self):\n for prop in self._wrap_properties:\n self._adb.RunShellCommand('setprop %s \"\"' % (prop,))\n SetChromeTimeoutScale(self._adb, None)",
"def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)",
"def cleanup_all(cls):\n for i in tuple(cls.instances):\n i.cleanup()",
"def teardown_test_env():\n if not keep_tmp_dirs:\n print('\\nCleaning up temporary directories...')\n shutil.rmtree(tmp_elm_dpath, ignore_errors=True)\n shutil.rmtree(tmp_elm_examples_dpath, ignore_errors=True)\n\n print('Removing conda environment used for testing...')\n sp.call('conda env remove -y -q -n {}'.format(test_env_name), shell=True, executable='/bin/bash', stdout=sp.DEVNULL)",
"def unload_all():\n module_utils.unload_package_modules(__name__)",
"def teardown_scripts(test=None):\n for key, value in original_environ.iteritems():\n if value is None:\n del os.environ[key]\n else:\n os.environ[key] = value\n original_environ.clear()\n\n for path in tmp_paths:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n del tmp_paths[:]",
"def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n\n del globals()[var]",
"def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n\n del globals()[var]",
"def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n\n del globals()[var]",
"def _cleanup():\n for itr_ref in ITERATORS_LIST:\n if context:\n device_type = context.get_context(\"device_target\")\n if device_type == \"GPU\":\n itr_ref.release()\n else:\n itr = itr_ref()\n if itr is not None:\n itr.release()\n else:\n itr = itr_ref()\n if itr is not None:\n itr.release()",
"def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)",
"def tearDown(self):\n tests.utils.cleanup_environment()",
"def tearDown(self):\n tests.utils.cleanup_environment()",
"def clean_python(context):\n context.run(\"find . -name '*.pyc' -exec rm -f {} +\")\n context.run(\"find . -name '*.pyo' -exec rm -f {} +\")\n context.run(\"find . -name '*~' -exec rm -f {} +\")\n context.run(\"find . -name '__pycache__' -exec rm -fr {} +\")",
"def tearDown(self):\n self.patcher_logging.stop()\n self.patcher_config.stop()\n self.patcher_ldap3.stop()",
"def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None",
"def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n\n del globals()[var]",
"def clear_mpi_env_vars():\n removed_environment = {}\n for k, v in list(os.environ.items()):\n for prefix in [\"OMPI_\", \"PMI_\"]:\n if k.startswith(prefix):\n removed_environment[k] = v\n del os.environ[k]\n try:\n yield\n finally:\n os.environ.update(removed_environment)",
"def clear_mpi_env_vars():\n removed_environment = {}\n for k, v in list(os.environ.items()):\n for prefix in ['OMPI_', 'PMI_']:\n if k.startswith(prefix):\n removed_environment[k] = v\n del os.environ[k]\n try:\n yield\n finally:\n os.environ.update(removed_environment)",
"def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True",
"def unsetEnv(self, checked):\n\n # resolve circular dependencies\n if( self.name in checked ):\n return\n else:\n checked.append( self.name )\n\n # delete environment variables\n for k, v in self.env.iteritems():\n trydelenv(k)\n\n # restore path variables (only need to do this at the root module, skip recursivity!)\n if( len( checked ) == 1 ):\n for k, v in self.parent.envpathbak.iteritems():\n os.environ[k] = v\n\n # delete environment for dependencies\n mods = self.optmodules + self.reqmodules + self.reqmodules_buildonly + self.reqmodules_external\n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).unsetEnv(checked)",
"def _clear_context():\n for var in [x for x in __context__ if x.startswith(\"lxc.\")]:\n log.trace(\"Clearing __context__['%s']\", var)\n __context__.pop(var, None)",
"def removeRadarSims(self):\n for sim in self.radarSims:\n self.world.removeFromWorld(sim)\n self.radarSims = []",
"def tear_down_registry(registry):\n for reg_adp in list(registry.registeredAdapters()):\n registry.unregisterAdapter(factory=reg_adp.factory,\n required=reg_adp.required,\n provided=reg_adp.provided,\n name=reg_adp.name)\n for reg_ut in list(registry.registeredUtilities()):\n registry.unregisterUtility(component=reg_ut.component,\n provided=reg_ut.provided,\n name=reg_ut.name)",
"def unload_bindings(self):\n self.ignoreAll()",
"def tearDownClass(cls):\n os.remove(cls._no_default)\n os.remove(cls._no_workers)\n os.remove(cls._zero_workers)\n os.remove(cls._no_server)\n os.remove(cls._server_no_save_timeout)\n os.remove(cls._server_no_retain_image)\n os.remove(cls._server_no_region)\n os.remove(cls._server_valid_minimal)\n os.remove(cls._server_valid_override)",
"def agent_cleanup():\n\t# clean up\n\n\treturn",
"def clean_up_factories():\n for name, obj in inspect.getmembers(factories):\n if inspect.isclass(obj) and \"factory\" in name.lower():\n obj.reset_sequence(0)",
"def module_cleanup():\n from bokeh.core.has_props import _default_resolver\n to_reset = list(panel_extension._imports.values())\n\n _default_resolver._known_models = {\n name: model for name, model in _default_resolver._known_models.items()\n if not any(model.__module__.startswith(tr) for tr in to_reset)\n }"
]
| [
"0.61490715",
"0.6136373",
"0.6094279",
"0.5847474",
"0.58205855",
"0.5792032",
"0.5697961",
"0.5677101",
"0.5677101",
"0.5677101",
"0.56502455",
"0.5646729",
"0.56398857",
"0.56398857",
"0.5638647",
"0.56314355",
"0.561462",
"0.5610801",
"0.5609453",
"0.5607916",
"0.55971336",
"0.5581443",
"0.5555488",
"0.5555398",
"0.5549015",
"0.5546624",
"0.55401874",
"0.55374515",
"0.5535579",
"0.55326104"
]
| 0.8253526 | 0 |
Test draw an empty circuit | def test_empty_circuit(self):
filename = self._get_resource_path('test_empty.tex')
qc = QuantumCircuit(1)
circuit_drawer(qc, filename=filename, output='latex_source')
self.assertEqualToReference(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_include_empty_wires(self):\r\n\r\n dev = qml.device(\"default.qubit\", wires=[-1, \"a\", \"q2\", 0])\r\n\r\n @qml.beta.qnode(dev)\r\n def circuit():\r\n qml.Hadamard(wires=-1)\r\n qml.CNOT(wires=[-1, \"q2\"])\r\n return qml.expval(qml.PauliX(wires=\"q2\"))\r\n\r\n res = qml.draw(circuit, show_all_wires=True)()\r\n expected = [\r\n \" -1: ──H──╭C──┤ \",\r\n \" a: ─────│───┤ \",\r\n \" q2: ─────╰X──┤ ⟨X⟩ \",\r\n \" 0: ─────────┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)",
"def test_create_circuit_noname(self):\n qr = QuantumRegister(size=3)\n cr = ClassicalRegister(size=3)\n qc = QuantumCircuit(qr, cr)\n self.assertIsInstance(qc, QuantumCircuit)",
"def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = Chi(circuit)\n target = Chi(target)\n self.assertEqual(op, target)",
"def test_constructor(self, circuit):\n assert list(circuit.wires) == [jet.Wire(i, 0, False) for i in range(4)]\n assert list(circuit.operations) == [jet.Operation(jet.Qubit(), [i]) for i in range(4)]",
"def test_create_circuit_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=3)\n cr = q_program.create_classical_register(size=3)\n qc = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n self.assertIsInstance(qc, QuantumCircuit)",
"def test_normal_circuit(self):\n filename = self._get_resource_path('test_normal.tex')\n qc = QuantumCircuit(5)\n for qubit in range(5):\n qc.h(qubit)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_missing_wire(self):\r\n\r\n dev = qml.device(\"default.qubit\", wires=[\"a\", -1, \"q2\"])\r\n\r\n @qml.beta.qnode(dev)\r\n def circuit():\r\n qml.Hadamard(wires=-1)\r\n qml.CNOT(wires=[\"a\", \"q2\"])\r\n qml.RX(0.2, wires=\"a\")\r\n return qml.expval(qml.PauliX(wires=\"q2\"))\r\n\r\n # test one missing wire\r\n res = qml.draw(circuit, wire_order=[\"q2\", \"a\"])()\r\n expected = [\r\n \" q2: ──╭X───────────┤ ⟨X⟩ \",\r\n \" a: ──╰C──RX(0.2)──┤ \",\r\n \" -1: ───H───────────┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)\r\n\r\n # test one missing wire\r\n res = qml.draw(circuit, wire_order=[\"q2\", -1])()\r\n expected = [\r\n \" q2: ─────╭X───────────┤ ⟨X⟩ \",\r\n \" -1: ──H──│────────────┤ \",\r\n \" a: ─────╰C──RX(0.2)──┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)\r\n\r\n # test multiple missing wires\r\n res = qml.draw(circuit, wire_order=[\"q2\"])()\r\n expected = [\r\n \" q2: ─────╭X───────────┤ ⟨X⟩ \",\r\n \" -1: ──H──│────────────┤ \",\r\n \" a: ─────╰C──RX(0.2)──┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)",
"def test_noFailure(self):\n for i in range(10):\n self.assertTrue(self.circuit_breaker.available())",
"def test_small_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('small')",
"def test_no_ops_draws(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n @qml.beta.qnode(dev)\r\n def qnode():\r\n return qml.expval(qml.PauliX(wires=[0]) @ qml.PauliX(wires=[1]) @ qml.PauliX(wires=[2]))\r\n\r\n res = qml.draw(qnode)()\r\n expected = [\r\n \" 0: ──╭┤ ⟨X ⊗ X ⊗ X⟩ \\n\",\r\n \" 1: ──├┤ ⟨X ⊗ X ⊗ X⟩ \\n\",\r\n \" 2: ──╰┤ ⟨X ⊗ X ⊗ X⟩ \\n\",\r\n ]\r\n\r\n assert res == \"\".join(expected)",
"def test_missing_wire():\r\n\r\n dev = qml.device(\"default.qubit\", wires=[\"a\", -1, \"q2\"])\r\n\r\n @qml.beta.qnode(dev)\r\n def circuit():\r\n qml.Hadamard(wires=-1)\r\n qml.CNOT(wires=[\"a\", \"q2\"])\r\n qml.RX(0.2, wires=\"a\")\r\n return qml.expval(qml.PauliX(wires=\"q2\"))\r\n\r\n # test one missing wire\r\n res = qml.draw(circuit, wire_order=[\"q2\", \"a\"])()\r\n expected = [\r\n \" q2: ──╭X───────────┤ ⟨X⟩ \",\r\n \" a: ──╰C──RX(0.2)──┤ \",\r\n \" -1: ───H───────────┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)\r\n\r\n # test one missing wire\r\n res = qml.draw(circuit, wire_order=[\"q2\", -1])()\r\n expected = [\r\n \" q2: ─────╭X───────────┤ ⟨X⟩ \",\r\n \" -1: ──H──│────────────┤ \",\r\n \" a: ─────╰C──RX(0.2)──┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)\r\n\r\n # test multiple missing wires\r\n res = qml.draw(circuit, wire_order=[\"q2\"])()\r\n expected = [\r\n \" q2: ─────╭X───────────┤ ⟨X⟩ \",\r\n \" -1: ──H──│────────────┤ \",\r\n \" a: ─────╰C──RX(0.2)──┤ \\n\",\r\n ]\r\n\r\n assert res == \"\\n\".join(expected)",
"def test_wrong_output_provided(self):\n with self.assertRaises(VisualizationError):\n circuit_drawer(None, output='wrong_output')",
"def test_empty_input(self):\n discs = calc_disc_c(np.ones(0), np.ones(0), np.ones(0), 0.3)\n np.testing.assert_almost_equal(discs, np.array([]))",
"def test_empty_input(self):\n discs = calc_disc_b(np.ones(0), np.ones(0), np.ones(0), 0.3)\n np.testing.assert_almost_equal(discs, np.array([]))",
"def _check_empty(circuits):\n return len(circuits) == 0",
"def test_blank(self):\n self._calibration_test(\"blank\")",
"def test_create_several_circuits_noname(self):\n q_program = QuantumProgram()\n qr1 = q_program.create_quantum_register(size=3)\n cr1 = q_program.create_classical_register(size=3)\n qr2 = q_program.create_quantum_register(size=3)\n cr2 = q_program.create_classical_register(size=3)\n qc1 = q_program.create_circuit(qregisters=[qr1], cregisters=[cr1])\n qc2 = q_program.create_circuit(qregisters=[qr2], cregisters=[cr2])\n qc3 = q_program.create_circuit(qregisters=[qr1, qr2], cregisters=[cr1, cr2])\n self.assertIsInstance(qc1, QuantumCircuit)\n self.assertIsInstance(qc2, QuantumCircuit)\n self.assertIsInstance(qc3, QuantumCircuit)",
"def test_invalid_wires(self):\r\n dev = qml.device(\"default.qubit\", wires=[\"a\", -1, \"q2\"])\r\n\r\n @qml.beta.qnode(dev)\r\n def circuit():\r\n qml.Hadamard(wires=-1)\r\n qml.CNOT(wires=[\"a\", \"q2\"])\r\n qml.RX(0.2, wires=\"a\")\r\n return qml.expval(qml.PauliX(wires=\"q2\"))\r\n\r\n with pytest.raises(ValueError, match=\"contains wires not contained on the device\"):\r\n res = qml.draw(circuit, wire_order=[\"q2\", 5])()",
"def test_correct_output_provided(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.draw_methods:\n with self.subTest('Test calling of the {} draw method'.format(draw_method),\n draw_method=draw_method):\n\n # Patch function corresponding to the current circuit drawer such that\n # it does nothing\n with patch.object(_cv, self.draw_methods[draw_method], return_value=None)\\\n as mock_draw_method:\n\n # Check that corresponding function was called once with the correct arguments\n circuit_drawer(None, output=draw_method)\n mock_draw_method.assert_called_once_with(None, **self.calls[draw_method])",
"def test_add_circuit_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=2)\n cr = q_program.create_classical_register(size=2)\n qc1 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc2 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc1.h(qr[0])\n qc1.measure(qr[0], cr[0])\n qc2.measure(qr[1], cr[1])\n new_circuit = qc1 + qc2\n q_program.add_circuit(quantum_circuit=new_circuit)\n backend = 'local_qasm_simulator_py' # cpp simulator rejects non string IDs (FIXME)\n shots = 1024\n result = q_program.execute(backend=backend, shots=shots, seed=78)\n counts = result.get_counts(new_circuit.name)\n target = {'00': shots / 2, '01': shots / 2}\n threshold = 0.04 * shots\n self.assertDictAlmostEqual(counts, target, threshold)\n self.assertRaises(QISKitError, result.get_counts)",
"def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]",
"def test_drawWire(self):\n\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('L',(640,480),'white')\n\n drawWire(canvas, regularPolygon(3, np.array([160, 120]), 50))\n drawWire(canvas, regularPolygon(4, np.array([480, 120]), 90))\n drawWire(canvas, regularPolygon(5, np.array([420, 360]), 60))\n drawWire(canvas, regularPolygon(6, np.array([160, 360]), 80))\n drawWire(canvas, regularPolygon(7, np.array([320, 160]), 70))\n\n \"\"\" saving the file and closing it \"\"\"\n\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)",
"def test_empty_circuit_grad(self, differentiator, op):\n differentiator.refresh()\n op = differentiator.generate_differentiable_op(analytic_op=op)\n circuit = tf.convert_to_tensor([], dtype=tf.string)\n psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n\n # Calculate tfq gradient.\n symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)\n with tf.GradientTape() as g:\n g.watch(symbol_values_tensor)\n expectations = op(circuit, symbol_names_tensor,\n symbol_values_tensor, psums)\n grads = g.gradient(expectations, symbol_values_tensor)\n self.assertShapeEqual(grads.numpy(),\n tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))",
"def test_no_false_positives(self, dim):\r\n g = nx.empty_graph(dim)\r\n assert not clique.is_clique(g)",
"def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))",
"def test_empty(self):\n pass",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_invalid_wires():\r\n dev = qml.device(\"default.qubit\", wires=[\"a\", -1, \"q2\"])\r\n\r\n @qml.beta.qnode(dev)\r\n def circuit():\r\n qml.Hadamard(wires=-1)\r\n qml.CNOT(wires=[\"a\", \"q2\"])\r\n qml.RX(0.2, wires=\"a\")\r\n return qml.expval(qml.PauliX(wires=\"q2\"))\r\n\r\n with pytest.raises(ValueError, match=\"contains wires not contained on the device\"):\r\n qml.draw(circuit, wire_order=[\"q2\", 5])()",
"def test_tiny_circuit(self):\n filename = self._get_resource_path('test_tiny.tex')\n qc = QuantumCircuit(1)\n qc.h(0)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)"
]
| [
"0.6924551",
"0.6568286",
"0.64662397",
"0.64222956",
"0.6421285",
"0.64043254",
"0.6376929",
"0.63387245",
"0.629913",
"0.62639713",
"0.6204953",
"0.61285377",
"0.6089309",
"0.6065642",
"0.605834",
"0.605694",
"0.5992427",
"0.59759027",
"0.5941306",
"0.5927979",
"0.591923",
"0.58896697",
"0.58641636",
"0.5862239",
"0.5847913",
"0.5830108",
"0.5826661",
"0.5826661",
"0.58144087",
"0.5791923"
]
| 0.7348291 | 0 |
Test draw tiny circuit. | def test_tiny_circuit(self):
filename = self._get_resource_path('test_tiny.tex')
qc = QuantumCircuit(1)
qc.h(0)
circuit_drawer(qc, filename=filename, output='latex_source')
self.assertEqualToReference(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_small_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('small')",
"def test_normal_circuit(self):\n filename = self._get_resource_path('test_normal.tex')\n qc = QuantumCircuit(5)\n for qubit in range(5):\n qc.h(qubit)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_change_color_of_the_device__true():",
"def test_change_color_of_the_device__false():",
"def test_deep_circuit(self):\n filename = self._get_resource_path('test_deep.tex')\n qc = QuantumCircuit(1)\n for _ in range(100):\n qc.h(0)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_medium_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('medium')",
"def test_interactive(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.interactive_draw_methods:\n with self.subTest('Test interactive regime for {} output'.format(draw_method),\n draw_method=draw_method):\n\n # Patch corresponding circuit_drawer such that it returns an instance of Image\n with patch.object(_cv, self.draw_methods[draw_method], return_value=Image()) as _:\n\n # Patch show attribute of Image such that it does nothing\n with patch.object(Image, 'show', return_value=None) as mock_show:\n\n # Check that show was called once with the correct arguments\n circuit_drawer(None, output=draw_method, interactive=True)\n mock_show.assert_called_once_with()",
"def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = Chi(circuit)\n target = Chi(target)\n self.assertEqual(op, target)",
"def test_create_circuit_noname(self):\n qr = QuantumRegister(size=3)\n cr = ClassicalRegister(size=3)\n qc = QuantumCircuit(qr, cr)\n self.assertIsInstance(qc, QuantumCircuit)",
"def test_random_circuit(self, device, tol, ret):\n n_wires = 2\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n supports_tensor = (\n \"supports_tensor_observables\" in dev.capabilities()\n and dev.capabilities()[\"supports_tensor_observables\"]\n )\n\n if not supports_tensor:\n pytest.skip(\"Device does not support tensor observables.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n n_layers = np.random.randint(1, 5)\n weights = 2 * np.pi * np.random.rand(n_layers, 1)\n\n ret_type = getattr(qml, ret)\n\n def circuit(weights):\n RandomLayers(weights, wires=range(n_wires))\n return ret_type(qml.PauliZ(wires=0) @ qml.PauliX(wires=1))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n grad_def = qml.grad(qnode_def, argnum=0)\n grad = qml.grad(qnode, argnum=0)\n\n assert np.allclose(qnode(weights), qnode_def(weights), atol=tol(dev.shots))\n assert np.allclose(grad(weights), grad_def(weights), atol=tol(dev.shots))",
"def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))",
"def test_drawWire(self):\n\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('L',(640,480),'white')\n\n drawWire(canvas, regularPolygon(3, np.array([160, 120]), 50))\n drawWire(canvas, regularPolygon(4, np.array([480, 120]), 90))\n drawWire(canvas, regularPolygon(5, np.array([420, 360]), 60))\n drawWire(canvas, regularPolygon(6, np.array([160, 360]), 80))\n drawWire(canvas, regularPolygon(7, np.array([320, 160]), 70))\n\n \"\"\" saving the file and closing it \"\"\"\n\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)",
"def test_create_circuit_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=3)\n cr = q_program.create_classical_register(size=3)\n qc = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n self.assertIsInstance(qc, QuantumCircuit)",
"def test_draw():\n circ_m = test_QFTn(3)\n print(launch(1024, circ_m))\n fig = circ_m.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/test.png')\n return fig",
"def test_deep_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('deep')",
"def test_empty_circuit(self):\n filename = self._get_resource_path('test_empty.tex')\n qc = QuantumCircuit(1)\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def main():\n run_test_draw_upside_down_wall()",
"def test_01_lighting(self):",
"def test_constructor(self, circuit):\n assert list(circuit.wires) == [jet.Wire(i, 0, False) for i in range(4)]\n assert list(circuit.operations) == [jet.Operation(jet.Qubit(), [i]) for i in range(4)]",
"def test_large_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('large')",
"def test_correct_output_provided(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.draw_methods:\n with self.subTest('Test calling of the {} draw method'.format(draw_method),\n draw_method=draw_method):\n\n # Patch function corresponding to the current circuit drawer such that\n # it does nothing\n with patch.object(_cv, self.draw_methods[draw_method], return_value=None)\\\n as mock_draw_method:\n\n # Check that corresponding function was called once with the correct arguments\n circuit_drawer(None, output=draw_method)\n mock_draw_method.assert_called_once_with(None, **self.calls[draw_method])",
"def test_single_quadrant(self):",
"def test_drawing_tf():\r\n tf = pytest.importorskip(\"tensorflow\")\r\n\r\n x = tf.constant(0.1)\r\n y = tf.constant([0.2, 0.3])\r\n z = tf.Variable(0.4)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev, interface=\"tf\")\r\n def circuit(p1, p2=y, **kwargs):\r\n qml.RX(p1, wires=0)\r\n qml.RY(p2[0] * p2[1], wires=1)\r\n qml.RX(kwargs[\"p3\"], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n result = qml.draw(circuit)(p1=x, p3=z)\r\n expected = \"\"\"\\\r\n 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩ \r\n 1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩ \r\n\"\"\"\r\n\r\n assert result == expected",
"def _small_circuit():\n qr = QuantumRegister(1, name='qr')\n cr = ClassicalRegister(1, name='cr')\n circuit = QuantumCircuit(qr, cr)\n\n circuit.x(qr[0])\n circuit.barrier(qr[0])\n circuit.measure(qr, cr)\n\n return circuit",
"def test_drawing_torch():\r\n torch = pytest.importorskip(\"torch\")\r\n\r\n x = torch.tensor(0.1, requires_grad=True)\r\n y = torch.tensor([0.2, 0.3], requires_grad=True)\r\n z = torch.tensor(0.4, requires_grad=True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev, interface=\"torch\")\r\n def circuit(p1, p2=y, **kwargs):\r\n qml.RX(p1, wires=0)\r\n qml.RY(p2[0] * p2[1], wires=1)\r\n qml.RX(kwargs[\"p3\"], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n result = qml.draw(circuit)(p1=x, p3=z)\r\n expected = \"\"\"\\\r\n 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩ \r\n 1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩ \r\n\"\"\"\r\n\r\n assert result == expected",
"def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )",
"def test_ethyne(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((400, 420), (500, 420))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[400, 420, 500, 420]]\n ])\n )",
"def test_circle_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1),\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle3.png')",
"def test_huge_circuit(self):\n filename = self._get_resource_path('test_huge.tex')\n qc = QuantumCircuit(40)\n for qubit in range(39):\n qc.h(qubit)\n qc.cx(qubit, 39)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)"
]
| [
"0.6801835",
"0.6699156",
"0.64904195",
"0.6419961",
"0.6361697",
"0.63490915",
"0.6336773",
"0.62877667",
"0.61095464",
"0.6101822",
"0.608317",
"0.6071821",
"0.60660523",
"0.6031293",
"0.5999905",
"0.5997176",
"0.59682024",
"0.589236",
"0.58785677",
"0.5865717",
"0.5857305",
"0.584083",
"0.5840078",
"0.5826858",
"0.5796432",
"0.5773804",
"0.5762708",
"0.57571554",
"0.5746703",
"0.5736554"
]
| 0.7153058 | 0 |
Test draw deep circuit. | def test_deep_circuit(self):
filename = self._get_resource_path('test_deep.tex')
qc = QuantumCircuit(1)
for _ in range(100):
qc.h(0)
circuit_drawer(qc, filename=filename, output='latex_source')
self.assertEqualToReference(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_deep_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('deep')",
"def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = Chi(circuit)\n target = Chi(target)\n self.assertEqual(op, target)",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def test_circuit(circuit, n):\n a = circuit_to_state(ABPWrapper, n, circuit)\n b = circuit_to_state(AndersWrapper, n, circuit)\n assert a == b",
"def test_random_circuit(self, device, tol, ret):\n n_wires = 2\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n supports_tensor = (\n \"supports_tensor_observables\" in dev.capabilities()\n and dev.capabilities()[\"supports_tensor_observables\"]\n )\n\n if not supports_tensor:\n pytest.skip(\"Device does not support tensor observables.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n n_layers = np.random.randint(1, 5)\n weights = 2 * np.pi * np.random.rand(n_layers, 1)\n\n ret_type = getattr(qml, ret)\n\n def circuit(weights):\n RandomLayers(weights, wires=range(n_wires))\n return ret_type(qml.PauliZ(wires=0) @ qml.PauliX(wires=1))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n grad_def = qml.grad(qnode_def, argnum=0)\n grad = qml.grad(qnode, argnum=0)\n\n assert np.allclose(qnode(weights), qnode_def(weights), atol=tol(dev.shots))\n assert np.allclose(grad(weights), grad_def(weights), atol=tol(dev.shots))",
"def test_normal_circuit(self):\n filename = self._get_resource_path('test_normal.tex')\n qc = QuantumCircuit(5)\n for qubit in range(5):\n qc.h(qubit)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_small_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('small')",
"def test_diagonal_gate_wrapper(self):\n shots = 100\n lsts = [\n [1, -1],\n [1, -1, -1, 1],\n [1.0, -1.0, -1.0, 1.0]]\n circuits = [ ref_diagonal_gate.diagonal_gate_circuits_deterministic_w(state)\n for state in [ np.array(lst, dtype=t) \n for t in (None, float, np.float32, complex, np.complex64)\n for lst in lsts ] ]\n result = execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))",
"def test_medium_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('medium')",
"def test_full_graph(self):\r\n x, y, z = tensor.vectors('x', 'y', 'z')\r\n t = x * y\r\n self.check([\r\n (x * 2, x * 2, (({}, True), )),\r\n (x * 2, y * 2, (({}, False), ({y: x}, True), )),\r\n (x * 2, y * 2, (({}, False), ({x: y}, True), )),\r\n (x * 2, y * 3, (({}, False), ({y: x}, False), )),\r\n (t * 2, z * 2, (({}, False), ({t: z}, True), )),\r\n (t * 2, z * 2, (({}, False), ({z: t}, True), )),\r\n (x * (y * z), (x * y) * z, (({}, False), )),\r\n ])",
"def test_drawing_torch():\r\n torch = pytest.importorskip(\"torch\")\r\n\r\n x = torch.tensor(0.1, requires_grad=True)\r\n y = torch.tensor([0.2, 0.3], requires_grad=True)\r\n z = torch.tensor(0.4, requires_grad=True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev, interface=\"torch\")\r\n def circuit(p1, p2=y, **kwargs):\r\n qml.RX(p1, wires=0)\r\n qml.RY(p2[0] * p2[1], wires=1)\r\n qml.RX(kwargs[\"p3\"], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n result = qml.draw(circuit)(p1=x, p3=z)\r\n expected = \"\"\"\\\r\n 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩ \r\n 1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩ \r\n\"\"\"\r\n\r\n assert result == expected",
"def test_interactive(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.interactive_draw_methods:\n with self.subTest('Test interactive regime for {} output'.format(draw_method),\n draw_method=draw_method):\n\n # Patch corresponding circuit_drawer such that it returns an instance of Image\n with patch.object(_cv, self.draw_methods[draw_method], return_value=Image()) as _:\n\n # Patch show attribute of Image such that it does nothing\n with patch.object(Image, 'show', return_value=None) as mock_show:\n\n # Check that show was called once with the correct arguments\n circuit_drawer(None, output=draw_method, interactive=True)\n mock_show.assert_called_once_with()",
"def test_diagonal_gate(self):\n shots = 100\n circuits = ref_diagonal_gate.diagonal_gate_circuits_deterministic(\n final_measure=True)\n targets = ref_diagonal_gate.diagonal_gate_counts_deterministic(\n shots)\n result = execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))\n self.compare_counts(result, circuits, targets, delta=0)",
"def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)",
"def test_large_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('large')",
"def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))",
"def test_inner_product(self):\n circuit = InnerProduct(n_qubits=3)\n expected = QuantumCircuit(*circuit.qregs)\n expected.cz(0, 3)\n expected.cz(1, 4)\n expected.cz(2, 5)\n self.assertEqual(circuit, expected)",
"def test_execute_non_gates(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n LOW = devices.LOW\n HIGH = devices.HIGH\n\n # Make different devices\n [SW1_ID, SW2_ID, SW3_ID, CL_ID, D_ID] = names.lookup([\"Sw1\", \"Sw2\", \"Sw3\",\n \"Clock1\", \"D1\"])\n devices.make_device(SW1_ID, devices.SWITCH, 1)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n devices.make_device(SW3_ID, devices.SWITCH, 0)\n devices.make_device(CL_ID, devices.CLOCK, 1)\n devices.make_device(D_ID, devices.D_TYPE)\n\n # Make connections\n network.make_connection(SW1_ID, None, D_ID, devices.DATA_ID)\n network.make_connection(CL_ID, None, D_ID, devices.CLK_ID)\n network.make_connection(SW2_ID, None, D_ID, devices.SET_ID)\n network.make_connection(SW3_ID, None, D_ID, devices.CLEAR_ID)\n\n # Get device outputs, the expression is in a string here so that it\n # can be re-evaluated again after executing devices\n sw1_output = \"network.get_output_signal(SW1_ID, None)\"\n sw2_output = \"network.get_output_signal(SW2_ID, None)\"\n sw3_output = \"network.get_output_signal(SW3_ID, None)\"\n clock_output = \"network.get_output_signal(CL_ID, None)\"\n dtype_Q = \"network.get_output_signal(D_ID, devices.Q_ID)\"\n dtype_QBAR = \"network.get_output_signal(D_ID, devices.QBAR_ID)\"\n\n # Execute devices until the clock is LOW at the start of its\n # period\n clock_device = devices.get_device(CL_ID)\n network.execute_network()\n while clock_device.clock_counter != 1 or eval(clock_output) != LOW:\n network.execute_network()\n\n # The clock is not rising yet, Q could be (randomly) HIGH or LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output)] == [HIGH, LOW, LOW, LOW]\n\n assert eval(dtype_Q) in [HIGH, LOW]\n assert eval(dtype_QBAR) == network.invert_signal(eval(dtype_Q))\n\n network.execute_network() # the clock has risen\n # While sw1(DATA) is high, Q has now changed to HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, LOW) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, HIGH) # Sw2 is connected to SET\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is LOW, and the clock is rising,\n # sw2(SET) is HIGH, so Q is HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n LOW, HIGH, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, HIGH) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, LOW) # Sw2 is connected to SET\n devices.set_switch(SW3_ID, HIGH) # Sw3 is connected to CLEAR\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is HIGH, and the clock is rising,\n # sw3(CLEAR) is HIGH, so Q is LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, HIGH, HIGH, LOW, HIGH]",
"def test_1(self):\n graph = build_graph_with_attrs(nodes_with_attrs=self.nodes,\n edges_with_attrs=self.edges)\n tested_pass = AddIsCyclicAttribute()\n tested_pass.find_and_replace_pattern(graph)\n\n assert graph.graph['is_cyclic'] is False",
"def test_cycle(self):\n g = Graph(3)\n g.add_edge(0, 1)\n g.add_edge(0, 2)\n # g.add_edge(0, 0)\n assert g.contains_cycle() is False\n g.add_edge(1, 2)\n assert g.contains_cycle() is True",
"def test_correct_output_provided(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.draw_methods:\n with self.subTest('Test calling of the {} draw method'.format(draw_method),\n draw_method=draw_method):\n\n # Patch function corresponding to the current circuit drawer such that\n # it does nothing\n with patch.object(_cv, self.draw_methods[draw_method], return_value=None)\\\n as mock_draw_method:\n\n # Check that corresponding function was called once with the correct arguments\n circuit_drawer(None, output=draw_method)\n mock_draw_method.assert_called_once_with(None, **self.calls[draw_method])",
"def testDipoleEdge(self):\n\n sources = DipoleFitTaskTest.runDetection(self)\n\n for i, r1 in enumerate(sources):\n result = r1.extract(\"ip_diffim_DipoleFit*\")\n self.assertTrue(result.get(\"ip_diffim_DipoleFit_flag\"))",
"def test_single_quadrant(self):",
"def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )",
"def test_run_circuit_oracle(self):\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n list_good_state = [\"11\"]\n grover = Grover(oracle=oracle, good_state=list_good_state)\n ret = grover.run(self._qasm)\n self.assertIn(ret['top_measurement'], list_good_state)",
"def test_construct_subcircuit_layers(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def circuit(params):\r\n # section 1\r\n qml.RX(params[0], wires=0)\r\n # section 2\r\n qml.RY(params[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 3\r\n qml.RX(params[2], wires=0)\r\n qml.RY(params[3], wires=1)\r\n qml.RZ(params[4], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 4\r\n qml.RX(params[5], wires=0)\r\n qml.RY(params[6], wires=1)\r\n qml.RZ(params[7], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n\r\n params = np.ones([8])\r\n tapes = circuit.metric_tensor(params, only_construct=True)\r\n\r\n # this circuit should split into 4 independent\r\n # sections or layers when constructing subcircuits\r\n assert len(tapes) == 4\r\n\r\n # first layer subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second layer subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # # third layer subcircuit\r\n assert len(tapes[2].operations) == 8\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n assert isinstance(tapes[2].operations[3], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[2].operations[4], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[2].operations[5], qml.PauliZ)\r\n assert isinstance(tapes[2].operations[6], qml.S)\r\n assert isinstance(tapes[2].operations[7], qml.Hadamard)\r\n\r\n # # fourth layer subcircuit\r\n assert len(tapes[3].operations) == 13\r\n assert isinstance(tapes[3].operations[0], qml.RX)\r\n assert isinstance(tapes[3].operations[1], qml.RY)\r\n assert isinstance(tapes[3].operations[2], qml.CNOT)\r\n assert isinstance(tapes[3].operations[3], qml.CNOT)\r\n assert isinstance(tapes[3].operations[4], qml.RX)\r\n assert isinstance(tapes[3].operations[5], qml.RY)\r\n assert isinstance(tapes[3].operations[6], qml.RZ)\r\n assert isinstance(tapes[3].operations[7], qml.CNOT)\r\n assert isinstance(tapes[3].operations[8], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[3].operations[9], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[3].operations[10], qml.PauliZ)\r\n assert isinstance(tapes[3].operations[11], qml.S)\r\n assert isinstance(tapes[3].operations[12], qml.Hadamard)",
"def test_unitary_gate_real(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_real_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_real_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def test_circuit_decompose(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=47)\n dmat, circ2cx = dec(u4)\n\n qc1 = QuantumCircuit(2)\n qc1.append(UnitaryGate(u4), range(2))\n\n qc2 = QuantumCircuit(2)\n qc2.compose(circ2cx, range(2), front=False, inplace=True)\n qc2.append(UnitaryGate(dmat), range(2))\n\n self.assertEqual(Operator(u4), Operator(qc1))\n self.assertEqual(Operator(qc1), Operator(qc2))",
"def test_make_pathways(self):\n basic_test_runner(self, 'pathways')",
"def test_create_circuit_noname(self):\n qr = QuantumRegister(size=3)\n cr = ClassicalRegister(size=3)\n qc = QuantumCircuit(qr, cr)\n self.assertIsInstance(qc, QuantumCircuit)"
]
| [
"0.79749006",
"0.60347056",
"0.60028684",
"0.5929052",
"0.5861189",
"0.58604586",
"0.58148104",
"0.5752517",
"0.5734806",
"0.5658191",
"0.5636587",
"0.56252784",
"0.5622533",
"0.56093955",
"0.5568864",
"0.55531883",
"0.5541974",
"0.5538976",
"0.5533178",
"0.5518426",
"0.55044043",
"0.54966563",
"0.5475336",
"0.5463039",
"0.54572135",
"0.54559",
"0.5451983",
"0.5451264",
"0.54392684",
"0.5430438"
]
| 0.7030379 | 1 |
Test drawing a huge circuit. | def test_huge_circuit(self):
filename = self._get_resource_path('test_huge.tex')
qc = QuantumCircuit(40)
for qubit in range(39):
qc.h(qubit)
qc.cx(qubit, 39)
circuit_drawer(qc, filename=filename, output='latex_source')
self.assertEqualToReference(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_large_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('large')",
"def test_small_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('small')",
"def test_deep_circuit(self):\n filename = self._get_resource_path('test_deep.tex')\n qc = QuantumCircuit(1)\n for _ in range(100):\n qc.h(0)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_medium_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('medium')",
"def test_normal_circuit(self):\n filename = self._get_resource_path('test_normal.tex')\n qc = QuantumCircuit(5)\n for qubit in range(5):\n qc.h(qubit)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_tiny_circuit(self):\n filename = self._get_resource_path('test_tiny.tex')\n qc = QuantumCircuit(1)\n qc.h(0)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_deep_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('deep')",
"def _large_circuit():\n qr = QuantumRegister(9, name='qr')\n cr = ClassicalRegister(9, name='cr')\n circuit = QuantumCircuit(qr, cr)\n\n for i in range(3):\n zero = 3 * i\n first = 3 * i + 1\n second = 3 * i + 2\n\n circuit.x(qr[zero])\n circuit.y(qr[first])\n circuit.z(qr[second])\n\n circuit.h(qr[zero])\n circuit.s(qr[first])\n circuit.sdg(qr[second])\n\n circuit.t(qr[zero])\n circuit.tdg(qr[first])\n circuit.iden(qr[second])\n\n circuit.reset(qr[zero])\n circuit.reset(qr[first])\n circuit.reset(qr[second])\n\n circuit.rx(pi / 8, qr[zero])\n circuit.ry(pi / 8, qr[first])\n circuit.rz(pi / 8, qr[second])\n\n circuit.u1(pi / 8, qr[zero])\n circuit.u2(pi / 8, pi / 8, qr[first])\n circuit.u3(pi / 8, pi / 8, pi / 8, qr[second])\n\n circuit.swap(qr[zero], qr[first])\n\n circuit.cx(qr[zero], qr[first])\n circuit.cy(qr[first], qr[second])\n circuit.cz(qr[second], qr[zero])\n circuit.ch(qr[zero], qr[first])\n\n circuit.cu1(pi / 8, qr[zero], qr[first])\n circuit.cu3(pi / 8, pi / 8, pi / 8, qr[first], qr[second])\n\n circuit.barrier(qr)\n\n circuit.measure(qr, cr)\n\n return circuit",
"def test_24():\n n = 5\n q = QuantumRegister(n, 'q')\n circ = QuantumCircuit(q)\n for i in range(4):\n circ.h(q[i])\n circ.ch(q[3], q[4])\n circ_m = measure(circ, q, [i for i in range(n)])\n counts = launch(2048, circ_m)\n print(counts, len(counts))",
"def _small_circuit():\n qr = QuantumRegister(1, name='qr')\n cr = ClassicalRegister(1, name='cr')\n circuit = QuantumCircuit(qr, cr)\n\n circuit.x(qr[0])\n circuit.barrier(qr[0])\n circuit.measure(qr, cr)\n\n return circuit",
"def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))",
"def test_num_circs_shots(self):\n backend = FakeValencia()\n generator = Generator(backend)\n max_experiments = 5\n max_shots = 10\n backend._configuration.max_experiments = max_experiments\n backend._configuration.max_shots = max_shots\n sub_tests = [1, 3*max_shots, 3*max_shots+1, 3*max_shots-1,\n 3*max_shots*2, 3*max_shots*2+1, 3*max_shots*max_experiments-1]\n for num_raw_bits in sub_tests:\n with self.subTest(num_raw_bits=num_raw_bits):\n result = generator.sample(num_raw_bits=num_raw_bits).block_until_ready()\n self.assertGreaterEqual(len(result.raw_bits), num_raw_bits)",
"def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)",
"def testDrawEdge(self):\n (w,h) = self.im8_1.getSize()\n \n for thick in range(10):\n self.im8_1.reset()\n drawEdge(self.im8_1, thick)\n self.im8_3.fill(255)\n drawSquare(self.im8_3, (thick, thick, w-1-thick, h-1-thick), 0)\n (x,y) = compare(self.im8_1, self.im8_3, self.im8_2)\n self.assertTrue(x<0)",
"def main():\n run_test_draw_upside_down_wall()",
"def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)",
"def test_unitary_gate_real(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_real_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_real_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def test_draw_two(self):\n f = txn_oracle.draw_two\n\n for _ in range(1000):\n max_n = random.randint(4, 20)\n i, j = f(max_n)\n assert i != j",
"def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )",
"def test_create_circuit_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=3)\n cr = q_program.create_classical_register(size=3)\n qc = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n self.assertIsInstance(qc, QuantumCircuit)",
"def test_noFailure(self):\n for i in range(10):\n self.assertTrue(self.circuit_breaker.available())",
"def test_add_circuit_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=2)\n cr = q_program.create_classical_register(size=2)\n qc1 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc2 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc1.h(qr[0])\n qc1.measure(qr[0], cr[0])\n qc2.measure(qr[1], cr[1])\n new_circuit = qc1 + qc2\n q_program.add_circuit(quantum_circuit=new_circuit)\n backend = 'local_qasm_simulator_py' # cpp simulator rejects non string IDs (FIXME)\n shots = 1024\n result = q_program.execute(backend=backend, shots=shots, seed=78)\n counts = result.get_counts(new_circuit.name)\n target = {'00': shots / 2, '01': shots / 2}\n threshold = 0.04 * shots\n self.assertDictAlmostEqual(counts, target, threshold)\n self.assertRaises(QISKitError, result.get_counts)",
"def test_create_circuit_noname(self):\n qr = QuantumRegister(size=3)\n cr = ClassicalRegister(size=3)\n qc = QuantumCircuit(qr, cr)\n self.assertIsInstance(qc, QuantumCircuit)",
"def test_unitary_gate_complex(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_complex_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_complex_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = Chi(circuit)\n target = Chi(target)\n self.assertEqual(op, target)",
"def test_drawWire(self):\n\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('L',(640,480),'white')\n\n drawWire(canvas, regularPolygon(3, np.array([160, 120]), 50))\n drawWire(canvas, regularPolygon(4, np.array([480, 120]), 90))\n drawWire(canvas, regularPolygon(5, np.array([420, 360]), 60))\n drawWire(canvas, regularPolygon(6, np.array([160, 360]), 80))\n drawWire(canvas, regularPolygon(7, np.array([320, 160]), 70))\n\n \"\"\" saving the file and closing it \"\"\"\n\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)",
"def test_run_circuit_oracle(self):\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n list_good_state = [\"11\"]\n grover = Grover(oracle=oracle, good_state=list_good_state)\n ret = grover.run(self._qasm)\n self.assertIn(ret['top_measurement'], list_good_state)",
"def check_circuit_type(self, circuit_type):\n # Obtain paths to directory where produced and reference outputs are to be stored\n # correspondingly\n test_output_dir, references_dir = self._prepare_dirs('{}'.format(circuit_type))\n\n for draw_method in self.draw_methods:\n # Create a subTest for each underlying circuit drawer\n with self.subTest('Test of drawing a {} circuit'\n ' with `{}` output format'.format(circuit_type, draw_method),\n draw_method=draw_method):\n # Obtain path to files with produced and reference outputs correspondingly\n test_output = os.path.join(test_output_dir, draw_method)\n reference_output = os.path.join(references_dir, draw_method)\n\n try:\n # Make underlying circuit drawer to draw chosen circuit\n circuit_drawer(self.circuits[circuit_type](),\n output=draw_method,\n filename=test_output)\n\n # Check if produced output equals the reference one\n self.assertOutputsAreEqual(draw_method,\n test_output + self.extensions.get(draw_method,\n ''),\n reference_output + self.extensions.get(\n draw_method, ''))\n\n # If `pfdlatex` is not installed, well, there is no sense in testing it\n except OSError:\n pass",
"def test_bw40_auto(self, setUp):\n\n self.common_channel(band='40', channel='0')",
"def _medium_circuit():\n\n qr = QuantumRegister(3, name='qr')\n cr = ClassicalRegister(3, name='cr')\n circuit = QuantumCircuit(qr, cr)\n\n circuit.x(qr[0])\n circuit.y(qr[1])\n circuit.z(qr[2])\n\n circuit.h(qr[0])\n circuit.s(qr[1])\n circuit.sdg(qr[2])\n\n circuit.t(qr[0])\n circuit.tdg(qr[1])\n circuit.iden(qr[2])\n\n circuit.reset(qr[0])\n circuit.reset(qr[1])\n circuit.reset(qr[2])\n\n circuit.rx(pi / 8, qr[0])\n circuit.ry(pi / 8, qr[1])\n circuit.rz(pi / 8, qr[2])\n\n circuit.u1(pi / 8, qr[0])\n circuit.u2(pi / 8, pi / 8, qr[1])\n circuit.u3(pi / 8, pi / 8, pi / 8, qr[2])\n\n circuit.swap(qr[0], qr[1])\n\n circuit.cx(qr[0], qr[1])\n circuit.cy(qr[1], qr[2])\n circuit.cz(qr[2], qr[0])\n circuit.ch(qr[0], qr[1])\n\n circuit.cu1(pi / 8, qr[0], qr[1])\n circuit.cu3(pi / 8, pi / 8, pi / 8, qr[1], qr[2])\n\n circuit.barrier(qr)\n\n circuit.measure(qr, cr)\n\n return circuit"
]
| [
"0.7120328",
"0.6945879",
"0.6453144",
"0.634923",
"0.6328613",
"0.6269708",
"0.61272216",
"0.6119357",
"0.61191165",
"0.60788524",
"0.6074225",
"0.594739",
"0.59119767",
"0.58624464",
"0.5825523",
"0.5784322",
"0.57635987",
"0.5749336",
"0.5729748",
"0.571905",
"0.571555",
"0.5711093",
"0.56812793",
"0.567582",
"0.56528074",
"0.5638838",
"0.5632418",
"0.56066895",
"0.55868965",
"0.5582176"
]
| 0.7038887 | 1 |
Test drawing a teleport circuit. | def test_teleport(self):
from qiskit.circuit.library import U3Gate
filename = self._get_resource_path('test_teleport.tex')
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
qc = QuantumCircuit(qr, cr)
# Prepare an initial state
qc.append(U3Gate(0.3, 0.2, 0.1), [qr[0]])
# Prepare a Bell pair
qc.h(qr[1])
qc.cx(qr[1], qr[2])
# Barrier following state preparation
qc.barrier(qr)
# Measure in the Bell basis
qc.cx(qr[0], qr[1])
qc.h(qr[0])
qc.measure(qr[0], cr[0])
qc.measure(qr[1], cr[1])
# Apply a correction
qc.z(qr[2]).c_if(cr, 1)
qc.x(qr[2]).c_if(cr, 2)
qc.measure(qr[2], cr[2])
circuit_drawer(qc, filename=filename, output='latex_source')
self.assertEqualToReference(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_teleport(self):\n self.log.info('test_teleport')\n pi = np.pi\n shots = 2000\n qr = QuantumRegister(3, 'qr')\n cr0 = ClassicalRegister(1, 'cr0')\n cr1 = ClassicalRegister(1, 'cr1')\n cr2 = ClassicalRegister(1, 'cr2')\n circuit = QuantumCircuit(qr, cr0, cr1, cr2, name='teleport')\n circuit.h(qr[1])\n circuit.cx(qr[1], qr[2])\n circuit.ry(pi/4, qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.h(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr[0], cr0[0])\n circuit.measure(qr[1], cr1[0])\n circuit.z(qr[2]).c_if(cr0, 1)\n circuit.x(qr[2]).c_if(cr1, 1)\n circuit.measure(qr[2], cr2[0])\n job = execute(circuit, backend=self.backend, shots=shots, seed_simulator=self.seed)\n results = job.result()\n data = results.get_counts('teleport')\n alice = {\n '00': data['0 0 0'] + data['1 0 0'],\n '01': data['0 1 0'] + data['1 1 0'],\n '10': data['0 0 1'] + data['1 0 1'],\n '11': data['0 1 1'] + data['1 1 1']\n }\n bob = {\n '0': data['0 0 0'] + data['0 1 0'] + data['0 0 1'] + data['0 1 1'],\n '1': data['1 0 0'] + data['1 1 0'] + data['1 0 1'] + data['1 1 1']\n }\n self.log.info('test_teleport: circuit:')\n self.log.info(circuit.qasm())\n self.log.info('test_teleport: data %s', data)\n self.log.info('test_teleport: alice %s', alice)\n self.log.info('test_teleport: bob %s', bob)\n alice_ratio = 1/np.tan(pi/8)**2\n bob_ratio = bob['0']/float(bob['1'])\n error = abs(alice_ratio - bob_ratio) / alice_ratio\n self.log.info('test_teleport: relative error = %s', error)\n self.assertLess(error, 0.05)",
"def main():\n run_test_draw_upside_down_wall()",
"def test_make_pathways(self):\n basic_test_runner(self, 'pathways')",
"def test_win(self):\n self.T.board[0] = ['x']*3\n assert self.T.tic_tac_toe(self.T.board)",
"def testDraw():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n standings = playerStandings()\n [id1, id2, id3, id4] = [row[0] for row in standings]\n reportMatch(id1, id2, True)\n reportMatch(id3, id4, True)\n standings = playerStandings()\n if not (standings[0][2]==standings[1][2]==standings[2][2]==standings[3][2]):\n raise ValueError(\n \"Players should have the same number of points after drawing\"\n )\n\n print \"3. Draw is recorded properly.\"",
"def test_drive(self):\n global ENV, TRAFFIC_LIGHT\n ENV = simpy.Environment()\n TRAFFIC_LIGHT = TrafficLight()\n bus = Bus(nr=0)\n ENV.process(bus.drive())\n ENV.run()\n self.assertEqual(bus.movement.to_pos, 600)",
"def test_turn(self):\n Action = SimObject.Action # shortcut\n actions = [\n Action.MOVE, Action.TURN_RIGHT, Action.MOVE, Action.TURN_LEFT]\n map_lines = [\" \", \" T\"]\n _MapContainer.MAP = \"\\n\".join(map_lines)\n configuration = {\n \"map\": _MapContainer,\n \"parameters\": {(1, 1): ([Direction.NORTH, actions], {})},\n \"steps_limiter_steps\": 4\n }\n sim = RecorderSimulator(configuration, {})\n sim.run()\n self.assertEqual(\n sim.maps,\n [map_lines, [\" \", \" T\"], [\" \", \"T \"], [\" \", \"T \"],\n [\"T \", \" \"]])",
"def test_correct_output_provided(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.draw_methods:\n with self.subTest('Test calling of the {} draw method'.format(draw_method),\n draw_method=draw_method):\n\n # Patch function corresponding to the current circuit drawer such that\n # it does nothing\n with patch.object(_cv, self.draw_methods[draw_method], return_value=None)\\\n as mock_draw_method:\n\n # Check that corresponding function was called once with the correct arguments\n circuit_drawer(None, output=draw_method)\n mock_draw_method.assert_called_once_with(None, **self.calls[draw_method])",
"def test_draw(self):\n Stationery = m.Stationery('yo')\n\n self.assertEqual(Stationery.draw(), 'Starting to draw')",
"def test_via(num_vias = 100, wire_width = 10, via_width = 15,\n via_spacing = 40, pad_size = (300, 300), min_pad_spacing = 0,\n pad_layer = 0, wiring1_layer = 1, wiring2_layer = 2,\n via_layer = 3):\n VR = Device('test_via')\n pad1 = VR.add_ref(rectangle(size = pad_size, layer = pad_layer))\n pad1_overlay = VR.add_ref(rectangle(size = pad_size,\n layer = wiring1_layer))\n pad2 = VR.add_ref(rectangle(size = pad_size, layer = pad_layer))\n pad2_overlay = VR.add_ref(rectangle(size = pad_size,\n layer = wiring1_layer))\n nub = VR.add_ref(compass(size = (3*wire_width,wire_width),\n layer = pad_layer))\n nub_overlay = VR.add_ref(compass(size = (3*wire_width, wire_width),\n layer = wiring1_layer))\n head = VR.add_ref(compass(size = (wire_width, wire_width),\n layer = pad_layer))\n head_overlay = VR.add_ref(compass(size = (wire_width, wire_width),\n layer = wiring1_layer))\n nub.ymax = pad1.ymax - 5\n nub.xmin = pad1.xmax\n nub_overlay.ymax = pad1.ymax - 5\n nub_overlay.xmin = pad1.xmax\n head.connect(port = \"W\", destination = nub.ports[\"E\"])\n head_overlay.connect(port = \"W\", destination = nub_overlay.ports[\"E\"])\n pad1_overlay.xmin = pad1.xmin\n pad1_overlay.ymin = pad1.ymin\n\n old_port = head.ports['S']\n count = 0\n width_via_iter = 2*via_spacing - 2*wire_width\n\n pad2.xmin = pad1.xmax + min_pad_spacing\n up = False\n down = True\n edge = True\n current_width = 3*wire_width + wire_width #width of nub and 1 overlap\n obj_old = head\n obj = head\n via_iterable = _via_iterable(via_spacing, wire_width, wiring1_layer,\n wiring2_layer, via_layer, via_width)\n while((count + 2) <= num_vias):\n obj = VR.add_ref(via_iterable)\n obj.connect(port = 'W', destination = old_port, overlap = wire_width)\n old_port = obj.ports['E']\n edge = False\n if(obj.ymax > pad1.ymax):\n obj.connect(port = 'W', destination = obj_old.ports['S'],\n overlap = wire_width)\n old_port = obj.ports['S']\n current_width += width_via_iter\n down = True\n up = False\n edge = True\n\n elif(obj.ymin < pad1.ymin):\n obj.connect(port = 'W', destination = obj_old.ports['N'],\n overlap = wire_width)\n old_port = obj.ports['N']\n current_width += width_via_iter\n up = True\n down = False\n edge = True\n count = count + 2\n obj_old = obj\n\n if(current_width < min_pad_spacing and (min_pad_spacing - current_width) > 3*wire_width):\n tail = VR.add_ref(\n compass(size = (min_pad_spacing-current_width+wire_width,\n wire_width),\n layer = wiring1_layer)\n )\n tail_overlay = VR.add_ref(\n compass(size = (min_pad_spacing-current_width+wire_width,\n wire_width),\n layer = pad_layer)\n )\n else:\n tail = VR.add_ref(compass(size = (3*wire_width, wire_width),\n layer = wiring1_layer))\n tail_overlay = VR.add_ref(compass(size = (3*wire_width, wire_width),\n layer = wiring1_layer))\n\n if(up == True and edge != True):\n tail.connect(port = 'W', destination = obj.ports['S'],\n overlap = wire_width)\n tail_overlay.connect(port = 'W', destination = obj.ports['S'],\n overlap = wire_width)\n elif(down == True and edge != True):\n tail.connect(port = 'W', destination = obj.ports['N'],\n overlap = wire_width)\n tail_overlay.connect(port = 'W', destination = obj.ports['N'],\n overlap = wire_width)\n else:\n tail.connect(port = 'W', destination = obj.ports['E'],\n overlap = wire_width)\n tail_overlay.connect(port = 'W', destination = obj.ports['E'],\n overlap = wire_width)\n\n pad2.xmin = tail.xmax\n pad2_overlay.xmin = pad2.xmin\n pad2_overlay.ymin = pad2.ymin\n\n return VR",
"def test_interactive(self):\n # Create a subTest for current circuit drawer\n for draw_method in self.interactive_draw_methods:\n with self.subTest('Test interactive regime for {} output'.format(draw_method),\n draw_method=draw_method):\n\n # Patch corresponding circuit_drawer such that it returns an instance of Image\n with patch.object(_cv, self.draw_methods[draw_method], return_value=Image()) as _:\n\n # Patch show attribute of Image such that it does nothing\n with patch.object(Image, 'show', return_value=None) as mock_show:\n\n # Check that show was called once with the correct arguments\n circuit_drawer(None, output=draw_method, interactive=True)\n mock_show.assert_called_once_with()",
"def chain_test():\n print(f\"Running {__file__}::{chain_test.__name__}()\")\n con = Connection(State())\n arm_ctrl = BasicController(con)\n force_calib_ctrl = EMAForceCalibrator(arm_ctrl)\n touch_ctrl = TouchController(force_calib_ctrl)\n\n cmd = Command().make(kind =UR_CMD_KIND_MOVE_TOOL_POSE, target=Tool(0.1,0.1,0.1,0,0,0), force_low_bound=Tool(-1,-1,-1,-1,-1,-1), force_high_bound=Tool(1,1,1,1,1,1))\n state = State()\n touch_ctrl.execute(cmd, state)\n print(\"Passed.\")",
"def test_policer_handoff_output(self):\n self.policer_handoff_test(Dir.TX)",
"def test_draw_poly():\n\n for i in range(7):\n bob = turtle.Turtle()\n draw_poly(bob, i + 3, 100)",
"def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )",
"def test_connect(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n self.assertTrue(cinfo is not None)\n self.assertEquals(self.box1, cinfo.connected)\n self.assertTrue(cinfo.port is self.box1.ports()[0],\n 'port %s' % cinfo.port)\n self.assertTrue(isinstance(cinfo.constraint, LineConstraint))\n # No default callback defined:\n self.assertTrue(cinfo.callback is None)\n\n line, head = self._get_line()\n self.tool.connect(line, head, (90, 50))\n cinfo2 = self.canvas.get_connection(head)\n self.assertTrue(cinfo is not cinfo2, cinfo2)\n self.assertTrue(cinfo2 is None, cinfo2)",
"def testPointSystem():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n registerPlayer(\"MewTwo\")\n standings = playerStandings()\n [id1, id2, id3, id4, id5] = [row[0] for row in standings]\n reportMatch(id1, id2)\n reportMatch(id3, id4, True)\n reportMatch(id5, id5, False, True)\n reportMatch(id1, id5)\n reportMatch(id3, id4)\n reportMatch(id2, id2, False, True)\n reportMatch(id1, id3)\n reportMatch(id5, id2)\n reportMatch(id4, id4, False, True)\n standings = playerStandings()\n if not (standings[0][0]==id2 and standings[0][2]==2 and\n standings[1][0]==id4 and standings[0][2]==2 and\n standings[2][0]==id3 and standings[0][2]==2 and\n standings[3][0]==id5 and standings[0][2]==2 and\n standings[4][0]==id1 and standings[0][2]==2):\n raise ValueError(\n \"Points are not tallied correctly.\"\n )\n\n print \"4. Points are tallied correctly.\"",
"def test_draw(self):\n pencil = m.Pencil('yo')\n\n self.assertEqual(pencil.draw(), 'Drawing with a pencil')",
"def test_drawWire(self):\n\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('L',(640,480),'white')\n\n drawWire(canvas, regularPolygon(3, np.array([160, 120]), 50))\n drawWire(canvas, regularPolygon(4, np.array([480, 120]), 90))\n drawWire(canvas, regularPolygon(5, np.array([420, 360]), 60))\n drawWire(canvas, regularPolygon(6, np.array([160, 360]), 80))\n drawWire(canvas, regularPolygon(7, np.array([320, 160]), 70))\n\n \"\"\" saving the file and closing it \"\"\"\n\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)",
"def test_drawing_torch():\r\n torch = pytest.importorskip(\"torch\")\r\n\r\n x = torch.tensor(0.1, requires_grad=True)\r\n y = torch.tensor([0.2, 0.3], requires_grad=True)\r\n z = torch.tensor(0.4, requires_grad=True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev, interface=\"torch\")\r\n def circuit(p1, p2=y, **kwargs):\r\n qml.RX(p1, wires=0)\r\n qml.RY(p2[0] * p2[1], wires=1)\r\n qml.RX(kwargs[\"p3\"], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n result = qml.draw(circuit)(p1=x, p3=z)\r\n expected = \"\"\"\\\r\n 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩ \r\n 1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩ \r\n\"\"\"\r\n\r\n assert result == expected",
"def test_robot(r, c):\n return r['x'] == c or r['y'] == c\n \n \n \n \n \n \n \n \n \n # Make sure tests run when this module is run",
"def test_oscillating_network(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [NOR1, I1] = names.lookup([\"Nor1\", \"I1\"])\n # Make NOR gate\n devices.make_device(NOR1, devices.NOR, 1)\n\n # Connect the NOR gate to itself\n network.make_connection(NOR1, None, NOR1, I1)\n\n assert not network.execute_network()",
"def test_01_lighting(self):",
"def test_planning():\n\n joints1 = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n joints2 = [4.80, 2.92, 1.00, 4.20, 1.45, 1.32]\n\n\n path_planner = PathPlanner(\"manipulator\")\n\n print path_planner.group.get_end_effector_link()\n\n while True:\n raw_input(\"Press Enter to move to position 1\")\n plan = path_planner.plan_to_config(joints1)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)\n\n raw_input(\"Press Enter to move to position 2\")\n plan = path_planner.plan_to_config(joints2)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)",
"def test(simulation=False):\n\tsimulation = False\n\tif simulation:\n\t\tdyn.enable_vrep()\n\t\n\tctrl = init_ctrl()\n\n\tif simulation:\n\t\tpeter = SymbiotSpidey(ctrl)\n\telse:\n\t\tpeter = Spidey(ctrl)\n\n\tif simulation:\n\t\tctrl.start_sim()\n\n\tpeter.compliant = False\n\tprint peter.legs_references\n\n\tleg = peter.legs[0]\n\tpos = leg.position()\n\tpos = Vector3D(pos.x+6, pos.y, pos.z)\n\tleg.move(pos)\n\tctrl.wait(200)\n\tprint pos.x, leg.position().x, pos.x == leg.position().x\n\n\tpeter.compliant = True\n\n\tif simulation:\n\t\tctrl.stop_sim()",
"def test_change_color_of_the_device__true():",
"def test_7_replay_4(self):\n self._execute_replay_nr(4)\n\n self.grid.add_pawn(5, 'H')\n self.grid.add_pawn(3, 'B')\n self.grid.add_pawn(2, 'H')\n self.grid.add_pawn(1, 'B')\n self.grid.add_pawn(1, 'H')\n\n # self.grid.print_grid()\n # print(self.minmaxBot_7.choose_move(self.grid))",
"def test_actionWithTargetInAdjacentDarkRoom(self):\n self.otherRoom = objects.Thing(store=self.store, name=u'Elsewhere')\n objects.Container.createFor(self.otherRoom, capacity=1000)\n objects.Exit.link(self.location, self.otherRoom, u'west')\n self.player.moveTo(self.otherRoom)\n self.observer.moveTo(self.otherRoom)\n self.assertCommandOutput(\n \"wear pants\",\n [commandutils.E(u\"Who's that?\")],\n [])",
"def draw():",
"def test_change_color_of_the_device__false():"
]
| [
"0.70418996",
"0.59007996",
"0.5837641",
"0.5707822",
"0.5655482",
"0.5596955",
"0.55728096",
"0.5569884",
"0.55201656",
"0.5503052",
"0.5479844",
"0.5465956",
"0.544724",
"0.5422527",
"0.54156864",
"0.54148895",
"0.5414744",
"0.540316",
"0.53964335",
"0.53924996",
"0.53898317",
"0.53886056",
"0.5361735",
"0.53597456",
"0.5347636",
"0.5317931",
"0.5297236",
"0.5289812",
"0.5288971",
"0.5288066"
]
| 0.66358155 | 1 |
Test circuit with global phase | def test_global_phase(self):
filename = self._get_resource_path('test_global_phase.tex')
circuit = QuantumCircuit(3, global_phase=1.57079632679)
circuit.h(range(3))
circuit_drawer(circuit, filename=filename, output='latex_source')
self.assertEqualToReference(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = Chi(circuit)\n target = Chi(target)\n self.assertEqual(op, target)",
"def test_run_circuit_oracle(self):\n oracle = QuantumCircuit(2)\n oracle.cz(0, 1)\n list_good_state = [\"11\"]\n grover = Grover(oracle=oracle, good_state=list_good_state)\n ret = grover.run(self._qasm)\n self.assertIn(ret['top_measurement'], list_good_state)",
"def test_constructor(self, circuit):\n assert list(circuit.wires) == [jet.Wire(i, 0, False) for i in range(4)]\n assert list(circuit.operations) == [jet.Operation(jet.Qubit(), [i]) for i in range(4)]",
"def test_policer_handoff_output(self):\n self.policer_handoff_test(Dir.TX)",
"def circuitSat(C):",
"def test_teleport(self):\n self.log.info('test_teleport')\n pi = np.pi\n shots = 2000\n qr = QuantumRegister(3, 'qr')\n cr0 = ClassicalRegister(1, 'cr0')\n cr1 = ClassicalRegister(1, 'cr1')\n cr2 = ClassicalRegister(1, 'cr2')\n circuit = QuantumCircuit(qr, cr0, cr1, cr2, name='teleport')\n circuit.h(qr[1])\n circuit.cx(qr[1], qr[2])\n circuit.ry(pi/4, qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.h(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr[0], cr0[0])\n circuit.measure(qr[1], cr1[0])\n circuit.z(qr[2]).c_if(cr0, 1)\n circuit.x(qr[2]).c_if(cr1, 1)\n circuit.measure(qr[2], cr2[0])\n job = execute(circuit, backend=self.backend, shots=shots, seed_simulator=self.seed)\n results = job.result()\n data = results.get_counts('teleport')\n alice = {\n '00': data['0 0 0'] + data['1 0 0'],\n '01': data['0 1 0'] + data['1 1 0'],\n '10': data['0 0 1'] + data['1 0 1'],\n '11': data['0 1 1'] + data['1 1 1']\n }\n bob = {\n '0': data['0 0 0'] + data['0 1 0'] + data['0 0 1'] + data['0 1 1'],\n '1': data['1 0 0'] + data['1 1 0'] + data['1 0 1'] + data['1 1 1']\n }\n self.log.info('test_teleport: circuit:')\n self.log.info(circuit.qasm())\n self.log.info('test_teleport: data %s', data)\n self.log.info('test_teleport: alice %s', alice)\n self.log.info('test_teleport: bob %s', bob)\n alice_ratio = 1/np.tan(pi/8)**2\n bob_ratio = bob['0']/float(bob['1'])\n error = abs(alice_ratio - bob_ratio) / alice_ratio\n self.log.info('test_teleport: relative error = %s', error)\n self.assertLess(error, 0.05)",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def test_new():\n from qiskit import BasicAer\n from qiskit.aqua.algorithms import Grover\n from qiskit.aqua.components.oracles import LogicalExpressionOracle\n\n expr = \"your logical expression goes here\"\n algorithm = Grover(LogicalExpressionOracle(expr))\n backend = BasicAer.get_backend('qasm_simulator')\n result = algorithm.run(backend, seed=101110)\n print(result)",
"def test_integration(self):\n\n m = 5 # number of wires in A\n M = 2**m\n\n xmax = np.pi # bound to region [-pi, pi]\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.sin(xs[i]) ** 2\n r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])\n\n A_wires = [0, \"a\", -1.1, -10, \"bbb\"]\n target_wire = \"Ancilla\"\n wires = A_wires + [target_wire]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\"]\n\n def fn():\n qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=A_wires)\n r_unitary(qml.RY, r_rotations, control_wires=A_wires[::-1], target_wire=target_wire)\n\n qmc_circuit = qml.quantum_monte_carlo(\n fn, wires=wires, target_wire=target_wire, estimation_wires=estimation_wires\n )\n\n with qml.queuing.AnnotatedQueue() as q:\n qmc_circuit()\n qml.probs(estimation_wires)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape = tape.expand(depth=2)\n\n assert all(\n not isinstance(op, (qml.MultiControlledX, qml.templates.QFT, qml.tape.QuantumScript))\n for op in tape.operations\n )\n\n dev = qml.device(\"default.qubit\", wires=wires + estimation_wires)\n res = dev.execute(tape)\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.QuantumMonteCarlo(\n probs, func, target_wires=wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n res_expected = circuit()\n assert np.allclose(res, res_expected)",
"def test_policer_handoff_input(self):\n self.policer_handoff_test(Dir.RX)",
"def test_unitary_gate_real(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_real_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_real_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def test_circuit(circuit, n):\n a = circuit_to_state(ABPWrapper, n, circuit)\n b = circuit_to_state(AndersWrapper, n, circuit)\n assert a == b",
"def test_normal_circuit(self):\n filename = self._get_resource_path('test_normal.tex')\n qc = QuantumCircuit(5)\n for qubit in range(5):\n qc.h(qubit)\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))",
"def test_simple_hadamard(self):\n N = 1\n H_d = sigmaz()\n H_c = sigmax()\n qc = QubitCircuit(N)\n qc.add_gate(\"SNOT\", 0)\n\n # test load_circuit, with verbose info\n num_tslots = 10\n evo_time = 10\n test = OptPulseProcessor(N, drift=H_d)\n test.add_control(H_c, targets=0)\n tlist, coeffs = test.load_circuit(\n qc, num_tslots=num_tslots, evo_time=evo_time, verbose=True)\n\n # test run_state\n rho0 = qubit_states(1, [0])\n plus = (qubit_states(1, [0]) + qubit_states(1, [1])).unit()\n result = test.run_state(rho0)\n assert_allclose(fidelity(result.states[-1], plus), 1, rtol=1.0e-6)",
"def test_small_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('small')",
"def test_circuit(self):\n num_qubits = 3\n strike_price = 0.5\n bounds = (0, 2)\n ecd = EuropeanCallDeltaObjective(\n num_state_qubits=num_qubits, strike_price=strike_price, bounds=bounds\n )\n\n # map strike_price to a basis state\n x = (strike_price - bounds[0]) / (bounds[1] - bounds[0]) * (2 ** num_qubits - 1)\n comparator = IntegerComparator(num_qubits, x)\n\n self.assertTrue(Operator(ecd).equiv(comparator))",
"def test_py_compile_condition(self):\n self._test_py_compile('coin')",
"def test_construct_subcircuit(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n def circuit(a, b, c):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(c, wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n tapes = circuit.metric_tensor(1.0, 1.0, 1.0, only_construct=True)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # third parameter subcircuit\r\n assert len(tapes[2].operations) == 4\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n # Phase shift generator\r\n assert isinstance(tapes[2].operations[3], qml.QubitUnitary)",
"def test_scenario(timestep_per_pi, int_method):\n\n #determine BC and IC\n x0 = 0.0 #init pos\n v0 = 1.0 #init vel\n t0 = 0.0 #start-time\n tn = 4.0*np.pi #end-time\n tau = timestep_per_pi*np.pi #timesteps\n n = (tn-t0)/tau + 1 #number of timesteps\n \n time = np.linspace(t0, tn, n) #time-array\n\n #acceleration of point particle with k=m=1\n acc1 = lambda x,v,t: -1.0*x #function must take three arguments!\n\n pos, vel, time = integrate_time(func=acc1,\n init=(x0,v0),\n timearray=time,\n method=int_method)\n\n #analytical solutions\n pos_an = np.sin(time)\n vel_an = np.cos(time)\n\n return time, pos, pos_an, vel, vel_an",
"def pswitchon(chan) :\n s.phaseSwitching(True, chan)",
"def test_cx_equivalence_0cx(self, seed=0):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=6)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))",
"def self_defined_noisy_circuit() -> 'QEnv':\n # Create environment\n env = QEnv()\n # Choose backend Baidu local simulator\n env.backend(BackendName.LocalBaiduSim2)\n\n # Number of qubits, no larger than 20 \n num_qubit = 13\n # Number of gates in each for loop\n gate_num = 3 # Depth of circuit = num_qubit * gate_num\n\n assert num_qubit > 2\n assert gate_num > 2\n\n # Initialize a QCompute circuit\n q = env.Q.createList(num_qubit)\n\n # A noisy random H + CX + RX circuit\n for i in range(num_qubit - 1):\n H(q[i])\n CX(q[i], q[i + 1])\n # Random rotation angles\n rotation_list = [random.uniform(0, 6.28) for _ in range(gate_num - 2)]\n # random quantum registers\n qreg_list = [random.randint(0, num_qubit - 1) for _ in range(gate_num - 2)]\n for i in range(gate_num - 2):\n RX(rotation_list[i])(q[qreg_list[i]])\n\n # Measure with the computational basis\n MeasureZ(*env.Q.toListPair())\n\n # Define noise instances \n # Define a Bit Flip noise instance\n bfobj = BitFlip(0.1)\n # Define a 2-qubit Depolarizing noise instance\n dpobj = Depolarizing(2, 0.1)\n\n # Add noises\n env.noise(['H', 'RX'], [bfobj])\n env.noise(['CX'], [dpobj])\n\n return env",
"def tests_truth():\n circ_m = ccxtest(4)\n print(circ_m)\n circ_m = crootnxtest(4)\n print(circ_m)\n circ_m = oracletest(4)\n print(circ_m)\n circ_m = ccx_otest(4)\n print(circ_m)",
"def globalphase_compiler(self, gate, args):\n pass",
"def globalphase_compiler(self, gate, args):\n pass",
"def test_unitary_gate_complex(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_complex_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_complex_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))",
"def test_is_simulating(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n decoy.when(mock_engine_client.state.config.ignore_pause).then_return(True)\n assert subject.is_simulating()",
"def test_teleport(self):\n from qiskit.circuit.library import U3Gate\n filename = self._get_resource_path('test_teleport.tex')\n qr = QuantumRegister(3, 'q')\n cr = ClassicalRegister(3, 'c')\n qc = QuantumCircuit(qr, cr)\n # Prepare an initial state\n qc.append(U3Gate(0.3, 0.2, 0.1), [qr[0]])\n # Prepare a Bell pair\n qc.h(qr[1])\n qc.cx(qr[1], qr[2])\n # Barrier following state preparation\n qc.barrier(qr)\n # Measure in the Bell basis\n qc.cx(qr[0], qr[1])\n qc.h(qr[0])\n qc.measure(qr[0], cr[0])\n qc.measure(qr[1], cr[1])\n # Apply a correction\n qc.z(qr[2]).c_if(cr, 1)\n qc.x(qr[2]).c_if(cr, 2)\n qc.measure(qr[2], cr[2])\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)"
]
| [
"0.66650254",
"0.6191711",
"0.6108052",
"0.6046065",
"0.5985339",
"0.5931292",
"0.5901644",
"0.5862877",
"0.58613193",
"0.5844447",
"0.58427745",
"0.58308804",
"0.5826411",
"0.57839686",
"0.57804453",
"0.57281876",
"0.5726908",
"0.57202667",
"0.56702447",
"0.5621255",
"0.5610256",
"0.557839",
"0.5570927",
"0.5543693",
"0.55218613",
"0.55218613",
"0.5521464",
"0.5511216",
"0.5486556",
"0.54705185"
]
| 0.76438737 | 0 |
Register a new Activity Stream feed URL to be updated every `interval` seconds. | def register(self, url, interval=300):
param_d = {
'url': url,
'interval': interval, # seconds
}
r = self._send_request('feeds/register', param_d, http_post=False)
# Return True on success.
if 'result' in r and r['result'] == 'success':
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_rss(url):",
"def add_feed(self, url, feed):\n print \"Adding the podcast: %s\" % url\n self.t.click(\"Sidebar\")\n self.shortcut('n')\n time.sleep(2)\n type(url + \"\\n\")\n time.sleep(10) #give it 10 seconds to add and update the feed\n self.click_podcast(feed)\n time.sleep(3)",
"def for_url(self, url):\n if url is None or url == '':\n raise BadURLException('Did you forget to provide a feed URL?')\n def txn():\n feed = RegisteredFeed.get_by_key_name(url)\n if feed is None:\n u = urlparse( url )\n q = parse_qs( u.query )\n if u.scheme != 'http' or u.netloc != 'rss.netflix.com' or 'id' not in q:\n raise BadURLException('Invalid Netflix feed URL was provided')\n feed = RegisteredFeed(\n key_name = url,\n id = q['id'][0],\n feed_type = u.path,\n rand = random.random()\n )\n feed.put()\n return feed\n feed = db.run_in_transaction(txn)\n if feed.slug is None:\n feed.slug = get_slug()\n feed.put()\n return feed",
"def register(url, interval=300):\n return Client.get_client().register(url, interval=interval)",
"def live_url(self, live_url):\n\n self._live_url = live_url",
"def add_by_url(self, feed_url, name=None):\n feed_data = {\"url\": feed_url}\n if name:\n feed_data['name'] = name\n else:\n f = feedparser.parse(feed_url)\n feed_data['name'] = f.feed.title\n feed = Feed(feed_data, self)\n feed._save()\n self.feeds.append(feed)",
"def updateOneFeed(self):\n feeds = backend.Feed.query.order_by(\"check_date\").limit(1).all()\n if feeds:\n feed = feeds[0]\n print feed.check_date\n # Only check if it has not been checked in at least 10 minutes\n if (datetime.datetime.now() - feed.check_date).seconds > 600:\n print \"Scheduled update of: \",feed.xmlurl\n fetcher_in.put(['update', feed.xmlurl, feed.etag, feed.check_date])",
"def subscribe(receiver, updateInterval=10):",
"def save_new_rss_subscription_task(feed_obj):\n save_new_rss_subscription(feed_obj)\n logger.info(\"Entries for new Feed subcription\")",
"def subscribe(receiver, updateInterval=None):",
"def storeFeeds(self, url, feeds):\n for feed in feeds:\n _date = time.localtime()\n if 'published_parsed' in feed:\n _date = feed['published_parsed']\n date = datetime(_date.tm_year, _date.tm_mon, _date.tm_mday)\n doc = {\n '_id': md5_new(feed.id).hexdigest(),\n 'title': feed.title,\n 'date': date,\n 'link': feed.link,\n 'summary': feed.summary,\n 'type': url,\n 'status': 'new',\n }\n try:\n self.feedsCol.insert(doc)\n except DuplicateKeyError:\n pass",
"def watch(self, url):\n self.__url = url\n self.downtime_info = None\n self.__timer.start()",
"def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. \n updates[str(item.link)] = a_time \n return (total, updates)",
"def feed_link_decorator(context, feed):\n for item in feed.items:\n current_link = item['link']\n # print(current_link)\n new_link = current_link + FUD_DEFAULT['parameters']\n item['link'] = new_link\n # print(item)\n return feed",
"def monitor_urls_task():\n monitor_urls()",
"def workAFeed(feed):\n print(\"::working \",feed)\n\n # add http\n if feed.find(\"http\") == -1:\n feed = \"http://\" + feed\n print (\"::feed=\",feed)\n\n return feed",
"def create_stream_feeder(context=None):\n return StreamFeeder(context)",
"def increase_scan_interval(hass):\n hue_sensor_base.SensorManager.SCAN_INTERVAL = datetime.timedelta(days=365)",
"def generate_feeds():\n os.makedirs(Config.FEED_ROOT_PATH, exist_ok=True)\n use_batching = Config.DAILY_DIGEST is not None\n\n while True:\n _generate_feeds_once(use_batching=use_batching)\n interval = _interval_between_generating_feeds(Config.REFRESH_INTERVAL_SECONDS, Config.DAILY_DIGEST)\n logging.info('Sleeping %ss before attempting to generate feeds again.', interval)\n time.sleep(interval)",
"def start_recording_feed(stream):\n recording_log_msg = 'recording feed of ' + stream\n formatted_date = datetime.datetime.now().strftime(\"%I:%M:%S_%B_%d_%Y\")\n return feed_connection_check(stream, formatted_date, recording_log_msg)",
"def update_activity():\n pass",
"def feed_link(self):\n return self.url.replace(\"http://\", \"feed://\")",
"def gnews(self):\n\t\tfeed_url = self.get_feed()\n\t\tfeed_data = feedparser.parse(feed_url)\n\t\tprint(\"\")\n\t\ttype_tiny = pyshorteners.Shortener()\n\t\tfor data in feed_data[\"items\"]:\n\t\t\ttiny_url = type_tiny.tinyurl.short(data[\"link\"])\n\t\t\t#tiny_url = tinyurl.create_one(data[\"link\"])\n\t\t\tprint('\\033[33m' + data[\"title\"] + \" : \" + Style.RESET_ALL + tiny_url)\n\t\t\tprint(\"\")",
"def test_url_with_streams(self):\n streams = 'recentchange'\n e = EventStreams(streams=streams)\n self.assertEqual(\n e._url, 'https://stream.wikimedia.org/v2/stream/' + streams)\n self.assertEqual(e._url, e.url)\n self.assertEqual(e._url, e.sse_kwargs.get('url'))\n self.assertIsNone(e._total)\n self.assertEqual(e._streams, streams)",
"def stream_changed(self, uri):\n pass",
"def register_url_callback(self, pattern, callback):\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n\n self._url_callbacks[pattern] = callback",
"def run_rss(self):\n\n pass",
"def register_auto_refresh(self):\n self._auto_refresh_task = asyncio.create_task(self._auto_refresh())",
"def stream_rss(request):\n return render_rss(\n request=request,\n annotations=_annotations(request),\n rss_url=request.route_url(\"stream_rss\"),\n html_url=request.route_url(\"stream\"),\n title=request.registry.settings.get(\"h.feed.title\") or _(\"Hypothesis Stream\"),\n description=request.registry.settings.get(\"h.feed.description\")\n or _(\"The Web. Annotated\"),\n )",
"def create_streaming_url(StackName=None, FleetName=None, UserId=None, ApplicationId=None, Validity=None, SessionContext=None):\n pass"
]
| [
"0.58947414",
"0.57712126",
"0.57435876",
"0.5610155",
"0.5598217",
"0.55441993",
"0.55131596",
"0.5490333",
"0.54477894",
"0.53591835",
"0.53310436",
"0.5251577",
"0.5234932",
"0.52232367",
"0.5207392",
"0.5129045",
"0.5116691",
"0.5088758",
"0.50820553",
"0.506711",
"0.50538003",
"0.50303274",
"0.5030225",
"0.5005958",
"0.49858803",
"0.49706644",
"0.49444515",
"0.4931813",
"0.49182707",
"0.49075356"
]
| 0.61889106 | 0 |
Remove a certain URL from the list of Activity Stream feeds registered earlier. | def unregister(self, url):
param_d = {
'url': url,
}
r = self._send_request('feeds/unregister', param_d, http_post=False)
# Return True on success.
if 'result' in r and r['result'] == 'success':
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_seen(self, url):\n self.seen.delete(url)",
"def remove(self, urls):\n path = \"authSettings/exemptedUrls?action=REMOVE_FROM_LIST\"\n return self._session.post(path, urls)",
"def remove_urls(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"http\\S+\", \"\", tweet[\"text\"])\n novos_tweets.append(texto)\n\n return novos_tweets",
"def remove_link():",
"async def remove(self, ctx: Context, url: str):\n try:\n index = int(url)\n if 0 < index <= len(self.urls):\n removed_url = self.urls[index - 1]\n del self.urls[index - 1]\n self.write_vac()\n await ctx.channel.send('Removed <{}> from checker.'.format(removed_url))\n else:\n await ctx.channel.send('{} is not a valid index.'.format(index))\n except ValueError:\n if url in self.urls:\n self.urls.remove(url)\n self.write_vac()\n await ctx.channel.send('Removed <{}> from checker.'.format(url))\n else:\n await ctx.channel.send('<{}> is not registered to checker.'.format(url))",
"def delete_feeds(self, feed_url: Optional[str] = WILDCARD, feed_title: Optional[str] = WILDCARD,\n category: Optional[str] = WILDCARD) -> int:\n feedlist = self.feedlist\n return feedlist.remove_feeds(feed_title, feed_url, category)",
"def purge_dead(self):\n if self.live == None:\n return\n\n remove = []\n\n for s in self.streams:\n if not s.remote_location in self.live:\n remove.append(s)\n\n while len(remove) > 0:\n s = remove.pop()\n del s",
"def rmFeed(feed,filepath):\n print(\"::removing feed\")\n with open(filepath,mode='r', encoding='utf-8') as f:\n jconfig = json.load(f)\n\n with open(filepath,mode='w', encoding='utf-8') as feedsjson:\n entry = {'url':feed}\n print(\"::feeds=\",jconfig[1]['feeds'])\n jconfig[1]['feeds'].append(entry)\n\n print(json.dumps(jconfig,indent=2))\n json.dump(jconfig,feedsjson)",
"def getUrls(url):\n f = requests.get(url)\n p = MyParser()\n p.feed(f.text)\n list_of_urls = p.output_list\n #deal with possible strange None values\n list_of_urls = [url for url in list_of_urls if url is not None]\n for url in list_of_urls:\n if 'http' not in url: list_of_urls.remove(url)\n return list_of_urls",
"def remove_link(self, dest):\n for i, link in enumerate(self.runscript.links):\n if link[1] == dest:\n del self.runscript.links[i]\n break",
"def erase_captured_urls(url_list):\n if gs.local:\n erase_captured_urls_local(url_list)\n else:\n erase_captured_urls_aws(url_list)",
"def unshorten(self, url):\n h = requests.get(url)\n stack = [i.url for i in h.history]\n stack.append(h.url)\n return stack",
"def removing_screen_names_repetition(urls):\n\n screen_name = set()\n for url in urls:\n nom = extract_screen_name_from_twitter_url(url)\n if nom:\n screen_name.add(nom)\n return screen_name",
"def remove_connection(self, addr, port):\n location = (addr,port)\n remove = []\n for stream in self.streams:\n if stream.remote_location == location:\n remove.append(stream)\n\n for r in remove:\n self.streams.remove(r)\n try:\n r.close()\n except:\n pass\n if r in self.connecting:\n self.connecting.remove(r)",
"def clear_latest_url(self):\n if os.path.isfile(self.HTTPD_LATEST_URL_FILE):\n os.remove(self.HTTPD_LATEST_URL_FILE)\n logger.debug('deleted latest URL file %s' % (self.HTTPD_LATEST_URL_FILE))",
"def removeFromDownloadQueue(self, _src):\n for dl in self.downloadQueue:\n if _src in dl['src']:\n self.downloadQueue.pop(self.downloadQueue.index(dl))\n return",
"def unsubscribe(self, *rss_feeds):\n [self.subscriptions.remove(feed) for feed in rss_feeds if feed in self.subscriptions]\n self.save()",
"def remove_feed(self, reference: str) -> None:\n self.logger.debug(f\"Remove feed called with: reference='{reference}'\")\n message = self.message_factory.make_feed_removal(reference)\n\n if not self.connectivity_service.is_connected():\n self.logger.warning(\n \"Not connected - not sending remove feed request\"\n )\n self.message_queue.put(message)\n return\n\n if not self.connectivity_service.publish(message):\n self.logger.warning(f\"Failed to publish message: {message}\")\n self.message_queue.put(message)",
"def remove_urls(text):\n pass",
"def _remove_not_last_stream_calendar_entries(client_id, match_id, now):\n\t# If needed, remove a CalendarEntry for each user who starred the streaming user.\n\tuser_ids_cursor = session.query(StarredStreamer.user_id)\\\n\t\t\t.filter(StarredStreamer.streamer_id == client_id)\n\tuser_ids_cursor = (row.user_id for row in user_ids_cursor)\n\t_multi_decrement_num_user_stars(user_ids_cursor, match_id, now)",
"def unregister(url):\n return Client.get_client().unregister(url)",
"def remove_streaming(self):\n self.streaming = None",
"def remove(url: str):\n authenticated = credentials.authenticate(url)\n REMOVER_REGISTRY.get_handler(authenticated.scheme).remove(authenticated)",
"def removeurl(wordlist):\n newlist=[]\n for w in wordlist:\n phrases=str(w[0]).split()\n for phrase in phrases:\n if(phrase.startswith('http') is True):\n phrase=\"\"\n newlist.append((phrases,w[1])) \n return newlist",
"def clear_recent_urls():\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:\n recent_dict = json.load(recent_urls_json)\n for key in recent_dict.keys():\n recent_dict[key] = []\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json', 'w') as fp:\n json.dump(recent_dict, fp, sort_keys=True, indent=4)",
"def remove_url(tweet):\n return re.sub(r\"http\\S+\", \"URL\", tweet)",
"def extract_url_download(update: Update, context: CallbackContext) -> None:\r\n received_text = update.message.text\r\n yt_urls = get_link_text(received_text)\r\n yt_urls_msg = update.message.reply_text(pretty_url_string(yt_urls), disable_web_page_preview=True)\r\n if len(yt_urls) > 0:\r\n for url in yt_urls:\r\n if 'list=' in url:\r\n print(\"dshgshj\")\r\n\t\t\t\t# download_playlist_url(update, context, url)\r\n else:\r\n download_url(update, context, url)\r\n context.bot.delete_message(message_id=yt_urls_msg.message_id, chat_id=yt_urls_msg.chat_id)",
"def remove_links(self, item):\r\n if item.get('link'):\r\n item.pop('link')\r\n if item.get('links'):\r\n item.pop('links')\r\n return item",
"def delete_registry(self) -> None:\n \n self.view_registry()\n links = self.load_links()[0]\n try:\n url_to_delete = links[abs(int(input(\"Enter no. of URL to delete: \")))]\n except IndexError:\n print('Item not found - Nothing was deleted')\n return\n with open(URL_FILE, 'w') as f:\n for link in links:\n if(link != url_to_delete):\n f.write(link+'\\n')",
"def unlink(self):\n for activity in self:\n if activity.state != 'draft':\n raise ValidationError(_('You cannot delete activity'))\n return super(inagro_crop_activity, self).unlink()"
]
| [
"0.7107925",
"0.64206517",
"0.6121294",
"0.6053596",
"0.60072714",
"0.59609747",
"0.5941415",
"0.59172726",
"0.5895897",
"0.5816972",
"0.5728807",
"0.5713048",
"0.5704622",
"0.5679299",
"0.56752235",
"0.56640065",
"0.5656824",
"0.56449866",
"0.5618883",
"0.55948067",
"0.55839646",
"0.55725807",
"0.5546351",
"0.55139464",
"0.5508048",
"0.55036646",
"0.5486502",
"0.5468086",
"0.5457773",
"0.54206645"
]
| 0.65047264 | 1 |
Prints sorted items of the list data structure, formatted using the rows and columns parameters | def print_sorted_list(data, rows=0, columns=0, ljust=10):
if not data:
return
if rows:
# column-wise sorting
# we must know the number of rows to print on each column
# before we print the next column. But since we cannot
# move the cursor backwards (unless using ncurses library)
# we have to know what each row with look like upfront
# so we are basically printing the rows line by line instead
# of printing column by column
lines = {}
for count, item in enumerate(sorted(data)):
lines.setdefault(count % rows, []).append(item)
for key, value in sorted(lines.items()):
for item in value:
print (item.ljust(ljust)),
#print()
elif columns:
# row-wise sorting
# we just need to know how many columns should a row have
# before we print the next row on the next line.
for count, item in enumerate(sorted(data), 1):
print (item.ljust(ljust)),
if count % columns == 0:
print()
else:
print (sorted(data)) # the default print behaviour | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def displaySorted(self):\r\n os.system('cls')\r\n for i in self.sortedList:\r\n print(str(i[2]) + \": \" + i[0].showRule())",
"def print_row(width, items):\n def fmt_item(x):\n if isinstance(x, np.ndarray):\n assert x.ndim == 0\n x = x.item()\n if isinstance(x, float):\n rep = \"%.3f\" % x\n else:\n rep = str(x)\n return rep.ljust(width)\n\n print(\" | \".join(fmt_item(item) for item in items))",
"def show(matrix):\n print(\"\",end=\" \")\n for k in sorted(matrix.keys()):\n print(k,end=\" \")\n \n for i,row in sorted(matrix.items()):\n print(\"\\n\" + str(i),end=\" \")\n for j in row:\n print(matrix[i][j],end=\" \")\n print()",
"def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()",
"def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])",
"def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))",
"def print_row():\n print('| | | | |')",
"def print_row():\n print('| | |')",
"def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])",
"def display_row(self, row):\n column_sizes = self.get_column_sizes()\n sys.stdout.write('|')\n for size, column in zip(column_sizes, row):\n sys.stdout.write(' ' + str(column).ljust(size) + ' |')\n print",
"def show(self, lst=None):\n\n def f(v):\n if np.size(v) == 1:\n return str(v)\n elif np.size(v) > 3:\n return str(np.shape(v))\n elif np.ndim(v) > 1:\n return str(np.shape(v))\n else:\n return str(v)\n\n def buffer(l, m, n=25):\n end = len(l) - 1\n buffered = []\n for i in range(m):\n if i > end:\n buffered.append(\"\".ljust(n))\n else:\n buffered.append(l[i].ljust(n))\n return buffered\n\n lst = self if lst is None else lst\n out = [IND.ljust(7) + INDEP.ljust(60) + DEP.ljust(60)]\n for row in lst:\n ind = [str(row[IND])]\n dep = [k + \": \" + f(v) for k, v in row[DEP].items()]\n indep = [k + \": \" + f(v) for k, v in row[INDEP].items()]\n m = max(len(dep), len(indep), 1)\n ind = buffer(ind, m, 7)\n dep = buffer(dep, m, 60)\n indep = buffer(indep, m, 60)\n for a, b, c in zip(ind, indep, dep):\n out.append(a + b + c)\n out.append(\"\")\n return \"\\n\".join(out)",
"def pprint(self):\n pad = 4\n upper = [x/10 for x in range(GRID_SIZE)]\n lower = [x%10 for x in range(GRID_SIZE)]\n print(' ' * pad + ' '+' '.join(map(str,upper)))\n print(' ' * pad + ' '+' '.join(map(str,lower)))\n print(' ' * pad + pprint_header())\n for row in range(GRID_SIZE):\n print(' {0:2}'.format(row), end=' ')\n for col in range(GRID_SIZE):\n cell = self.get_cell_rc(row, col)\n if cell is None:\n print('.', end=' ')\n elif cell == 0:\n print(' ', end=' ')\n else: # cell == 1\n print('#', end=' ')\n print() # end line\n print(' ' * pad + pprint_header(delim='+', pad='-'))",
"def print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))",
"def print(self):\n tiles = list(map(list, zip(*self.tiles))) # transposed\n print('tiles = [')\n for row in tiles:\n print('\\t' + repr(row))\n print(']')\n print('props = [')\n for prop in self.props:\n print('\\t' + repr(prop))\n print(']')",
"def __print_row(vol_id, vol_name, used, used_lim, excl, excl_lim):\n print(\"{:s}{:s} | {:s}{:s} | {:s} | {:s}{:s} | {:s} | {:s}{:s}\".format(\n vol_id, \" \"*(__collen[\"id\"]-len(vol_id)),\n vol_name, \" \"*(__collen[\"name\"]-len(vol_name)),\n used, \" \"*(__collen[\"used_lim\"]-len(used_lim)), used_lim,\n excl, \" \"*(__collen[\"excl_lim\"]-len(excl_lim)), excl_lim))",
"def printlist(x, width=70, indent=4, file=None):\n\n blanks = ' ' * indent\n # Print the sorted list: 'x' may be a '--random' list or a set()\n print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,\n initial_indent=blanks, subsequent_indent=blanks),\n file=file)",
"def display_data(data):\n\n index = 0\n for details in data:\n index += 1\n print(\"{5:1}{0}. {1:10} in {2:15} priority {3:>3}\".format(index, *details))",
"def print_sorted(self):\n new_list = []\n for x in sorted(self):\n new_list.append(x)\n print(new_list)",
"def print_list(self, items):\n\t\tstrtype = unicode if self.encoding else bytes\n\t\titems = map(strtype, items)\n\t\twidth = self.get_width()\n\t\tlines = []\n\t\tsep = strtype(' ')\n\t\tfor item in items:\n\t\t\tif lines:\n\t\t\t\tnew = lines[-1] + sep + item\n\t\t\t\tif len(new) <= width:\n\t\t\t\t\tlines[-1] = new\n\t\t\t\t\tcontinue\n\t\t\tlines.append(item)\n\t\tself.write(strtype('\\n').join(lines))",
"def print_sorted_table_by_value(table):\n\td_view = [ (v,k) for k,v in table.iteritems() ]\n\td_view.sort(reverse=True) # natively sort tuples by first element\n\tfor v,k in d_view:\n\t\tprint \"%d: %s\" % (v,k)",
"def print_table(self, items, fields):\r\n formats = []\r\n borders = []\r\n for f in fields:\r\n length = max(len(f),\r\n max([len(self.string(getattr(i, f))) for i in items]))\r\n justify = '>' if isinstance(getattr(\r\n items[0], f), int) or f == 'size' or f == 'reward' else '<'\r\n formats.append('{:' + justify + self.string(length + 2) + '}')\r\n borders.append('-' * length + ' ')\r\n row_format = u''.join(formats)\r\n headers = [f + ' ' for f in fields]\r\n print(row_format.format(*headers))\r\n print(row_format.format(*borders))\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]\r\n try:\r\n print(row_format.format(*i_fields))\r\n except UnicodeEncodeError:\r\n print(row_format.format(*i_fields).encode('utf-8'))",
"def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt",
"def display(self):\n for row in self.tile_rows:\n print(row)",
"def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return",
"def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)",
"def print_row(row,writer,x):\n sys.stdout.write(unichr(0x2503))\n for n in xrange(row.shape[0]-1):\n writer(row[n],Width,(x,n))\n sys.stdout.write(unichr(0x2502))\n if row.shape[0] > 0:\n writer(row[-1],Width,(x,row.shape[0]-1))\n sys.stdout.write(unichr(0x2503) + '\\n')",
"def print(listing: typing.Iterable[typing.Any]) -> None:\n listing = tuple(str(i) for i in listing)\n if not listing:\n return\n width = max(len(i) for i in listing) + 2\n count = min(shutil.get_terminal_size().columns // width, len(listing))\n for row in itertools.zip_longest(*(listing[i::count] for i in range(count)), fillvalue=''):\n print(*(f'{c:<{width}}' for c in row), sep='')",
"def print_row(n, version, sz, last_modified):\n print (get_pad1(n) + str(n) + \". \" + sz + \" \" * 13 + last_modified)",
"def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))",
"def print_sorted(self):\n sort_list = sorted(self)\n print(sort_list)"
]
| [
"0.6569567",
"0.6550878",
"0.6524419",
"0.64498854",
"0.6407262",
"0.63954085",
"0.62853277",
"0.6282342",
"0.6258152",
"0.6221673",
"0.6194902",
"0.6172055",
"0.6146181",
"0.61409026",
"0.6130194",
"0.61223423",
"0.6114192",
"0.60976356",
"0.6074979",
"0.60746396",
"0.60661197",
"0.6034243",
"0.6033798",
"0.60255635",
"0.60241807",
"0.5991277",
"0.5987073",
"0.59847724",
"0.59829104",
"0.59597456"
]
| 0.73106694 | 0 |
Get text templates for sms | def get_templates(self, template_name, **kwargs):
text = render_template("{template}.txt".format(template=template_name), **kwargs)
return text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_text(contents, template, is_sms = True):\n \n max_chars = 160 - len(SMS_SALUTATION)\n \n contents = replace_dollar_signs(contents)\n sms = template.substitute(contents)\n \n if is_sms:\n if len(sms) > max_chars:\n template_length = len(sms)-get_content_length(contents)\n new_contents = reduce_contents(contents, \n max_chars - template_length) \n sms = SMS_SALUTATION + template.substitute(new_contents)\n else:\n sms = SMS_SALUTATION + sms\n return sms",
"def _get_message_template(search_results: SearchResults) -> Text:\n msg_template = ''\n if search_results.checked_post.post_type == 'image':\n if len(search_results.matches) == 0:\n msg_template = DEFAULT_COMMENT_OC\n elif len(search_results.matches) == 1:\n msg_template = DEFAULT_REPOST_IMAGE_COMMENT_ONE_MATCH\n else:\n msg_template = DEFAULT_REPOST_IMAGE_COMMENT\n\n if search_results.checked_post.post_type == 'link':\n if len(search_results.matches) == 0:\n msg_template = LINK_OC\n else:\n msg_template = LINK_REPOST\n\n return msg_template",
"def get_templates(self, template_name, **kwargs):\n html = render_template(\"{template}.html\".format(template=template_name), **kwargs)\n text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n return html, text",
"def textfrombodies(self) -> str:\n type_priority = [\"plain\", \"html\", \"other\"] # TODO: Make configurable\n\n for texttype in type_priority:\n if texttype == \"plain\" and texttype in self.textbodies:\n \"\"\"Text is plain, so it can be used verbatim\"\"\"\n return self.textbodies[texttype]\n if texttype == \"html\" and texttype in self.textbodies:\n \"\"\"HTML text. Convert to markup with html2text and remove extra spaces\"\"\"\n text = html2text.html2text(self.textbodies[texttype])\n # Remove every second newline which is added to distinguish between paragraphs in Markdown, but makes\n # the jira ticket hard to read.\n return re.sub(\"(\\n.*?)\\n\", \"\\g<1>\", text)\n if texttype == \"other\" and len(self.textbodies):\n # If no other text is found, return the first available body if any.\n return self.textbodies[list(self.textbodies.keys())[0]]\n return \"The email contained no text bodies.\"",
"def get_custom_phrases():\n return [x[0] for x in all_topics if x[2] == \"1\"]",
"def read_template():\n\n text_msg = \"\"\"${PERSON_NAME} - Calling Campaign Summary - ${DATE}:\\n\n Total Called = ${TOTAL_CALLED}\\n\n Answered = ${ANSWERED}\\n\n Not Answered = ${NOT_ANSWERED}\\n\n Declines = ${DECLINES}\\n\n Remaining = ${REMAINING}\\n\n \\n\n Thank You.\"\"\"\n\n return Template(text_msg)",
"def get_formatted_messages(formats, label, context):\r\n format_templates = {}\r\n for format in formats:\r\n # conditionally turn off autoescaping for .txt extensions in format\r\n if format.endswith(\".txt\"):\r\n context.autoescape = False\r\n format_templates[format] = render_to_string((\r\n 'notification/%s/%s' % (label, format),\r\n 'notification/%s' % format), context_instance=context)\r\n return format_templates",
"def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)",
"def T(request):\n\treturn all_templates[request.param]",
"def get_text(self, caller):\n \n if caller == \"txt_search\":\n search_text = self.builder.get_object(\"txt_search\").get_text()\n return search_text\n elif caller == \"txt_tweet\":\n tweet_text = self.builder.get_object(\"txt_tweet\").get_text() \n return tweet_text",
"def test_admin_sms_template_view_list(self):\n response = self.client.get('/admin/sms_module/smstemplate/')\n self.failUnlessEqual(response.status_code, 200)",
"def get_rendered_text(self, context):\n missing = set()\n for required in utils.get_variable_names_from_template(self):\n if required not in context:\n missing.add(required)\n if missing:\n raise MissingContext(missing)\n tmpl = utils.PyratempTemplate(self.text)\n context = context.copy()\n context[\"locale\"] = self.language.iso_code\n return tmpl.render(context)",
"def get_templates(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/templates\").json()",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def generate(self, text):\n self.__params['text']=text\n self._data = requests.get(self.TTS_URL, params=self.__params,\n stream=False).iter_content()",
"def get_text(self):",
"def schedule_text():",
"def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string",
"def get_plain_sentences(self, type):\n if type == \"translation\":\n fn = self.translationfile()\n elif type == \"source\":\n fn = self.sourcefile()\n elif type == \"reference\":\n fn = self.referencefile()\n else:\n raise ValueError\n with open(fn, \"r\") as f:\n lines = f.readlines()\n return lines",
"def test_template():\n \n # Keywords and values to be filled into the template\n items = {'item_1': 'First', 'long_keyword_item_2': 'Second',\n 'space_3': 'Third Third Third ', 'item_4': 'Fourth',\n 'item_5': None}\n \n sender = '[email protected]'\n receiver = '[email protected]'\n result = 'First Second\\nThird Third Third Fourth\\n'\n \n # TEST_DIR = os.path.dirname(os.path.abspath(__file__))\n template = os.path.abspath(os.path.join(TEST_DIR, 'test_template.txt'))\n\n msg = TemplateMessage(sender=sender, email=receiver, template=template,\n **items)\n assert msg.body == result",
"def generate_sentence(self, t=20):\n result = [\"START\", \"START\"]\n\n for i in range(t-3):\n if result[-1] == \"STOP\":\n break\n\n match = {}\n for k,v in self.trigramcounts.items():\n if k[0] == result[-2] and k[1] == result[-1]:\n match[k[-1]] = v\n r = np.random.choice(list(match.keys()), p=np.array(list(match.values())) / np.sum(np.array(list(match.values()))))\n result.append(r)\n\n return result",
"def t(message):\n\n tpl = string.Template(message)\n return tpl.substitute(country=settings.COUNTRY_NAME, language=settings.LANGUAGE_NAME)",
"def extract_phrases(data,model):\n phrases = []\n alignment = model.alignment_idx\n for i in range(len(data)):\n sent_phrases = phrase_extraction(data[i][\"fr\"],data[i][\"en\"],alignment[i])\n phrases.append(sent_phrases)\n return phrases",
"def read_in_templates(path, email_object=None):\n import os\n templates = {}\n\n for fle in os.listdir(path):\n with open(os.path.join(path, fle)) as _f:\n raw = \"\\n\".join(_f.readlines())\n templates[fle] = raw\n\n if email_object:\n email_object.use_templates(templates)\n else:\n return templates",
"def find_templates(self, name):\n script = (\n 'Get-SCVMTemplate -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVMTemplate(system=self, raw=tmpl_data) for tmpl_data in data]\n return [SCVMTemplate(system=self, raw=data)]",
"def generate(self):\n\n letter = ''\n\n for template_name in self.templates:\n template = self.templates[template_name]\n\n # Process all replacements ({...} syntax).\n replacements = re.finditer(REPLACEMENT_REGEX, template)\n for replacement in replacements:\n match = replacement.group()\n key = replacement.group(1)\n\n template = template.replace(match, self._lookup(key))\n\n # Process all conditionals (<...> syntax).\n conditionals = re.finditer(CONDITIONAL_REGEX, template)\n for conditional in conditionals:\n match = conditional.group()\n\n # Process each condition within the conditional ([...]... syntax).\n conditions = re.finditer(CONDITION_REGEX, match)\n for index, condition in enumerate(conditions):\n skill_type = condition.group(2)\n skill = condition.group(3)\n text = condition.group(4)\n\n # If the skill is empty, treat it as a catch all case.\n if not skill or skill in self._lookup(skill_type):\n template = template.replace(match, text)\n break\n\n letter += template\n\n return letter"
]
| [
"0.66384625",
"0.5941724",
"0.57845145",
"0.57653564",
"0.5687092",
"0.56843555",
"0.5681145",
"0.55595624",
"0.5449239",
"0.5425735",
"0.5397242",
"0.53627807",
"0.5276596",
"0.5267229",
"0.5267229",
"0.5267229",
"0.5267229",
"0.5267229",
"0.52559733",
"0.5247302",
"0.5245663",
"0.5245499",
"0.52422667",
"0.5238992",
"0.5216245",
"0.51998913",
"0.5123117",
"0.51214075",
"0.5120131",
"0.5107626"
]
| 0.6188881 | 1 |
Creates a new IR Learned Code from an IRCode and an IRCodeInfo. | def __init__(self, code, codeInfo):
self.Code = code #IRCode
self.CodeInfo = codeInfo #IRCodeInfo
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __new__(cls, code: RegisterCode, info: str):\n if not isinstance(code, RegisterCode):\n raise TypeError(\"Code must be one of RegisterCode\")\n return super().__new__(cls, code, str(info))",
"def recreate_code(code, codes_offsets, codes_lines):\n offsets = codes_offsets[id(code)]\n lines = codes_lines[id(code)]\n new_lnotab = reconstruct_lnotab(code.co_firstlineno, offsets, lines)\n new_consts = []\n for const in code.co_consts:\n if isinstance(const, CodeType):\n new_consts.append(recreate_code(const, codes_offsets, codes_lines))\n else:\n new_consts.append(const)\n\n if PY3:\n new_code = CodeType(\n code.co_argcount, code.co_kwonlyargcount, code.co_nlocals,\n code.co_stacksize, code.co_flags, code.co_code, tuple(new_consts),\n code.co_names, code.co_varnames, code.co_filename, code.co_name,\n code.co_firstlineno, new_lnotab, code.co_freevars, code.co_cellvars\n )\n else:\n new_code = CodeType(\n code.co_argcount, code.co_nlocals,\n code.co_stacksize, code.co_flags, code.co_code, tuple(new_consts),\n code.co_names, code.co_varnames, code.co_filename, code.co_name,\n code.co_firstlineno, new_lnotab, code.co_freevars, code.co_cellvars\n )\n return new_code",
"def __init__(self, codeInfo=None):\r\n #Defaults for Members\r\n self.Encoding = IREncoding.Unknown\r\n \"\"\"Data Encoding.\r\n \r\n This defaults to Space encoding.\r\n \"\"\"\r\n \r\n self.Length = IRCodeLength.Unknown\r\n \"\"\"Code length.\r\n \r\n This defaults to Constant.\r\n \"\"\"\r\n \r\n self.BitCount = 0\r\n \"\"\"Code data length in bits.\"\"\"\r\n \r\n self.Gap = 0\r\n \"\"\"Gap length in us.\"\"\"\r\n \r\n self.Trail = 0\r\n \"\"\"Trailing pulse length in us.\"\"\"\r\n \r\n self.Header = None #list of ints\r\n \"\"\"Header data array in us.\"\"\"\r\n \r\n self.One = [0, 0]\r\n \"\"\"Pulse-Space encoding for a '1', in us.\"\"\"\r\n \r\n self.Zero = [0, 0]\r\n \"\"\"Pulse-Space encoding for a '0', in us.\"\"\"\r\n \r\n self.MinRepeat = 1\r\n \"\"\"Minimum number of times to repeat the code.\"\"\"\r\n \r\n self.ToggleMask = None #IRCode\r\n \"\"\"Mask of bits that should be toggled every time the code is sent.\r\n \r\n This is usually used in combination with MinRepeat.\r\n \"\"\"\r\n \r\n self.Repeat = None #list of ints\r\n \"\"\"Repeat code, in us.\"\"\"\r\n \r\n self.CarrierFrequency = 38000\r\n \"\"\"Carrier frequency\"\"\"\r\n \r\n self.DutyCycle = 33\r\n \"\"\"Duty cycle.\"\"\"\r\n #end Defaults\r\n \r\n if codeInfo != None:\r\n if (codeInfo.bitCount % 8) == 0:\r\n dataBytes = int(codeInfo.bitCount / 8) + 0\r\n else:\r\n dataBytes = int(codeInfo.bitCount / 8) + 1\r\n \r\n self.Encoding = codeInfo.encoding\r\n self.Length = codeInfo.length\r\n self.BitCount = codeInfo.bitCount\r\n self.Gap = codeInfo.gap\r\n self.Trail = codeInfo.trail\r\n \r\n \r\n if codeInfo.header[0] != 0:\r\n self.Header = [codeInfo.header[0], codeInfo.header[1]]\r\n else:\r\n self.Header = None\r\n \r\n self.Zero = [codeInfo.zero[0], codeInfo.zero[1]]\r\n self.One = [codeInfo.one[0], codeInfo.one[1]]\r\n \r\n self.MinRepeat = codeInfo.min_repeat\r\n self.CarrierFrequency = codeInfo.carrierFrequency\r\n self.DutyCycle = codeInfo.dutyCycle\r\n \r\n self.ToggleMask = IRCode(codeInfo.toggle_mask, codeInfo.bitCount)\r\n \r\n repCount = 0\r\n while codeInfo.repeat[repCount] != 0:\r\n repCount = repCount + 1\r\n \r\n if repCount > 0:\r\n self.Repeat = []\r\n for i in range(repCount):\r\n self.Repeat.append(codeInfo.repeat[i])\r\n else:\r\n self.Repeat = None",
"def compile_ir(engine, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n # Now add the module and make sure it is ready for execution\n engine.add_module(mod)\n engine.finalize_object()\n return mod",
"def compile_IR(ir):\n triple = re.search(\n r'target\\s+triple\\s*=\\s*\"(?P<triple>[-\\d\\w\\W_]+)\"\\s*$',\n ir, re.M).group('triple')\n\n # Create execution engine\n target = llvm.Target.from_triple(triple)\n target_machine = target.create_target_machine()\n backing_mod = llvm.parse_assembly(\"\")\n engine = llvm.create_mcjit_compiler(backing_mod, target_machine)\n\n # Create LLVM module and compile\n mod = llvm.parse_assembly(ir)\n mod.verify()\n engine.add_module(mod)\n engine.finalize_object()\n engine.run_static_constructors()\n\n return engine",
"def __init__(self, coder):\n self.coder = coder",
"def __init__(self, code: str, ref_smiles: str, ref_resn: str):\n self.code = code\n self.ref_resn = ref_resn\n self.ref_smiles = ref_smiles\n # create self.template: namedtuple('template', ['all', 'no_ligand', 'only_ligand']) of pdb blocks\n self.template = self.prepare_template()\n # creates self.target: Chem.Mol corrected based on self.ref_smiles\n self.target = self.get_target_from_pdb()",
"def __init__(self, source_code, bytecode=None, address=None, abi=SOPHIA, client=None):\n if client is None:\n client = epoch.EpochClient()\n self.client = client\n self.abi = abi\n self.source_code = source_code\n self.bytecode = bytecode\n self.address = address\n if self.bytecode is None:\n self.bytecode = self.compile(self.source_code)",
"def __init__(__self__, *,\n code: Optional[pulumi.Input[Union[str, 'Code']]] = None):\n if code is not None:\n pulumi.set(__self__, \"code\", code)",
"def lei_code(self, lei_code: str):\n\n self._lei_code = lei_code",
"def init_IR_codes():\n IR_codes.update( {b'FF629D' : say_temp} ) # Say temperature status\n IR_codes.update( {b'84FF9375' : say_temp} ) # Say temperature status\n #IR_codes.update( {b'FFA857' : volume_inc} ) # increase volume\n #IR_codes.update( {b'FFE01F' : volume_dec} ) # reduce volume\n IR_codes.update( {b'FF906F' : toSecureMode} ) # Will be noBodyHome\n IR_codes.update( {b'FFC23D' : ultra.switch} ) # On/off radio\n IR_codes.update( {b'BF09C35C' : ultra.switch} ) # On/off radio (big)\n #IR_codes.update( {b'8BE68656' : holeNightLightAuto} )\n #IR_codes.update( {b'B21F28AE' : hole_night_light.setManualStateOff} )\n #IR_codes.update( {b'A6B1096A' : hole_night_light.setManualStateOn} )\n IR_codes.update( {b'24014B0' : noolite_hole_set_off} )\n IR_codes.update( {b'8FC212DB' : noolite_hole_set_on} )\n IR_codes.update( {b'7960556F' : noolite_hole_set_auto} )\n #IR_codes.update( {b'FF10EF' : holeNightLightAuto} )\n #IR_codes.update( {b'FF38C7' : hole_night_light.setManualStateOff} )\n #IR_codes.update( {b'FF5AA5' : hole_night_light.setManualStateOn} )\n IR_codes.update( {b'FF30CF' : noolite_hole_set_off} )\n IR_codes.update( {b'FF18E7' : noolite_hole_set_on} )\n IR_codes.update( {b'FF7A85' : noolite_hole_set_auto} )",
"def __init__(self, code, state):\n self.code = code\n self.state = state",
"def create_social_code(self, email, phone, password):\r\n # code = \"4512\"\r\n code = self.random_code(settings.CODE_LENGTH)\r\n activation = Activation(phone=phone,\r\n email=email,\r\n to_reset=False,\r\n password=make_password(password),\r\n code=code)\r\n activation.save()\r\n return activation",
"def decode(self, code):\n raise NotImplementedError",
"def from_text(cls, text):\n raw = decode_b64(json.loads(text))\n raw[0] = Code(raw[0]) # make it an object of type Code\n return cls(*raw)",
"def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code",
"def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code",
"def create_unique_instruction(\n self, file: str, instr: Instr, code_object_id: int, node_id: int, offset: int\n ) -> UniqueInstruction:\n code_meta = self._known_code_objects[code_object_id]\n return UniqueInstruction(\n file,\n instr.name,\n code_object_id,\n node_id,\n code_meta,\n offset,\n instr.arg,\n instr.lineno,\n )",
"def create_module_from_code(\n module_name: str, source_code: str, origin: str = None\n) -> ModuleType:\n module = create_empty_module(module_name, origin)\n exec(source_code, module.__dict__)\n return module",
"def update_code(self, new_code):\n self.code = new_code\n\n # Fill in the rest",
"def _create_naics_map():\n # Read in list of industry topics.\n naics_codes = pd.read_excel(\n \"https://www.census.gov/eos/www/naics/2017NAICS/2-6%20digit_2017_Codes.xlsx\"\n )\n naics_codes = naics_codes.iloc[:, [1, 2]]\n naics_codes.columns = ['NAICSCode', 'Title']\n\n # Replace all ranges with individual rows. E.g. 31-33 -> 31, 32, 33.\n def range_to_array(read_code):\n if isinstance(read_code, str) and \"-\" in read_code:\n lower, upper = read_code.split(\"-\")\n return list(range(int(lower), int(upper) + 1))\n return read_code\n\n naics_codes = naics_codes.dropna()\n naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)\n naics_codes = naics_codes.explode('NAICSCode')\n\n # Add unclassified code which is used in some statistical variables.\n naics_codes = naics_codes.append(\n {\n \"NAICSCode\": 99,\n \"Title\": \"Nonclassifiable\"\n }, ignore_index=True)\n\n # Query for only two digit codes.\n short_codes = naics_codes[naics_codes['NAICSCode'] < 100]\n short_codes = short_codes.set_index(\"NAICSCode\")\n short_codes = short_codes['Title'].to_dict()\n\n # Read in overview codes.\n overview_codes = pd.read_csv(\n \"https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv\"\n )\n overview_codes.columns = [\"NAICSCode\", \"Title\"]\n overview_codes = overview_codes.set_index(\"NAICSCode\")\n overview_codes = overview_codes['Title'].to_dict()\n\n # Combine the two sources of codes.\n NAICS_MAP = {}\n combined_codes = short_codes\n combined_codes.update(overview_codes)\n\n # Rename industries into Pascal case.\n for code, orig_name in combined_codes.items():\n NAICS_MAP[str(code)] = standard_name_remapper(orig_name)\n\n # Other edge cases.\n NAICS_MAP['00'] = 'Unclassified'\n return NAICS_MAP",
"def apply_coder(text, coder):\n ### TODO.",
"def ircode(self, code):\n if code.lower() in self.codes:\n self._sendCommand('IRCODE ' + self.codes[code.lower()])\n else:\n print 'No such code: %s' % code",
"def __init__(self, code, p=None, SNR=None, PAM=None, bias_mode='R'):\n self.code = code\n if p is not None:\n self.p = p\n self.compute_metric_increment = BSC_metric_increment(code.n, p)\n elif SNR is not None:\n self.SNR = SNR\n self.compute_metric_increment = AWGN_2PAM_metric_increment(SNR) \\\n if PAM is None else AWGN_PAM_metric_increment(PAM, SNR)\n else:\n raise ValueError(\"p or SNR must be given\")\n\n if bias_mode == 'R':\n self.bias = self.code.rate()\n elif bias_mode == 'E0':\n self.bias = self.E0(1)\n elif isinstance(bias_mode, Number):\n self.bias = bias_mode\n else:\n raise ValueError(\n \"{} is not 'R', 'E0' or a number\".format(bias_mode))\n\n self.bias_sanity_check()\n\n self.nodes = PriorityQueue()\n root = StackDecoder.Node(self.code)\n root.metric = 0\n self.nodes.put(root)\n\n # The first node in each layer\n self.first_nodes = [root]",
"def __init__(self, code, title, credit, term):\n \n assert code[2] in Course.CODENUM, \\\n \"first number must be one of {}.\".format(Course.CODENUM)\n\n assert len(code) == 5 and code[:1].isalpha() and code[2:].isdigit(), \\\n \"Must be in format XXnnn, where X is a upper case letter and n is a number.\"\n assert title[0].isalpha() and title[1].isalpha(), \\\n \"Course title must only consist of letters.\"\n assert float(credit) in Course.CREDIT, \\\n \"credit value must be one of {}.\".format(Course.CREDIT)\n assert term[4:6] in Course.TERMNUM, \\\n \"term must be in form YYYYSS, where SS is one of {}\". format(Course.TERMNUM)\n \n self.code = code\n self.title = title\n self.credit = credit\n self.term = term\n \n return",
"def build_coder(shift):\n ### TODO.",
"def get_reconstruction_from_code(self, codes):\n return self.sess.run(self.reconstructed,\n feed_dict={self.z: codes})",
"def __init__(self, code):\n if isinstance(code, str):\n if not self._iscode(code):\n raise ValueError(\"String is not a valid LoC code\")\n\n self.orig_code = \"\"\n self.section = \"\"\n self.topic = \"\"\n self.sub_topic = \"\"\n self.cutter = \"\"\n self.version = 0\n self._year = 0\n self.work_letter = \"\"\n self.copy = 0\n self.other = \"\"\n\n self.orig_code = code\n code = code.split()\n\n # If there's a section name, pull that out first\n if code[0].isalpha():\n self.section = code.pop(0)\n\n # Now, get the topic and sub-topic if they exist. Also cutter if it's dotted.\n whole_topic = code.pop(0)\n whole_topic = whole_topic.split(\".\")\n self.topic = whole_topic.pop(0) +\\\n (\".{}\".format(whole_topic.pop(0)) if len(whole_topic) and whole_topic[0].isnumeric() else \"\")\n if len(whole_topic):\n self.sub_topic = whole_topic.pop(0)\n if len(whole_topic):\n self.cutter = whole_topic[0]\n\n # Now, pull out the cutter if it exists separately.\n if len(code) and is_topic_or_cutter(code[0]):\n self.cutter = code.pop(0)\n\n # Remainder can come in any order. We'll figure out which it is each iteration.\n for item in code:\n if item.startswith(\"v.\"):\n self.version = int(item[2:])\n elif item.startswith(\"c.\"):\n self.copy = int(item[2:])\n elif is_year(item):\n self._year, self.work_letter = _split_year(item)\n elif self.section != \"\" and item.isalpha():\n self.section = item\n else:\n if self.other:\n self.other += \" \"\n self.other += item\n elif isinstance(code, LOC):\n self.orig_code = code.orig_code\n self.section = code.section\n self.topic = code.topic\n self.sub_topic = code.sub_topic\n self.cutter = code.cutter\n self.version = code.version\n self._year = code._year\n self.work_letter = code.work_letter\n self.copy = code.copy\n self.other = code.other\n else:\n raise TypeError(\"Input must be a string LoC code or LoC object\")",
"def update_code(self, new_code):\n\n new_code = self.code",
"def add_code(self, id, code):\n self.codes[id] = code"
]
| [
"0.55522573",
"0.52894145",
"0.5265566",
"0.5017019",
"0.4987227",
"0.49768665",
"0.4955158",
"0.49432",
"0.49169284",
"0.49076864",
"0.48669353",
"0.48622426",
"0.4811263",
"0.48017865",
"0.4796054",
"0.47858155",
"0.47858155",
"0.47830468",
"0.47628322",
"0.4738658",
"0.4724764",
"0.47013894",
"0.469824",
"0.46915755",
"0.46800715",
"0.4664488",
"0.4637995",
"0.4629126",
"0.45984748",
"0.45929208"
]
| 0.6802893 | 0 |
Transmits a repeat of a previously transmitted code. This must be called within the gap period after transmitting the original code. This is required for codes that use separate sequences for the code and the repeat identifier. | def transmitRepeat(self):
try:
result = PhidgetLibrary.getDll().CPhidgetIR_TransmitRepeat(self.handle)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_next_packet():\n #\"global\" required here to be able to read and write to SEQUENCE \n global SEQUENCE\n data = sys.stdin.buffer.read(DATA_SIZE)\n if (len(data) > 0):\n rtt_start = time.time()\n msg_obj = {\"sequence\": SEQUENCE, \"data\": b64encode(data).decode(), \"ack\": True, \"eof\": False}\n if handle_packet_send(msg_obj):\n log(f\"Sequence number: \" + str(SEQUENCE))\n SEQUENCE += len(data)\n log(f'updating sender seq: {SEQUENCE}')\n return PacketInfo(msg_obj, rtt_start)\n return False",
"def sendData(packet: FrameStruct, repeats: int) -> NoReturn:\n ftype = b'\\x08\\x00'\n dur = b'\\x00\\x00'\n # random hex stream, could be used as additional space of bits\n src = b'\\x08\\x00\\x27\\x8e\\x75\\x44'\n # broadcast address is used to stop certain drivers retransmitting frames\n dst = b'\\xff\\xff\\xff\\xff\\xff\\xff'\n bssid = src\n # semi unique id, annoyingly not usable due to lack of bits for this appli\n sn = (random.randint(0, 4096))\n sn = sn << 4\n seq = sn.to_bytes(4, 'little')\n\n # generate 80211 header\n header80211 = ftype + dur + dst + src + bssid + seq\n\n # combine header with other data to create valid frame\n data = globaldat.RADIO_TAP + header80211 + b\"\\x72\\x6f\\x62\\x6f\\x74\" + \\\n packet # attach radiotap headers, 80211 headers and yodel payload\n #globaldat.bytesPrint(data)\n #print(repeats)\n for i in range(repeats): # re-transmmit message a couple times\n globaldat.yodelSocket.send(data) # send the data",
"def send_signal():\n print(\"... run {0} transmission\".format(SendSignal.__SIGNAL_SETTINGS['repeats']))\n SendSignal.__SIGNAL_OBJ.RFxmit(SendSignal.__SIGNAL_SETTINGS['text_message'] *\n SendSignal.__SIGNAL_SETTINGS['repeats'])\n print('... set USB Dongle idle')\n SendSignal.__SIGNAL_OBJ.setModeIDLE()",
"def send(self, data:bytes):\n packet = Rudp.Packet(self.seq, 0, data)\n packet.timesamp = time()\n self.sendPacket(packet)\n self.seqPlusOne()\n return(packet)",
"def write(self, code):\n yield self.packet().write(code).send()",
"async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)",
"def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()",
"def send_data(self, data, retry=True):\n self.seq_number = RDTSegment.increment(self.seq_number)\n self.send_pkt(data)\n while True:\n try:\n if self.tries == RDTSocket.N_TRIES:\n raise Exception(\"Connection lost\")\n pkt = self.receive_pkt(0)\n except socket.timeout:\n if not retry:\n self.logger.debug(\"got timeout.\")\n raise socket.timeout\n self.logger.debug(f\"got timeout. resending seq_num {self.seq_number}\")\n self.send_pkt(data)\n self.tries += 1\n continue\n\n if pkt.seq_num == self.remote_number and not pkt.ack:\n self.logger.debug(f\"got repeated package. resending ACK. pkt=[{pkt}]\")\n self.send_pkt(ack=1, seq_number=pkt.seq_num)\n\n if pkt.seq_num == self.seq_number and pkt.ack:\n self.logger.debug(f\"got ACK. ending. pkt=[{pkt}]\")\n break\n\n self.tries = 0",
"def MakeRepeat1(self,content):\n return self.register(Repeat1(content,reg=self))",
"def _send_code(self, phone, code, case):\n raise NotImplementedError",
"async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)",
"def wave_tx_repeat():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVGOR, 0, 0))",
"def wave_send_repeat(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVTXR, wave_id, 0))",
"def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)",
"def sendpkt(self, data, retries=10): \n wire_data = self.pack(data).encode()\n self.logger.debug('sending> %s', data) \n self.s.send(wire_data)\n res = self.rxqueue.get()\n while res != '+':\n self.s.send(wire_data)\n res = self.rxqueue.get()\n retries -= 1\n if retries == 0:\n raise ValueError(\"retry fail\")",
"def resend(self, seqno, address=None):\n super(Sender, self).send(self.window.get(seqno), address)",
"def keycode(\n self, keycode: int, action: int = const.ACTION_DOWN, repeat: int = 0\n ) -> bytes:\n return struct.pack(\">Biii\", action, keycode, repeat, 0)",
"def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)",
"def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)",
"def __frame_tx(self,data):\n\n if self._spy_frame_tx is not None:\n self._spy_frame_tx(data)\n\n data=self.__pad(data)\n\n if len(data) < self.other_bufferlen:\n self.com.tx(data)\n else:\n chunks = (len(data)-1) // self.other_bufferlen\n #print(\"__frame_tx: %d full chunks + last\"%chunks,flush=True)\n for i in range(0,chunks):\n self.com.tx(data[i*self.other_bufferlen:(i+1)*self.other_bufferlen])\n self.com.rx_ack()\n self.com.tx(data[chunks*self.other_bufferlen:])\n #print(\"__frame_tx done\",flush=True)",
"def sendRtspRequest(self, requestCode):\r\n\t\tself.rtspSeq = self.rtspSeq + 1\r\n\t\trequestCodetMsg = requestCode + \" \" + self.fileName + \" \" + \"RTSP/1.0\"\r\n\t\trequestSeqMsg = \"\\n\" + \"CSeq:\" + \" \" + str(self.rtspSeq)\r\n\t\trequestPayload = \"\"\r\n\r\n\t\tif (requestCode == self.SETUP):\r\n\t\t\trequestPayload = \"\\n\" + \"Transport\" + \" \" + \"RTP/UDP;\" + \" \" + \"client_port=\" + \" \" + str(self.rtpPort)\r\n\t\r\n\t\telse:\r\n\t\t\trequestPayload = \"\\n\" + \"Session:\" + \" \" + str(self.sessionId)\r\n\r\n\t\trequestPacket = requestCodetMsg + requestSeqMsg + requestPayload\r\n\t\tself.rtspSocket_client.sendall(bytes(requestPacket, \"utf-8\"))\r\n\t\tprint(\"C:\\n\" + requestPacket)",
"def transmitRaw(self, data, gap=0, count=0, carrierFrequency=0, dutyCycle=0):\r\n if count == 0:\r\n count = len(data)\r\n \r\n dataPtr = (c_int * count)()\r\n \r\n for i in range(count):\r\n dataPtr[i] = c_int(data[i])\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_TransmitRaw(self.handle, dataPtr, count, carrierFrequency, dutyCycle, gap)\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)",
"async def repeat(\n text: ('str', 'The content to repeat')\n):\n if not text:\n text = 'nothing to repeat'\n \n return InteractionResponse(text, allowed_mentions = None)",
"def _send_sequence(self):\n # For new processes that may spawn\n _SHARED_SEQUENCES[self.uid] = self.sequence",
"def transfer(self, address, direction, repeats):\n if direction == \"in\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"inlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"outlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n elif direction == \"out\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"outlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"inlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n else:\n pass # return error",
"async def repeat(self, ctx, *, text):\n await ctx.send(text)",
"def repeat(s):\r\n\r\n return s",
"def acknowledge(self, sequence: int):\n ackPacket = Rudp.Packet(self.seq, sequence)\n frame = ackPacket.construct()\n self.seqPlusOne()\n self.socket.sendto(frame, self.client)",
"def send_data_control_experiment(ecg, emg, gsr):\n\ti = 0\n\tj = 0\n\tk = 0\n\twhile True:\n\t\tif i == len(ecg): break\n\t\tskt.send(bytes(ecg[i], 'utf-8'))\n\t\ti += 1\n\t\t# blocking - always wait for ACK before sending the next packet\n\t\t# - can change this and handle out of order packets\n\t\t# ACK = soc.recv(1024)\n\n\t\t# wait for 1 sec before sending next packet\n\t\t# simulate a real time situation\n\t\t# time.sleep(1)\n\n\twhile True:\n\t\tif j == len(emg): break\n\t\tskt.send(bytes(emg[j], 'utf-8'))\n\t\tj += 1\n\n\twhile True:\n\t\tif k == len(gsr): break\n\t\tskt.send(bytes(gsr[k], 'utf-8'))\n\t\tk += 1\n\n\tstart = time.time()\n\tskt.sendall(b'A'*1024)\n\tend = time.time()\n\tprint(end - start)",
"def sendOTP(code):\n # Modify the code here to change from print to any output \n print(\"Your OTP is \" + code + \". Kindly do not share it with anyone\")"
]
| [
"0.63232994",
"0.6089056",
"0.56461316",
"0.56111425",
"0.5511506",
"0.5493618",
"0.5429207",
"0.5380129",
"0.53693646",
"0.5357285",
"0.5306283",
"0.5254036",
"0.5245807",
"0.5197269",
"0.5188506",
"0.51812416",
"0.5173638",
"0.5160593",
"0.5160593",
"0.5154831",
"0.514265",
"0.5131796",
"0.51164746",
"0.5105965",
"0.51036984",
"0.509456",
"0.5091571",
"0.50809735",
"0.5064246",
"0.5062923"
]
| 0.6863742 | 0 |
Reads raw IR data. | def readRaw(self):
count = 2048 #this is as big as the library buffer, so the user doesn't have to poll as often
buf = [] #buffer that will hold the read raw data and be returned to the user
dataPtr = (c_int * count)()
length = c_int()
length.value = count;
try:
result = PhidgetLibrary.getDll().CPhidgetIR_getRawData(self.handle, dataPtr, byref(length))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
for i in range(length.value):
buf.append(dataPtr[i])
return buf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_raw_data(self):\n # Must be set by the user\n raise Exception(\"not implemented\")",
"def read_raw(self):\n return self._FITS.read_raw()",
"def read_data(self):\n raise NotImplementedError",
"def _read_data(self):",
"def read():\n # TODO",
"def readData(self):\n self._readHeader()\n self._readSize()\n self._readComments()\n self._readAllROI()\n self._readDate()\n self._readArray()",
"def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes",
"def read_raw8(self):\n raise NotImplementedError",
"def read(self):\n return self._read(self.rfile)",
"def read(self):\n\n return self.read_raw().rstrip()",
"def raw_data(self):\n return self._buf[self.data_offset():self.data_offset() + self.size()]",
"def read_sensor_raw(self):\n return self.read_sensor()",
"def read_sensor_raw(self):\n return self.read_sensor()",
"def read(self):\n pass",
"def _readString(self, rawData, offset=0):\n\n strLen, = unpack(\n self.byteFormat, rawData[\n offset:offset + self.byteFormatLen])\n\n return rawData[self.byteFormatLen:][:strLen]",
"def raw_data(self):\n return self.tif_file.raw_data()",
"def read_ir(self):\n return self._read16(0x24, little_endian=True)",
"def raw_data(self):\n return self._raw_data",
"def raw_data(self):\n return self._raw_data",
"def read(self):\r\n return RecordIO.Reader.do_read(self._fp, self._codec)",
"def read(self):",
"def read(self, sacc_data: sacc.Sacc) -> None:",
"def read(self, sacc_data: sacc.Sacc) -> None:",
"def read(self, sacc_data: sacc.Sacc) -> None:",
"def read(self):\n raise NotImplementedError",
"def readData(self, rawstring, datatype):\n data = rawstring[:-1] #remove last NULL byte\n\n if datatype == ERROR:\n if is_python3():\n data2 = data.tobytes()\n data = data2.decode('utf-8')\n return data\n elif datatype == STRING or datatype == DOUBLE:\n # try to convert data to a more appropriate type\n if is_python3():\n data2 = data.tobytes()\n data = data2.decode('utf-8')\n\n try:\n data = int(data)\n except:\n try:\n data = float(data)\n except:\n pass\n\n return data\n elif datatype == ASSOC:\n return rawtodictonary(rawstring)\n elif SpecArray.isArrayType(datatype):\n #Here we read cols and rows... which are *supposed* to be received in the header!!!\n #better approach: data contains this information (since it is particular to that data type)\n return SpecArray.SpecArray(rawstring, datatype, self.rows, self.cols)\n else:\n raise TypeError",
"def read_data(self):\n temperature_data = RS485.read_temperature(self.data_path)\n humidity_data = RS485.read_humidity(self.data_path)\n moisture_data = RH_010_GN.read_moisture(self.data_path)\n o2_data = LB_856.read_o2(self.data_path)\n co2_data = LB_856.read_co2(self.data_path)\n\n self.data = [temperature_data, humidity_data, moisture_data, o2_data, co2_data]",
"def read_binary(self):\n with self.open(\"rb\") as f:\n return f.read()",
"def test_ipam_rirs_read(self):\n pass",
"def _readByte(self, rawData, offset=0):\n val, = unpack(\n self.byteFormat, rawData[\n offset:offset + self.byteFormatLen])\n \n return val"
]
| [
"0.73799086",
"0.6494946",
"0.64116246",
"0.6363456",
"0.6314987",
"0.6298859",
"0.6213011",
"0.6164756",
"0.61377877",
"0.60885054",
"0.6075769",
"0.6051732",
"0.6051732",
"0.6000561",
"0.5983055",
"0.5955333",
"0.5945794",
"0.59205985",
"0.59205985",
"0.59154874",
"0.59004056",
"0.58985186",
"0.58985186",
"0.58985186",
"0.5888661",
"0.58534706",
"0.5851836",
"0.58388865",
"0.5822788",
"0.58218765"
]
| 0.6902751 | 1 |
Gets the last code that was received. | def getLastCode(self):
codePtr = (c_ubyte * IR_MAX_CODE_DATA_LENGTH)(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
dataLength = c_int(IR_MAX_CODE_DATA_LENGTH)
bitCount = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetIR_getLastCode(self.handle, codePtr, byref(dataLength), byref(bitCount))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
code = IRCode(codePtr, bitCount.value)
return code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_last_event(self):\n return self.last_event_code",
"def getLastLearnedCode(self):\r\n codePtr = (c_ubyte * IR_MAX_CODE_DATA_LENGTH)()\r\n dataLength = c_int(IR_MAX_CODE_DATA_LENGTH)\r\n codeInfo = CPhidgetIR_CodeInfo()\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_getLastLearnedCode(self.handle, codePtr, byref(dataLength), byref(codeInfo))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n else:\r\n learnedCode = IRLearnedCode(IRCode(codePtr, codeInfo.bitCount), IRCodeInfo(codeInfo))\r\n return learnedCode",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def code(self):\n return self._data",
"def _get_last_code_line():\n return max(_code_lines) + 2",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def fetch(self):\n line = self.code[self.ip]\n self.ip += 1\n return line",
"def get_code(self):\n self._payload_to_str()\n return self._str_payload",
"def lastMessageReceived():",
"def code(self):\n return self._getCode()",
"def Code(self):\n if self.force_auto_sync:\n self.get('Code')\n return self._Code",
"def get_last_ack(self):\n\t\treturn self.last_ack",
"def get_last_blockchain_value():\n return blockchain[-1]",
"def get_my_last_event(self):\r\n return self._handler.get_my_last_event()",
"def get_last(self):\n return self.get_block(len(self.chain)-1)",
"def get_last_blockchainvalue(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]",
"def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]",
"def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]",
"def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]",
"def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]",
"def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]",
"def getLast(self):\r\n return self._data[-1]",
"def get_response(self):\n\n response = self.socket.recv(1024)\n code = response.split(\" \")[0]\n message = response[4:]\n\n return int(code), message",
"def getMessageCode(self):\n return self._payload[2]"
]
| [
"0.7467118",
"0.70808333",
"0.6818239",
"0.6818239",
"0.6818239",
"0.6818239",
"0.65389955",
"0.648529",
"0.6458124",
"0.6458124",
"0.6458124",
"0.6458124",
"0.6388842",
"0.638534",
"0.6327297",
"0.6304387",
"0.6252619",
"0.62518626",
"0.6220969",
"0.6149291",
"0.6115518",
"0.6115075",
"0.6086352",
"0.6086352",
"0.6086352",
"0.6086352",
"0.6086352",
"0.5996326",
"0.5993587",
"0.5964522"
]
| 0.7622817 | 0 |
Gets the last code that was learned. | def getLastLearnedCode(self):
codePtr = (c_ubyte * IR_MAX_CODE_DATA_LENGTH)()
dataLength = c_int(IR_MAX_CODE_DATA_LENGTH)
codeInfo = CPhidgetIR_CodeInfo()
try:
result = PhidgetLibrary.getDll().CPhidgetIR_getLastLearnedCode(self.handle, codePtr, byref(dataLength), byref(codeInfo))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
learnedCode = IRLearnedCode(IRCode(codePtr, codeInfo.bitCount), IRCodeInfo(codeInfo))
return learnedCode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLastCode(self):\r\n codePtr = (c_ubyte * IR_MAX_CODE_DATA_LENGTH)(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)\r\n dataLength = c_int(IR_MAX_CODE_DATA_LENGTH)\r\n bitCount = c_int()\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_getLastCode(self.handle, codePtr, byref(dataLength), byref(bitCount))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n else:\r\n code = IRCode(codePtr, bitCount.value)\r\n return code",
"def _get_last_code_line():\n return max(_code_lines) + 2",
"def get_last_event(self):\n return self.last_event_code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def fetch(self):\n line = self.code[self.ip]\n self.ip += 1\n return line",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def code(self):\n return self._code",
"def getLastInstruction(self) -> ghidra.program.model.listing.Instruction:\n ...",
"def final_instr(self):\n\n return self.instr_instances[-1]",
"def code(self):\n return self._data",
"def get_last(self):\n return self.get_block(len(self.chain)-1)",
"def code(self) -> pulumi.Output['outputs.CanaryCode']:\n return pulumi.get(self, \"code\")",
"def get_code(self):\r\n # подключаемся к базе данных хероку, чтобы вытащить крайний ключ-код\r\n engine = db.create_engine('postgresql+psycopg2://vxttrrwzkdeaol:367054ad01122101b1b5d9'\r\n 'ee099e03253d212ec914e330378952dec6c67e5174@ec2-79-125-126-20'\r\n '5.eu-west-1.compute.amazonaws.com/d82qavso2hgauu')\r\n\r\n connection = engine.connect() # устанавливаем соединение\r\n metadata = db.MetaData()\r\n\r\n # из всех существующих таблиц выбираем нужную: 'hola_bottable'\r\n hola_bottable = db.Table('hola_bottable', metadata, autoload=True, autoload_with=engine)\r\n\r\n # Equivalent to 'SELECT * FROM census'\r\n query = db.select([hola_bottable])\r\n ResultProxy = connection.execute(query)\r\n ResultSet = ResultProxy.fetchall() # возвращает список из tuple формата [(id:..., code:...)]\r\n\r\n code = ResultSet[-1][1] # из списка строк выбираем последнюю\r\n return code",
"def original_code(self):\n return self._original_code",
"def code(self):\n return self._getCode()",
"def code(self) -> int:\n return self._code",
"def code(self) -> int:\n return self._code",
"def last_block(self):\n return self.chain[len(self.chain) - 1]",
"def last_block(self):\n return self.chain[-1]",
"def last_block(self):\n return self.chain[-1]",
"def last_run(self):\n return self._last_run",
"def Code(self):\n if self.force_auto_sync:\n self.get('Code')\n return self._Code",
"def get_fullcode(self):\n raise NotImplementedError",
"def code(self) -> str:\n return self._code",
"def code(self) -> str:\n return self._code"
]
| [
"0.70882255",
"0.6817966",
"0.65252477",
"0.6414747",
"0.6414747",
"0.6414747",
"0.6414747",
"0.6366159",
"0.6299188",
"0.6299188",
"0.6299188",
"0.6299188",
"0.6193091",
"0.61307126",
"0.6107682",
"0.60453266",
"0.6043935",
"0.59798914",
"0.5971125",
"0.5961705",
"0.59294826",
"0.59294826",
"0.59096223",
"0.5899316",
"0.5899316",
"0.5889783",
"0.58877987",
"0.58495736",
"0.5839149",
"0.5839149"
]
| 0.8201471 | 0 |
Create new nodes from files only (no CSV), and add media. These objects will have a title (derived from filename), and a config-defined Islandora model, content type, and status. Media use is derived from config as well. | def create_from_files():
logging.info('"Create from files" task started using config file %s', args.config)
file_dir_path = config['input_dir']
files = os.listdir(file_dir_path)
for file_name in files:
filename_without_extension = os.path.splitext(file_name)[0]
if len(filename_without_extension) > 255:
message = 'Truncating the filename "' + filename_without_extension + '" since it exceeds Drupal\'s maximum node title length of 255 characters.'
logging.error(message)
filename_without_extension = filename_without_extension[:255]
islandora_model = set_model_from_extension(file_name, config)
node_json = {
'type': [
{'target_id': config['content_type'],
'target_type': 'node_type'}
],
'title': [
{'value': filename_without_extension}
],
'status': [
{'value': config['published']}
],
'field_model': [
{'target_id': islandora_model,
'target_type': 'taxonomy_term'}
]
}
node_headers = {
'Content-Type': 'application/json'
}
node_endpoint = '/node?_format=json'
node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)
if node_response.status_code == 201:
node_uri = node_response.headers['location']
print('+ Node for "' + filename_without_extension + '" created at ' + node_uri + '.')
logging.info('Node for "%s" created at %s.', filename_without_extension, node_uri)
if 'output_csv' in config.keys():
write_to_output_csv(config, '', node_response.text)
file_path = os.path.join(config['input_dir'], file_name)
media_type = set_media_type(file_path, config)
media_response_status_code = create_media(config, file_name, node_uri)
allowed_media_response_codes = [201, 204]
if media_response_status_code in allowed_media_response_codes:
print('+ ' + media_type.title() + " media for " + filename_without_extension + " created.")
logging.info("Media for %s created.", file_path)
else:
logging.error('Node for "%s" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_media():\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n for row in csv_data:\n row = clean_csv_values(row)\n if not ping_node(config, row['node_id']):\n print(\"Node \" + row['node_id'] + \" not found or not \" +\n \"accessible, skipping adding media.\")\n continue\n\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n node_json_url = config['host'] + '/node/' + row['node_id'] + '?_format=json'\n node_uri = config['host'] + '/node/' + row['node_id']\n node_response = issue_request(config, 'GET', node_json_url)\n if node_response.status_code == 200:\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print(media_type.title() + \" media for \" + row['file'] + \" created and added to \" + node_uri)\n logging.info(\"%s media for %s created and added to %s.\", media_type.title(), row['file'], node_uri)",
"def create():\n logging.info('\"Create\" task started using config file %s', args.config)\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n # Store a dictionary of id_field values: node IDs so we can add child nodes.\n node_ids = dict()\n\n field_definitions = get_field_definitions(config)\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n node_endpoint = config['host'] + '/node?_format=json'\n\n for row in csv_data:\n row = clean_csv_values(row)\n id_field = row[config['id_field']]\n\n # Add required fields.\n node = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': row['title']}\n ],\n 'status': [\n {'value': config['published']}\n ]\n }\n\n # If a node with an ID that matches the current item's\n # 'parent_id' value has just been created, make the item\n # a child of the node.\n if 'parent_id' in row.keys() and row['parent_id'] in node_ids:\n row['field_member_of'] = node_ids[row['parent_id']]\n\n # Add custom (non-required) CSV fields.\n required_fields = ['file', config['id_field'], 'title']\n custom_fields = list(\n set(csv_column_headers) - set(required_fields))\n for custom_field in custom_fields:\n if not isinstance(row[custom_field], str):\n continue\n # Skip updating field if value is empty.\n if len(row[custom_field]) == 0:\n continue\n\n # This field can exist in the CSV to create parent/child\n # relationships and is not a Drupal field.\n if custom_field == 'parent_id':\n continue\n\n # 'langcode' is a core Drupal field, but is not considered a \"base field\".\n if custom_field == 'langcode':\n continue\n\n # Execute field preprocessor scripts, if any are configured. Note that these scripts\n # are applied to the entire value from the CSV field and not split field values,\n # e.g., if a field is multivalued, the preprocesor must split it and then reassemble\n # it back into a string before returning it. Note that preprocessor scripts work only\n # on string data and not on binary data like images, etc. and only on custom fields\n # (so not title).\n if 'preprocessors' in config and len(config['preprocessors']) > 0:\n for field, command in config['preprocessors'].items():\n if field in csv_column_headers:\n output, return_code = preprocess_field_data(config['subdelimiter'], row[field], command)\n if return_code == 0:\n preprocessor_input = copy.deepcopy(row[field])\n row[field] = output.decode().strip()\n logging.info('Preprocess command %s executed, taking \"%s\" as input and returning \"%s\".', command, preprocessor_input, output.decode().strip())\n else:\n message = 'Preprocess command ' + command + ' failed with return code ' + str(return_code)\n logging.error(message)\n sys.exit(message)\n\n # Assemble Drupal field structures for entity reference fields from CSV data. 
For\n # taxonomy terms, target_type is 'taxonomy_term'; for nodes, it's 'node_type'.\n if field_definitions[custom_field]['field_type'] == 'entity_reference':\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n field_vocabs = get_field_vocabularies(config, field_definitions, custom_field)\n if config['subdelimiter'] in row[custom_field]:\n prepared_tids = []\n delimited_values = row[custom_field].split(config['subdelimiter'])\n for delimited_value in delimited_values:\n tid = prepare_term_id(config, field_vocabs, delimited_value)\n tid = str(tid)\n prepared_tids.append(tid)\n row[custom_field] = config['subdelimiter'].join(prepared_tids)\n else:\n row[custom_field] = prepare_term_id(config, field_vocabs, row[custom_field])\n row[custom_field] = str(row[custom_field])\n\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n node[custom_field] = [\n {'target_id': subvalues[0],\n 'target_type': target_type}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Typed relation fields.\n elif field_definitions[custom_field]['field_type'] == 'typed_relation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, 
row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Geolocation fields.\n elif field_definitions[custom_field]['field_type'] == 'geolocation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # For non-entity reference and non-typed relation fields (text, integer, boolean etc.).\n else:\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n first_subvalue = subvalues[0]\n first_subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], first_subvalue)\n node[custom_field] = [{'value': first_subvalue}]\n if len(subvalues) 
> 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n node_headers = {'Content-Type': 'application/json'}\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('Node for \"' + row['title'] + '\" (record ' + id_field + ') created at ' + node_uri + '.')\n logging.info(\"Node for %s (record %s) created at %s.\", row['title'], id_field, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, id_field, node_response.text)\n else:\n logging.error(\"Node for CSV record %s not created, HTTP response code was %s.\", id_field, node_response.status_code)\n continue\n\n # Map ID from CSV of newly created node to its node ID so we can use it for linking child nodes, etc.\n if node_response.status_code == 201:\n node_nid = node_uri.rsplit('/', 1)[-1]\n node_ids[id_field] = node_nid\n\n # If there is no media file (and we're not creating paged content), move on to the next CSV row.\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+No media for ' + node_uri + ' created since its \"file\" field in the CSV is empty.')\n logging.warning(\"No media for %s created since its 'file' field in the CSV is empty.\", node_uri)\n continue\n\n # If there is a media file, add it.\n if 'file' in row:\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n if node_response.status_code == 201:\n # If what is identified in the 'file' field is a file, create the media from it.\n if 'file' in row and len(row['file']) != 0 and os.path.isfile(file_path):\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+' + media_type.title() + \" media for \" + row['file'] + \" created.\")\n logging.info(\"%s media for %s created.\", media_type.title(), row['file'])\n\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+ No file specified in CSV for ' + row['title'])\n logging.info(\"No file specified for %s, so no media created.\", id_field)\n\n if config['paged_content_from_directories'] is True:\n # Console output and logging are done in the create_children_from_directory function.\n create_children_from_directory(config, row, node_nid, row['title'])",
"def createExtnNodes(self):\n for parent, dirs, files in os.walk(self.destndir):\n for fname in files:\n filename = os.path.join(parent, fname)\n if os.path.isfile(filename):\n direntry=parent\n #direntry=parent.replace(self.destndir,'',len(self.destndir))\n #direntry = os.path.basename(os.path.abspath(parent))\n self.appendSrcType(direntry, fname)",
"def import_media(self, path):\n media_vertex = {}\n infile = configparser.ConfigParser()\n infile.read(path, encoding='utf-8')\n # Use the path name for error messages or assignments\n for field in infile.items(\"media\"):\n if (field[0].find(\"photo\") != -1 and\n len(field[0].split(\".\")) == 2):\n # Process a small set of photo credits for all the pandas\n # author = infile.get(\"media\", field[0] + \".author\")\n # if author in self.photo[\"credit\"].keys():\n # self.photo[\"credit\"][author] = self.photo[\"credit\"][author] + 1\n # else:\n # self.photo[\"credit\"][author] = 1\n # Track what the max number of panda photos an object has is\n # test_count = int(field[0].split(\".\")[1])\n # if test_count > self.photo[\"max\"]:\n # self.photo[\"max\"] = test_count\n # Accept the data and continue\n media_vertex[field[0]] = field[1]\n # TODO: track video info for apple counting as well\n else:\n # Accept the data and move along\n media_vertex[field[0]] = field[1]\n self.media.append(media_vertex)\n self.vertices.append(media_vertex)\n self.media_files.append(path)",
"def on_files(self, files, config, **kwargs):\n linked_md_file = File(\n path=self.config[\"path_to_file\"],\n src_dir=self.config[\"path_to_src_dir\"],\n dest_dir=config[\"site_dir\"],\n use_directory_urls=config[\"use_directory_urls\"]\n )\n files.append(linked_md_file)\n return files",
"async def on_post(self, req, resp):\n files = await req.media('files')\n model_contents = files['model']['content']\n model_name = files['model']['filename']\n resp.media = model_registry_client.add_model(model_name, model_contents)",
"def seed():\n for fullname in os.listdir(\"static/examples\"):\n filename, extension = os.path.splitext(fullname)\n relpath = \"static/examples/\" + fullname\n if extension == '.json':\n with open(relpath) as f:\n settings = json.load(f)\n title = settings.get('title')\n short_url = filename\n username = None\n \n meta = db.session.query(models.Metadata).filter_by(short_url=short_url).first()\n\n if meta == None:\n new_graph = models.Graph(settings)\n db.session.add(new_graph)\n db.session.commit()\n \n new_meta = models.Metadata(title, new_graph.id, username, short_url=short_url)\n db.session.add(new_meta)\n db.session.commit()\n \n else:\n graph = db.session.query(models.Graph).filter_by(id=meta.graph_id).first()\n graph.settings = settings\n meta.title=title\n db.session.commit()",
"def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = csv.reader(TextIOWrapper(zf.open(item, 'r'), 'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in 
load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd",
"def test_create_local_metadata(self):\n local_media = {\n 'path': 'test_mp4_short.mp4',\n 'title': 'Test media title',\n 'description': 'Test media description',\n }\n\n media_filename = \"%s/%s\" % (settings.get('base', 'path.local.media'), local_media['path'])\n\n self.model = Media.create(\n client=self.client,\n media_filename=media_filename,\n title=local_media['title'],\n description=local_media['description'],\n )\n\n media_item = Media.get(client=self.client, uuid=self.model.uuid)\n assert media_item.title == local_media['title']\n assert media_item.description == local_media['description']\n #TODO: assert creator is owner",
"def create_default(self):\n\n self.database.lifetime = 604800\n self.database.path_media = '../data/media.db'\n self.database.path_playlist = '../data/playlist.db'\n self.indexing.audio.rules = [IndexerRuleConfig()]\n self.indexing.audio.rules[0].directory = '/mnt/hdd/Audio'\n self.indexing.audio.rules[0].extensions = ['.flac', '.mp3', '.ogg', '.wav']\n self.indexing.audio.rules[0].pattern = '{}/{}/{} {}'.format(\n get_complete_tag(TAG_ARTIST),\n get_complete_tag(TAG_ALBUM),\n get_complete_tag(TAG_NUMBER),\n get_complete_tag(TAG_TITLE))\n self.indexing.image.rules = [IndexerRuleConfig()]\n self.indexing.image.rules[0].directory = '/mnt/hdd/Image'\n self.indexing.image.rules[0].extensions = ['.gif', '.jpg', '.jpeg', '.png']\n self.indexing.image.rules[0].pattern = '{}/{}'.format(\n get_complete_tag(TAG_ALBUM),\n get_complete_tag(TAG_TITLE))\n self.indexing.video.ignore_revisions = False\n self.indexing.video.subtitle_rules = [IndexerRuleConfig()]\n self.indexing.video.subtitle_rules[0].directory = '/mnt/hdd/Video'\n self.indexing.video.subtitle_rules[0].extensions = ['.srt']\n self.indexing.video.subtitle_rules[0].pattern = '{}/Subtitle/{}/{}/{}/{}/{}'.format(\n get_complete_tag(TAG_TITLE),\n get_complete_tag(TAG_QUALITY),\n get_complete_tag(TAG_LANGUAGES),\n get_complete_tag(TAG_LANGUAGE),\n get_complete_tag(TAG_ANY),\n get_complete_tag(TAG_EPISODE_TITLE))\n self.indexing.video.video_rules = [IndexerRuleConfig()]\n self.indexing.video.video_rules[0].directory = '/mnt/hdd/Video'\n self.indexing.video.video_rules[0].extensions = ['.avi', '.flv', '.mkv', '.mp4']\n self.indexing.video.video_rules[0].pattern = '{}/Content/{}/{}/{}/{}'.format(\n get_complete_tag(TAG_TITLE),\n get_complete_tag(TAG_QUALITY),\n get_complete_tag(TAG_LANGUAGES),\n get_complete_tag(TAG_ANY),\n get_complete_tag(TAG_EPISODE_TITLE))\n self.logging.enabled = True\n self.logging.level = 'error'\n self.logging.max_size_bytes = 524288\n self.logging.path = '../data/log.txt'\n self.multimedia.av_player = 'vlc'\n self.multimedia.av_player_path = '/usr/bin/vlc-wrapper'\n self.multimedia.image_viewer = 'feh'\n self.multimedia.image_viewer_path = '/usr/bin/feh'\n self.web.port = 8095",
"def create():\n # for clean test cases, first the available databases will be flushed\n get_redis().flushdb()\n graph = FileStructureProcessor()\n return graph.get_graph()",
"def process_first_x_files(path_signalmedia_json,\n path_newsreader_nafs='',\n start=None,\n end=None):\n if end:\n line_range = range(start, end+1)\n\n news_item = namedtuple('news_item',\n ['signalmedia_json', 'preprocessing'])\n path_template = '{path_newsreader_nafs}/{identifier}.in.naf'\n\n with open(path_signalmedia_json) as infile:\n for counter, line in enumerate(infile, 1):\n\n if end:\n if counter not in line_range:\n continue \n if counter > end:\n break\n\n article = json.loads(line)\n identifier = article['id']\n spacy_naf = spacy_to_naf.text_to_NAF(article['content'], nlp)\n the_preprocessing = {('spacy', spacy_naf)}\n\n if path_newsreader_nafs:\n path_newsreader_naf = path_template.format_map(locals())\n if os.path.exists(path_newsreader_naf):\n newsreader_naf = etree.parse(path_newsreader_naf)\n the_preprocessing.add(('newsreader', newsreader_naf))\n\n a_news_item = news_item(signalmedia_json=article,\n preprocessing=the_preprocessing)\n yield a_news_item",
"def network_nodes(self):\n nodes = []\n for node in self.filelist:\n node_attributes = {\"type\": node.suffix}\n if node_attributes[\"type\"] == \".py\":\n node_attributes[\"size\"] = (\n log(self.get_filesize(self.sourcepath / node) + 25) * 2\n )\n node_attributes[\"color\"] = {\n \"border\": \"rgba(0,70,10,1)\",\n \"background\": \"rgba(0, 120, 20 ,1)\",\n }\n nodes.append((self.name(node), node_attributes))\n return nodes",
"def import_file(self, file_obj, folder):\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n\n obj, created = cls.objects.get_or_create(\n original_filename=file_obj.name,\n file=file_obj,\n folder=folder,\n is_public=FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(\n original_filename=file_obj.name,\n file=file_obj,\n folder=folder,\n is_public=FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\"file_created #%s / image_created #%s -- file : %s -- created : %s\" % (self.file_created,\n self.image_created,\n obj, created))\n return obj",
"def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')",
"def add(self, data, media_type):\n\t\tif isinstance(data, xmlrpclib.Binary):\n\t\t\tdata = data.data # looks strange, but that's how xmlrpc works :)\n\n\t\tid = md5(data).hexdigest()\n\t\tsize = len(data)\n\t\tself.log.debug(\"add for %s [%d bytes] was called\" % (id, size))\n\n\t\t@stack\n\t\tdef check_exists(result):\n\t\t\tif result[0] == 0:\n\t\t\t\tif not result[1]:\n\t\t\t\t\treturn self._find_suitable_storage_targets(id, data, self._cfg_default_distribution_total)\n\t\t\t\telse:\n\t\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t@stack\n\t\tdef insert_txn(txn, host, id):\n\t\t\ttxn.execute(\"\"\"\n\t\t\t\tINSERT INTO\n\t\t\t\t\tstorage_assignments (\n\t\t\t\t\t\thostname,\n\t\t\t\t\t\tmedia_id\n\t\t\t\t\t) VALUES (%s, %s)\n\t\t\t\t\"\"\", (host, id))\n\n\t\t\ttxn._connection.commit()\n\t\t\tself.log.debug(\"done inserting image\")\n\t\t\treturn 0\n\n\t\t@stack\n\t\tdef handle_path(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\t\t\tpath = result[1]\n\t\t\tself.log.debug(\"calling _write_binary(%s.jpg)\" % path)\n\t\t\treturn self._write_binary(\"%s.jpg\" % path, data)\n\t\t\t\n\t\t@stack\n\t\tdef main_inserts(result):\n\t\t\tif result[0] != 0:\n\t\t\t\tself.log.warning(\"Error getting storage_targets: %s\" % result[1])\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tnodes = result[1]\n\t\t\tself.log.debug(\"Got %s nodes back from _find_suitable()\" % pformat(nodes))\n\t\t\tdl = []\n\t\t\tfor n in nodes:\n\t\t\t\tself.log.debug(\"getting path for image %s, node %s\" % (id, n))\n\t\t\t\td2 = self._make_media_path(id, n)\n\t\t\t\td2.addCallback(handle_path)\n\t\t\t\td2.addCallback(lambda _: dl.append(self.app.db.runInteraction(insert_txn, n, id, host=n)))\n\t\t\t\tdl.append(d2)\n\t\t\td_list = DeferredList(dl, fireOnOneErrback=1)\n\t\t\td_list.addCallback(lambda _: 0)\n\t\t\treturn d_list\n\n\t\td = self.binary_exists(id)\n\t\td.addCallback(check_exists)\n\t\td.addCallback(main_inserts)\n\t\td.addErrback(lambda failure: (-1, failure.getErrorMessage()))\n\t\treturn d",
"def create_nodes(name):\n # Find the tsp instance file and extract its extension\n dir = os.path.dirname(os.path.realpath(__file__))\n file = open(dir+'/instances/'+name)\n ext = name.split('.')[1]\n nodes = [] #Array storing nodes\n\n # If .csv then just read nodes line by line\n if (ext == \"csv\"):\n for nodeNo,line in enumerate(file): #enumerate used to obtain line numbers and thus node numbers\n coords = line.rsplit()[0].split(\",\")\n x = int(coords[0])\n y = int(coords[1])\n nodes.append(Node(x,y))\n elif (ext == \"tsp\"):\n # If .tsp then the format of the file changes and needs to be read differently.\n file.readline()\n file.readline()\n file.readline()\n no_nodes = int(file.readline().strip().split()[1])\n file.readline()\n file.readline()\n file.readline()\n\n for i in range(0, no_nodes):\n\n coords = file.readline().strip().split()[1:]\n x = float(coords[0])\n y = float(coords[1])\n\n nodes.append(Node(x,y))\n\n return nodes",
"def create_node(self, **kwargs):\n size = kwargs['size'].ram\n params = {\n 'cmd' : 'dreamhost_ps-add_ps',\n 'movedata' : kwargs.get('movedata', 'no'),\n 'type' : kwargs['image'].name,\n 'size' : size\n }\n data = self.connection.request('/', params).object\n return Node(\n id = data['added_web'],\n name = data['added_web'],\n state = NodeState.PENDING,\n public_ip = [],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'type' : kwargs['image'].name\n }\n )",
"def test_add_media_type(self):\n\n # check if documentalist has access to create new media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = {\n 'status': '0',\n 'acronym': 'foto',\n 'name': 'Foto',\n 'language' : 'pt-br',\n 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/multimedia/media-type/new', form_data, follow=True )\n\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, \"Foto\")",
"async def _init_tree_with_sources(svc: Pytheos) -> TreeEntry:\n tree = TreeEntry(obj=None)\n\n for source in await svc.api.browse.get_music_sources():\n tree.setdefault(source.name, TreeEntry(obj=source))\n\n return tree",
"def ingest_node_results(self, filename, extension=-1):\n\n # Which node is this?\n wg, node_name = utils.parse_node_filename(filename)\n #node_id = self.retrieve_node_id(wg, node_name)\n uves_node_id = self.retrieve_node_id(wg, \"UVES-{}\".format(node_name))\n giraffe_node_id = self.retrieve_node_id(wg, \"GIRAFFE-{}\".format(node_name))\n\n # Start ingesting results.\n data = Table.read(filename, hdu=extension)\n\n #default_row = {\"node_id\": node_id}\n default_row = {\"node_id\": -1}\n columns = (\n \"node_id\", \"cname\", \"filename\", \"setup\", \"snr\",\n \"vel\", \"e_vel\", \"vrot\", \"e_vrot\",\n \"teff\", \"e_teff\", \"nn_teff\", \"enn_teff\", \"nne_teff\", \"sys_err_teff\",\n \"logg\", \"e_logg\", \"nn_logg\", \"enn_logg\", \"nne_logg\", \"sys_err_logg\", \"lim_logg\",\n \"feh\", \"e_feh\", \"nn_feh\", \"enn_feh\", \"nne_feh\", \"sys_err_feh\",\n \"xi\", \"e_xi\", \"nn_xi\", \"enn_xi\", \"nne_xi\",\n \"mh\", \"e_mh\", \"nn_mh\", \"enn_mh\", \"nne_mh\",\n \"alpha_fe\", \"e_alpha_fe\", \"nn_alpha_fe\", \"enn_alpha_fe\", \"nne_alpha_fe\",\n \"vrad\", \"e_vrad\", \"vsini\", \"e_vsini\",\n \"peculi\", \"remark\", \"tech\")\n\n # Update formats, as necessary.\n tmp_key_format = \"{}_NEW_DTYPE\"\n for key, new_dtype in _FITS_FORMAT_ADAPTERS.items():\n\n # FUCK THESE IDIOTIC PEOPLE WHAT THE FUCK IS WRONG WITH THEM\n if node_name == \"Carmela-Elena\":\n\n if key in (\"teff\", \"e_teff\", \"logg\"):\n data[tmp_key_format.format(key.upper())] = _adapt_str_to_float(data[key.upper()])\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n elif key in (\"feh\", \"e_feh\"):\n del data[key.upper()]\n data[tmp_key_format.format(key.upper())] = np.nan * np.ones(len(data))\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n elif key in (\"tech\", \"peculi\", \"remark\"):\n del data[key.upper()]\n data[tmp_key_format.format(key.upper())] = [\"\"] * len(data)\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n\n elif node_name == \"Porto\" and key in (\"teff\", \"e_teff\", \"feh\", \"e_feh\"):\n data[tmp_key_format.format(key.upper())] = _adapt_str_to_float(data[key.upper()])\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n data[tmp_key_format.format(key.upper())] = np.array(data[key.upper()], dtype=new_dtype)\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n N = len(data)\n for i, row in enumerate(data):\n logger.info(\"Ingesting row {}/{} from node WG{}: {}\".format(i, N,\n wg, node_name))\n row_data = {}\n row_data.update(default_row)\n row_data.update(dict(zip(columns[1:], [row[c.upper()] for c in columns[1:]])))\n\n if row_data[\"setup\"].strip() == \"UVES\":\n row_data[\"node_id\"] = uves_node_id\n elif row_data[\"setup\"].strip() == \"GIRAFFE\":\n row_data[\"node_id\"] = giraffe_node_id\n else:\n raise WTFError\n\n if node_name.lower() == \"carmela-elena\":\n for key in (\"tech\", \"peculi\", \"remark\"):\n row_data[key] = str(row_data[key])\n\n use_columns = [] + list(columns)\n for k in row_data.keys():\n if isinstance(row_data[k], (bool, np.bool_)):\n del row_data[k]\n use_columns.remove(k)\n\n self.execute(\n \"INSERT INTO results ({}) VALUES ({})\".format(\n \", \".join(use_columns),\n \", \".join([\"%({})s\".format(column) for column in use_columns])),\n row_data)\n\n else:\n self.execute(\n \"INSERT INTO results ({}) VALUES ({})\".format(\n \", \".join(columns),\n \", \".join([\"%({})s\".format(column) for 
column in columns])),\n row_data)\n\n self.connection.commit()\n return N",
"def create(self):\n self.create_file()",
"def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder and piece\n\t\t\tself.addNode(self.tree,data.path(),data)",
"def __init__(self, manifest, mode='train'):\n self.audio_links = [line.rstrip('\\n').split(' ')[0] for line in open(manifest)]\n self.labels_emotion = [int(line.rstrip('\\n').split(' ')[1]) for line in open(manifest)]\n self.labels_gender = [int(line.rstrip('\\n').split(' ')[2]) for line in open(manifest)]",
"def __init__(self):\n\n # the path to the file locally\n self.path = None\n # the file extension\n self.ext = None\n # image|video\n self.type = None\n ##\n # file title reference\n self.title = None\n # [image, gallery, video, performer]\n self.category = None\n # file size\n self.size = None",
"def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))",
"async def create_media(self, community: Community):\n media_tab_url = f\"{self._api_stream_url}{community.id}/{self._api_media_tab}\"\n async with self.web_session.get(media_tab_url, headers=self._headers) as resp:\n if self.check_status(resp.status, media_tab_url):\n data = await resp.json()\n media_objects, photo_media_dicts = iterate_community_media_categories(data)\n\n # This endpoint does NOT give us any information about the photos, therefore we must make\n # a separate api call to retrieve proper photo information for the photo media.\n for media in photo_media_dicts:\n media_obj = await self.fetch_media(community.id, media.get(\"id\"))\n if media_obj:\n media_objects.append(media_obj)\n\n self._add_media_to_cache(media_objects)",
"def create_video_task_structure(obj):\n parent_path = resource_path(obj.__parent__)\n obj_path = resource_path(obj)\n types = ['.mkv', '.avi'] #FIXME: Settings, formats etc...\n results = []\n for mtype in types:\n task_c = chain(blob_to_tmp_file.s(obj_path), convert_file.s(mtype), tmp_to_blob.s(parent_path, obj_path))\n results.append(task_c())\n return results",
"def create_preview(name):\n file_type = os.path.splitext(name)[1]\n\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n dir = os.path.dirname(os.path.realpath(__file__))\n file = open(dir+'/instances/'+name)\n if file_type == '.csv':\n\n for nodeNo,line in enumerate(file): #enumerate used to obtain line numbers and thus node numbers\n coords = line.rsplit()[0].split(\",\")\n\n x = int(coords[0])\n y = int(coords[1])\n axis.scatter(x, y, c = 'b', label = nodeNo)\n axis.set_title(name)\n axis.text(x+5,y+5, str(nodeNo))\n else:\n file.readline()\n file.readline()\n file.readline()\n no_nodes = int(file.readline().strip().split()[1])\n file.readline()\n file.readline()\n file.readline()\n\n for i in range(0, no_nodes):\n\n coords = file.readline().strip().split()[1:]\n x = float(coords[0])\n y = float(coords[1])\n axis.scatter(x, y, c = 'b', label = i)\n axis.set_title(name)\n axis.text(x,y, str(i))\n\n return fig",
"def _build(self, files, strict=True, usrdata=None):\n # Allow for single files\n _files = files if hasattr(files, '__len__') else [files]\n\n # Build lists to fill\n data = {k:[] for k in self.spectrograph.meta.keys()}\n data['directory'] = ['None']*len(_files)\n data['filename'] = ['None']*len(_files)\n\n # Build the table\n for idx, ifile in enumerate(_files):\n _ifile = Path(ifile).resolve()\n # User data (for frame type)\n if usrdata is None:\n usr_row = None\n else:\n # TODO: This check should be done elsewhere\n # Check\n if _ifile.name != usrdata['filename'][idx]:\n msgs.error('File name list does not match user-provided metadata table. See '\n 'usrdata argument of instantiation of PypeItMetaData.')\n usr_row = usrdata[idx]\n\n # Add the directory and file name to the table\n data['directory'][idx] = str(_ifile.parent)\n data['filename'][idx] = _ifile.name\n if not data['directory'][idx]:\n data['directory'][idx] = '.'\n\n # Read the fits headers. NOTE: If the file cannot be opened,\n # headarr will be None, and the subsequent loop over the meta keys\n # will fill the data dictionary with None values.\n msgs.info(f'Adding metadata for {data[\"filename\"][idx]}')\n headarr = self.spectrograph.get_headarr(_ifile, strict=strict)\n\n # Grab Meta\n for meta_key in self.spectrograph.meta.keys():\n value = self.spectrograph.get_meta_value(headarr, meta_key, \n required=strict,\n usr_row=usr_row, \n ignore_bad_header = (\n self.par['rdx']['ignore_bad_headers'] or strict))\n if isinstance(value, str) and '#' in value:\n value = value.replace('#', '')\n msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(\n meta_key, value))\n data[meta_key].append(value)\n\n # JFH Changed the below to not crash if some files have None in\n # their MJD. This is the desired behavior since if there are\n # empty or corrupt files we still want this to run.\n\n # Validate, print out a warning if there is problem\n try:\n time.Time(data['mjd'], format='mjd')\n except ValueError:\n mjd = np.asarray(data['mjd'])\n filenames = np.asarray(data['filename'])\n bad_files = filenames[mjd == None]\n # Print status message\n msg = f'Time invalid for {len(bad_files)} files.\\nContinuing, but the following ' \\\n 'frames either could not be opened, are empty, or have corrupt headers:\\n'\n for file in bad_files:\n msg += f' {file}\\n'\n msgs.warn(msg)\n\n # Return\n return data"
]
| [
"0.6899484",
"0.6054888",
"0.58803874",
"0.57928514",
"0.5420819",
"0.5370023",
"0.5346127",
"0.5283462",
"0.52506554",
"0.5231957",
"0.5210766",
"0.5093985",
"0.50406533",
"0.501919",
"0.5015375",
"0.49877536",
"0.49819472",
"0.49494064",
"0.49333236",
"0.49259782",
"0.49049827",
"0.49042276",
"0.48808593",
"0.4878923",
"0.48767197",
"0.48710132",
"0.4870728",
"0.48671526",
"0.48631003",
"0.48362097"
]
| 0.714121 | 0 |
A function that places a hyperlink within a paragraph object. | def add_hyperlink(paragraph, url, text, color, underline):
# This gets access to the document.xml.rels file and gets a new relation id value
part = paragraph.part
r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
# Create the w:hyperlink tag and add needed values
hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
# Create a w:r element
new_run = docx.oxml.shared.OxmlElement('w:r')
# Create a new w:rPr element
rPr = docx.oxml.shared.OxmlElement('w:rPr')
# Add color if it is given
    if color is not None:
c = docx.oxml.shared.OxmlElement('w:color')
c.set(docx.oxml.shared.qn('w:val'), color)
rPr.append(c)
    # Remove underlining if it is not requested
if not underline:
u = docx.oxml.shared.OxmlElement('w:u')
u.set(docx.oxml.shared.qn('w:val'), 'none')
rPr.append(u)
# Join all the xml elements together add add the required text to the w:r element
new_run.append(rPr)
new_run.text = text
hyperlink.append(new_run)
paragraph._p.append(hyperlink)
return hyperlink | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_link(self, text, href):\n self.insert_text('\\n<a href=\"%s\">%s</a>' % (href, text))",
"def add_link(self, text, link, doc=None):\n if doc is None:\n doc = self.doc\n\n attributes = dict(height=13, width=800, align=None,\n style={'width': '800px',\n 'font-size': '100%',\n 'font-style': 'italic',\n 'font-weight': 'lighter',\n 'color': self.palette['hover'],\n 'text-align': 'center'})\n\n color = self.palette['hover']\n style = f\"style=\\\"text-decoration: none; color: {color};\\\"\"\n\n doc.add_root(Div(text=f\"<a href=\\\"{link}\\\" {style}>{text}</a>\",\n **attributes))\n return doc",
"def link(text, link_func):\n def object_formatter(v, c, m, p):\n \"\"\"Format object view link.\"\"\"\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))\n return object_formatter",
"def link(self, link, title, text):\n link = escape_link(link)\n return [MdStyleInstructionLink(link)] + text",
"def render_link(url, text=None):\n try:\n url_validator(url)\n return \"<a href='%s'>%s</a>\" % (url, text if text else url)\n except ValidationError:\n return url",
"def link_html(text: str, href: str) -> str:\n return '<a href=\"{}\">{}</a>'.format(href, text)",
"def pybb_link(object, anchor=''):\n\n url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None\n #noinspection PyRedeclaration\n anchor = anchor or smart_text(object)\n return mark_safe('<a href=\"%s\">%s</a>' % (url, escape(anchor)))",
"def urlLink(self, text=None, url=None, attrs={}):\n if not text:\n text = self.titleOrId()\n text = escape(text)\n if not self.checkRemotePerm(\"View\", self):\n return text\n if not url:\n url = self.getPrimaryUrlPath()\n if len(attrs):\n return '<a href=\"%s\" %s>%s</a>' % (url,\n ' '.join('%s=\"%s\"' % (x,y) for x,y in attrs.items()),\n text)\n else:\n return '<a href=\"%s\">%s</a>' % (url, text)",
"def md_link(link_text, link_target):\n return '[%s](%s)' % (md_escape(link_text, characters=']'),\n md_escape(link_target, characters=')'))",
"def wrap_it_in_a_link(html, url):\n\n return \"<a href='\" + url + \"'>\" + html + \"</a>\"",
"def format_url(self, url, text):\r\n return u'<a href=\"%s\">%s</a>' % (escape(url), text)",
"def link(self):\n return f\"[{self.numbered_title}]({self.html_url})\"",
"def slack_link(url, text=\"\"):\n if text:\n return \"<%s|%s>\" % (url, text)\n\n else:\n return \"<%s>\" % url",
"def add_link(\n self,\n url: str,\n label: Optional[str] = None,\n ) -> None:\n if not label:\n label = url\n self._client.add_element(\n Markdown(\n f\"[{label}]({url})\",\n on_tap_link=lambda e: self._client.page.launch_url(e.data),\n )\n )",
"def make_wepay_link(app, rawtext, endpoint, function, name_override, options):\n try:\n # get the documentation URL\n base = app.config.wepay_docs_home\n if not base:\n raise AttributeError\n except AttributeError as err:\n raise ValueError('wepay_docs_home configuration value is not set (%s)' % str(err))\n\n # if the URL doesn't include a trailing slash, add one\n slash = '/' if base[-1] != '/' else ''\n\n # build external url\n # if no function is given, then it is the main endpoint, which is accessed by #lookup on the page\n ref = \"{0}{1}#{2}\"\n\n ref = ref.format(base,endpoint,function) if function else ref.format(base,endpoint,\"lookup\")\n\n # build the text that we will display instead of :wepay:`endpoint function`\n insert_text = \"/\" + endpoint + \"/\" + function if function else \"/\" + endpoint\n if name_override:\n insert_text = name_override\n set_classes(options)\n\n # make the node\n node = nodes.reference(rawtext, insert_text, refuri=ref,\n **options)\n return node",
"def object_formatter(v, c, m, p):\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))",
"def createLinkFromWikiWord(word, wikiPage): # normalizeWikiWord\r\n return \"\"",
"def add_link(self, link):\n raise NotImplementedError",
"def link(self, link):\r\n return links.Link(self, link)",
"def links(self, text):\n\n # For some reason, the part of the regex below that matches the url\n # does not match a trailing parenthesis. It gets caught by tail, and\n # we check later to see if it should be included as part of the url.\n pattern = r'''\n (?P<pre>^|(?<=[\\s>.\\(\\|])|[{[])? # leading text\n \" # opening quote\n (?P<atts>%s) # block attributes\n (?P<text>[^\"]+?) # link text\n \\s? # optional space\n (?:\\((?P<title>[^)]+?)\\)(?=\"))? # optional title\n \": # closing quote, colon\n (?P<url>%s+?) # URL\n (?P<slash>\\/)? # slash\n (?P<post>[^\\w\\/]*?) # trailing text\n (?P<tail>[\\]})]|(?=\\s|$|\\|)) # tail\n ''' % (self.c, self.urlch)\n\n text = re.compile(pattern, re.X | re.U).sub(self.fLink, text)\n\n return text",
"def html_link_to_tag(string, input_id, proc):\n return html_simple_element(\n string, \"a\", 'href=\"#' + proc + \"_\" + normalise_tag_id(input_id) + '\"'\n )",
"def anchorlink(self, on, name='', **kw):\n attrs = self._langAttr()\n if name:\n name = self.sanitize_to_id(name)\n attrs['href'] = '#' + self.qualify_id(name)\n if 'href' in kw:\n del kw['href']\n if on:\n str = self._open('a', attr=attrs, **kw)\n else:\n str = self._close('a')\n return str",
"def start_link(self, link):\n self.start_underline()",
"def _make_doi_clickable(link):\n return f\"https://doi.org/{link}\"",
"def createLink(context, title, link, exclude_from_nav=False):\n oid = idnormalizer.normalize(title, 'es')\n if not hasattr(context, oid):\n context.invokeFactory('Link', id=oid, title=title, remoteUrl=link)\n link = context[oid]\n if exclude_from_nav:\n link.setExcludeFromNav(True)\n link.reindexObject()",
"def ref_to_link(txt):\n text = txt.group(1) # because it was a match in a regular expression\n\n thecite, everythingelse = first_bracketed_string(text)\n thecite = thecite[1:-1] # strip curly brackets\n thecite = thecite.replace(\"\\\\\",\"\") # \\href --> href\n\n refs = thecite.split(\",\")\n ans = \"\"\n\n # print \"refs\",refs\n\n for ref in refs:\n ref = ref.strip() # because \\cite{A, B, C,D} can have spaces\n this_link = \"\"\n if ref.startswith(\"href\"):\n the_link = re.sub(r\".*{([^}]+)}{.*\", r\"\\1\", ref)\n click_on = re.sub(r\".*}{([^}]+)}\\s*\", r\"\\1\", ref)\n this_link = '{{ LINK_EXT(\"' + click_on + '\",\"' + the_link + '\") | safe}}'\n elif ref.startswith(\"doi\"):\n ref = ref.replace(\":\",\"\") # could be doi:: or doi: or doi\n the_doi = ref[3:] # remove the \"doi\"\n this_link = '{{ LINK_EXT(\"' + the_doi + '\",\"https://doi.org/' + the_doi + '\")| safe }}'\n elif ref.lower().startswith(\"mr\"):\n ref = ref.replace(\":\",\"\")\n the_mr = ref[2:] # remove the \"MR\"\n this_link = '{{ LINK_EXT(\"' + 'MR:' + the_mr + '\", '\n this_link += '\"http://www.ams.org/mathscinet/search/publdoc.html?pg1=MR&s1='\n this_link += the_mr + '\") | safe}}'\n elif ref.lower().startswith(\"arxiv\"):\n ref = ref.replace(\":\",\"\")\n the_arx = ref[5:] # remove the \"arXiv\"\n this_link = '{{ LINK_EXT(\"' + 'arXiv:' + the_arx + '\", '\n this_link += '\"http://arxiv.org/abs/'\n this_link += the_arx + '\")| safe}}'\n\n\n if this_link:\n if ans:\n ans += \", \"\n ans += this_link\n\n return '[' + ans + ']' + everythingelse",
"def link(address):",
"def edit_link(parser, token):\n return EditLinkTag(parser, token)",
"def __init__(self, link_text, passage_name=None, passage_on_right=True):\n self.link_text = link_text\n self.passage_name = passage_name\n self.passage_on_right = passage_on_right",
"def wiki_link(text):\n return wiki_link_pattern.sub(get_link, text)"
]
| [
"0.7194196",
"0.711099",
"0.6931182",
"0.6729301",
"0.6459507",
"0.6443439",
"0.6435088",
"0.6348411",
"0.63430583",
"0.63310754",
"0.6183733",
"0.61709255",
"0.6125358",
"0.6087342",
"0.6061669",
"0.6057206",
"0.6023001",
"0.5997185",
"0.59653616",
"0.5963516",
"0.5961892",
"0.5958463",
"0.5922726",
"0.5915761",
"0.59080565",
"0.5896739",
"0.58756936",
"0.5865955",
"0.58458644",
"0.5837964"
]
| 0.7520187 | 0 |
Parse the video info from the list file | def _parse_list(self):
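# each line of the list file holds the space-separated fields of one video record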
frame_path = [x.strip().split(' ') for x in open(self._image_set)]
self.video_list = [VideoRecord(item) for item in frame_path]
print('Sequence number / video number: %d' % len(self.video_list))
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse():\n all_players = list(FACE_IMAGE_LOCATIONS.keys())\n face_encodings = VideoParser.__load_faces_encodings(all_players)\n player_occurrences = VideoParser.__get_player_occurrences(all_players, face_encodings)\n VideoParser.__save_parsed_video(player_occurrences)",
"def parse_video_list (self, response_data):\n video_list = {};\n raw_video_list = response_data['value']\n netflix_list_id = self.parse_netflix_list_id(video_list=raw_video_list);\n for video_id in raw_video_list['videos']:\n if self._is_size_key(key=video_id) == False:\n video_list.update(self.parse_video_list_entry(id=video_id, list_id=netflix_list_id, video=raw_video_list['videos'][video_id], persons=raw_video_list['person'], genres=raw_video_list['genres']))\n return video_list",
"def get_mpeg_info(videos_dir, filename):\n logger.info(\"Getting info from %s/%s\" % (videos_dir, filename))\n if not os.path.exists(videos_dir):\n raise Exception(\"%s dir does not exist!\" % videos_dir)\n path = os.path.join(videos_dir, filename)\n if not os.path.exists(path):\n raise Exception(\"%s does not exist!\" % path)\n\n p = subprocess.Popen([FFMPEG, \"-i\", filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n out = p.stdout.read()\n pattern = r'Video: mpeg2video \\(Main\\), (?P<vdata>.*?)\\n'\n m = re.search(pattern, out)\n\n if not m:\n raise Exception(\"Failed to search mpeg info: '%s'\" % out)\n\n vdata = m.groups()[0]\n mdata = vdata.split(\", \")\n logger.info(mdata)\n\n resolution = mdata[1].split(\" \")[0]\n (width, height) = resolution.split(\"x\")\n width = int(width)\n height = int(height)\n logger.info(\"%dx%d\" % (width, height))\n\n bitrate = mdata[2].split(\" \")[0] # kb/s\n\n fps = float(mdata[3].split(\" \")[0])\n\n return {\n \"width\": width,\n \"height\": height,\n \"bitrate\": bitrate, # kb/s\n \"fps\": fps,\n }",
"def load_video(self):\n self.video_file = tkFileDialog.askopenfilename()\n self.video_parser = VideoFileParser(self.video_file)\n\n self.video_entries = self.video_parser.entries\n\n for index, entry in enumerate(self.video_entries):\n self.video_box.insert(index, entry.word)",
"def getvideolist():\n safeprint(\"Getting video list...\")\n response = getfile(\"http://openings.moe/api/list.php\")\n lstjson = response.read().decode(\"utf-8\", \"ignore\")\n videolist = json.loads(lstjson)\n return videolist",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos",
"def _parse_feed(self,feed): \n meta=[]\n for entry in feed:\n item_meta=self._parse_entry(entry)\n item_meta['video-id']='0'\n meta.append(item_meta)\n self._logger.info('%s videos were founded and parsed at Megavideo',len(meta)) \n return meta",
"def load_video_data(self):\n self.file_videos = [\n Video.from_file(path, self)\n for path in self.video_dir.glob('*.json')\n ]",
"def load_video_data(fpath):\n videos = []\n with open(fpath) as f:\n d1 = json.load(f)['items']\n videos += append_videos(d1)\n return videos",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']+'.avi') \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']] \n else:\n if not self.test_ret:\n video_info['text'] = [rnd.choice(video_info['text'])]\n else:\n video_info['clip_text_candidate'] = list(range(len(video_info['text'])))\n\n video_infos.append(video_info) \n del ann_info\n\n return video_infos",
"def _parseMediaInfo(self):\n\t\t# the program path to MediaInfo should be set otherwise\n\t\tenv = {'path': env_mediainfo_dir}\n\t\t# the command for MediaInfo is a fixed command\n\t\tcom = [com_mediainfo, '-f', self.name]\n\t\t# invoke the external program\n\t\tproc = externalProcess(com, env)\n\t\t# read the programs output line by line and parse the output to a dictionary, obtaining all information\n\t\tinfo = {}\n\t\tstate = 'start'\n\t\tstream = 0\n\t\tfor line in proc.execute():\n\t\t\tlist = line.split(\":\")\n\t\t\t# recognize the sections ('General','Video','Audio','Text')\n\t\t\tif len(list) == 1 and list[0] != '':\n\t\t\t\tstate = str(list[0].lstrip().rstrip())\n\t\t\t\t# print \"New state: \", state\n\t\t\telif len(list) >= 2 and list[0] != '' and list[1] != '':\n\t\t\t\t# recognize several stream identifier\n\t\t\t\tif str(list[0].lstrip().rstrip()) == 'Stream identifier':\n\t\t\t\t\tstream = int(str(list[1].lstrip().rstrip()))\n\t\t\t\t\tcontinue\n\t\t\t\t# save the information to the dictionary\n\t\t\t\tkey = state + \"_\" + str(stream) + \"_\" + str(list[0].lstrip().rstrip())\n\t\t\t\twhile key in info.keys():\n\t\t\t\t\tkey += \"_\"\n\t\t\t\tinfo[key] = str(list[1].lstrip().rstrip())\n\t\treturn info",
"def parse(self):\n try:\n self.open_file()\n lines = list(self._file)\n\n if len(lines) > 0:\n text = ''.join(lines)\n regex = 'Song \\d+\\nStart (\\d+:\\d+:\\d+)\\nEnd (\\d+:\\d+:\\d+)\\nLength (\\d+.\\d+)'\n match = re.findall(regex, text)\n if len(match):\n starts = []\n ends = []\n lengths = []\n\n for i in range(len(match)):\n starts.append(match[i][0])\n ends.append(match[i][1])\n lengths.append(float(match[i][2]))\n\n for i in range(len(match)):\n self.debug_data.append({\n 'start':starts[i],'end':ends[i],'length':lengths[i]})\n\n match = re.search('T\\d_S(\\d{4})_.*.txt', self._filepath)\n if match:\n self._experiment_metadata['session_id'] = int(match.groups()[0])\n else:\n raise EIMParsingError(\"No valid session id found in filename %s\" % self._filepath)\n\n finally:\n if self._file and not self._file.closed:\n self.close_file()",
"def media_file_info(self):\n\n if self.observationId and self.playerType == VLC:\n\n media = self.mediaplayer.get_media()\n\n logging.info(\"State: {}\".format(self.mediaplayer.get_state()))\n logging.info(\"Media (get_mrl): {}\".format(bytes_to_str(media.get_mrl())))\n logging.info(\"media.get_meta(0): {}\".format(media.get_meta(0)))\n logging.info(\n \"Track: {}/{}\".format(self.mediaplayer.video_get_track(), self.mediaplayer.video_get_track_count()))\n logging.info(\"number of media in media list: {}\".format(self.media_list.count()))\n logging.info(\"get time: {} duration: {}\".format(self.mediaplayer.get_time(), media.get_duration()))\n logging.info(\"Position: {} %\".format(self.mediaplayer.get_position()))\n logging.info(\"FPS: {}\".format(self.mediaplayer.get_fps()))\n logging.info(\"Rate: {}\".format(self.mediaplayer.get_rate()))\n logging.info(\"Video size: {}\".format(self.mediaplayer.video_get_size(0)))\n logging.info(\"Scale: {}\".format(self.mediaplayer.video_get_scale()))\n logging.info(\"Aspect ratio: {}\".format(self.mediaplayer.video_get_aspect_ratio()))\n logging.info(\"is seekable? {0}\".format(self.mediaplayer.is_seekable()))\n logging.info(\"has_vout? {0}\".format(self.mediaplayer.has_vout()))\n\n vlc_output = (\"State: {}<br>\"\n \"Media Resource Location: {}<br>\"\n \"File name: {}<br>\"\n \"Track: {}/{}<br>\"\n \"Number of media in media list: {}<br>\"\n \"get time: {}<br>\"\n \"duration: {}<br>\"\n \"Position: {} %<br>\"\n \"FPS: {}<br>\"\n \"Rate: {}<br>\"\n \"Video size: {}<br>\"\n \"Scale: {}<br>\"\n \"Aspect ratio: {}<br>\"\n \"is seekable? {}<br>\"\n \"has_vout? {}<br>\").format(self.mediaplayer.get_state(),\n bytes_to_str(media.get_mrl()),\n media.get_meta(0),\n self.mediaplayer.video_get_track(),\n self.mediaplayer.video_get_track_count(),\n self.media_list.count(),\n self.mediaplayer.get_time(),\n self.convertTime(media.get_duration() / 1000),\n self.mediaplayer.get_position(),\n self.mediaplayer.get_fps(),\n self.mediaplayer.get_rate(),\n self.mediaplayer.video_get_size(0),\n self.mediaplayer.video_get_scale(),\n self.mediaplayer.video_get_aspect_ratio(),\n \"Yes\" if self.mediaplayer.is_seekable() else \"No\",\n \"Yes\" if self.mediaplayer.has_vout() else \"No\"\n )\n\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n\n self.results.ptText.appendHtml(\"<b>VLC analysis</b><hr>\" + vlc_output)\n\n # FFmpeg analysis\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n for nplayer in self.pj[OBSERVATIONS][self.observationId][FILE]:\n for filePath in self.pj[OBSERVATIONS][self.observationId][FILE][nplayer]:\n media_full_path = project_functions.media_full_path(filePath, self.projectFileName)\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, media_full_path)\n\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, media_full_path)\n nframes = r[\"frames_number\"]\n\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=media_full_path,\n error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(media_full_path, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"],\n r[\"has_video\"], r[\"has_audio\"]))\n\n self.results.ptText.appendHtml(\"Total duration: {} (hh:mm:ss.sss)\".\n 
format(self.convertTime(sum(self.duration) / 1000)))\n\n self.results.show()\n\n else:\n\n fn = QFileDialog(self).getOpenFileName(self, \"Select a media file\", \"\", \"Media files (*)\")\n filePath = fn[0] if type(fn) is tuple else fn\n\n if filePath:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, filePath)\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, filePath)\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=filePath, error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(filePath, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"], r[\"has_video\"],\n r[\"has_audio\"]))\n\n self.results.show()",
"async def read_video_info(vid_fp: str, logger=None):\n args = ['-v', 'quiet', '-print_format', 'json', '-show_streams', '-sexagesimal', vid_fp]\n p = await asyncio.create_subprocess_exec('ffprobe', *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n stdout, _ = await p.communicate()\n if p.returncode != 0:\n err = f'Cannot get video info for {vid_fp}'\n if logger:\n logger.error(err)\n else:\n print(err)\n return\n # Find duration\n metadata = json.loads(stdout.decode())\n for stream in metadata['streams']:\n if stream['codec_type'] != 'video':\n continue\n # Good for H264\n dur = stream.get('duration')\n # H265\n if dur is None and stream.get('tags') is not None:\n dur = stream['tags'].get('DURATION')\n if dur is None:\n return\n return parse_duration(dur)\n return",
"def fetch_video_list(self, params):\n list_id = params.get('list_id', [''])[0]\n start = int(params.get('list_from', [0])[0])\n end = int(params.get('list_to', [26])[0])\n raw_video_list = self.netflix_session.fetch_video_list(\n list_id=list_id,\n list_from=start,\n list_to=end)\n if 'error' in raw_video_list:\n return raw_video_list\n # parse the video list ids\n if 'videos' in raw_video_list.get('value', {}).keys():\n video_list = self.netflix_session.parse_video_list(\n response_data=raw_video_list)\n return video_list\n return []",
"def get_fields(self, cur_list):\n lists = []\n for ind, element in enumerate(cur_list):\n try:\n video = element.find('h3', class_='yt-lockup-title ').find('a')\n author = element.find('h3', class_='yt-lockup-title ').find('a')\n lists.append({'video_url': video.get('href'),\n 'video_name': video.text,\n 'author_url': author.get('href'),\n 'author_name': author.text\n })\n except Exception as e:\n print(e)\n return lists",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos",
"def video_list(self) -> list:\n return self._video_list",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name'] if 'filename' not in video_info else video_info['filename']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers'] if 'answers' in video_info else video_info['text']\n info_dict['question'] = video_info['question'] if 'question' in video_info else \"\"\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers']\n info_dict['question'] = video_info['q']\n info_dict['subtitle'] = video_info['located_sub_text']\n info_dict['frame_ind'] = video_info['located_frame']\n info_dict['total_frames'] = video_info.get('total_frames', -1)\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos",
"def parse_movie(self, line):\n pass",
"def _parse_entry(self,entry):\n item_meta={'title':entry.title,\n 'description':entry.description,\n 'category':entry.category,\n 'tags':entry.tags,\n 'page_url':entry.url,\n 'lq_url':None,\n 'hq_url':None,\n 'hd_url':None,\n 'search-id':self.search_id,\n 'source':'4',}\n self._logger.debug('Video Metadata: %s',item_meta)\n return item_meta",
"def get_videos(self):\n\n videos = []\n with open(self.filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in reader:\n for col in row:\n videos.append(col)\n videos = list(filter(None, list(set(videos))))\n return videos",
"def fetch_video_list_information (self, video_ids):\n paths = []\n for video_id in video_ids:\n paths.append(['videos', video_id, ['summary', 'title', 'synopsis', 'regularSynopsis', 'evidence', 'queue', 'episodeCount', 'info', 'maturity', 'runtime', 'seasonCount', 'releaseYear', 'userRating', 'numSeasonsLabel', 'bookmarkPosition', 'watched', 'videoQuality']])\n paths.append(['videos', video_id, 'cast', {'from': 0, 'to': 15}, ['id', 'name']])\n paths.append(['videos', video_id, 'cast', 'summary'])\n paths.append(['videos', video_id, 'genres', {'from': 0, 'to': 5}, ['id', 'name']])\n paths.append(['videos', video_id, 'genres', 'summary'])\n paths.append(['videos', video_id, 'tags', {'from': 0, 'to': 9}, ['id', 'name']])\n paths.append(['videos', video_id, 'tags', 'summary'])\n paths.append(['videos', video_id, ['creators', 'directors'], {'from': 0, 'to': 49}, ['id', 'name']])\n paths.append(['videos', video_id, ['creators', 'directors'], 'summary'])\n paths.append(['videos', video_id, 'bb2OGLogo', '_400x90', 'png'])\n paths.append(['videos', video_id, 'boxarts', '_342x192', 'jpg'])\n paths.append(['videos', video_id, 'boxarts', '_1280x720', 'jpg'])\n paths.append(['videos', video_id, 'storyarts', '_1632x873', 'jpg'])\n paths.append(['videos', video_id, 'interestingMoment', '_665x375', 'jpg'])\n paths.append(['videos', video_id, 'artWorkByType', 'BILLBOARD', '_1280x720', 'jpg'])\n\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='fetch_video_list_information')",
"def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):",
"def parse_media_info(filename):\n print_info('Extracting hash from {0}'.format(filename))\n media_info = MediaInfo()\n for media_info_type in MEDIA_INFO_REGEXS:\n #print_info('Parsing for {0}'.format(media_info_type))\n for regex in MEDIA_INFO_REGEXS[media_info_type]:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_data = m.group('MediaInfo').upper()\n print_info('Extracted {0}: {1}'.format(media_info_type, extracted_data))\n\n # Before we set, do any needed cleanup\n if media_info_type == 'resolution':\n if not extracted_data.endswith('p'):\n resolution = int(extracted_data)\n if resolution == 1280:\n extracted_data = '720'\n extracted_data = extracted_data + 'p'\n media_info.resolution = extracted_data\n if media_info_type == 'source':\n media_info.source = extracted_data.replace('-', '')\n elif media_info_type == 'audio_source':\n media_info.audio_source = extracted_data\n elif media_info_type == 'encoding':\n media_info.encoding = re.sub('X', 'H', extracted_data)\n elif media_info_type == 'color_bits':\n media_info.color_bits = extracted_data\n break\n \n \n return media_info",
"def fetch_video_list (self, list_id, list_from=0, list_to=FETCH_VIDEO_REQUEST_COUNT):\n paths = [\n ['lists', list_id, {'from': list_from, 'to': list_to}, ['summary', 'title', 'synopsis', 'regularSynopsis', 'evidence', 'queue', 'episodeCount', 'info', 'maturity', 'runtime', 'seasonCount', 'releaseYear', 'userRating', 'numSeasonsLabel', 'bookmarkPosition', 'watched', 'videoQuality']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'cast', {'from': 0, 'to': 15}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'cast', 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'genres', {'from': 0, 'to': 5}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'genres', 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'tags', {'from': 0, 'to': 9}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'tags', 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, ['creators', 'directors'], {'from': 0, 'to': 49}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, ['creators', 'directors'], 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'bb2OGLogo', '_400x90', 'png'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'boxarts', '_1280x720', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'storyarts', '_1632x873', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'interestingMoment', '_665x375', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'artWorkByType', 'BILLBOARD', '_1280x720', 'jpg']\n ]\n\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Video list')",
"def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks"
]
| [
"0.6779816",
"0.65194994",
"0.6317407",
"0.6295546",
"0.6287042",
"0.6220349",
"0.62049514",
"0.6153184",
"0.61432546",
"0.61055183",
"0.60525054",
"0.6038838",
"0.6028858",
"0.6019547",
"0.6008906",
"0.596016",
"0.5949716",
"0.5949716",
"0.59392923",
"0.5934991",
"0.5922064",
"0.5910063",
"0.59081566",
"0.586869",
"0.58417666",
"0.58254004",
"0.58215344",
"0.57792187",
"0.5769842",
"0.57608587"
]
| 0.78747493 | 1 |
Count device properties in tango database | def _count_device_properties(self):
db_info = self.db_instance.get_info()
db_info_list = db_info.split("\n")
num_properties = 0
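# the DB info text contains a line like "Device properties defined = <count>"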
for line in db_info_list:
if "Device properties defined" in line:
num_properties = line.split("=")[-1]
return int(num_properties) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()",
"def get_number_of_devices(self):\n return self.drt_manager.get_number_of_devices()",
"def get_count():\n _check_init()\n return _pypm.CountDevices()",
"def test_properties_count_get(self):\n pass",
"def test_initial_device_properties(self):\n expected_count = 1 # model_key property already present in db\n self.assertEquals(expected_count, self._count_device_properties())",
"def count(self, query):",
"def count_measurements(database: Database) -> int:\n return int(database.measurements.count_documents(filter={}))",
"def countreadcolumns(prop_info):\n count = 0\n for prop in prop_info:\n if isinstance(prop['table_name'], str):\n count += 1\n else:\n count += len(prop['table_name'])\n return count",
"def count():\r\n return Activation.query.count()",
"def get_device_summary(dataframe):\n\n print(\"Total number of points: \", len(dataframe))\n\n print(\"The number of rows from each device are as follows: \", dataframe.groupby(['device_id']).size())",
"def count():",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def test_write_device_properties_to_db(self):\n initial_count = self._count_device_properties()\n tango_sim_generator.write_device_properties_to_db(\n self.sim_device.name(), self.expected_model, self.db_instance\n )\n num_expected_properties = len(self.expected_model.sim_properties.keys())\n final_count = self._count_device_properties()\n num_added_properties = final_count - initial_count\n self.assertEquals(num_expected_properties, num_added_properties)",
"def get_device_count():\n debug(\"MpOrLibUsb.get_device_count()\")\n num = MpOrLibUsb.__get_device_count_Mpusb()\n num = num + MpOrLibUsb.__get_device_count_Libusb()\n return num\n #end get_device_count()",
"def count(self):\n return self.properties.get('count')",
"def num_keys_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection counting matching incident_id\n cursor = COLLECTION.find({})\n count = 0\n for i in cursor:\n if incident in i:\n count += 1\n return f'The count of the key/value pairs for the incident - {str(count)}', {}, {}",
"def test_properties_count_group_by_group_by_get(self):\n pass",
"def probe_counts(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n\n return dict(\n attributes=\",\".join(attributes),\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n aggregate_grouping=\"\"\"\n client_agg_type,\n agg_type\n \"\"\",\n # not boolean\n scalar_metric_types=\"\"\"\n \"counter\",\n \"quantity\",\n \"labeled_counter\",\n \"timespan\"\n \"\"\",\n boolean_metric_types=\"\"\"\n \"boolean\"\n \"\"\",\n **kwargs,\n )",
"def count(self):\n return self.query.count(with_limit_and_skip = True)",
"def device_count() -> int:\n return flow._oneflow_internal.CudaGetDeviceCount()",
"def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])",
"def get_devs_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetDevsCount', self.handle)",
"def count(listing):\n if 'meta' in listing and 'query_total' in listing['meta']:\n return listing['meta']['query_total']",
"def property_count_function(listOfProperties):\n\n property_count = {} # Empty dict, is gonna look like this: property_count{property : count}\n for lists in listOfProperties:\n try:\n for properties in lists:\n property_count[properties] = property_count.get(properties, 0) + 1\n except TypeError as e:\n print(e)\n\n # Converts the dictionary to a dataframe\n property_dataframe = pd.DataFrame(list(property_count.items()), columns=['Property', 'Frequency'])\n # property_dataframe = property_dataframe.set_index(\"Property\")\n property_dataframe = property_dataframe.sort_values(by=['Frequency'], ascending=False)\n\n return property_dataframe",
"def get_device_count():\n c_num = ct.c_int(0)\n safe_call(backend.get().af_get_device_count(ct.pointer(c_num)))\n return c_num.value",
"def count(self):\n ans = self.execute(self.commands.table_count(self.name))\n return ans[0][0]",
"def object_count(request, model):\n active_tool_session_id = request.session[\"active_tool_session_id\"]\n num_of_objects = model.objects.filter(\n tool_session_id=active_tool_session_id\n ).count()\n return num_of_objects"
]
| [
"0.7232637",
"0.6833704",
"0.677386",
"0.66516244",
"0.6515157",
"0.6435831",
"0.6418593",
"0.63793916",
"0.6332636",
"0.623051",
"0.62289196",
"0.615941",
"0.615941",
"0.615941",
"0.615941",
"0.61536556",
"0.61357987",
"0.610687",
"0.60905564",
"0.60675",
"0.6046463",
"0.6018334",
"0.600387",
"0.5990456",
"0.5990176",
"0.5971581",
"0.5968695",
"0.5966325",
"0.59608805",
"0.5951492"
]
| 0.80800503 | 0 |
Test initial device properties added to the tangoDB | def test_initial_device_properties(self):
expected_count = 1 # model_key property already present in db
self.assertEquals(expected_count, self._count_device_properties()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_store_property_after_reconnecting_to_the_device():",
"def test_write_device_properties_to_db(self):\n initial_count = self._count_device_properties()\n tango_sim_generator.write_device_properties_to_db(\n self.sim_device.name(), self.expected_model, self.db_instance\n )\n num_expected_properties = len(self.expected_model.sim_properties.keys())\n final_count = self._count_device_properties()\n num_added_properties = final_count - initial_count\n self.assertEquals(num_expected_properties, num_added_properties)",
"def test_create_device_data(self):\n pass",
"def test_update_device(self):\n pass",
"def test_update_device(self):\n pass",
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass",
"def test_init(self):\n self.assertEqual(self.device_key, self.factory.device_key)",
"def test_add_device(self):\n\n pass",
"def test_create_device1(self):\n pass",
"def setUp(self):\n INFLUX_DB_NAME = 'test_device_parameters'\n EmptyDBTestCase.client.create_database(INFLUX_DB_NAME)\n EmptyDBTestCase.client.drop_database(INFLUX_DB_NAME)\n EmptyDBTestCase.client.create_database(INFLUX_DB_NAME)",
"def test_device_failed_properties(self):\n dev = mock.Mock()\n dev.GetId = mock.Mock(return_value=\"id\")\n dev.GetState = mock.Mock(return_value=AudioDeviceState.Active)\n store = mock.Mock()\n store.GetCount = mock.Mock(return_value=1)\n store.GetAt = mock.Mock(return_value=\"pk\")\n store.GetValue = mock.Mock(side_effect=_ctypes.COMError(None, None, None))\n dev.OpenPropertyStore = mock.Mock(return_value=store)\n with warnings.catch_warnings(record=True) as w:\n AudioUtilities.CreateDevice(dev)\n assert len(w) == 1\n assert \"COMError attempting to get property 0 from device\" in str(w[0].message)",
"def test_get_device(self):\n pass",
"def test_get_device(self):\n pass",
"def test_get_devices(self):\n pass",
"def test_get_devices(self):\n pass",
"def test_get_devices1(self):\n pass",
"def test_update_device_template(self):\n pass",
"def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes",
"def test_02_Device(self):\n # print(PrettyFormatAny.form(self.m_device_obj, 'A1-02-A - Device'))\n self.assertEqual(self.m_device_obj.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(self.m_device_obj.Key, TESTING_LIGHT_KEY_0)\n self.assertEqual(self.m_device_obj.Active, TESTING_LIGHT_ACTIVE_0)\n self.assertEqual(self.m_device_obj.Comment, TESTING_DEVICE_COMMENT_0)\n self.assertEqual(self.m_device_obj.BrightnessPct, TESTING_LIGHT_CUR_LEVEL_0)",
"def test_verify_state_of_a_device():",
"def test_add_device_users(self):\n pass",
"async def test_initialize_from_database(\n recorder_mock: Recorder, hass: HomeAssistant\n) -> None:\n # enable and pre-fill the recorder\n await hass.async_block_till_done()\n await async_wait_recording_done(hass)\n\n for value in VALUES_NUMERIC:\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n await async_wait_recording_done(hass)\n\n # create the statistics component, get filled from database\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 100,\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == str(round(sum(VALUES_NUMERIC) / len(VALUES_NUMERIC), 2))\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS",
"def test_set_properties(self):\n\n api_key = 'abc'\n project_id = '123'\n\n kaput.init(api_key, project_id, debug=True)\n\n self.assertEqual(api_key, kaput._API_KEY)\n self.assertEqual(project_id, kaput._PROJECT_ID)\n self.assertTrue(kaput._DEBUG)\n self.assertEqual(kaput._handle_exception, sys.excepthook)",
"def test_02_Device(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_device = self.m_device_obj\n # print(PrettyFormatAny.form(l_device, 'C4-02-A - Device'))\n self.assertEqual(l_device.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(l_device.Key, TESTING_LIGHT_KEY_0)\n self.assertEqual(l_device.Active, TESTING_LIGHT_ACTIVE_0)\n self.assertEqual(l_device.DeviceFamily, TESTING_DEVICE_FAMILY_INSTEON)\n self.assertEqual(str(l_device.DeviceType), TESTING_LIGHT_DEVICE_TYPE_0)\n self.assertEqual(str(l_device.DeviceSubType), TESTING_LIGHT_DEVICE_SUBTYPE_0)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_0)",
"def test_verify_connection_to_a_device():",
"def test_info(get_touchmat):\n touchmat = get_touchmat\n\n info = touchmat.info()\n check_device_types.check_DeviceInfo(info)\n\n vid_pid = (info['vendor_id'], info['product_id'])\n assert vid_pid in (Devices.touchmat_g1.value,\n Devices.touchmat_g2.value)\n\n serial = info['serial']\n if Devices(vid_pid) == Devices.touchmat_g2:\n assert serial == \"Not Available\"\n else:\n assert len(serial) == 24",
"def test_device_states_device_name_put(self):\n pass",
"def test_init(self):\n self.assertIsNotNone(DatabaseIntermediary(), self.ec.db)",
"def set_all_properties(device: Device, test_cfg: TestCfg):\n cprint(\"\\nSet device owned properties.\", color=\"cyan\", flush=True)\n for key, value in test_cfg.mock_data.items():\n device.send(test_cfg.interface_device_prop, \"/sensor-id/\" + key, value)\n time.sleep(0.005)\n\n cprint(\"\\nSet server owned properties.\", color=\"cyan\", flush=True)\n for key, value in test_cfg.mock_data.items():\n value = prepare_transmit_data(key, value)\n post_server_interface(test_cfg, test_cfg.interface_server_prop, \"/sensor-id/\" + key, value)\n time.sleep(0.005)"
]
| [
"0.7515061",
"0.74581534",
"0.6795936",
"0.67435575",
"0.67435575",
"0.6704687",
"0.6704687",
"0.66615325",
"0.6619589",
"0.65853965",
"0.64755106",
"0.643274",
"0.64094573",
"0.64094573",
"0.6301926",
"0.6301926",
"0.6227684",
"0.62258434",
"0.62068087",
"0.6171025",
"0.6169102",
"0.60298544",
"0.60229367",
"0.60137284",
"0.59909356",
"0.5978351",
"0.59648204",
"0.5954631",
"0.59545755",
"0.5949387"
]
| 0.75091547 | 1 |
Testing whether the device properties in the model are added to the tangoDB | def test_write_device_properties_to_db(self):
initial_count = self._count_device_properties()
tango_sim_generator.write_device_properties_to_db(
self.sim_device.name(), self.expected_model, self.db_instance
)
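# the DB property count should grow by exactly the number of simulated properties in the model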
num_expected_properties = len(self.expected_model.sim_properties.keys())
final_count = self._count_device_properties()
num_added_properties = final_count - initial_count
self.assertEquals(num_expected_properties, num_added_properties) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_initial_device_properties(self):\n expected_count = 1 # model_key property already present in db\n self.assertEquals(expected_count, self._count_device_properties())",
"def test_store_property_after_reconnecting_to_the_device():",
"def check_device_state(self):",
"def test_attr(self):\n self.assertTrue(hasattr(BaseModel, \"__str__\"))\n self.assertTrue(hasattr(BaseModel, \"save\"))\n self.assertTrue(hasattr(BaseModel, \"to_dict\"))\n self.assertTrue(\"updated_at\" in self.my_model1.__dict__)\n self.assertTrue(\"created_at\" in self.my_model1.__dict__)\n self.assertTrue(\"id\" in self.my_model1.__dict__)",
"def HasPerInstancePropertyProviders(self) -> bool:",
"def check_persist_model():\n return env_helper.has_env(_store_environment_variable)",
"def test_add_device(self):\n\n pass",
"def test_back_compat_attributes(self):\n cap = DeviceCapabilities.create(True)\n self.assertTrue(cap.iot_edge)",
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the simdd json file\n device_attributes = set(self.sim_device.get_attribute_list())\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n remaining_device_attrs = device_attributes - default_attributes\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in itervalues(self.sim_file_parser._device_attributes):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False",
"def checkModel(self, model):\n # TODO",
"def test_verify_state_of_a_device():",
"def _count_device_properties(self):\n db_info = self.db_instance.get_info()\n db_info_list = db_info.split(\"\\n\")\n num_properties = 0\n for line in db_info_list:\n if \"Device properties defined\" in line:\n num_properties = line.split(\"=\")[-1]\n return int(num_properties)",
"def is_savable(self, do_raise: bool = True) -> Tuple[bool, List[str]]:\n errors = []\n for k, v in self.properties.items():\n if v is None and self.sample_type.field_type(k).required:\n errors.append(\"FieldValue '{}' is required.\".format(k))\n if do_raise and errors:\n raise AquariumModelError(\n \"Cannot update/save due to the following:\\n\"\n \"Sample: id={} name={} ({})\\n\\t{}\".format(\n self.id, self.name, self.sample_type.name, \"\\n\\t\".join(errors)\n )\n )\n return len(errors) == 0, errors",
"def the_user_should_be_able_to_see_the_changes_in_the_connected_device():\n assert web_app.check_value_after_reconnect()",
"def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()",
"def test_update_device(self):\n pass",
"def test_update_device(self):\n pass",
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the fandango generated file\n device_attributes = set(self.sim_device.get_attribute_list())\n extra_attr_from_device = set([\"NumAttributesNotAdded\", \"AttributesNotAdded\"])\n remaining_device_attrs = device_attributes - extra_attr_from_device\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in list(self.sim_file_parser._device_attributes.values()):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list {} differs from expected list {}!\".format(\n remaining_device_attrs, expected_attributes\n ),\n )",
"def is_good_for_setup(self):\n attributes = [\n \"rotatePivot\",\n \"scalePivot\",\n \"rotatePivotTranslate\",\n \"scalePivotTranslate\",\n ]\n\n for attrStr in attributes:\n connections = self._object.attr(attrStr).connections()\n if len(connections) > 0:\n return False\n\n return True",
"def test_attributes_DBStorage(self):\n self.assertTrue(hasattr(DBStorage, '_DBStorage__engine'))\n self.assertTrue(hasattr(DBStorage, '_DBStorage__session'))\n self.assertTrue(hasattr(DBStorage, 'new'))\n self.assertTrue(hasattr(DBStorage, 'save'))\n self.assertTrue(hasattr(DBStorage, 'all'))\n self.assertTrue(hasattr(DBStorage, 'delete'))\n self.assertTrue(hasattr(DBStorage, 'reload'))",
"def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0",
"def testDatabase(self):\n con = self.getMetadataDatabaseConnection()\n if con:\n return True",
"def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)",
"def test_create(self):\n self.assertTrue(WayPoint.objects.exists())",
"def test_create_device_data(self):\n pass",
"def test_device_failed_properties(self):\n dev = mock.Mock()\n dev.GetId = mock.Mock(return_value=\"id\")\n dev.GetState = mock.Mock(return_value=AudioDeviceState.Active)\n store = mock.Mock()\n store.GetCount = mock.Mock(return_value=1)\n store.GetAt = mock.Mock(return_value=\"pk\")\n store.GetValue = mock.Mock(side_effect=_ctypes.COMError(None, None, None))\n dev.OpenPropertyStore = mock.Mock(return_value=store)\n with warnings.catch_warnings(record=True) as w:\n AudioUtilities.CreateDevice(dev)\n assert len(w) == 1\n assert \"COMError attempting to get property 0 from device\" in str(w[0].message)",
"def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes",
"def test_hasattribute(self):\n b1 = BaseModel()\n self.assertTrue(hasattr(b1, \"__init__\"))\n self.assertTrue(hasattr(b1, \"created_at\"))\n self.assertTrue(hasattr(b1, \"updated_at\"))\n self.assertTrue(hasattr(b1, \"id\"))",
"def test_create_device(self):\n pass"
]
| [
"0.73286724",
"0.62140524",
"0.6161287",
"0.5922223",
"0.58981586",
"0.5896649",
"0.58640945",
"0.5854194",
"0.58400106",
"0.5782783",
"0.5782267",
"0.5770153",
"0.5759309",
"0.5759194",
"0.57582355",
"0.5751846",
"0.5724441",
"0.5724441",
"0.5711718",
"0.5711576",
"0.5688705",
"0.5678365",
"0.5676981",
"0.56694645",
"0.5663565",
"0.56402105",
"0.5636888",
"0.5634245",
"0.55849403",
"0.55593413"
]
| 0.714154 | 1 |
Testing whether the attributes quantities in the model are added to the TANGO sim device controller | def test_sim_control_attribute_list(self):
implemented_attr = helper_module.SIM_CONTROL_ADDITIONAL_IMPLEMENTED_ATTR
control_attributes = test_sim_test_interface.control_attributes(
self.expected_model
)
attributes = set(self.sim_control_device.get_attribute_list())
self.assertEqual(attributes - implemented_attr, set(control_attributes)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the simdd json file\n device_attributes = set(self.sim_device.get_attribute_list())\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n remaining_device_attrs = device_attributes - default_attributes\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in itervalues(self.sim_file_parser._device_attributes):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the fandango generated file\n device_attributes = set(self.sim_device.get_attribute_list())\n extra_attr_from_device = set([\"NumAttributesNotAdded\", \"AttributesNotAdded\"])\n remaining_device_attrs = device_attributes - extra_attr_from_device\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in list(self.sim_file_parser._device_attributes.values()):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list {} differs from expected list {}!\".format(\n remaining_device_attrs, expected_attributes\n ),\n )",
"def test_add_software_system(attributes: dict, model: Model):\n software_system = SoftwareSystem(**attributes)\n model += software_system\n assert software_system.id == \"1\"\n assert len(model.software_systems) == 1\n for attr, expected in attributes.items():\n assert getattr(software_system, attr) == expected",
"def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)",
"def test_device_attribute_list(self):\n # First testing that the attribute with data format \"IMAGE\" is in the device.\n attribute_name = \"image1\"\n device_attributes = set(self.sim_device.get_attribute_list())\n self.assertIn(\n attribute_name,\n device_attributes,\n \"The attribute {} has been added to the device.\".format(attribute_name),\n )\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value if not_added_attr.value else []\n self.assertNotIn(\n attribute_name,\n not_added_attr_names,\n \"The attribute {} was not added to the list of attributes that\"\n \" could not be added to the device.\".format(attribute_name),\n )\n\n expected_attributes = []\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n\n for attribute_data in self.sim_file_parser._device_attributes:\n expected_attributes.append(attribute_data[\"dynamicAttributes\"][\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n expected_attributes,\n device_attributes - default_attributes,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def test_model_id_attr(self):\n for cube in [self.temperature, self.relative_humidity, self.pressure]:\n cube.attributes[\"mosg__model_configuration\"] = \"uk_ens\"\n\n result = WetBulbTemperature(model_id_attr=\"mosg__model_configuration\").process(\n CubeList([self.temperature, self.relative_humidity, self.pressure])\n )\n self.assertArrayAlmostEqual(result.data, self.expected_wbt_data, decimal=3)\n self.assertEqual(result.units, Unit(\"K\"))\n self.assertEqual(result.attributes[\"mosg__model_configuration\"], \"uk_ens\")",
"def test_trainable_property(self):\n scalar_weighted_addition_model = ScalarWeightedAddition(10)\n np.testing.assert_equal(scalar_weighted_addition_model.is_trainable, True)",
"def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes",
"def hasRequiredAttributes(self):\n return _libsbml.Unit_hasRequiredAttributes(self)",
"def test_initial_attribute_values(self, create_controller: Controller) -> None:\n assert create_controller.coffee_machine.is_on\n assert create_controller.coffee_machine.water_level > 0\n assert create_controller.coffee_machine.milk_level > 0\n assert create_controller.coffee_machine.coffee_beans_level > 0\n assert create_controller.view\n assert not create_controller.play",
"def check_if_quantized(model: Any) -> bool:\n nodes = model.get_ops()\n for op in nodes:\n if \"FakeQuantize\" == op.get_type_name():\n return True\n return False",
"def hasRequiredAttributes(self):\n return _libsbml.UnitDefinition_hasRequiredAttributes(self)",
"def testMA(self):\n for size in range(5):\n a = AttributeAbility(['MA',], size + 1)\n self.assert_(str((size + 1) * 2) in str(a))\n self.assert_('MA' in str(a))",
"def test_initial_device_properties(self):\n expected_count = 1 # model_key property already present in db\n self.assertEquals(expected_count, self._count_device_properties())",
"def test_write_device_properties_to_db(self):\n initial_count = self._count_device_properties()\n tango_sim_generator.write_device_properties_to_db(\n self.sim_device.name(), self.expected_model, self.db_instance\n )\n num_expected_properties = len(self.expected_model.sim_properties.keys())\n final_count = self._count_device_properties()\n num_added_properties = final_count - initial_count\n self.assertEquals(num_expected_properties, num_added_properties)",
"def test_back_compat_attributes(self):\n cap = DeviceCapabilities.create(True)\n self.assertTrue(cap.iot_edge)",
"def test_attractor_list(self):\n assert len(get_attractor_list()) > 130",
"def has_details(self):\n\n return len(self.attributes) > 1",
"def has_attributes(self):\n\n pass",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesTypeComponentMapInProduct_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesTypeComponentIndex_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.Species_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesType_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesTypeInstance_hasRequiredAttributes(self)",
"def test_train_model_attributes_good():\n\tdf = pd.read_csv(\"test/sample_features.csv\")\n\n\ty_train = df['price']\n\tX_train = df.loc[:, df.columns != 'price']\n\n\tparams = {'n_estimators': 5, 'random_state': 2}\n\trf_test = train(X_train, y_train, params)\n\n\t# test model attributes\n\tassert rf_test.get_params()['n_estimators'] == 5\n\tassert rf_test.get_params()['random_state'] == 2",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def hasRequiredAttributes(self):\n return _libsbml.ModelCreator_hasRequiredAttributes(self)",
"def hasRequiredAttributes(self):\n return _libsbml.FluxObjective_hasRequiredAttributes(self)",
"def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def test_sim_control_device_attribute_change(self):\n desired_attribute_name = \"temperature\"\n input_value = 100.0\n self.sim_control_device.attribute_name = self.attr_name_enum_labels.index(\n desired_attribute_name\n )\n self.sim_control_device.pause_active = True\n setattr(self.sim_control_device, \"last_val\", input_value)\n self.assertEqual(self.sim_device.temperature, input_value)"
]
| [
"0.6414206",
"0.6254846",
"0.62536204",
"0.62461764",
"0.623432",
"0.6112338",
"0.6103137",
"0.60764176",
"0.5972721",
"0.5910903",
"0.58160746",
"0.5808809",
"0.58",
"0.5781312",
"0.57746536",
"0.57669103",
"0.57643425",
"0.5759311",
"0.57515216",
"0.5723734",
"0.5718741",
"0.5679258",
"0.56550705",
"0.5646503",
"0.56435376",
"0.56284225",
"0.5628223",
"0.56242067",
"0.5620638",
"0.56173795"
]
| 0.6465823 | 0 |
Setting the desired attribute value for the device's attribute from the simulator controller device | def test_sim_control_device_attribute_change(self):
desired_attribute_name = "temperature"
input_value = 100.0
self.sim_control_device.attribute_name = self.attr_name_enum_labels.index(
desired_attribute_name
)
self.sim_control_device.pause_active = True
setattr(self.sim_control_device, "last_val", input_value)
self.assertEqual(self.sim_device.temperature, input_value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def script_set_device(self,udid=None):\n self.desired_caps['udid'] = udid;",
"def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))",
"def set_attribute(self, name, value):\n\n pass",
"def _update_device_attr(\n self, device_id: str, attr_name: str, value: Union[int, str], value_unit: str\n ) -> None:\n _LOGGER.debug(\n \"Updating %s of %s to %s (%s)\", attr_name, device_id, value, value_unit\n )\n try:\n dev = self._devices[device_id]\n except KeyError:\n _LOGGER.warning(\"Tried to update unknown device %s\", device_id)\n return\n\n try:\n dev.update_attr(attr_name, value, value_unit)\n except KeyError:\n _LOGGER.warning(\"Tried to update unknown attribute %s\", attr_name)",
"def __setattr__(self, key, val):\n if key == \"dev\":\n self.__dict__[key] = val\n else:\n self.__dict__[key] = int(val)\n return val",
"def set_device(self, device):\n self.device = device",
"def _handle_attr(self, attr, dev):\n attr_val = None\n list_flag = False\n\n if attr.name == \"os\":\n attr_val = self.OS_MAPPER[attr.val]\n elif attr.name == \"network\":\n attr_val = self._create_network(attr.val)\n list_flag = True\n elif attr.name == \"bluetooth\":\n attr_val = Bluetooth(version=attr.val.version)\n elif attr.name == \"cpu\":\n attr_val = CPU(cpu_family=attr.val.cpu_family,\n max_freq=float(attr.val.max_freq\n * self.FREQ_MULT[attr.val.unit]),\n fpu=attr.val.fpu)\n elif attr.name == \"memory\":\n attr_val = self._create_memory(attr.val)\n elif attr.name == \"type\":\n self._per_type = self.PER_MAPPER[attr.val]\n elif attr.name == \"pins\":\n list_flag = True\n attr_val = self._create_pins(attr.val)\n else:\n attr_val = attr.val\n\n # Set attribute\n if list_flag:\n getattr(dev, attr.name).extend(attr_val)\n elif attr_val:\n setattr(dev, attr.name, attr_val)",
"def test_change_name_of_the_devicetrue():",
"def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)",
"def test_change_brightness_of_the_devicetrue():",
"def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes",
"def _update_device_attributes_on_backend(self):\n if self.is_paired:\n LOG.info('Sending updated device attributes to the backend...')\n try:\n api = DeviceApi()\n api.update_version()\n except Exception:\n self._notify_backend_down()",
"def __setattr__ (self, attr, value):\n self.set_value (attr, value)",
"def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)",
"def set_attr(self, aid, value, custom=False):\n if aid not in self.attributes and not custom:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, value)\n self.remember_custom_attribute(self.name, aid, value)\n self.attributes[aid] = {}\n else:\n # TODO: validate data_type\n pass\n self.attributes[aid]['nv'] = value\n # self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)",
"def test_device_init_command(self):\n default_val = 0\n self.assertEqual(self.sim_device.integer1, default_val)\n # Write to the attribute integer1\n self.sim_device.integer1 = 45\n self.assertEqual(self.sim_device.integer1, 45)\n # Reset the values of the device attributes to default.\n self.sim_device.Init()\n # Check that the desiredPointing attribute is reset.\n self.assertEqual(self.sim_device.integer1, default_val)",
"def __setattr__(self, attr, value):\n super().__setattr__(attr, value)",
"def __setattr__(self, attr, value):\n self[attr] = value",
"def _async_update_attrs(self) -> None:\n self._attr_is_on = self._device.light_on\n if self._device.light_brightness is not None:\n self._attr_brightness = int(min(255, self._device.light_brightness * 16))",
"def _platformix_set(self, context, fake_reply, prop, value):\r\n if hasattr(self.host, prop):\r\n if not callable(getattr(self.host, prop)):\r\n try:\r\n setattr(self.host, prop, value)\r\n except Exception as e:\r\n eprint(\"Platformix protocol: failed to set attribute {} of {} to value {} \"\r\n \"due to exception {}\".format(prop, self.host.name, value, e))\r\n exprint()\r\n self._reply(context, proto_failure(\r\n \"Failed to set attribute {} of {} to value {} \"\r\n \"due to exception {}\".format(prop, self.host.name, value, e)), fake_reply)\r\n return\r\n self._reply(context, proto_success(getattr(self.host, prop), prop), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Attribute {} of {} is a method\".format(\r\n prop, self.host.name)), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Property {} not found on {}\".format(prop, self.host.name)), fake_reply)",
"def setAttributeValue(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def set_value(self, device_name, val):\n epics.caput(device_name, val)\n\n\t\t#mu = mu\n\t\t#sig = math.sqrt(abs(mu))\n\t\t#y = (float(x)-mu)/(sig)",
"def set_attribute(self, attribute, value) -> None:\n logging.info(f\"setting element attribute. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.setAttribute(\"{attribute}\", \"{value}\");\n \"\"\"\n self._execute_javascript(js)",
"def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})",
"def test_change_name_of_the_devicefalse():",
"def setatt(self, value):\n if (value*4) % 1 :\n print ('RDCAT : WARNING {0} is not a multiple of 0.25 dB'.format(value))\n r=requests.get(self.url+'SETATT={0}\\n'.format(value))\n if r.text!='1':\n raise Exception('RDCAT : Error while setting attenuation.')",
"def set_value(attr_name, value, gpu_id):\n place = fluid.CPUPlace() if gpu_id < 0 \\\n else fluid.CUDAPlace(gpu_id)\n var = _fetch_var(attr_name, return_numpy=False)\n var.set(value, place)",
"def setItunesAttribute(self,key,value):\n self.itunesAttributes[key] = value",
"def set_attr(self, name, value):\n setattr(self, name, value)",
"def __setattr__ (self, name, value):\n\t\ttry:\n\t\t\tself.__dict__[name] # Do not delete this line (it verifies the existence of an attribute)\n\t\t\t# Positioning of the existing attribute\n\t\t\tself.__dict__[name] = value\n\t\texcept KeyError:\n\t\t\t# The attribute does not exist is probably value of the structure\n\t\t\tself.__dict__[\"value\"][name] = value"
]
| [
"0.65906084",
"0.6350218",
"0.61830956",
"0.61666596",
"0.60464954",
"0.60099",
"0.5992313",
"0.59657156",
"0.5952257",
"0.59496844",
"0.5879743",
"0.5847188",
"0.5838993",
"0.583859",
"0.58225757",
"0.5808653",
"0.57960904",
"0.57923305",
"0.5787347",
"0.5755368",
"0.5724444",
"0.5719437",
"0.5719418",
"0.57155013",
"0.56898266",
"0.5689427",
"0.5688607",
"0.5680582",
"0.5675017",
"0.56700045"
]
| 0.6926932 | 0 |
Testing whether the attributes specified in the POGO generated XMI file are added to the TANGO device. | def test_device_attribute_list(self):
# First testing that the attribute with data format "IMAGE" is in the device.
attribute_name = "image1"
device_attributes = set(self.sim_device.get_attribute_list())
self.assertIn(
attribute_name,
device_attributes,
"The attribute {} has been added to the device.".format(attribute_name),
)
not_added_attr = self.sim_device.read_attribute("AttributesNotAdded")
not_added_attr_names = not_added_attr.value if not_added_attr.value else []
self.assertNotIn(
attribute_name,
not_added_attr_names,
"The attribute {} was not added to the list of attributes that"
" could not be added to the device.".format(attribute_name),
)
expected_attributes = []
default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES
for attribute_data in self.sim_file_parser._device_attributes:
expected_attributes.append(attribute_data["dynamicAttributes"]["name"])
expected_attributes = set(expected_attributes)
# checking to see if there were any attributes not added
if not_added_attr_names is not None:
expected_attributes = expected_attributes - set(not_added_attr_names)
self.assertEqual(
expected_attributes,
device_attributes - default_attributes,
"Actual tango device attribute list differs from expected " "list!",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the simdd json file\n device_attributes = set(self.sim_device.get_attribute_list())\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n remaining_device_attrs = device_attributes - default_attributes\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in itervalues(self.sim_file_parser._device_attributes):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the fandango generated file\n device_attributes = set(self.sim_device.get_attribute_list())\n extra_attr_from_device = set([\"NumAttributesNotAdded\", \"AttributesNotAdded\"])\n remaining_device_attrs = device_attributes - extra_attr_from_device\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in list(self.sim_file_parser._device_attributes.values()):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list {} differs from expected list {}!\".format(\n remaining_device_attrs, expected_attributes\n ),\n )",
"def test_IMAT_pass(self):\n for O in self.mod.objts.itervalues():\n self.assertTrue(O.imat.isset)",
"def exists_attrs(proj):\n if not os.path.exists(proj.ballot_attributesfile):\n return False\n ballot_attributesfile = pickle.load(open(proj.ballot_attributesfile, 'rb'))\n if not ballot_attributesfile:\n return False\n else:\n return True",
"def test_sim_control_attribute_list(self):\n implemented_attr = helper_module.SIM_CONTROL_ADDITIONAL_IMPLEMENTED_ATTR\n control_attributes = test_sim_test_interface.control_attributes(\n self.expected_model\n )\n attributes = set(self.sim_control_device.get_attribute_list())\n self.assertEqual(attributes - implemented_attr, set(control_attributes))",
"def check_attributes(self, attributes):\n self.log('StorageConfiguration.check_attributes started')\n attributes_ok = True\n\n if not self.enabled:\n self.log('Not enabled, returning True')\n self.log('StorageConfiguration.check_attributes completed')\n return attributes_ok\n\n # warn if locations don't exist\n app_dir = self.options['app_dir'].value\n if not self._check_app_dir(app_dir):\n self.log(\"app_dir is used for $OSG_APP and $OSG_APP/etc on worker nodes, where they should exist and\"\n \" have permissions of 1777 or 777.\",\n level=logging.WARNING)\n\n # WN_TMP may be blank if the job manager dynamically generates it but\n # warni just in case\n if utilities.blank(self.options['worker_node_temp'].value):\n self.log(\"worker_node_temp is blank, this is okay if you've set your \" +\n \"job manager to set this dynamically, otherwise jobs may \" +\n \"fail to run\",\n section=self.config_section,\n option='worker_node_temp',\n level=logging.WARNING)\n self.log('StorageConfiguration.check_attributes completed')\n return attributes_ok",
"def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)",
"def is_attribute(tag, kmip_version=None):\n kmip_1_0_attribute_tags = [\n Tags.UNIQUE_IDENTIFIER,\n Tags.NAME,\n Tags.OBJECT_TYPE,\n Tags.CRYPTOGRAPHIC_ALGORITHM,\n Tags.CRYPTOGRAPHIC_LENGTH,\n Tags.CRYPTOGRAPHIC_PARAMETERS,\n Tags.CRYPTOGRAPHIC_DOMAIN_PARAMETERS,\n Tags.CERTIFICATE_TYPE,\n Tags.CERTIFICATE_IDENTIFIER,\n Tags.CERTIFICATE_SUBJECT,\n Tags.CERTIFICATE_ISSUER,\n Tags.DIGEST,\n Tags.OPERATION_POLICY_NAME,\n Tags.CRYPTOGRAPHIC_USAGE_MASK,\n Tags.LEASE_TIME,\n Tags.USAGE_LIMITS,\n Tags.STATE,\n Tags.INITIAL_DATE,\n Tags.ACTIVATION_DATE,\n Tags.PROCESS_START_DATE,\n Tags.PROTECT_STOP_DATE,\n Tags.DEACTIVATION_DATE,\n Tags.DESTROY_DATE,\n Tags.COMPROMISE_OCCURRENCE_DATE,\n Tags.COMPROMISE_DATE,\n Tags.REVOCATION_REASON,\n Tags.ARCHIVE_DATE,\n Tags.OBJECT_GROUP,\n Tags.LINK,\n Tags.APPLICATION_SPECIFIC_INFORMATION,\n Tags.CONTACT_INFORMATION,\n Tags.LAST_CHANGE_DATE,\n Tags.CUSTOM_ATTRIBUTE\n ]\n kmip_1_1_attribute_tags = copy.deepcopy(kmip_1_0_attribute_tags) + [\n Tags.CERTIFICATE_LENGTH,\n Tags.X_509_CERTIFICATE_IDENTIFIER,\n Tags.X_509_CERTIFICATE_SUBJECT,\n Tags.X_509_CERTIFICATE_ISSUER,\n Tags.DIGITAL_SIGNATURE_ALGORITHM,\n Tags.FRESH\n ]\n kmip_1_2_attribute_tags = copy.deepcopy(kmip_1_1_attribute_tags) + [\n Tags.ALTERNATIVE_NAME,\n Tags.KEY_VALUE_PRESENT,\n Tags.KEY_VALUE_LOCATION,\n Tags.ORIGINAL_CREATION_DATE\n ]\n kmip_1_3_attribute_tags = copy.deepcopy(kmip_1_2_attribute_tags) + [\n Tags.RANDOM_NUMBER_GENERATOR\n ]\n kmip_1_4_attribute_tags = copy.deepcopy(kmip_1_3_attribute_tags) + [\n Tags.PKCS12_FRIENDLY_NAME,\n Tags.DESCRIPTION,\n Tags.COMMENT,\n Tags.SENSITIVE,\n Tags.ALWAYS_SENSITIVE,\n Tags.EXTRACTABLE,\n Tags.NEVER_EXTRACTABLE\n ]\n kmip_2_0_attribute_tags = copy.deepcopy(kmip_1_4_attribute_tags) + [\n Tags.CERTIFICATE_SUBJECT_CN,\n Tags.CERTIFICATE_SUBJECT_O,\n Tags.CERTIFICATE_SUBJECT_OU,\n Tags.CERTIFICATE_SUBJECT_EMAIL,\n Tags.CERTIFICATE_SUBJECT_C,\n Tags.CERTIFICATE_SUBJECT_ST,\n Tags.CERTIFICATE_SUBJECT_L,\n Tags.CERTIFICATE_SUBJECT_UID,\n Tags.CERTIFICATE_SUBJECT_SERIAL_NUMBER,\n Tags.CERTIFICATE_SUBJECT_TITLE,\n Tags.CERTIFICATE_SUBJECT_DC,\n Tags.CERTIFICATE_SUBJECT_DN_QUALIFIER,\n Tags.CERTIFICATE_ISSUER_CN,\n Tags.CERTIFICATE_ISSUER_O,\n Tags.CERTIFICATE_ISSUER_OU,\n Tags.CERTIFICATE_ISSUER_EMAIL,\n Tags.CERTIFICATE_ISSUER_C,\n Tags.CERTIFICATE_ISSUER_ST,\n Tags.CERTIFICATE_ISSUER_L,\n Tags.CERTIFICATE_ISSUER_UID,\n Tags.CERTIFICATE_ISSUER_SERIAL_NUMBER,\n Tags.CERTIFICATE_ISSUER_TITLE,\n Tags.CERTIFICATE_ISSUER_DC,\n Tags.CERTIFICATE_ISSUER_DN_QUALIFIER,\n Tags.KEY_FORMAT_TYPE,\n Tags.NIST_KEY_TYPE,\n Tags.OPAQUE_DATA_TYPE,\n Tags.PROTECTION_LEVEL,\n Tags.PROTECTION_PERIOD,\n Tags.PROTECTION_STORAGE_MASK,\n Tags.QUANTUM_SAFE,\n Tags.SHORT_UNIQUE_IDENTIFIER,\n Tags.ATTRIBUTE\n ]\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_IDENTIFIER)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_SUBJECT)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_ISSUER)\n kmip_2_0_attribute_tags.remove(Tags.OPERATION_POLICY_NAME)\n kmip_2_0_attribute_tags.remove(Tags.CUSTOM_ATTRIBUTE)\n\n if kmip_version == KMIPVersion.KMIP_1_0:\n return tag in kmip_1_0_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_1:\n return tag in kmip_1_1_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_2:\n return tag in kmip_1_2_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_3:\n return tag in kmip_1_3_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_4:\n return tag in kmip_1_4_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_2_0:\n return tag in 
kmip_2_0_attribute_tags\n else:\n all_attribute_tags = set(\n kmip_1_0_attribute_tags +\n kmip_1_1_attribute_tags +\n kmip_1_2_attribute_tags +\n kmip_1_3_attribute_tags +\n kmip_1_4_attribute_tags +\n kmip_2_0_attribute_tags\n )\n return tag in all_attribute_tags",
"def test_required_attributes(self):\n\n required_attributes = ('ID', )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Jakob2019))",
"def assert_hasattributes(obj, attributes):\n \n for attr in attributes:\n # print(attr)\n assert(hasattr(obj, attr))",
"def test_meta_uid_good(self):\n handler_data = []\n\n def handle(event):\n handler_data.append(event)\n return 0x0000, event.attribute_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(BasicGrayscalePrintManagementMeta)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_CREATE, handle)]\n )\n\n ae.add_requested_context(BasicGrayscalePrintManagementMeta)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_create(\n ds,\n ModalityPerformedProcedureStep,\n \"1.2.840.10008.5.1.1.40.1\",\n meta_uid=BasicGrayscalePrintManagementMeta,\n )\n assert status.Status == 0x0000\n assert ds.PatientName == \"Test^test\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()\n\n req = handler_data[0].request\n cx = handler_data[0].context\n\n assert req.AffectedSOPClassUID == ModalityPerformedProcedureStep\n assert cx.abstract_syntax == BasicGrayscalePrintManagementMeta",
"def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesTypeComponentMapInProduct_hasRequiredAttributes(self)",
"def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)",
"def is_good_for_setup(self):\n attributes = [\n \"rotatePivot\",\n \"scalePivot\",\n \"rotatePivotTranslate\",\n \"scalePivotTranslate\",\n ]\n\n for attrStr in attributes:\n connections = self._object.attr(attrStr).connections()\n if len(connections) > 0:\n return False\n\n return True",
"def has_attributes(self):\n\n pass",
"def hasRequiredAttributes(self):\n return _libsbml.Port_hasRequiredAttributes(self)",
"def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def test_lsusb_test_attributes_generic(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_test_attributes, quiet=True), self.generic_lsusb_test_attributes_json)",
"def test_info(get_touchmat):\n touchmat = get_touchmat\n\n info = touchmat.info()\n check_device_types.check_DeviceInfo(info)\n\n vid_pid = (info['vendor_id'], info['product_id'])\n assert vid_pid in (Devices.touchmat_g1.value,\n Devices.touchmat_g2.value)\n\n serial = info['serial']\n if Devices(vid_pid) == Devices.touchmat_g2:\n assert serial == \"Not Available\"\n else:\n assert len(serial) == 24",
"def hasRequiredAttributes(self):\n return _libsbml.Unit_hasRequiredAttributes(self)",
"def test_back_compat_attributes(self):\n cap = DeviceCapabilities.create(True)\n self.assertTrue(cap.iot_edge)",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def test_meta_uid_good(self):\n handler_data = []\n\n def handle(event):\n handler_data.append(event)\n return 0x0000, event.modification_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(BasicGrayscalePrintManagementMeta)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(BasicGrayscalePrintManagementMeta)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n ds,\n ModalityPerformedProcedureStep,\n \"1.2.840.10008.5.1.1.40.1\",\n meta_uid=BasicGrayscalePrintManagementMeta,\n )\n\n assert status.Status == 0x0000\n assert ds is not None\n assert isinstance(ds, Dataset)\n assert status.Status == 0x0000\n assert ds.PatientName == \"Test^test\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()\n\n req = handler_data[0].request\n cx = handler_data[0].context\n\n assert req.RequestedSOPClassUID == ModalityPerformedProcedureStep\n assert cx.abstract_syntax == BasicGrayscalePrintManagementMeta",
"def base_data_check_shot(self):\n\n #alembic_dir\n alembic_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_dir')\n \n #is False\n if not (alembic_dir):\n #log\n self.logger.debug('Parameter alembic dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_dir)):\n #log\n self.logger.debug('Alembic dir {0} does not exist.'.format(alembic_dir))\n return False\n\n\n #alembic_path_list\n alembic_path_list = [os.path.join(alembic_dir, file).replace('\\\\', '/') for \n file in \n os.listdir(alembic_dir) if \n (os.path.isfile(os.path.join(alembic_dir, file)) and file.split('.')[-1] == 'abc')]\n #alembic_path_list empty\n if not (alembic_path_list):\n #log\n self.logger.debug('alembic_path_list empty. Alembic dir {0} does not seem to contain alembic files.'.format(alembic_dir))\n return False\n\n\n #checked_alembic_path_list\n checked_alembic_path_list = []\n\n #iterate\n for alembic_path in alembic_path_list:\n\n #object_path_list\n object_path_list = self.alembic_functionality.get_alembic_object_path_list(alembic_path)\n #object_path_list empty\n if not (object_path_list):\n #log\n self.logger.debug('Object path list for alembic {0} empty. Continuing'.format(alembic_path))\n continue\n\n #iterate, check and create\n for object_path in object_path_list:\n\n #helga_locator_attr_exists\n helga_locator_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_locator')\n\n #helga_highpoly_rendergeo_attr_exists\n helga_highpoly_rendergeo_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_highpoly_rendergeo')\n\n #if attr exists append and break\n if (helga_locator_attr_exists and helga_highpoly_rendergeo_attr_exists):\n\n #append\n checked_alembic_path_list.append(alembic_path)\n break\n\n #checked_alembic_path_list empty\n if not (checked_alembic_path_list):\n #log\n self.logger.debug('checked_alembic_path_list empty. Alembic dir {0} does not seem to contain alembic files with helga_highpoly_rendergeo attribute.'.format(alembic_dir))\n return False\n\n\n #alembic_highpoly_rendergeo_dir\n alembic_highpoly_rendergeo_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_highpoly_rendergeo_dir')\n \n #is False\n if not (alembic_highpoly_rendergeo_dir):\n #log\n self.logger.debug('Parameter alembic highpoly rendergeo dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_highpoly_rendergeo_dir)):\n #log\n self.logger.debug('Alembic highpoly rendergeo dir {0} does not exist.'.format(alembic_highpoly_rendergeo_dir))\n return False\n\n\n #return\n return [checked_alembic_path_list, alembic_highpoly_rendergeo_dir]",
"def test_hasattrs(self):\n self.assertTrue(hasattr(self.obj, \"id\"), \"created obj doesn't \" +\n \"have the attribute id.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__width\"), \"created \" +\n \"obj doesn't have the attribute width.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__height\"), \"created \" +\n \"obj have the attribute height.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__x\"), \"created obj \" +\n \"doesn't have the attribute x.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__y\"), \"created \" +\n \"obj doesn't have the attribute y.\")",
"def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()",
"def hasRequiredAttributes(self):\n return _libsbml.Output_hasRequiredAttributes(self)",
"def test_attributes(self):\n composer = self.store.findUnique(Composer)\n self.assertTrue(isinstance(composer.privateApplication, PrivateApplication))\n self.assertTrue(isinstance(composer.mda, MailDeliveryAgent))\n self.assertTrue(isinstance(composer.deliveryAgent, DeliveryAgent))\n self.assertTrue(isinstance(composer.prefs, ComposePreferenceCollection))",
"def test_meta_uid(self):\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(BasicGrayscalePrintManagementMeta)\n ae.add_supported_context(Printer)\n scp = ae.start_server((\"localhost\", 11112), block=False)\n\n ae.add_requested_context(BasicGrayscalePrintManagementMeta)\n ae.add_requested_context(Printer)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n assoc.dimse = DummyDIMSE()\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n # Receives None, None from DummyDIMSE, aborts\n status, ds = assoc.send_n_create(\n ds,\n Printer,\n \"1.2.840.10008.5.1.1.40.1\",\n meta_uid=BasicGrayscalePrintManagementMeta,\n )\n assert assoc.is_aborted\n\n scp.shutdown()\n\n assert assoc.dimse.req.AffectedSOPClassUID == Printer\n assert assoc.dimse.context_id == 1\n assert (\n assoc._accepted_cx[1].abstract_syntax == BasicGrayscalePrintManagementMeta\n )"
]
| [
"0.63320553",
"0.63164514",
"0.61544424",
"0.60058045",
"0.59952205",
"0.59781814",
"0.5861376",
"0.57632935",
"0.5697044",
"0.5669564",
"0.5661544",
"0.5622584",
"0.55868214",
"0.5585633",
"0.5573848",
"0.55706286",
"0.5506343",
"0.54805017",
"0.545378",
"0.5436845",
"0.5430913",
"0.54117304",
"0.5408578",
"0.53976953",
"0.5395103",
"0.53949195",
"0.53883886",
"0.53528446",
"0.5340012",
"0.53298444"
]
| 0.6337125 | 0 |
Testing whether the attributes specified in the fandango generated fgo file are added to the TANGO device. | def test_device_attribute_list(self):
# test that the attributes from the running simulated device match the attributes
# from in the fandango generated file
device_attributes = set(self.sim_device.get_attribute_list())
extra_attr_from_device = set(["NumAttributesNotAdded", "AttributesNotAdded"])
remaining_device_attrs = device_attributes - extra_attr_from_device
not_added_attr = self.sim_device.read_attribute("AttributesNotAdded")
not_added_attr_names = not_added_attr.value
expected_attributes = []
for attr_prop in list(self.sim_file_parser._device_attributes.values()):
expected_attributes.append(attr_prop["name"])
expected_attributes = set(expected_attributes)
# checking to see if there were any attributes not added
if not_added_attr_names is not None:
expected_attributes = expected_attributes - set(not_added_attr_names)
self.assertEqual(
set(expected_attributes),
remaining_device_attrs,
"Actual tango device attribute list {} differs from expected list {}!".format(
remaining_device_attrs, expected_attributes
),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the simdd json file\n device_attributes = set(self.sim_device.get_attribute_list())\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n remaining_device_attrs = device_attributes - default_attributes\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in itervalues(self.sim_file_parser._device_attributes):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def test_device_add_from_file(self, gateway_with_devs):\n assert 'daq' in gateway_with_devs._devs\n assert 'pel' in gateway_with_devs._devs\n assert 'sg' in gateway_with_devs._devs\n assert 'not_a_driver' not in gateway_with_devs._devs",
"def test_file_builders_fandango():\n\n fgo_args = Namespace(\n fandango_file=Namespace(name=str(Path.joinpath(CONF_FILE_PATH, \"database2.fgo\")))\n )\n fgo_yaml = _build_yaml(fgo_args)\n parsed_yaml = yaml.load(fgo_yaml, Loader=yaml.FullLoader)\n validate_basic_structure(parsed_yaml)\n\n assert parsed_yaml[0][\"class\"] == \"DataBase\"\n\n attrs = [i[\"name\"] for i in parsed_yaml[0][\"meta\"][\"attributes\"]]\n assert len(attrs) == 9\n assert \"Timing_info\" in attrs\n assert \"Timing_minimum\" in attrs\n for attr in parsed_yaml[0][\"meta\"][\"attributes\"]:\n\n if attr[\"name\"] == \"Timing_info\":\n assert attr == {\n \"min_alarm\": \"Not specified\",\n \"name\": \"Timing_info\",\n \"data_type\": \"DevString\",\n \"max_alarm\": \"Not specified\",\n \"min_value\": \"Not specified\",\n \"data_format\": \"SPECTRUM\",\n \"display_unit\": \"No display unit\",\n \"writable\": \"READ\",\n \"max_dim_x\": 64,\n \"standard_unit\": \"No standard unit\",\n \"max_value\": \"Not specified\",\n \"period\": \"0\",\n \"label\": \"Timing_info\",\n \"delta_t\": \"Not specified\",\n \"delta_val\": \"Not specified\",\n \"min_warning\": \"Not specified\",\n \"max_warning\": \"Not specified\",\n }, \"Attribute config mismatch. attr: {}\".format(attr)\n if attr[\"name\"] == \"Timing_minimum\":\n assert attr == {\n \"min_alarm\": \"Not specified\",\n \"name\": \"Timing_minimum\",\n \"data_type\": \"DevDouble\",\n \"max_alarm\": \"Not specified\",\n \"min_value\": \"Not specified\",\n \"data_format\": \"SPECTRUM\",\n \"display_unit\": \"No display unit\",\n \"writable\": \"READ\",\n \"max_dim_x\": 64,\n \"standard_unit\": \"No standard unit\",\n \"period\": \"0\",\n \"max_value\": \"Not specified\",\n \"label\": \"Timing_minimum\",\n \"delta_t\": \"Not specified\",\n \"delta_val\": \"Not specified\",\n \"min_warning\": \"Not specified\",\n \"max_warning\": \"Not specified\",\n }, \"Attribute config mismatch. attr: {}\".format(attr)\n\n comms = [i[\"name\"] for i in parsed_yaml[0][\"meta\"][\"commands\"]]\n assert len(comms) == 100\n assert \"DbGetDataForServerCache\" in comms\n assert \"DbGetDeviceAttributeList\" in comms\n for comm in parsed_yaml[0][\"meta\"][\"commands\"]:\n if comm[\"name\"] == \"DbGetDataForServerCache\":\n assert comm == {\n \"doc_in\": \"Elt[0] = DS name (exec_name/inst_name), Elt[1] = Host name\",\n \"doc_out\": (\n \"All the data needed by the device server during its startup\"\n \" sequence. Precise list depend on the device server\"\n ),\n \"dtype_in\": \"DevVarStringArray\",\n \"dtype_out\": \"DevVarStringArray\",\n \"name\": \"DbGetDataForServerCache\",\n }, \"Command config mismatch. comm: {}\".format(comm)\n if comm[\"name\"] == \"DbGetDeviceAttributeList\":\n assert comm == {\n \"doc_in\": \"Str[0] = Device name\\nStr[1] = Wildcard\",\n \"doc_out\": \"attribute name list\",\n \"dtype_in\": \"DevVarStringArray\",\n \"dtype_out\": \"DevVarStringArray\",\n \"name\": \"DbGetDeviceAttributeList\",\n }, \"Command config mismatch. comm: {}\".format(comm)",
"def test_device_attribute_list(self):\n # First testing that the attribute with data format \"IMAGE\" is in the device.\n attribute_name = \"image1\"\n device_attributes = set(self.sim_device.get_attribute_list())\n self.assertIn(\n attribute_name,\n device_attributes,\n \"The attribute {} has been added to the device.\".format(attribute_name),\n )\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value if not_added_attr.value else []\n self.assertNotIn(\n attribute_name,\n not_added_attr_names,\n \"The attribute {} was not added to the list of attributes that\"\n \" could not be added to the device.\".format(attribute_name),\n )\n\n expected_attributes = []\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n\n for attribute_data in self.sim_file_parser._device_attributes:\n expected_attributes.append(attribute_data[\"dynamicAttributes\"][\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n expected_attributes,\n device_attributes - default_attributes,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def exists_attrs(proj):\n if not os.path.exists(proj.ballot_attributesfile):\n return False\n ballot_attributesfile = pickle.load(open(proj.ballot_attributesfile, 'rb'))\n if not ballot_attributesfile:\n return False\n else:\n return True",
"def has_attributes(self):\n\n pass",
"def test_fc(self):\n # These entries exist for both Nodal and VARIANT, but have different values\n # for the same model\n print(self.nhf.metadata.items())\n self.assertEqual(self.nhf.metadata[\"nMom\"], 35)\n self.assertEqual(self.nhf.metadata[\"nscoef\"], 3)\n\n # These entries are only for VARIANT\n self.assertEqual(self.nhf.metadata[\"npcbdy\"], 30)\n self.assertEqual(self.nhf.metadata[\"npcsym\"], 0)\n self.assertEqual(self.nhf.metadata[\"npcsec\"], 0)\n self.assertEqual(self.nhf.metadata[\"iwnhfl\"], 0)\n self.assertEqual(self.nhf.metadata[\"nMoms\"], 0)",
"def _IsDevice(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(file_attribute_flags & pyfsntfs.file_attribute_flags.DEVICE)",
"def testFTISetup(self):\n self.failUnless(self.portal.portal_types['FSDPerson'].Title() != \"AT Content Type\")",
"def test_fc(self):\n self.assertEqual(self.nhf.metadata[\"ndim\"], 3)\n self.assertEqual(self.nhf.metadata[\"ngroup\"], 4)\n self.assertEqual(self.nhf.metadata[\"ninti\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintj\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintk\"], 6)\n self.assertEqual(self.nhf.metadata[\"nSurf\"], 6)\n self.assertEqual(self.nhf.metadata[\"nMom\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintxy\"], 19)\n self.assertEqual(self.nhf.metadata[\"npcxy\"], 144)\n self.assertEqual(self.nhf.metadata[\"iaprx\"], 4)\n self.assertEqual(self.nhf.metadata[\"iaprxz\"], 3)\n\n variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11\n for info in variantControlInfo:\n self.assertTrue(info not in self.nhf.metadata)",
"def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def test_there_are_fields(self):\n filds = ['name', 'cost', 'description', 'duration', 'reach', 'school']\n for fild in filds:\n self.assertTrue(fild in dir(Magias),\n 'Class Magias does not have the field {}'.format(fild))",
"def testGetAttributes(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertIsNone(file_entry._attributes)\n\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n\n test_attribute_value_data = test_attribute.read()\n self.assertEqual(test_attribute_value_data, b'My extended attribute')",
"def test_back_compat_attributes(self):\n cap = DeviceCapabilities.create(True)\n self.assertTrue(cap.iot_edge)",
"def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()",
"def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def test_lsusb_test_attributes_generic(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_test_attributes, quiet=True), self.generic_lsusb_test_attributes_json)",
"def test_required_attributes(self):\n\n required_attributes = ('ID', )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Jakob2019))",
"def testRequiredAttributes(self):\n\n\t\trequiredAttributes = (\"name\",\n\t\t\t\t\t\t\t\"uiFile\",\n\t\t\t\t\t\t\t\"activated\",\n\t\t\t\t\t\t\t\"initializedUi\",\n\t\t\t\t\t\t\t\"deactivatable\")\n\n\t\tfor attribute in requiredAttributes:\n\t\t\tself.assertIn(attribute, dir(QWidgetComponentFactory()))",
"def test_hasattrs(self):\n self.assertTrue(hasattr(self.obj, \"id\"), \"created obj doesn't \" +\n \"have the attribute id.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__width\"), \"created \" +\n \"obj doesn't have the attribute width.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__height\"), \"created \" +\n \"obj have the attribute height.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__x\"), \"created obj \" +\n \"doesn't have the attribute x.\")\n self.assertTrue(hasattr(self.obj, \"_Rectangle__y\"), \"created \" +\n \"obj doesn't have the attribute y.\")",
"def test_DataPackageFileAttributesAreValid_match(tempdir: pathlib.Path):\n df = dpack_pb2.DataPackageFile()\n df.relative_path = \"a\"\n df.checksum_hash = dpack_pb2.SHA256\n df.checksum = SHA256_EMPTY_FILE\n (tempdir / \"a\").touch()\n assert dpack.DataPackageFileAttributesAreValid(tempdir, df)",
"def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def check_attributes(self, attributes):\n self.log('StorageConfiguration.check_attributes started')\n attributes_ok = True\n\n if not self.enabled:\n self.log('Not enabled, returning True')\n self.log('StorageConfiguration.check_attributes completed')\n return attributes_ok\n\n # warn if locations don't exist\n app_dir = self.options['app_dir'].value\n if not self._check_app_dir(app_dir):\n self.log(\"app_dir is used for $OSG_APP and $OSG_APP/etc on worker nodes, where they should exist and\"\n \" have permissions of 1777 or 777.\",\n level=logging.WARNING)\n\n # WN_TMP may be blank if the job manager dynamically generates it but\n # warni just in case\n if utilities.blank(self.options['worker_node_temp'].value):\n self.log(\"worker_node_temp is blank, this is okay if you've set your \" +\n \"job manager to set this dynamically, otherwise jobs may \" +\n \"fail to run\",\n section=self.config_section,\n option='worker_node_temp',\n level=logging.WARNING)\n self.log('StorageConfiguration.check_attributes completed')\n return attributes_ok",
"def check_attributes(self):\n for key in self.json_parsed_file.keys():\n if key not in self.HARDCODED_REQUIRED_JSON_FIELDS:\n print(key)\n self.output_message += \"All JSON attribute key are not correct\\n\"\n self.is_parsed_pdf_valid = False\n\n for key in self.HARDCODED_REQUIRED_JSON_FIELDS:\n if key not in self.json_parsed_file.keys():\n self.output_message += \"All required attribute keys are not in the parsed information\\n\"\n self.is_parsed_pdf_valid = False",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def has_attributes(self):\n return bool(self.attrs)",
"def test_add_shorts(self):\n self.image.light_source = 1\n self.image.contrast = 0\n\n assert self.image.light_source == 1\n assert self.image.contrast == 0\n\n # Verify pre-existing attributes can still be read as expected.\n for attribute, func, value in read_attributes_florida_beach:\n assert check_value(func(getattr(self.image, attribute)), value)\n\n segment_hex = (\n binascii.hexlify(self.image._segments[\"APP1\"].get_segment_bytes())\n .decode(\"utf-8\")\n .upper()\n )\n self.assertEqual(\"\\n\".join(textwrap.wrap(segment_hex, 90)), ADD_SHORT_BASELINE)",
"def needs_sync(self):\n\n affected_attributes = [\n 'css_files', 'js_files',\n 'scss_files', 'widgets']\n\n for attr in affected_attributes:\n if len(getattr(self, attr)) > 0:\n return True\n return False",
"def test_change_name_of_the_devicefalse():"
]
| [
"0.63431334",
"0.61369073",
"0.609612",
"0.60764974",
"0.5959336",
"0.5766945",
"0.57135236",
"0.5707165",
"0.5706078",
"0.56799346",
"0.56355697",
"0.560508",
"0.5601962",
"0.560114",
"0.55919594",
"0.55814165",
"0.5522186",
"0.551569",
"0.54715294",
"0.5456782",
"0.545658",
"0.54280764",
"0.54207224",
"0.5390375",
"0.5387773",
"0.53819954",
"0.5370561",
"0.5354705",
"0.5353595",
"0.5351444"
]
| 0.6583468 | 0 |
Testing whether commands from running simulated device match commands from fandango file | def test_device_command_list(self):
actual_device_cmds = self.sim_device.get_command_list()
expected_cmd_list = self.sim_file_parser.get_device_command_metadata().keys()
self.assertEquals(
set(actual_device_cmds),
set(expected_cmd_list),
"The commands specified in the fgo file are not present in" " the device",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_match(self, command_bytes):",
"def _verifyCommand(self):\n for i in range(3):\n rc = self.subdevice.command_test() # Verify command is correct\n if rc is None:\n break",
"def test_device_command_list(self):\n default_cmds = helper_module.DEFAULT_TANGO_DEVICE_COMMANDS\n actual_device_cmds = set(self.sim_device.get_command_list()) - default_cmds\n expected_cmd_list = self.sim_file_parser.get_device_command_metadata().keys()\n self.assertEquals(\n actual_device_cmds,\n set(expected_cmd_list),\n \"The commands specified in the json file are not present in\" \" the device\",\n )",
"def test_verify_state_of_a_device():",
"def test_device_command_list(self):\n actual_device_commands = set(self.sim_device.get_command_list()) - {\"Init\"}\n expected_command_list = set(\n self.sim_file_parser.get_device_command_metadata().keys()\n )\n self.assertEquals(\n actual_device_commands,\n expected_command_list,\n \"The commands specified are not present in the device\",\n )",
"def test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())",
"def check_commands(self):\n pass",
"def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False",
"def execute(self, **kvargs):\n cmd = kvargs.get('cmd')\n pattern = kvargs.get('pattern')\n device = kvargs['device']\n timeout = kvargs.get('timeout', 60)\n raw_output = kvargs.get('raw_output', 0)\n if isinstance(pattern, str):\n pattern = [pattern]\n pattern.append(r'---\\(more\\)---')\n pattern_new = ''\n for pat in pattern:\n pattern_new = pattern_new + pat + \",\"\n pattern_new = pattern_new[:-1]\n tnh = self.handle\n cmd_send = cmd + '\\n'\n if not hasattr(device, 'shelltype'):\n device.shelltype = 'sh'\n # if device.shelltype == 'sh':\n # cmd_re = cmd + '\\s?\\r\\n'\n # else:\n cmd_re = cmd + r'\\s?\\r{1,2}\\n'\n cmd_re = re.sub(r'\\$', '\\\\$', cmd_re)\n cmd_re = re.sub(r'\\|', '\\\\|', cmd_re)\n device.log(\"Executing command: \"+cmd_send)\n tnh.send(cmd_send)\n match = -1\n if 'no_response' in kvargs and kvargs['no_response']:\n device.response = ''\n match = 1\n else:\n (output, resp) = self.wait_for(expected=pattern,\n shell=device.shelltype,\n timeout=timeout)\n response = ''\n while '---(more)---' in resp:\n response += re.sub(r'\\n---\\(more\\)---', '', resp, 1)\n tnh.send('\\r\\n')\n (output, resp) = self.wait_for(expected=pattern,\n shell=device.shelltype,\n timeout=timeout)\n response += resp\n if not raw_output:\n response = re.sub(cmd_re, '', response)\n if not output:\n device.log(level='ERROR',\n message=\"Sent '%s' to %s, expected '%s', \"\n \"but got:\\n'%s'\" % (cmd, device.host,\n pattern_new,\n response))\n match = -1\n else:\n for pat in pattern:\n match += 1\n if re.search(pat, response):\n break\n if not raw_output:\n for pat in pattern:\n response = re.sub('\\n.*' + pat, '', response)\n response = re.sub('\\r\\n$', '', response)\n device.response = response\n device.log(response)\n return match",
"def test_command_verify():\n wozardry.parse_args([\"verify\", kValid1])\n wozardry.parse_args([\"verify\", kValid2])",
"def check_commands(line: str) -> bool:\n\n if line == acc.savek:\n # Save and notify\n acc.save()\n post_query(\"I \" + line + \"!\")\n return True\n elif line == 'x':\n exit(0)\n\n return False",
"def test_command_finds_commands(self):\r\n COMMANDLIST['!toread'] = lambda bmark: bmark\r\n\r\n bm = BmarkMock()\r\n bm.tags['!toread'] = True\r\n commander = Commander(bm)\r\n commander.build_commands()\r\n\r\n self.assertTrue(\r\n '!toread' in commander.commands,\r\n \"Our commander should find !toread command to run\")",
"def test_multiple_commands_at_same_time(self):",
"def test_process_host_commands(self):\n\n command = [\"df\", \"-h\"]\n output = run(verification.process_host_commands(command))\n self.assertTrue(\"```\\nThat command is not available.```\" not in output)\n\n command = [\"ls\", \"-la\"]\n output = run(verification.process_host_commands(command))\n self.assertEqual(\"```\\nThat command is not available.```\", output)",
"def fuzzDevice(maxCommands=1000):\n global nFailures\n global nCommands\n global nConnects\n dev = BFieldControllerInterface(addr)\n for _ in range(random.randrange(0,maxCommands+1)):\n if dev.comTest(randomString()) is not True:\n nFailures += 1\n nCommands += 1\n dev.close()\n nConnects += 1",
"def command_match(text, command):\n text = text.split()\n command = command.split()\n if len(text) != len(command):\n return False\n for i, val in enumerate(text):\n if val != command[i][:len(val)]:\n return False\n return True",
"def test_verify_connection_to_a_device():",
"def test_readiness(self):\n self.command.package = self.input_ovf\n ready, reason = self.command.ready_to_run()\n self.assertFalse(ready)\n self.assertRegex(reason, \"No file information\")\n self.assertRaises(InvalidInputError, self.command.run)\n\n self.command.file_path = \"input.vmdk\"\n ready, reason = self.command.ready_to_run()\n self.assertTrue(ready)\n\n self.command.file_path = None\n self.command.file_id = \"file1\"\n ready, reason = self.command.ready_to_run()\n self.assertTrue(ready)",
"def execute(self, devices, command_bytes):",
"def read_test(self, cmd):\n w_bytes = [random.randrange(0, 128) for i in range(0, 16)]\n self._pyb.send(w_bytes)\n self._serial.reset_input_buffer()\n self._serial.write('\\r\\n'.encode('utf-8'))\n self._serial.write(cmd.encode('utf-8'))\n self._serial.write('\\r\\n'.encode('utf-8'))\n\n res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')\n self._pyb.deinit()\n\n r_bytes = []\n for x in re.sub('\\r', '', res).split('\\n'):\n if x.find('IGNORE') != -1:\n r_bytes = [int(s, 16) for s in x.split(',') if len(s) == 2]\n break\n\n if self.compare_host_dut_result(w_bytes, r_bytes) == -1:\n print(repr(res))\n return \"Fail\"\n\n return \"Pass\"",
"def matches(self, pid):\n if self._command_wildcards or self._command_regexs:\n # Matchers requiring comm file\n path = P.join(PROC_DIR, str(pid), 'comm')\n try:\n with open(path) as f:\n comm = f.read().rstrip()\n for pattern in self._command_wildcards:\n if fnmatch(comm, pattern):\n return True\n\n for re_obj in self._command_regexs:\n if re_obj.match(comm):\n return True\n except FileNotFoundError:\n # process may have exited before file could be read\n return False\n\n return False",
"def commands():\n # Check invalid command\n check50.run(run_command).stdin(\"cs50\").stdout(\"Invalid command.\")\n\n # Check for upper case abreviation\n try:\n check50.run(run_command).stdin(\"W\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")\n\n # Check for lower case abbreviation\n try:\n check50.run(run_command).stdin(\"w\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")",
"def test_command_method_exists(self):\n motor_shield = MotorShield(self.options, self.connection)\n\n for command in motor_shield.commands:\n self.assertIn(command, dir(motor_shield))",
"def test_proc_devices_nodata(self):\n self.assertEqual(jc.parsers.proc_devices.parse('', quiet=True), {})",
"def test_flowgramAli_bin(self):\r\n ali_exe = get_flowgram_ali_exe()\r\n\r\n self.assertTrue(which(ali_exe) is not None, \"The alignment program %s \"\r\n \"is not accessible via the PATH environment variable.\"\r\n % ali_exe)\r\n\r\n # test if its callable and actually works\r\n command = \"%s -h\" % ali_exe\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n\r\n if (proc.wait() != 0):\r\n self.fail(\"Calling %s failed. Check permissions and that it is in fact an executable.\"\r\n % ali_exe)\r\n\r\n result = proc.stdout.read()\r\n # check that the help string looks correct\r\n self.assertTrue(result.startswith(\"Usage\"))",
"def test_change_name_of_the_devicetrue():",
"def testCommand():\n proc = subprocess.Popen([\"awk\",\"-f\", \"%s.awk\" % koanIndex[currentKoan]], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n proc.stdin.write(inputString)\n out, err = proc.communicate()\n\n # convert some line endings so this works on linux,mac,windows\n out = out.replace('\\r\\n','\\n').replace('\\r','\\n')\n\n if debug:\n print \"out :\" + repr(str(out.strip())) + \"::\"\n print \"output:\" + repr(str(outputString.strip())) + \"::\"\n\n return str(out.strip() ) == str(outputString.strip() )",
"def test_2_true(self):\n\t\tself.spawn(\"./quidditch\").stdin(\"2\").stdin(\"1\").stdout(\"170\\n\").exit(0)",
"def matches(self, text):\n return text == self.command",
"def test_with_run_command(self):\n self.build()\n self.runCmd(\"file \" + self.getBuildArtifact(\"a.out\"), CURRENT_EXECUTABLE_SET)\n\n lldbutil.run_break_set_by_file_and_line(\n self, \"main.cpp\", self.line, num_expected_locations=1, loc_exact=True)\n\n self.runCmd(\"run\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = breakpoint'])\n\n self.expect(\"frame variable\",\n substrs=['(Speed) SPILookHex = 5.55' # Speed by default is 5.55.\n ])\n\n # This is the function to remove the custom formats in order to have a\n # clean slate for the next test case.\n def cleanup():\n self.runCmd('type format clear', check=False)\n self.runCmd('type summary clear', check=False)\n\n # Execute the cleanup function during test case tear down.\n self.addTearDownHook(cleanup)\n\n self.runCmd(\"type format add -C yes -f x Speed BitField\")\n self.runCmd(\"type format add -C no -f c RealNumber\")\n self.runCmd(\"type format add -C no -f x Type2\")\n self.runCmd(\"type format add -C yes -f c Type1\")\n\n # The type format list should show our custom formats.\n self.expect(\"type format list\",\n substrs=['RealNumber',\n 'Speed',\n 'BitField',\n 'Type1',\n 'Type2'])\n\n self.expect(\"frame variable\",\n patterns=['\\(Speed\\) SPILookHex = 0x[0-9a-f]+' # Speed should look hex-ish now.\n ])\n\n # gcc4.2 on Mac OS X skips typedef chains in the DWARF output\n if self.getCompiler() in ['clang', 'llvm-gcc']:\n self.expect(\"frame variable\",\n patterns=['\\(SignalMask\\) SMILookHex = 0x[0-9a-f]+' # SignalMask should look hex-ish now.\n ])\n self.expect(\"frame variable\", matching=False,\n patterns=['\\(Type4\\) T4ILookChar = 0x[0-9a-f]+' # Type4 should NOT look hex-ish now.\n ])\n\n # Now let's delete the 'Speed' custom format.\n self.runCmd(\"type format delete Speed\")\n\n # The type format list should not show 'Speed' at this point.\n self.expect(\"type format list\", matching=False,\n substrs=['Speed'])\n\n # Delete type format for 'Speed', we should expect an error message.\n self.expect(\"type format delete Speed\", error=True,\n substrs=['no custom formatter for Speed'])\n\n self.runCmd(\n \"type summary add --summary-string \\\"arr = ${var%s}\\\" -x \\\"char \\\\[[0-9]+\\\\]\\\" -v\")\n\n self.expect(\"frame variable strarr\",\n substrs=['arr = \"Hello world!\"'])\n\n self.runCmd(\"type summary clear\")\n\n self.runCmd(\n \"type summary add --summary-string \\\"ptr = ${var%s}\\\" \\\"char *\\\" -v\")\n\n self.expect(\"frame variable strptr\",\n substrs=['ptr = \"Hello world!\"'])\n\n self.runCmd(\n \"type summary add --summary-string \\\"arr = ${var%s}\\\" -x \\\"char \\\\[[0-9]+\\\\]\\\" -v\")\n\n self.expect(\"frame variable strarr\",\n substrs=['arr = \"Hello world!'])\n\n # check that rdar://problem/10011145 (Standard summary format for\n # char[] doesn't work as the result of \"expr\".) 
is solved\n self.expect(\"p strarr\",\n substrs=['arr = \"Hello world!'])\n\n self.expect(\"frame variable strptr\",\n substrs=['ptr = \"Hello world!\"'])\n\n self.expect(\"p strptr\",\n substrs=['ptr = \"Hello world!\"'])\n\n self.expect(\n \"p (char*)\\\"1234567890123456789012345678901234567890123456789012345678901234ABC\\\"\",\n substrs=[\n '(char *) $',\n ' = ptr = ',\n ' \"1234567890123456789012345678901234567890123456789012345678901234ABC\"'])\n\n self.runCmd(\"type summary add -c Point\")\n\n self.expect(\"frame variable iAmSomewhere\",\n substrs=['x = 4',\n 'y = 6'])\n\n self.expect(\"type summary list\",\n substrs=['Point',\n 'one-line'])\n\n self.runCmd(\"type summary add --summary-string \\\"y=${var.y%x}\\\" Point\")\n\n self.expect(\"frame variable iAmSomewhere\",\n substrs=['y=0x'])\n\n self.runCmd(\n \"type summary add --summary-string \\\"y=${var.y},x=${var.x}\\\" Point\")\n\n self.expect(\"frame variable iAmSomewhere\",\n substrs=['y=6',\n 'x=4'])\n\n self.runCmd(\"type summary add --summary-string \\\"hello\\\" Point -e\")\n\n self.expect(\"type summary list\",\n substrs=['Point',\n 'show children'])\n\n self.expect(\"frame variable iAmSomewhere\",\n substrs=['hello',\n 'x = 4',\n '}'])\n\n self.runCmd(\n \"type summary add --summary-string \\\"Sign: ${var[31]%B} Exponent: ${var[23-30]%x} Mantissa: ${var[0-22]%u}\\\" ShowMyGuts\")\n\n self.expect(\"frame variable cool_pointer->floating\",\n substrs=['Sign: true',\n 'Exponent: 0x',\n '80'])\n\n self.runCmd(\"type summary add --summary-string \\\"a test\\\" i_am_cool\")\n\n self.expect(\"frame variable cool_pointer\",\n substrs=['a test'])\n\n self.runCmd(\n \"type summary add --summary-string \\\"a test\\\" i_am_cool --skip-pointers\")\n\n self.expect(\"frame variable cool_pointer\",\n substrs=['a test'],\n matching=False)\n\n self.runCmd(\n \"type summary add --summary-string \\\"${var[1-3]}\\\" \\\"int [5]\\\"\")\n\n self.expect(\"frame variable int_array\",\n substrs=['2',\n '3',\n '4'])\n\n self.runCmd(\"type summary clear\")\n\n self.runCmd(\n \"type summary add --summary-string \\\"${var[0-2].integer}\\\" \\\"i_am_cool *\\\"\")\n self.runCmd(\n \"type summary add --summary-string \\\"${var[2-4].integer}\\\" \\\"i_am_cool [5]\\\"\")\n\n self.expect(\"frame variable cool_array\",\n substrs=['1,1,6'])\n\n self.expect(\"frame variable cool_pointer\",\n substrs=['3,0,0'])\n\n # test special symbols for formatting variables into summaries\n self.runCmd(\n \"type summary add --summary-string \\\"cool object @ ${var%L}\\\" i_am_cool\")\n self.runCmd(\"type summary delete \\\"i_am_cool [5]\\\"\")\n\n # this test might fail if the compiler tries to store\n # these values into registers.. hopefully this is not\n # going to be the case\n self.expect(\"frame variable cool_array\",\n substrs=['[0] = cool object @ 0x',\n '[1] = cool object @ 0x',\n '[2] = cool object @ 0x',\n '[3] = cool object @ 0x',\n '[4] = cool object @ 0x'])\n\n # test getting similar output by exploiting ${var} = 'type @ location'\n # for aggregates\n self.runCmd(\"type summary add --summary-string \\\"${var}\\\" i_am_cool\")\n\n # this test might fail if the compiler tries to store\n # these values into registers.. 
hopefully this is not\n # going to be the case\n self.expect(\"frame variable cool_array\",\n substrs=['[0] = i_am_cool @ 0x',\n '[1] = i_am_cool @ 0x',\n '[2] = i_am_cool @ 0x',\n '[3] = i_am_cool @ 0x',\n '[4] = i_am_cool @ 0x'])\n\n # test getting same output by exploiting %T and %L together for\n # aggregates\n self.runCmd(\n \"type summary add --summary-string \\\"${var%T} @ ${var%L}\\\" i_am_cool\")\n\n # this test might fail if the compiler tries to store\n # these values into registers.. hopefully this is not\n # going to be the case\n self.expect(\"frame variable cool_array\",\n substrs=['[0] = i_am_cool @ 0x',\n '[1] = i_am_cool @ 0x',\n '[2] = i_am_cool @ 0x',\n '[3] = i_am_cool @ 0x',\n '[4] = i_am_cool @ 0x'])\n\n self.runCmd(\"type summary add --summary-string \\\"goofy\\\" i_am_cool\")\n self.runCmd(\n \"type summary add --summary-string \\\"${var.second_cool%S}\\\" i_am_cooler\")\n\n self.expect(\"frame variable the_coolest_guy\",\n substrs=['(i_am_cooler) the_coolest_guy = goofy'])\n\n # check that unwanted type specifiers are removed\n self.runCmd(\"type summary delete i_am_cool\")\n self.runCmd(\n \"type summary add --summary-string \\\"goofy\\\" \\\"class i_am_cool\\\"\")\n self.expect(\"frame variable the_coolest_guy\",\n substrs=['(i_am_cooler) the_coolest_guy = goofy'])\n\n self.runCmd(\"type summary delete i_am_cool\")\n self.runCmd(\n \"type summary add --summary-string \\\"goofy\\\" \\\"enum i_am_cool\\\"\")\n self.expect(\"frame variable the_coolest_guy\",\n substrs=['(i_am_cooler) the_coolest_guy = goofy'])\n\n self.runCmd(\"type summary delete i_am_cool\")\n self.runCmd(\n \"type summary add --summary-string \\\"goofy\\\" \\\"struct i_am_cool\\\"\")\n self.expect(\"frame variable the_coolest_guy\",\n substrs=['(i_am_cooler) the_coolest_guy = goofy'])\n\n # many spaces, but we still do the right thing\n self.runCmd(\"type summary delete i_am_cool\")\n self.runCmd(\n \"type summary add --summary-string \\\"goofy\\\" \\\"union i_am_cool\\\"\")\n self.expect(\"frame variable the_coolest_guy\",\n substrs=['(i_am_cooler) the_coolest_guy = goofy'])\n\n # but that not *every* specifier is removed\n self.runCmd(\"type summary delete i_am_cool\")\n self.runCmd(\n \"type summary add --summary-string \\\"goofy\\\" \\\"wrong i_am_cool\\\"\")\n self.expect(\"frame variable the_coolest_guy\", matching=False,\n substrs=['(i_am_cooler) the_coolest_guy = goofy'])\n\n # check that formats are not sticking since that is the behavior we\n # want\n self.expect(\"frame variable iAmInt --format hex\",\n substrs=['(int) iAmInt = 0x00000001'])\n self.expect(\n \"frame variable iAmInt\",\n matching=False,\n substrs=['(int) iAmInt = 0x00000001'])\n self.expect(\"frame variable iAmInt\", substrs=['(int) iAmInt = 1'])"
]
| [
"0.686579",
"0.6190588",
"0.61735475",
"0.6020115",
"0.5998797",
"0.5967311",
"0.5941706",
"0.58935934",
"0.5856973",
"0.58003515",
"0.57989997",
"0.57601476",
"0.5729398",
"0.57259583",
"0.5711797",
"0.5648455",
"0.5644743",
"0.56248915",
"0.56072706",
"0.55872613",
"0.55554324",
"0.5550492",
"0.55373013",
"0.5537027",
"0.5536655",
"0.55298895",
"0.5527195",
"0.55237556",
"0.5522239",
"0.5520202"
]
| 0.6805937 | 1 |
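The negatives above revolve around LLDB's `type format` / `type summary` commands. As a purely illustrative sketch (not part of the dataset), the same add/list/delete flow can be driven from LLDB's Python bindings, assuming the `lldb` module is importable:

import lldb

lldb.SBDebugger.Initialize()
debugger = lldb.SBDebugger.Create()
interp = debugger.GetCommandInterpreter()
result = lldb.SBCommandReturnObject()

# Mirror the cycle exercised in the test: register a hex format for `int`,
# list the custom formats, then remove the format again.
for cmd in ("type format add -f hex int",
            "type format list",
            "type format delete int"):
    interp.HandleCommand(cmd, result)
    print(cmd, "->", "ok" if result.Succeeded() else result.GetError())

lldb.SBDebugger.Destroy(debugger)
lldb.SBDebugger.Terminate()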
Testing whether the attributes specified in the simdd json are added to the TANGO device. | def test_device_attribute_list(self):
# test that the attributes from the running simulated device match the attributes
# from in the simdd json file
device_attributes = set(self.sim_device.get_attribute_list())
default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES
remaining_device_attrs = device_attributes - default_attributes
not_added_attr = self.sim_device.read_attribute("AttributesNotAdded")
not_added_attr_names = not_added_attr.value
expected_attributes = []
for attr_prop in itervalues(self.sim_file_parser._device_attributes):
expected_attributes.append(attr_prop["name"])
expected_attributes = set(expected_attributes)
# checking to see if there were any attributes not added
if not_added_attr_names is not None:
expected_attributes = expected_attributes - set(not_added_attr_names)
self.assertEqual(
set(expected_attributes),
remaining_device_attrs,
"Actual tango device attribute list differs from expected " "list!",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_device_attribute_list(self):\n # test that the attributes from the running simulated device match the attributes\n # from in the fandango generated file\n device_attributes = set(self.sim_device.get_attribute_list())\n extra_attr_from_device = set([\"NumAttributesNotAdded\", \"AttributesNotAdded\"])\n remaining_device_attrs = device_attributes - extra_attr_from_device\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value\n\n expected_attributes = []\n for attr_prop in list(self.sim_file_parser._device_attributes.values()):\n expected_attributes.append(attr_prop[\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n set(expected_attributes),\n remaining_device_attrs,\n \"Actual tango device attribute list {} differs from expected list {}!\".format(\n remaining_device_attrs, expected_attributes\n ),\n )",
"def test_device_attribute_list(self):\n # First testing that the attribute with data format \"IMAGE\" is in the device.\n attribute_name = \"image1\"\n device_attributes = set(self.sim_device.get_attribute_list())\n self.assertIn(\n attribute_name,\n device_attributes,\n \"The attribute {} has been added to the device.\".format(attribute_name),\n )\n not_added_attr = self.sim_device.read_attribute(\"AttributesNotAdded\")\n not_added_attr_names = not_added_attr.value if not_added_attr.value else []\n self.assertNotIn(\n attribute_name,\n not_added_attr_names,\n \"The attribute {} was not added to the list of attributes that\"\n \" could not be added to the device.\".format(attribute_name),\n )\n\n expected_attributes = []\n default_attributes = helper_module.DEFAULT_TANGO_DEVICE_ATTRIBUTES\n\n for attribute_data in self.sim_file_parser._device_attributes:\n expected_attributes.append(attribute_data[\"dynamicAttributes\"][\"name\"])\n expected_attributes = set(expected_attributes)\n # checking to see if there were any attributes not added\n if not_added_attr_names is not None:\n expected_attributes = expected_attributes - set(not_added_attr_names)\n self.assertEqual(\n expected_attributes,\n device_attributes - default_attributes,\n \"Actual tango device attribute list differs from expected \" \"list!\",\n )",
"def test_lsusb_test_attributes_generic(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_test_attributes, quiet=True), self.generic_lsusb_test_attributes_json)",
"def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)",
"def test_sim_control_attribute_list(self):\n implemented_attr = helper_module.SIM_CONTROL_ADDITIONAL_IMPLEMENTED_ATTR\n control_attributes = test_sim_test_interface.control_attributes(\n self.expected_model\n )\n attributes = set(self.sim_control_device.get_attribute_list())\n self.assertEqual(attributes - implemented_attr, set(control_attributes))",
"def id_exists(test_name):\n result_json = None\n try:\n with open(robot_dir + \"/output/results/{}.json\".format(test_name.replace(' ', ''))) as result_file:\n result_json = json.load(result_file)\n except:\n print(\"Failed to open the result json\")\n return False\n #look for values NEW_ASSOC, NEW_PROP1, NEW_PROP2\n print(result_json)\n if 6 == 6:\n return True\n return \"Length is not 6\"",
"def test_back_compat_attributes(self):\n cap = DeviceCapabilities.create(True)\n self.assertTrue(cap.iot_edge)",
"def test_info(get_touchmat):\n touchmat = get_touchmat\n\n info = touchmat.info()\n check_device_types.check_DeviceInfo(info)\n\n vid_pid = (info['vendor_id'], info['product_id'])\n assert vid_pid in (Devices.touchmat_g1.value,\n Devices.touchmat_g2.value)\n\n serial = info['serial']\n if Devices(vid_pid) == Devices.touchmat_g2:\n assert serial == \"Not Available\"\n else:\n assert len(serial) == 24",
"def test_lsusb_test_attributes2_generic(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_test_attributes2, quiet=True), self.generic_lsusb_test_attributes2_json)",
"def test_add_software_system(attributes: dict, model: Model):\n software_system = SoftwareSystem(**attributes)\n model += software_system\n assert software_system.id == \"1\"\n assert len(model.software_systems) == 1\n for attr, expected in attributes.items():\n assert getattr(software_system, attr) == expected",
"def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()",
"def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes",
"def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def test_creation_when_service_attributes_value_is_empty(self):\n self.data = {\n \"service_name\": \"Live at the yard\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the yard\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"\",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response2.status, \"400 BAD REQUEST\")\n self.assertIn(\"attribute value cannot be Empty.\", str(response2.data))",
"def test_updating_when_service_attributes_name_is_empty(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"attribute name cannot be empty.\", str(response3.data))",
"def test_details_id_ok(self):\n self.check_response('/attributes/1',\n ('Attribute ID#1 not found',))",
"def test_updating_when_service_attributes_value_is_empty(self):\n self.data = {\n \"service_name\": \"Live at the shop\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the shop\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"\",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. You have added a new Service Live at the yard to the store.\", str(response2.data))\n service_id = json.loads(response2.data)\n service_id = service_id['service_identifier']\n response3 = self.client.put(store_url + store_id + '/service/' + service_id + '/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response3.status, \"400 BAD REQUEST\")\n self.assertIn(\"attribute value cannot be Empty.\", str(response3.data))",
"def has_attributes(self):\n\n pass",
"def test_add_devicedata(client):\n client.socketio.emit('devicedata', {'data': '1500000010,1|12,22,1'})\n\n res = client.socketio.get_received()\n\n message_exist = False\n for message in res:\n if message['name'] == 'status':\n message_exist = True\n assert message['args'][0]['id'] == 1\n assert message['args'][0]['time'] == '2017-07-14T02:40:10+00:00'\n assert message['args'][0]['data'] == {\n 'control': True,\n 'test1': 12.0,\n 'test2': 22.0\n }\n assert message_exist is True",
"def test_add_device(self):\n\n pass",
"async def test_update_with_json_attrs_not_dict(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n json=[\"list\", \"of\", \"things\"],\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.key }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"headers\": {\"Accept\": \"text/xml\"},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n\n state = hass.states.get(\"sensor.foo\")\n assert state.state == \"\"\n assert state.attributes == {\"friendly_name\": \"foo\"}\n assert \"not a dictionary or list\" in caplog.text",
"def test_device_add_from_file(self, gateway_with_devs):\n assert 'daq' in gateway_with_devs._devs\n assert 'pel' in gateway_with_devs._devs\n assert 'sg' in gateway_with_devs._devs\n assert 'not_a_driver' not in gateway_with_devs._devs",
"def check_attributes(self, attributes):\n self.log('StorageConfiguration.check_attributes started')\n attributes_ok = True\n\n if not self.enabled:\n self.log('Not enabled, returning True')\n self.log('StorageConfiguration.check_attributes completed')\n return attributes_ok\n\n # warn if locations don't exist\n app_dir = self.options['app_dir'].value\n if not self._check_app_dir(app_dir):\n self.log(\"app_dir is used for $OSG_APP and $OSG_APP/etc on worker nodes, where they should exist and\"\n \" have permissions of 1777 or 777.\",\n level=logging.WARNING)\n\n # WN_TMP may be blank if the job manager dynamically generates it but\n # warni just in case\n if utilities.blank(self.options['worker_node_temp'].value):\n self.log(\"worker_node_temp is blank, this is okay if you've set your \" +\n \"job manager to set this dynamically, otherwise jobs may \" +\n \"fail to run\",\n section=self.config_section,\n option='worker_node_temp',\n level=logging.WARNING)\n self.log('StorageConfiguration.check_attributes completed')\n return attributes_ok",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def test_creation_when_service_attributes_name_is_empty(self):\n self.data = {\n \"service_name\": \"Live at the yard\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the yard\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response2.status, \"400 BAD REQUEST\")\n self.assertIn(\"attribute name cannot be empty.\", str(response2.data))",
"def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)",
"def test_required_attributes(self):\n\n required_attributes = ('ID', )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Jakob2019))",
"def test_add_person(attributes: dict, model: Model):\n person = Person(**attributes)\n model += person\n assert person.id == \"1\"\n assert len(model.people) == 1\n for attr, expected in attributes.items():\n assert getattr(person, attr) == expected",
"def test_creation_when_service_attributes_value_has_invalid_characters(self):\n self.data = {\n \"service_name\": \"Live at the yard\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the yard\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"!!@#$%^&*\",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response2.status, \"400 BAD REQUEST\")\n self.assertIn(\"!!@#$%^&* is not a valid attribute value\", str(response2.data))"
]
| [
"0.6757138",
"0.6639707",
"0.603285",
"0.57943916",
"0.5781796",
"0.57430536",
"0.5678223",
"0.5659225",
"0.5638481",
"0.55833596",
"0.5567453",
"0.55489945",
"0.5530007",
"0.5519677",
"0.55196536",
"0.5511401",
"0.5497804",
"0.54914",
"0.54877055",
"0.5468603",
"0.5462875",
"0.54601324",
"0.54578847",
"0.5452734",
"0.5442129",
"0.5431462",
"0.5425298",
"0.53946126",
"0.5379262",
"0.53764766"
]
| 0.71292067 | 0 |
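A standalone sketch of the same attribute-list comparison against a live device, assuming PyTango is installed; the device name and the expected attribute set are hypothetical placeholders:

import tango

# Hypothetical device name and expected set, for illustration only.
proxy = tango.DeviceProxy("test/sim/1")
expected = {"integer1", "AttributesNotAdded", "NumAttributesNotAdded"}

# State and Status are attributes every TANGO device exposes by default,
# regardless of what the simdd file defines.
default_attributes = {"State", "Status"}

actual = set(proxy.get_attribute_list()) - default_attributes
missing = expected - actual
assert not missing, "attributes missing from the device: %s" % sorted(missing)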
Testing whether commands from the running simulated device match commands from the simdd json file | def test_device_command_list(self):
default_cmds = helper_module.DEFAULT_TANGO_DEVICE_COMMANDS
actual_device_cmds = set(self.sim_device.get_command_list()) - default_cmds
expected_cmd_list = self.sim_file_parser.get_device_command_metadata().keys()
        self.assertEqual(
            actual_device_cmds,
            set(expected_cmd_list),
            "The commands specified in the json file are not present in the device",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_device_command_list(self):\n actual_device_cmds = self.sim_device.get_command_list()\n expected_cmd_list = self.sim_file_parser.get_device_command_metadata().keys()\n self.assertEquals(\n set(actual_device_cmds),\n set(expected_cmd_list),\n \"The commands specified in the fgo file are not present in\" \" the device\",\n )",
"def test_device_command_list(self):\n actual_device_commands = set(self.sim_device.get_command_list()) - {\"Init\"}\n expected_command_list = set(\n self.sim_file_parser.get_device_command_metadata().keys()\n )\n self.assertEquals(\n actual_device_commands,\n expected_command_list,\n \"The commands specified are not present in the device\",\n )",
"def is_match(self, command_bytes):",
"def test_command_list(self):\n from pystarlab.starlab import Story\n commands = [\"makeking -w 1.5 -s 1454677882 -n 5 -i\",\n \"makemass -i -l 0.1 -u 20 -s 1454677882\"]\n\n mass_output = \"mass.out\"\n\n mass_path = os.path.join(DATA_DIR, mass_output)\n with open(mass_path, 'r') as f:\n mass_str = f.read()\n\n mass_story = Story.from_command_list(commands)\n for line in zip(mass_str.splitlines(),\n str(mass_story).splitlines()):\n string_with_date = re.match(\"^ ===>\",line[0])\n if not string_with_date:\n self.assertEquals(line[0], line[1])",
"def _verifyCommand(self):\n for i in range(3):\n rc = self.subdevice.command_test() # Verify command is correct\n if rc is None:\n break",
"def test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())",
"def check_commands(self):\n pass",
"async def test_commands(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/command\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"command.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.commands()\n\n assert response\n assert isinstance(response, List)\n\n assert response[0]\n assert isinstance(response[0], models.CommandItem)",
"def read_commands_03(self, commands):\n states = {1: {\"0\": 1, \"1\": 2},\n 2: {\"0\": 3, \"1\": 2},\n 3: {\"0\": 2, \"1\": 2}}\n return reduce(lambda s, c: states[s][c], commands, 1) == 2",
"def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False",
"async def test_command_status(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/command/368630\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"command-id.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.command_status(368630)\n\n assert response\n assert isinstance(response, models.CommandItem)",
"def test_features(self):\n assert list(parser.generate_commands(yaml.load(\n '- my_command: {name: my_name}'))) == [('my_command', {'name': 'my_name'})]",
"def read_test(self, cmd):\n w_bytes = [random.randrange(0, 128) for i in range(0, 16)]\n self._pyb.send(w_bytes)\n self._serial.reset_input_buffer()\n self._serial.write('\\r\\n'.encode('utf-8'))\n self._serial.write(cmd.encode('utf-8'))\n self._serial.write('\\r\\n'.encode('utf-8'))\n\n res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')\n self._pyb.deinit()\n\n r_bytes = []\n for x in re.sub('\\r', '', res).split('\\n'):\n if x.find('IGNORE') != -1:\n r_bytes = [int(s, 16) for s in x.split(',') if len(s) == 2]\n break\n\n if self.compare_host_dut_result(w_bytes, r_bytes) == -1:\n print(repr(res))\n return \"Fail\"\n\n return \"Pass\"",
"def Checker(self,cmdDict):\r\n for (cmd,content) in cmdDict.items():\r\n out=self.Cmd(cmd)\r\n if out[0] == 0:return out\r\n else:\r\n for c in content:\r\n if c not in out[1]:\r\n return (False,'%s cmd result not find %s'%(cmd,c))\r\n return True,None",
"def test_command_finds_commands(self):\r\n COMMANDLIST['!toread'] = lambda bmark: bmark\r\n\r\n bm = BmarkMock()\r\n bm.tags['!toread'] = True\r\n commander = Commander(bm)\r\n commander.build_commands()\r\n\r\n self.assertTrue(\r\n '!toread' in commander.commands,\r\n \"Our commander should find !toread command to run\")",
"def test_listCommand(self):\n acli = ArmiCLI()\n\n origout = sys.stdout\n try:\n out = io.StringIO()\n sys.stdout = out\n acli.listCommands()\n finally:\n sys.stdout = origout\n\n self.assertIn(\"run-suite\", out.getvalue())",
"def execute(self, **kvargs):\n cmd = kvargs.get('cmd')\n pattern = kvargs.get('pattern')\n device = kvargs['device']\n timeout = kvargs.get('timeout', 60)\n raw_output = kvargs.get('raw_output', 0)\n if isinstance(pattern, str):\n pattern = [pattern]\n pattern.append(r'---\\(more\\)---')\n pattern_new = ''\n for pat in pattern:\n pattern_new = pattern_new + pat + \",\"\n pattern_new = pattern_new[:-1]\n tnh = self.handle\n cmd_send = cmd + '\\n'\n if not hasattr(device, 'shelltype'):\n device.shelltype = 'sh'\n # if device.shelltype == 'sh':\n # cmd_re = cmd + '\\s?\\r\\n'\n # else:\n cmd_re = cmd + r'\\s?\\r{1,2}\\n'\n cmd_re = re.sub(r'\\$', '\\\\$', cmd_re)\n cmd_re = re.sub(r'\\|', '\\\\|', cmd_re)\n device.log(\"Executing command: \"+cmd_send)\n tnh.send(cmd_send)\n match = -1\n if 'no_response' in kvargs and kvargs['no_response']:\n device.response = ''\n match = 1\n else:\n (output, resp) = self.wait_for(expected=pattern,\n shell=device.shelltype,\n timeout=timeout)\n response = ''\n while '---(more)---' in resp:\n response += re.sub(r'\\n---\\(more\\)---', '', resp, 1)\n tnh.send('\\r\\n')\n (output, resp) = self.wait_for(expected=pattern,\n shell=device.shelltype,\n timeout=timeout)\n response += resp\n if not raw_output:\n response = re.sub(cmd_re, '', response)\n if not output:\n device.log(level='ERROR',\n message=\"Sent '%s' to %s, expected '%s', \"\n \"but got:\\n'%s'\" % (cmd, device.host,\n pattern_new,\n response))\n match = -1\n else:\n for pat in pattern:\n match += 1\n if re.search(pat, response):\n break\n if not raw_output:\n for pat in pattern:\n response = re.sub('\\n.*' + pat, '', response)\n response = re.sub('\\r\\n$', '', response)\n device.response = response\n device.log(response)\n return match",
"def test_command_method_exists(self):\n motor_shield = MotorShield(self.options, self.connection)\n\n for command in motor_shield.commands:\n self.assertIn(command, dir(motor_shield))",
"def test_sense():\n with patch.dict(\n sensors.__salt__, {\"cmd.run\": MagicMock(return_value=\"A:a B:b C:c D:d\")}\n ):\n assert sensors.sense(\"chip\") == {\"A\": \"a B\"}",
"def test_listCommand(self):\n from armi import cli\n\n cli = cli.ArmiCLI()\n\n origout = sys.stdout\n try:\n out = io.StringIO()\n sys.stdout = out\n cli.listCommands()\n finally:\n sys.stdout = origout\n self.assertIn(\"run-suite\", out.getvalue())",
"def _is_non_real_command_found(self, script_data):\n is_valid = True\n depends_on_commands = script_data.get('depends_on')\n if depends_on_commands:\n for command in depends_on_commands:\n if command != 'test-module':\n if command.endswith('dev') or command.endswith('copy'):\n error_message, error_code = Errors.invalid_command_name_in_script(script_data.get('name'),\n command)\n if self.handle_error(error_message, error_code, file_path=\"id_set.json\"):\n return not is_valid\n return is_valid",
"def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True",
"def read_input_command(parser):\n \n global input\n \n # Defining the default values. \n # Each of these values could be changed:\n # 1. By changing the 'INPUT.cfg' file (if you use \n # \"'./obspyDMT.py --type file'\")\n # 2. By defining the required command-line flag (if you use \n # \"'./obspyDMT.py --type command'\")\n input = { 'datapath': 'obspyDMT-data',\n \n 'min_date': str(UTCDateTime() - 60 * 60 * 24 * 10 * 1),\n 'max_date': str(UTCDateTime() - 60 * 60 * 24 * 5 * 1),\n 'min_mag': 5.5, 'max_mag': 9.9,\n 'min_depth': +10.0, 'max_depth': -6000.0,\n \n 'get_events': 'Y',\n 'interval': 3600*24,\n \n 'waveform': 'Y', 'response': 'Y',\n 'IRIS': 'Y', 'ArcLink': 'Y',\n \n 'preset': 0.0, 'offset': 1800.0,\n \n 'net': '*', 'sta': '*', 'loc': '*', 'cha': '*',\n \n 'evlatmin': -90.0, 'evlatmax': +90.0, \n 'evlonmin': -180.0, 'evlonmax': +180.0,\n \n 'max_result': 2500,\n \n 'lat_cba': None, 'lon_cba': None, \n 'mr_cba': None, 'Mr_cba': None,\n \n 'mlat_rbb': None, 'Mlat_rbb': None, \n 'mlon_rbb': None, 'Mlon_rbb': None,\n\n 'test': 'N',\n \n 'iris_update': 'N', 'arc_update': 'N', 'update_all': 'N',\n\n 'email_address': '',\n \n 'ic_all': 'N',\n \n 'iris_ic': 'N', 'iris_ic_auto': 'Y',\n 'arc_ic': 'N', 'arc_ic_auto': 'Y',\n 'pre_filt': '(0.008, 0.012, 3.0, 4.0)',\n 'corr_unit': 'DIS',\n \n 'merge_all': 'N',\n \n 'iris_merge': 'N', 'iris_merge_auto': 'Y',\n 'merge_folder': 'raw',\n \n 'arc_merge': 'N', 'arc_merge_auto': 'Y',\n \n 'plot_all': 'Y',\n 'plot_folder': 'raw',\n \n 'plot_ev': 'N', 'plot_sta': 'N', 'plot_se': 'N',\n 'plot_ray': 'N', 'plot_epi': 'N',\n 'plot_save': '.', 'plot_format': 'png',\n \n 'min_epi': 0.0, 'max_epi': 180.0,\n \n }\n \n # feed input dictionary of defaults into parser object\n parser.set_defaults(**input)\n \n # parse command line options\n (options, args) = parser.parse_args()\n # command line options can now be accessed via options.varname.\n \n # parse datapath (check if given absolute or relative)\n if options.version: \n bold = \"\\033[1m\"\n reset = \"\\033[0;0m\"\n print '\\t\\t' + '*********************************'\n print '\\t\\t' + '* obspyDMT version: *' \n print '\\t\\t' + '*' + '\\t\\t' + bold + '1.0' + reset + '\\t\\t' + '*'\n print '\\t\\t' + '*********************************'\n print '\\n'\n sys.exit(2)\n \n if options.datapath:\n if not os.path.isabs(options.datapath):\n options.datapath = os.path.join(os.getcwd(), options.datapath)\n \n if options.iris_update != 'N':\n if not os.path.isabs(options.iris_update):\n options.iris_update = os.path.join(os.getcwd(), options.iris_update)\n \n if options.arc_update != 'N':\n if not os.path.isabs(options.arc_update):\n options.arc_update = os.path.join(os.getcwd(), options.arc_update)\n \n if options.update_all != 'N':\n if not os.path.isabs(options.update_all):\n options.update_all = os.path.join(os.getcwd(), options.update_all)\n \n if options.iris_ic != 'N':\n if not os.path.isabs(options.iris_ic):\n options.iris_ic = os.path.join(os.getcwd(), options.iris_ic)\n \n if options.arc_ic != 'N':\n if not os.path.isabs(options.arc_ic):\n options.arc_ic = os.path.join(os.getcwd(), options.arc_ic)\n \n if options.ic_all != 'N':\n if not os.path.isabs(options.ic_all):\n options.ic_all = os.path.join(os.getcwd(), options.ic_all)\n \n if options.iris_merge != 'N':\n if not os.path.isabs(options.iris_merge):\n options.iris_merge = os.path.join(os.getcwd(), options.iris_merge)\n \n if options.arc_merge != 'N':\n if not os.path.isabs(options.arc_merge):\n options.arc_merge = os.path.join(os.getcwd(), 
options.arc_merge)\n \n if options.merge_all != 'N':\n if not os.path.isabs(options.merge_all):\n options.merge_all = os.path.join(os.getcwd(), options.merge_all)\n \n if options.plot_ev != 'N':\n if not os.path.isabs(options.plot_ev):\n options.plot_ev = os.path.join(os.getcwd(), options.plot_ev)\n \n if options.plot_sta != 'N':\n if not os.path.isabs(options.plot_sta):\n options.plot_sta = os.path.join(os.getcwd(), options.plot_sta)\n \n if options.plot_se != 'N':\n if not os.path.isabs(options.plot_se):\n options.plot_se = os.path.join(os.getcwd(), options.plot_se)\n \n if options.plot_ray != 'N':\n if not os.path.isabs(options.plot_ray):\n options.plot_ray = os.path.join(os.getcwd(), options.plot_ray)\n \n if options.plot_epi != 'N':\n if not os.path.isabs(options.plot_epi):\n options.plot_epi = os.path.join(os.getcwd(), options.plot_epi)\n \n if options.plot_save != 'N':\n if not os.path.isabs(options.plot_save):\n options.plot_save = os.path.join(os.getcwd(), options.plot_save)\n \n \n # extract min. and max. longitude and latitude if the user has given the\n # coordinates with -r (GMT syntax)\n if options.event_rect:\n try:\n options.event_rect = options.event_rect.split('/')\n if len(options.event_rect) != 4:\n print \"Erroneous rectangle given.\"\n sys.exit(2)\n options.evlonmin = float(options.event_rect[0])\n options.evlonmax = float(options.event_rect[1])\n options.evlatmin = float(options.event_rect[2])\n options.evlatmax = float(options.event_rect[3])\n except:\n print \"Erroneous rectangle given.\"\n sys.exit(2)\n \n # extract min. and max. longitude and latitude if the user has given the\n # coordinates with -g (GMT syntax)\n if options.station_rect:\n try:\n options.station_rect = options.station_rect.split('/')\n if len(options.station_rect) != 4:\n print \"Erroneous rectangle given.\"\n sys.exit(2)\n options.mlon_rbb = float(options.station_rect[0])\n options.Mlon_rbb = float(options.station_rect[1])\n options.mlat_rbb = float(options.station_rect[2])\n options.Mlat_rbb = float(options.station_rect[3])\n except:\n print \"Erroneous rectangle given.\"\n sys.exit(2)\n \n # circular station restriction option parsing\n if options.station_circle:\n try:\n options.station_circle = options.station_circle.split('/')\n if len(options.station_circle) != 4:\n print \"Erroneous circle given.\"\n sys.exit(2)\n options.lon_cba = float(options.station_circle[0])\n options.lat_cba = float(options.station_circle[1])\n options.mr_cba = float(options.station_circle[2])\n options.Mr_cba = float(options.station_circle[3])\n except:\n print \"Erroneous circle given.\"\n sys.exit(2)\n \n # delete data path if -R or --reset args are given at cmdline\n if options.reset:\n # try-except so we don't get an exception if path doesnt exist\n try:\n shutil.rmtree(options.datapath)\n print '----------------------------------'\n print 'The following folder has been deleted:'\n print str(options.datapath)\n print 'obspyDMT is going to create a new folder...'\n print '----------------------------------'\n except:\n pass\n \n # Extract network, station, location, channel if the user has given an\n # identity code (-i xx.xx.xx.xx)\n if options.identity:\n try:\n options.net, options.sta, options.loc, options.cha = \\\n options.identity.split('.')\n except:\n print \"Erroneous identity code given.\"\n sys.exit(2)\n \n input['datapath'] = options.datapath\n \n input['min_date'] = options.min_date\n input['max_date'] = options.max_date\n input['min_mag'] = float(options.min_mag)\n input['max_mag'] = 
float(options.max_mag)\n input['min_depth'] = float(options.min_depth)\n input['max_depth'] = float(options.max_depth)\n \n input['evlonmin'] = options.evlonmin\n input['evlonmax'] = options.evlonmax\n input['evlatmin'] = options.evlatmin\n input['evlatmax'] = options.evlatmax\n \n input['preset'] = float(options.preset)\n input['offset'] = float(options.offset)\n input['max_result'] = int(options.max_result)\n \n input['get_events'] = options.get_events\n \n if options.get_continuous:\n input['get_events'] = 'N'\n input['get_continuous'] = 'Y'\n else:\n input['get_continuous'] = 'N'\n input['interval'] = float(options.interval)\n \n if options.iris_bulk: options.iris_bulk = 'Y'\n input['iris_bulk'] = options.iris_bulk\n \n input['waveform'] = options.waveform\n input['response'] = options.response\n if options.SAC: options.SAC = 'Y'\n input['SAC'] = options.SAC\n \n input['IRIS'] = options.IRIS\n input['ArcLink'] = options.ArcLink\n \n if options.time_iris: options.time_iris = 'Y'\n input['time_iris'] = options.time_iris\n if options.time_arc: options.time_arc = 'Y'\n input['time_arc'] = options.time_arc\n \n if options.input_period: options.input_period = 'Y'\n input['input_period'] = options.input_period\n if options.nodes: options.nodes = 'Y'\n input['nodes'] = options.nodes\n \n input['net'] = options.net\n input['sta'] = options.sta\n if options.loc == \"''\":\n input['loc'] = ''\n elif options.loc == '\"\"':\n input['loc'] = ''\n else:\n input['loc'] = options.loc\n \n input['cha'] = options.cha\n\n input['lon_cba'] = options.lon_cba\n input['lat_cba'] = options.lat_cba\n input['mr_cba'] = options.mr_cba\n input['Mr_cba'] = options.Mr_cba\n \n input['mlon_rbb'] = options.mlon_rbb\n input['Mlon_rbb'] = options.Mlon_rbb\n input['mlat_rbb'] = options.mlat_rbb\n input['Mlat_rbb'] = options.Mlat_rbb \n \n if options.test != 'N':\n input['test'] = 'Y'\n input['test_num'] = int(options.test)\n \n input['iris_update'] = options.iris_update\n input['arc_update'] = options.arc_update\n input['update_all'] = options.update_all\n \n if input['update_all'] != 'N':\n input['iris_update'] = input['update_all']\n input['arc_update'] = input['update_all']\n \n input['iris_ic'] = options.iris_ic\n input['iris_ic_auto'] = options.iris_ic_auto\n \n input['arc_ic'] = options.arc_ic\n input['arc_ic_auto'] = options.arc_ic_auto\n \n input['ic_all'] = options.ic_all\n \n if input['ic_all'] != 'N':\n input['iris_ic'] = input['ic_all']\n input['arc_ic'] = input['ic_all']\n \n input['iris_merge'] = options.iris_merge\n input['arc_merge'] = options.arc_merge\n input['merge_all'] = options.merge_all\n \n if input['merge_all'] != 'N':\n input['iris_merge'] = input['merge_all']\n input['arc_merge'] = input['merge_all']\n \n if options.zip_w: options.zip_w = 'Y'\n input['zip_w'] = options.zip_w\n \n if options.zip_r: options.zip_r = 'Y'\n input['zip_r'] = options.zip_r\n \n input['plot_folder'] = options.plot_folder\n \n input['plot_all'] = options.plot_all\n if options.plot_iris: options.plot_iris = 'Y'\n input['plot_iris'] = options.plot_iris\n if options.plot_arc: options.plot_arc = 'Y'\n input['plot_arc'] = options.plot_arc\n \n input['plot_ev'] = options.plot_ev\n input['plot_sta'] = options.plot_sta\n input['plot_se'] = options.plot_se\n input['plot_ray'] = options.plot_ray\n input['plot_epi'] = options.plot_epi\n \n input['min_epi'] = float(options.min_epi)\n input['max_epi'] = float(options.max_epi)\n \n input['plot_save'] = options.plot_save\n input['plot_format'] = options.plot_format\n \n if 
options.email: options.email = 'Y'\n input['email'] = options.email\n input['email_address'] = options.email_address\n \n if options.report: options.report = 'Y'\n input['report'] = options.report\n \n input['corr_unit'] = options.corr_unit\n input['pre_filt'] = options.pre_filt\n \n #--------------------------------------------------------\n if input['get_continuous'] == 'N':\n input['iris_merge_auto'] = 'N'\n input['arc_merge_auto'] = 'N'\n else:\n input['iris_merge_auto'] = options.iris_merge_auto\n input['merge_folder'] = options.merge_folder\n input['arc_merge_auto'] = options.arc_merge_auto\n \n for i in ['iris_update', 'arc_update', 'iris_ic', 'arc_ic', \\\n 'iris_merge', 'arc_merge', 'plot_se', 'plot_sta', \\\n 'plot_ev', 'plot_ray', 'plot_epi']:\n if input[i] != 'N':\n input['get_events'] = 'N'\n input['get_continuous'] = 'N'\n input['IRIS'] = 'N'\n input['ArcLink'] = 'N'\n input['iris_ic_auto'] = 'N'\n input['arc_ic_auto'] = 'N'\n input['iris_merge_auto'] = 'N'\n input['arc_merge_auto'] = 'N'\n \n if options.IRIS == 'N':\n input['iris_ic_auto'] = 'N'\n input['iris_merge_auto'] = 'N'\n if options.ArcLink == 'N':\n input['arc_ic_auto'] = 'N'\n input['arc_merge_auto'] = 'N'\n \n if options.ic_no:\n input['iris_ic_auto'] = 'N'\n input['arc_ic_auto'] = 'N'\n \n if options.merge_no:\n input['iris_merge_auto'] = 'N'\n input['arc_merge_auto'] = 'N'\n \n if input['plot_iris'] == 'Y' or input['plot_arc'] == 'Y':\n input['plot_all'] = 'N'",
"def test_verify_list_of_devices_in_my_network():",
"def test_search_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"search master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search master instance from previous result\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n self.ins.runtime[\"route_instance_entry_list\"] = self.ins.get_route_instance_entry(mock_device_ins)\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n match_from_previous_response=True,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with brief and not interested counter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_BRIEF\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=1,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with detail\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_DETAIL\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=18,\n instance_rib_irib_holddown_count=0,\n )\n self.assertTrue(response)\n\n print(\"search instance info but entry don't have related parameter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=22,\n instance_rib_irib_holddown_count=0,\n )\n self.assertFalse(response)\n\n print(\"search instance info with extensive\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=0,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 16)",
"def test_receivedMOTD(self):\n lines = [\n \":host.name 375 nickname :- host.name Message of the Day -\",\n \":host.name 372 nickname :- Welcome to host.name\",\n \":host.name 376 nickname :End of /MOTD command.\",\n ]\n for L in lines:\n self.assertEqual(self.client.calls, [])\n self.client.dataReceived(L + \"\\r\\n\")\n\n self.assertEqual(\n self.client.calls,\n [\n (\n \"receivedMOTD\",\n {\n \"motd\": [\n \"host.name Message of the Day -\",\n \"Welcome to host.name\",\n ]\n },\n )\n ],\n )\n\n # After the motd is delivered, the tracking variable should be\n # reset.\n self.assertIdentical(self.client.motd, None)",
"def test_proc_devices_nodata(self):\n self.assertEqual(jc.parsers.proc_devices.parse('', quiet=True), {})",
"def check_commands(line: str) -> bool:\n\n if line == acc.savek:\n # Save and notify\n acc.save()\n post_query(\"I \" + line + \"!\")\n return True\n elif line == 'x':\n exit(0)\n\n return False",
"def test_verify_state_of_a_device():",
"def test_running_ok():\n cli_result = subprocess.run(\n ['kaiba', 'tests/files/good_config.json', 'tests/files/input.json'],\n capture_output=True,\n )\n assert cli_result.returncode == 0"
]
| [
"0.69326276",
"0.65383285",
"0.62935674",
"0.5962256",
"0.580752",
"0.5662563",
"0.5652129",
"0.56469244",
"0.5636164",
"0.562125",
"0.5609909",
"0.5506132",
"0.54622304",
"0.54539406",
"0.544067",
"0.5423402",
"0.54215324",
"0.5406972",
"0.5395801",
"0.53840023",
"0.53741866",
"0.5346925",
"0.53372467",
"0.5330067",
"0.5297219",
"0.5295382",
"0.52911454",
"0.52868325",
"0.52858156",
"0.5270632"
]
| 0.69376826 | 0 |
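The equivalent check can also be run ad hoc against an exported device; a minimal sketch assuming PyTango and a hypothetical device name:

import tango

proxy = tango.DeviceProxy("test/sim/1")  # hypothetical device name

# Init, State and Status are the commands every TANGO device provides by
# default; whatever remains must come from the simdd definition.
default_cmds = {"Init", "State", "Status"}
user_cmds = set(proxy.get_command_list()) - default_cmds
print("commands defined by the simdd file:", sorted(user_cmds))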
Test that the TANGO device Init command works correctly. | def test_device_init_command(self):
default_val = 0
self.assertEqual(self.sim_device.integer1, default_val)
# Write to the attribute integer1
self.sim_device.integer1 = 45
self.assertEqual(self.sim_device.integer1, 45)
# Reset the values of the device attributes to default.
self.sim_device.Init()
        # Check that the integer1 attribute is reset.
self.assertEqual(self.sim_device.integer1, default_val) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass",
"def test_create_device1(self):\n pass",
"def test_01_Init(self):\n pass",
"def test_init(self):\n self.assertEqual(self.device_key, self.factory.device_key)",
"def initialise(self):\n self.device.initialise()\n return \"OK\"",
"def test_get_device(self):\n pass",
"def test_get_device(self):\n pass",
"def test_create_device_data(self):\n pass",
"def test_init(power_supply):\n power_supply.Init()\n assert power_supply.state() == tango.DevState.STANDBY",
"def test_begin(mock_machine,mock_network,mock_umqtt):\n ab = AppSwitch.CAppSwitch(app_device)\n ab.begin()\n # checking subscribes\n subscribe_calls = [\n call(b'contX/switch/1/cmnd/version'),\n call(b'contX/switch/1/cmnd/repoversion'),\n call(b'contX/switch/1/cmnd/download'),\n call(b'contX/switch/1/cmnd/install'),\n call(b'contX/switch/1/cmnd/memfree'),\n call(b'contX/switch/1/cmnd/memalloc'),\n call(b'contX/switch/1/cmnd/reboot'),\n call(b'contX/switch/1/cmnd/getip'),\n call(b'contX/switch/1/cmnd/state'),\n call(b'contX/switch/1/cmnd/state1')\n ]\n mock_umqtt.assert_has_calls(subscribe_calls)",
"def test_add_device(self):\n\n pass",
"def test_verify_connection_to_a_device():",
"async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")",
"def test_create_device_template(self):\n pass",
"def init():\n serverboards.info(\"Init test running\")\n time.sleep(0.5)\n serverboards.info(\"Init test stop\")\n return 30",
"def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)",
"async def test_init(airsensor, hass, config):\n\n _, entity_id = airsensor\n entry = await async_setup_entity(hass, config, entity_id)\n assert entry.unique_id == \"BleBox-airSensor-1afe34db9437-0.air\"\n\n state = hass.states.get(entity_id)\n assert state.name == \"airSensor-0.air\"\n\n assert ATTR_PM_0_1 not in state.attributes\n assert ATTR_PM_2_5 not in state.attributes\n assert ATTR_PM_10 not in state.attributes\n\n assert state.attributes[ATTR_ICON] == \"mdi:blur\"\n\n assert state.state == STATE_UNKNOWN\n\n device_registry = dr.async_get(hass)\n device = device_registry.async_get(entry.device_id)\n\n assert device.name == \"My air sensor\"\n assert device.identifiers == {(\"blebox\", \"abcd0123ef5678\")}\n assert device.manufacturer == \"BleBox\"\n assert device.model == \"airSensor\"\n assert device.sw_version == \"1.23\"",
"def test_verify_state_of_a_device():",
"def test_update_device(self):\n pass",
"def test_update_device(self):\n pass",
"def test_create_tang_1(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--trust-url\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)",
"def test_create_tang_2(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--thumbprint=print\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)",
"def test_init():\n client = TestClient()\n client.run('config init')\n assert os.path.exists(client.cache.conan_conf_path)\n assert os.path.exists(client.cache.remotes_path)\n assert os.path.exists(client.cache.settings_path)\n assert os.path.exists(client.cache.default_profile_path)",
"def test_init(self):\n self.assertEqual(self.foo._base_cmd, 'sleep 10; hostname')\n self.assertEqual(self.foo._base_args, {})\n self.assertEqual(self.foo.InputArgs, {})\n self.assertEqual(self.foo.OracleJobName, 'job1')",
"def test_get_devices(self):\n pass",
"def test_get_devices(self):\n pass",
"def test_initialization(self, create_controller: Controller) -> None:\n pass",
"def test_arg_parser_init(self):\n args = self.parser.parse_args(['init'])\n self.assertEqual(args.command, 'init')",
"def test_initialization(self):\n self.assertEqual(self.installer.host_name, \"ec2.amazon.com\")\n self.assertEqual(self.installer.user_name, \"ec2\")\n self.assertEqual(self.installer.os, \"ubuntu\")\n self.assertEqual(self.installer.key_path, \"./ODFEAMIInstanceKey.pem\")\n self.assertEqual(self.installer.RPM_package_version, \"1.4.0\")\n self.assertEqual(self.installer.APT_OSS_version, \"7.4.2\")"
]
| [
"0.7282401",
"0.7282401",
"0.6997539",
"0.6789896",
"0.6767043",
"0.67537886",
"0.67022514",
"0.67022514",
"0.6655565",
"0.65647686",
"0.6561325",
"0.6535144",
"0.6534742",
"0.64849114",
"0.64815253",
"0.64324486",
"0.64249736",
"0.6416037",
"0.6411636",
"0.6399772",
"0.6399772",
"0.63879734",
"0.6380257",
"0.63465637",
"0.6329263",
"0.63203794",
"0.63203794",
"0.62793034",
"0.62627625",
"0.62420654"
]
| 0.7338796 | 0 |
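The write-then-Init round trip in the document above can be reproduced interactively; a minimal sketch assuming PyTango, a hypothetical device name, and an `integer1` attribute whose default value is 0:

import tango

proxy = tango.DeviceProxy("test/sim/1")  # hypothetical device name

proxy.integer1 = 45            # overwrite the default value
assert proxy.integer1 == 45
proxy.Init()                   # Init should restore attribute defaults
assert proxy.integer1 == 0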
Test that you can have multiple sim control devices running. | def test_multiple_sim_control_devices(self):
self.assertGreater(self.sim_control_device1.ping(), 0)
self.assertGreater(self.sim_control_device2.ping(), 0)
self.assertGreater(self.sim_control_device3.ping(), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_controller_switches(self):\n for name in self.our_controllers:\n self.start_controller(name)\n self.assertTrue(self.check_state(name, 'running'), \"{} is starting correctly\".format(name))\n time.sleep(1) # process some update() cycles\n self.stop_controller(name)\n self.assertTrue(self.check_state(name, 'stopped'), \"{} is stopping correctly\".format(name))",
"def test_get_devices1(self):\n pass",
"def test_get_devices(self):\n pass",
"def test_get_devices(self):\n pass",
"def the_user_should_be_able_to_connect_to_one_of_the_devices():\n assert web_app.connect_to_device1()",
"def test_verify_list_of_devices_in_my_network():",
"def test_setup_adds_proper_devices(self, mock_switch, mock_client):\n ports = {\n i: mock.MagicMock(model=model) for i, model in enumerate(mfi.SWITCH_MODELS)\n }\n ports[\"bad\"] = mock.MagicMock(model=\"notaswitch\")\n print(ports[\"bad\"].model)\n mock_client.return_value.get_devices.return_value = [\n mock.MagicMock(ports=ports)\n ]\n assert setup_component(self.hass, switch.DOMAIN, self.GOOD_CONFIG)\n self.hass.block_till_done()\n for ident, port in ports.items():\n if ident != \"bad\":\n mock_switch.assert_any_call(port)\n assert mock.call(ports[\"bad\"], self.hass) not in mock_switch.mock_calls",
"def test_create_different_devices(self):\n command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME] + self.devices\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def test_verify_connection_to_a_device():",
"def test_verify_state_of_a_device():",
"def test_create_device1(self):\n pass",
"def test_multiple_devices(self) -> None:\n\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200,\n payload={\n \"active\": True,\n \"sub\": SUBJECT,\n \"scope\": \" \".join(\n [\n MATRIX_USER_SCOPE,\n f\"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC\",\n f\"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF\",\n ]\n ),\n \"username\": USERNAME,\n },\n )\n )\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n self.get_failure(self.auth.get_user_by_req(request), AuthError)",
"def testControlEnvironment(video1, video2):\n try:\n control.main(video1, video2, Verbose=True, Testing=True)\n return True\n except ValueError:\n return False",
"def test_get_sim_interface_returns_singleton(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n sim_interface1 = locator.get_sim_interface()\n sim_interface2 = locator.get_sim_interface()\n \n self.assertEqual(sim_interface1, sim_interface2,\n 'Two subsequent calls of get_sim_interface returned different instances.')",
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME_2] + self._DEVICES\n self.check_error(StratisCliInUseSameTierError, command_line, _ERROR)",
"def test_moving_devices_1(\n self, management_client, internal_client, inventory_attributes\n ):\n did1 = \"device-id-1\"\n did2 = \"device-id-2\"\n internal_client.create_device(did1, inventory_attributes)\n internal_client.create_device(did2, inventory_attributes)\n\n group = management_client.group(group=\"group-test-1\")\n management_client.addDeviceToGroup(group=group, device=did1)\n management_client.addDeviceToGroup(group=group, device=did2)\n assert len(management_client.getGroupDevices(\"group-test-1\")) == 2\n\n group = management_client.group(group=\"group-test-2\")\n management_client.addDeviceToGroup(group=group, device=did2)\n assert len(management_client.getGroupDevices(\"group-test-1\")) == 1\n assert len(management_client.getGroupDevices(\"group-test-2\")) == 1\n\n management_client.addDeviceToGroup(group=group, device=did1)\n assert (\n len(management_client.getGroupDevices(\"group-test-1\", expected_error=True))\n == 0\n )\n assert len(management_client.getGroupDevices(\"group-test-2\")) == 2\n\n group = management_client.group(group=\"group-test-1\")\n management_client.addDeviceToGroup(group=group, device=did1)\n management_client.addDeviceToGroup(group=group, device=did2)\n assert len(management_client.getGroupDevices(\"group-test-1\")) == 2\n assert (\n len(management_client.getGroupDevices(\"group-test-2\", expected_error=True))\n == 0\n )",
"def shutdown_simulators():\n logging.info(\"Shutting down all simulators...\")\n\n try:\n subprocess.Popen(\n \"xcrun simctl shutdown all\",\n shell=True\n ).wait()\n\n except Exception as e:\n logging.error(\"Shutting down the simulators failed with error '{ERROR}'\".format(ERROR=e))\n return False\n\n logging.info(\"Simulators shut down!\")\n return True",
"def test_begin(mock_machine,mock_network,mock_umqtt):\n ab = AppSwitch.CAppSwitch(app_device)\n ab.begin()\n # checking subscribes\n subscribe_calls = [\n call(b'contX/switch/1/cmnd/version'),\n call(b'contX/switch/1/cmnd/repoversion'),\n call(b'contX/switch/1/cmnd/download'),\n call(b'contX/switch/1/cmnd/install'),\n call(b'contX/switch/1/cmnd/memfree'),\n call(b'contX/switch/1/cmnd/memalloc'),\n call(b'contX/switch/1/cmnd/reboot'),\n call(b'contX/switch/1/cmnd/getip'),\n call(b'contX/switch/1/cmnd/state'),\n call(b'contX/switch/1/cmnd/state1')\n ]\n mock_umqtt.assert_has_calls(subscribe_calls)",
"def test_add_device(self):\n\n pass",
"def test_conditions(self):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()",
"def test_identity_multiple_tape(self, dev, tmpdir, monkeypatch):\n qml.enable_tape()\n\n dev = qml.device(dev, wires=2, keep_files=False)\n\n with qml.tape.QuantumTape() as tape1:\n qml.RX(0.133, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.RX(0.432, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n circuits = [tape1, tape2]\n\n test_uuid = \"1234\"\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: None,\n )\n\n # Disable random uuid generation\n m.setattr(uuid, \"uuid4\", lambda *args: test_uuid)\n\n res = dev.batch_execute(circuits)\n\n # No workflow files were created because we only computed with\n # identities\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n\n expected = [\n np.ones(1),\n np.ones(2),\n ]\n\n for r, e in zip(res, expected):\n assert np.allclose(r, e)\n\n qml.disable_tape()",
"def test_multiple_commands_at_same_time(self):",
"async def test_sensors_with_multiple_bridges(hass, mock_bridge):\n mock_bridge_2 = create_mock_bridge()\n mock_bridge_2.mock_sensor_responses.append({\n \"1\": PRESENCE_SENSOR_3_PRESENT,\n \"2\": LIGHT_LEVEL_SENSOR_3,\n \"3\": TEMPERATURE_SENSOR_3,\n })\n mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)\n await setup_bridge(hass, mock_bridge)\n await setup_bridge(hass, mock_bridge_2, hostname='mock-bridge-2')\n\n assert len(mock_bridge.mock_requests) == 1\n assert len(mock_bridge_2.mock_requests) == 1\n # 3 \"physical\" sensors with 3 virtual sensors each\n assert len(hass.states.async_all()) == 9",
"def run_test(dut):\n tb = MyTB(dut,int(os.environ['SIM_LEN']))\n cocotb.fork(Clock(dut.clk, 10).start())\n stim_thread = cocotb.fork(tb.stim_mux())\n read_thread = cocotb.fork(tb.read_mux())\n yield stim_thread.join()\n yield read_thread.join()\n if tb.sim_mux_sel != tb.py_sel_input:\n raise TestFailure(\"Mux selection failed.\")\n else:\n raise TestSuccess(\"Mux sel successful\")",
"async def test_device_registry_calls(opp):\n dev_reg = async_get(opp)\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test\",\n \"slug\": \"test\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"test\",\n \"url\": \"https://github.com/openpeerpower/addons/test\",\n },\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n os_mock_data = {\n \"board\": \"odroid-n2\",\n \"boot\": \"A\",\n \"update_available\": False,\n \"version\": \"5.12\",\n \"version_latest\": \"5.12\",\n }\n\n with patch.dict(os.environ, MOCK_ENVIRON), patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)\n config_entry.add_to_opp(opp)\n assert await opp.config_entries.async_setup(config_entry.entry_id)\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 3\n\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n\n # Test that when addon is removed, next update will remove the add-on and subsequent updates won't\n with patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=1))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 2\n\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=2))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 2\n\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n {\n \"name\": \"test3\",\n \"slug\": \"test3\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n\n # Test that when addon is added, next update will reload the entry so we register\n # a new device\n with patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=3))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 3",
"def test_simulator_0_shots():\n dev = _aws_device(wires=2, device_type=AwsDeviceType.SIMULATOR, shots=0)\n assert dev.shots == 1\n assert dev.analytic",
"async def test_meross_mss425f_setup(hass: HomeAssistant) -> None:\n accessories = await setup_accessories_from_file(hass, \"mss425f.json\")\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"MSS425F-15cc\",\n model=\"MSS425F\",\n manufacturer=\"Meross\",\n sw_version=\"4.2.3\",\n hw_version=\"4.0.0\",\n serial_number=\"HH41234\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"button.mss425f_15cc_identify\",\n friendly_name=\"MSS425F-15cc Identify\",\n unique_id=\"00:00:00:00:00:00_1_1_2\",\n entity_category=EntityCategory.DIAGNOSTIC,\n state=STATE_UNKNOWN,\n ),\n EntityTestInfo(\n entity_id=\"switch.mss425f_15cc_outlet_1\",\n friendly_name=\"MSS425F-15cc Outlet-1\",\n unique_id=\"00:00:00:00:00:00_1_12\",\n state=STATE_ON,\n ),\n EntityTestInfo(\n entity_id=\"switch.mss425f_15cc_outlet_2\",\n friendly_name=\"MSS425F-15cc Outlet-2\",\n unique_id=\"00:00:00:00:00:00_1_15\",\n state=STATE_ON,\n ),\n EntityTestInfo(\n entity_id=\"switch.mss425f_15cc_outlet_3\",\n friendly_name=\"MSS425F-15cc Outlet-3\",\n unique_id=\"00:00:00:00:00:00_1_18\",\n state=STATE_ON,\n ),\n EntityTestInfo(\n entity_id=\"switch.mss425f_15cc_outlet_4\",\n friendly_name=\"MSS425F-15cc Outlet-4\",\n unique_id=\"00:00:00:00:00:00_1_21\",\n state=STATE_ON,\n ),\n EntityTestInfo(\n entity_id=\"switch.mss425f_15cc_usb\",\n friendly_name=\"MSS425F-15cc USB\",\n unique_id=\"00:00:00:00:00:00_1_24\",\n state=STATE_ON,\n ),\n ],\n ),\n )",
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass"
]
| [
"0.675677",
"0.6411198",
"0.6409038",
"0.6409038",
"0.63815534",
"0.6307721",
"0.625616",
"0.62448555",
"0.62051547",
"0.61276186",
"0.6113333",
"0.6086259",
"0.6072002",
"0.6033512",
"0.60321414",
"0.5989508",
"0.595629",
"0.58943605",
"0.58777815",
"0.58610326",
"0.5858526",
"0.58490175",
"0.5837882",
"0.58259237",
"0.5794449",
"0.5783686",
"0.5775634",
"0.57750773",
"0.57696575",
"0.57696575"
]
| 0.8147376 | 0 |
Asynchronously create the TokenInstance | async def create_async(
self,
grant_type: str,
client_sid: str,
client_secret: Union[str, object] = values.unset,
code: Union[str, object] = values.unset,
code_verifier: Union[str, object] = values.unset,
device_code: Union[str, object] = values.unset,
refresh_token: Union[str, object] = values.unset,
device_id: Union[str, object] = values.unset,
) -> TokenInstance:
data = values.of(
{
"GrantType": grant_type,
"ClientSid": client_sid,
"ClientSecret": client_secret,
"Code": code,
"CodeVerifier": code_verifier,
"DeviceCode": device_code,
"RefreshToken": refresh_token,
"DeviceId": device_id,
}
)
payload = await self._version.create_async(
method="POST",
uri=self._uri,
data=data,
)
return TokenInstance(self._version, payload) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def create_token(self, *args, **kwargs) -> OAuth2Token:\n token = await super().create_token(*args, **kwargs)\n # NOTE: Save data from token to db here.\n return token",
"def create_token(self, token_id, data):\n raise exception.NotImplemented() # pragma: no cover",
"def async_token_request():\n required_params = ['login_hint',\n 'client_id',\n 'scope',\n 'mccmnc',\n 'redirect_uri']\n optional_params = ['correlation_id']\n validated_params = validate_params(request, required_params, optional_params)\n\n # if this was not a mock we would request a token from zenkey\n\n # create mock auth req id\n auth_req_id = validated_params['login_hint'] + '_' + str(secrets.SystemRandom().randrange(100000))\n\n return jsonify({\n 'auth_req_id': auth_req_id,\n 'expires_in': 3600\n })",
"async def async_initialize_token(self) -> None:\n try:\n # Get first the token\n async with self._session.get(\n f\"http://{self.host}/common_page/login.html\",\n headers=self.headers,\n timeout=10,\n ) as response:\n await response.text()\n self.token = response.cookies[\"sessionToken\"].value\n\n except (asyncio.TimeoutError, aiohttp.ClientError) as err:\n _LOGGER.error(\"Can not load login page from %s: %s\", self.host, err)\n raise exceptions.ConnectBoxConnectionError()\n\n await self._async_initialize_token_with_password(CMD_LOGIN)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)",
"def create_auth_token(sender, instance=None, created=False, **kwargs): # pylint: disable=unused-argument\n if created:\n Token.objects.create(user=instance) # pylint: disable=no-member",
"async def init(self) -> None:",
"async def init(self) -> None:",
"def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")",
"def init_future(self) -> Future:\n return self._init_future",
"def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token",
"def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()",
"def get_new_token(self):\n # Save result of this API call into self instance __token\n self.__token = apidnac.ApiDNAC.api_get_token()\n # Save result to the defined parameter (\"token\") in file cache_config\n self.save_param('token', self.__token)\n # Return self instance __token\n return self.__token",
"def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)",
"def __init__(self):\n self.token = None\n self.login()",
"def async_token_grant():\n required_params = ['auth_req_id',\n 'state',\n 'scope']\n optional_params = ['access_token',\n 'expires_in',\n 'refresh_token',\n 'id_token',\n 'error',\n 'error_description',\n 'correlation_id']\n validate_params(request, required_params, optional_params)\n\n # if this was not a mock we would save the ranted token infromation to a db\n\n return \"\"",
"def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='[email protected]', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)",
"def create(self, request):\n token = AuthTokenClass().post(request)\n return token",
"def __enter__(self):\n logging.debug(\"In the FMC __enter__() class method.\")\n self.mytoken = Token(host=self.host, username=self.username, password=self.password, verify_cert=self.VERIFY_CERT)\n self.uuid = self.mytoken.uuid\n self.base_url = \"https://{}/{}/domain/{}\".format(self.host, self.API_CONFIG_VERSION, self.uuid)\n return self",
"def post(self, request, *args, **kwargs):\n self.create(request, *args, **kwargs)\n token, created = Token.objects.get_or_create(user=self.user)\n return Response({'token': token.key}, status=201)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n\n if created:\n # Generate API token for user.\n api_token = Token.objects.create(user=instance)\n\n # Only create agent using username and API token for non-admin users.\n if instance.is_superuser is False:\n Agent.objects.create(scan_agent=instance, api_token=api_token)",
"async def token(request: Request):\n return get_token()",
"def __init__(self, name=None, auth_token_provider_title=None, auth_token_provider_default_claims=None, auth_token_provider_endpoint=None, auth_access_token_request=None, auth_token_provider_keypair_alias=None, auth_token_provider_conn_timeout=None, auth_token_provider_so_timeout=None, auth_token_provider_client_id=None, auth_token_provider_scope=None, auth_token_provider_reuse_access_token=None, auth_token_provider_relaxed_ssl=None, token_request_customizer_type=None, auth_token_validator_type=None): # noqa: E501 # noqa: E501\n\n self._name = None\n self._auth_token_provider_title = None\n self._auth_token_provider_default_claims = None\n self._auth_token_provider_endpoint = None\n self._auth_access_token_request = None\n self._auth_token_provider_keypair_alias = None\n self._auth_token_provider_conn_timeout = None\n self._auth_token_provider_so_timeout = None\n self._auth_token_provider_client_id = None\n self._auth_token_provider_scope = None\n self._auth_token_provider_reuse_access_token = None\n self._auth_token_provider_relaxed_ssl = None\n self._token_request_customizer_type = None\n self._auth_token_validator_type = None\n self.discriminator = None\n\n if name is not None:\n self.name = name\n if auth_token_provider_title is not None:\n self.auth_token_provider_title = auth_token_provider_title\n if auth_token_provider_default_claims is not None:\n self.auth_token_provider_default_claims = auth_token_provider_default_claims\n if auth_token_provider_endpoint is not None:\n self.auth_token_provider_endpoint = auth_token_provider_endpoint\n if auth_access_token_request is not None:\n self.auth_access_token_request = auth_access_token_request\n if auth_token_provider_keypair_alias is not None:\n self.auth_token_provider_keypair_alias = auth_token_provider_keypair_alias\n if auth_token_provider_conn_timeout is not None:\n self.auth_token_provider_conn_timeout = auth_token_provider_conn_timeout\n if auth_token_provider_so_timeout is not None:\n self.auth_token_provider_so_timeout = auth_token_provider_so_timeout\n if auth_token_provider_client_id is not None:\n self.auth_token_provider_client_id = auth_token_provider_client_id\n if auth_token_provider_scope is not None:\n self.auth_token_provider_scope = auth_token_provider_scope\n if auth_token_provider_reuse_access_token is not None:\n self.auth_token_provider_reuse_access_token = auth_token_provider_reuse_access_token\n if auth_token_provider_relaxed_ssl is not None:\n self.auth_token_provider_relaxed_ssl = auth_token_provider_relaxed_ssl\n if token_request_customizer_type is not None:\n self.token_request_customizer_type = token_request_customizer_type\n if auth_token_validator_type is not None:\n self.auth_token_validator_type = auth_token_validator_type",
"def __await__(self):\n\n async def _init():\n if not self._active:\n await self._setup()\n self._active = True\n self._awaited = True\n return self\n\n return _init().__await__()",
"def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)",
"def _mint_oauth_token_async(\n token_factory, email, scopes, lifetime_sec=0, delegates=None):\n # Query IAM credentials generateAccessToken API to obtain an OAuth token for\n # a given service account. Maximum lifetime is 1 hour. And can be obtained\n # through a chain of delegates.\n logging.info(\n 'Refreshing the access token for %s with scopes %s',\n email, scopes\n )\n\n request_body = {'scope': scopes}\n if delegates:\n request_body['delegates'] = delegates\n if lifetime_sec > 0:\n # Api accepts number of seconds with trailing 's'\n request_body['lifetime'] = '%ds' % lifetime_sec\n\n http_auth, _ = yield token_factory()\n response = yield _call_async(\n url='https://iamcredentials.googleapis.com/v1/projects/-/'\n 'serviceAccounts/%s:generateAccessToken' % urllib.parse.quote_plus(email),\n method='POST',\n headers={\n 'Accept': 'application/json',\n 'Authorization': 'Bearer %s' % http_auth,\n 'Content-Type': 'application/json; charset=utf-8',\n },\n payload=utils.encode_to_json(request_body),\n )\n expired_at = int(utils.datetime_to_timestamp(\n utils.parse_rfc3339_datetime(response['expireTime'])) / 1e6)\n raise ndb.Return({\n 'access_token': response['accessToken'],\n 'exp_ts': expired_at,\n })",
"def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()",
"async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return"
]
| [
"0.67421234",
"0.6041978",
"0.5987314",
"0.5978837",
"0.5966229",
"0.5966229",
"0.5966229",
"0.591489",
"0.5881262",
"0.5881262",
"0.58688843",
"0.5868686",
"0.5779239",
"0.5765783",
"0.5763097",
"0.57302094",
"0.57297224",
"0.5726904",
"0.5722842",
"0.57181853",
"0.57093436",
"0.5681856",
"0.56565833",
"0.5622326",
"0.55911815",
"0.5535805",
"0.55153805",
"0.5507628",
"0.5497201",
"0.54772294"
]
| 0.6723087 | 1 |
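A minimal usage sketch for the create_async record above, assuming a Twilio-style async client object whose token list exposes this method; the client variable, credential values, and entry point are hypothetical placeholders, not a confirmed API surface.

import asyncio

async def fetch_token(client):
    # `client` is assumed to be an already-configured async REST client
    # exposing the token list shown above; per the record's signature only
    # grant_type and client_sid are required, the rest default to values.unset.
    token = await client.tokens.create_async(
        grant_type="client_credentials",
        client_sid="CLxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",  # placeholder SID
        client_secret="my-client-secret",                 # placeholder secret
    )
    return token

# asyncio.run(fetch_token(client))  # run with a configured client instance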
Convert a description to grammar. Each line is a rule for a | def grammar(description, whitespace=r'\s*'):
    G = {' ': whitespace}
    description = description.replace('\t', ' ')  # handle tabs in description
    for line in split(description, "\n"):
        lhs, rhs = split(line, "=>")
        alternatives = split(rhs, ' | ')
        G[lhs] = tuple(map(split, alternatives))
    return G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grammar(description, whitespace = r'\\s*'):\n def split(text, sep = None, maxsplit = -1):\n \"Like str.split applied to text, but strips whitespace from each piece.\"\n return [t.strip() for t in text.strip().split(sep, maxsplit) if t]\n\n G = {' ': whitespace}\n description = description.replace('\\t', ' ') # no tabs!\n for line in split(description, '\\n'):\n lhs, rhs = split(line, ' => ', 1)\n alternatives = split(rhs, ' | ')\n G[lhs] = tuple(map(split, alternatives))\n return G",
"def read_grammar_rules(istream):\n for line in istream:\n line = line.strip()\n if not line:\n continue\n fields = line.split('|||')\n if len(fields) != 3:\n raise ValueError('I expected 3 fields: %s', fields)\n lhs = fields[0].strip()\n\n if lhs[0] == '[':\n lhs = Nonterminal(lhs[1:-1])\n else:\n lhs = Terminal(lhs)\n rhs = fields[1].strip().split()\n new_rhs = []\n for r in rhs:\n if r[0] == '[':\n r = Nonterminal(r[1:-1])\n else:\n r = Terminal(r)\n new_rhs.append(r)\n\n prob = float(fields[2].strip())\n yield Rule(lhs, new_rhs, prob)",
"def from_string(representation):\r\n gramm = Grammar()\r\n\r\n for rule in representation.strip().split('\\n'):\r\n gramm._add_rule(rule)\r\n\r\n return gramm",
"def convert_grammar(grammar):\n\n # Remove all the productions of the type A -> X B C or A -> B a.\n global RULE_DICT\n unit_productions, result = [], []\n res_append = result.append\n index = 0\n\n for rule in grammar:\n new_rules = []\n if len(rule) == 2 and rule[1][0] != \"'\":\n # Rule is in form A -> X, so back it up for later and continue with the next rule.\n unit_productions.append(rule)\n add_rule(rule)\n continue\n elif len(rule) > 2:\n # Rule is in form A -> X B C [...] or A -> X a.\n terminals = [(item, i) for i, item in enumerate(rule) if item[0] == \"'\"]\n if terminals:\n for item in terminals:\n # Create a new non terminal symbol and replace the terminal symbol with it.\n # The non terminal symbol derives the replaced terminal symbol.\n rule[item[1]] = f\"{rule[0]}{str(index)}\"\n new_rules += [f\"{rule[0]}{str(index)}\", item[0]]\n index += 1\n while len(rule) > 3:\n new_rules.append([f\"{rule[0]}{str(index)}\", rule[1], rule[2]])\n rule = [rule[0]] + [f\"{rule[0]}{str(index)}\"] + rule[3:]\n index += 1\n # Adds the modified or unmodified (in case of A -> x i.e.) rules.\n add_rule(rule)\n res_append(rule)\n if new_rules:\n result.extend(new_rules)\n # Handle the unit productions (A -> X)\n while unit_productions:\n rule = unit_productions.pop()\n if rule[1] in RULE_DICT:\n for item in RULE_DICT[rule[1]]:\n new_rule = [rule[0]] + item\n if len(new_rule) > 2 or new_rule[1][0] == \"'\":\n result.insert(0, new_rule)\n else:\n unit_productions.append(new_rule)\n add_rule(new_rule)\n return result",
"def parse(description, grammar_path=None, start_rule=None):\n parser = HgvsParser(grammar_path, start_rule)\n\n return FinalTransformer().transform(\n AmbigTransformer().transform(\n ProteinTransformer().transform(parser.parse(description))\n )\n )",
"def iterrules(istream):\n for line in istream:\n if line.startswith('#'):\n continue\n line = line.strip()\n if not line:\n continue\n fields = line.split(' ||| ')\n if len(fields) < 4:\n raise ValueError('I expected at least 4 fields, got %d: %s' % (len(fields), fields))\n if not is_nonterminal(fields[0]):\n raise ValueError('Expected a nonterminal LHS, got something else: <%s>' % fields[0])\n lhs = Nonterminal(fields[0][1:-1]) # ignore brackets\n f_rhs = tuple(Nonterminal(x[1:-1]) if is_nonterminal(x) else Terminal(x) for x in fields[1].split())\n e_rhs = tuple(Nonterminal(x[1:-1]) if is_nonterminal(x) else Terminal(x) for x in fields[2].split())\n features = defaultdict(None, iterpairs(fields[3]))\n yield SCFGProduction.create(lhs, f_rhs, e_rhs, features)",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def parse_description(description):\n nodes = dict()\n outputs = dict()\n edges = dict()\n for line in description:\n components = line.strip().split()\n if not len(components): continue\n sentinel = components[0]\n if sentinel == 'value':\n _, value, _, _, _, node_name = components\n node = nodes.get(node_name, tuple())\n nodes[node_name] = node + (value, )\n else:\n (_, node_name, _, _, _, low_dictionary_name, low_node_name,\n _, _, _, high_dictionary_name, high_node_name) = components\n low_dictionary = \\\n nodes if low_dictionary_name == 'bot' else outputs\n high_dictionary = \\\n nodes if high_dictionary_name == 'bot' else outputs\n edges[node_name] = ((low_dictionary, low_node_name),\n (high_dictionary, high_node_name))\n return nodes, outputs, edges",
"def 语法规则(自身, 描述, 优先级=None):\n 部分 = 描述.split()\n 名称 = 部分[0]\n if 部分[1] != \":\":\n raise ParserGeneratorError(\"Expecting :\")\n 规则文本 = \" \".join(部分[2:])\n 所有规则 = 规则文本.split(\"|\")\n\n def inner(func):\n for 规则 in 所有规则:\n 各符号 = 规则.split()\n 自身.productions.append((名称, 各符号, func, 优先级))\n return func\n return inner",
"def parse (self, filename, verbose=False) :\n\t\tout_grammar = Grammar()\n\t\tself.preproc.addToQueue (filename)\n\n\t\twhile not self.preproc.queueIsEmpty() :\n\n\t\t\t#tokenize grammar source\n\t\t\tfilename = self.preproc.queue[0]\n\t\t\tsource = io.gettextfilecontent (filename)\n\t\t\tlang = GenericGrammarTokenizer._tokenize (\n\t\t\t\tTokenizer (GenericGrammarTokenizer.grammartokens), \n\t\t\t\tsource,\n\t\t\t\tverbose\n\t\t\t)\n\t\t\t\n\t\t\t#preprocessor here (one pass preprocessor)\n\t\t\tlang.tokenized = self.preproc.preprocess (filename, lang.tokenized)\n\n\t\t\t#text tokens are needed for next step\n\t\t\ttxtok = transformtosource (lang.tokenized)\n\t\t\t#tokenize in abstract grammar tokens\n\t\t\tgram = GenericGrammarTokenizer._tokenize (\n\t\t\t\tTokenizer (GenericGrammarTokenizer.genericgrammarprodrules),\n\t\t\t\ttxtok,\n\t\t\t\tverbose\n\t\t\t)\n\n\t\t\t##make production rules\n\t\t\tgrammar = Grammar ()\n\t\t\tresult = grammar.makegrammar (\n\t\t\t\tgram.tokenized,\n\t\t\t\tlang.tokenized,\n\t\t\t)\n\t\t\tif (result == []) :\n\t\t\t\tif verbose : print (grammar)\n\t\t\t\tout_grammar.merge (grammar)\n\t\t\telse :\n\t\t\t\tio.Printer.showerr (result)\n\t\t\t\treturn Grammar()\n\n\t\treturn out_grammar",
"def read_file(self, file_src):\n with open(file_src, \"r\") as fobj:\n grammar = Grammar()\n settings = Settings()\n for line in fobj:\n rhs = None #right-hand-side of a rule\n lhs = None #left-hand-side of a rule\n state = \"lhs\"\n words = line.rstrip().split()\n for word in words:\n if (words.index(word) == 0 and word == \"axiom:\"):\n words.remove(word)\n grammar.axiom = ' '.join(words)\n elif (words.index(word) > 0 and words[0] == \"angle_z:\"):\n settings.angle_z_min = int(words[1])\n settings.angle_z_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"angle_y:\"):\n settings.angle_y_min = int(words[1])\n settings.angle_y_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"angle_x:\"):\n settings.angle_x_min = int(words[1])\n settings.angle_x_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"branch-shortening:\"):\n settings.branch_min = float(words[1])\n settings.branch_max = float(words[3])\n #elif (words.index(word) > 0 and words[0] == \"num_sides:\"):\n #grammar.num_sides = int(words[1])\n elif (words.index(word) > 0 and words[0] == \"base_radius:\"):\n settings.base_radius = float(words[1])\n elif (words.index(word) > 0 and words[0] == \"rules:\"):\n if(state == \"lhs\"):\n lhs = word\n if(lhs not in grammar.variables):\n grammar.variables.add(lhs)\n state = \"rhs\"\n continue\n if(state == \"rhs\" and word != \"->\"):\n rhs = word\n if(\",\" in rhs):\n rhs = rhs.replace(\",\", \"\")\n grammar.rules.add(Rule(lhs,rhs))\n state = \"lhs\"\n elif (words.index(word) > 0 and words[0] == \"generations:\"):\n settings.generations = int(words[1])\n elif (words.index(word) > 0 and words[0] == \"base_length:\"):\n settings.base_length = float(words[1])\n elif (words.index(word) > 0 and words[0] == \"bark_texture:\"):\n settings.bark_path = words[1]\n elif (words.index(word) > 0 and words[0] == \"leaf_texture:\"):\n settings.leaf_path = words[1]\n return [grammar, settings]",
"def create_product_rule(path):\n def add_rule_to_dict(rule_dict, lhs, rhs):\n \"\"\"Add production rule to dictionary.\n\n Add production rule and nonterminal to dict\n if don't have the key.\n\n Args:\n rule_dict: dict, mapping terminal symbol or production rule\n to nonterminal\n lhs: rule of right hand side\n rhs: rule of left hand side\n\n Returns:\n rule_dict: a updated dict\n \"\"\"\n if rhs not in rule_dict:\n rule_dict[rhs] = list()\n rule_dict[rhs].append(lhs) \n return rule_dict\n\n grammar = dict()\n for line in load_data(path):\n lhs, rhs = line.split('->')\n lhs, rhs = lhs.strip(), rhs.strip()\n rhs = [ tok for tok in rhs.split() if tok !='']\n if len(rhs) == 1:\n tok = rhs[0]\n assert '\\\"' in tok\n # add production rule\n add_rule_to_dict(grammar, lhs, tok)\n elif len(rhs) == 2:\n jointed_rhs = '|'.join(rhs)\n # add production rule\n add_rule_to_dict(grammar, lhs, jointed_rhs)\n else:\n print('production rule not in CNF', line)\n return grammar # grammar {'\"token1\"': ['V', 'SIGMA'], 'V|NP': ['VP']}",
"def generate_grammar(gram):\r\n c = 0\r\n while gram[c] != \"start_variable\": # find start variable\r\n c += 1\r\n start = gram[c+1]\r\n grammar = pcfg.PCFG(start) # create a PCFG with start and no rules\r\n while gram[c] != \"Grammar\": # find the index of the first rule\r\n c += 1\r\n c += 3\r\n\r\n while gram[c] != '###########':\r\n c = adding_rules_grammar(c, gram, grammar) # find each rule from the grammar and add it to the grammar\r\n c += 1\r\n\r\n while gram[c] != \"Lexicon\": # find the index of the first rule of the lexicon\r\n c += 1\r\n c += 3\r\n\r\n while c < len(gram):\r\n var = gram[c]\r\n c = adding_rules_lexicon(c, gram, grammar, var) # find each rule from the lexicon and add it to the grammar\r\n c += 1\r\n return grammar",
"def translate_coding_to_rule(self, rule):\n node = Node(\"\", None, None, None)\n node.code_to_rule(rule, None)\n self.rule = node\n self.human_read = self.rule.visit_easy_read()\n self.polish_notation = self.rule.visit_with_polish_notation()\n self.coding = self.rule.visit_make_coding()\n self.find_needed_premises()\n self.find_conclusions()",
"def adding_rules_grammar(c, gram, grammar):\r\n vari = gram[c] # starting at the variable\r\n tup = [] # a list of the variables in the derivation\r\n c += 2 # skip vari and the arrow\r\n while \"[\" not in gram[c]: # while there are variables in the derivation\r\n tup.append(gram[c]) # add them to the list\r\n c += 1\r\n tuple(tup)\r\n num = gram[c][1:len(gram[c]) - 1] # get the probability out of the brackets\r\n r = pcfg.PRule(vari, tup, num) # create a new rule\r\n grammar.add_rule(r)\r\n return c # return the index\r",
"def __init__(self, rules):\n\n self.grammar = defaultdict(list)\n self.word_pos = dict()\n self.pos = set()\n\n for rule in rules:\n rule = rule.rstrip()\n if len(rule) > 0:\n rule = rule.split('->') # split start/end\n left = rule[0].strip()\n right = [(re.sub(r'[^a-zA-Z\\d\\s-]', '', r)).strip().split(' ') for r in rule[1].split('|')]\n self.grammar[left] += right\n\n # extract POS tags\n # pos iff on lhs of rhs without lhs\n # det -> that\n # that -> #\n for left, right in self.grammar.iteritems():\n for r in right:\n for r2 in r:\n if not self.grammar.has_key(r2):\n self.pos.add(left)",
"def test_clips_defrules_parse_text(self):\n G = CreateTestCarpetingGraph()\n \n nlpGraphProcessor = NLPResultGraphParser()\n res = BLNlpClipsRuleBase()\n seen = []\n\n nlpGraphProcessor.ParseObject(G, \"LayCarpetSubStep5\", seen, res)\n nlpGraphProcessor.ParseObject(G, \"LayCarpetSubStep6\", seen, res)\n\n for defrule in res.Defrules:\n text = defrule.ClipsConstruct()\n if defrule.Name == \"LayCarpetSubStep5\":\n self.assertTrue(\"(defrule LayCarpetSubStep5\" in text)\n self.assertTrue(\"(Room (Id ?id) )\" in text)\n self.assertTrue(\"(CarpetingPattern \" in text)\n self.assertTrue(\"(ShiftRatio ?ratio) \" in text)\n self.assertTrue(\"(ShiftDirection ?sd) \" in text)\n self.assertTrue(\"(Id ?name) \" in text)\n self.assertTrue(\"(if (neq ?ratio 0.0)\" in text)\n self.assertTrue(\"(assert (ProcessSubStep \" in text)\n self.assertTrue(\"(Instruction (str-cat (str-cat (str-cat \\\"Offset tiles by \\\" ?ratio ) \\\" of a tile along the \\\" ) ?sd ) ) \" in text)\n self.assertTrue(\"(Pattern ?name ) \" in text)\n self.assertTrue(\"(ProcessName Carpeting ) \" in text)\n self.assertTrue(\"(StepNumber 4 ) \" in text)\n self.assertTrue(\"(ElementId ?id ) \" in text)\n self.assertTrue(\"(ProcessStepId \\\"Lay the tiles\\\" ) \" in text)\n \n elif defrule.Name == \"LayCarpetSubStep6\":\n self.assertTrue(\"(defrule LayCarpetSubStep6\" in text)\n self.assertTrue(\"(Room (Id ?id) )\" in text)\n self.assertTrue(\"(CarpetingPattern \" in text)\n self.assertTrue(\"(ShiftRatio ?ratio) \" in text)\n self.assertTrue(\"(Id ?name) \" in text)\n self.assertTrue(\"=>\" in text)\n self.assertTrue(\"(bind ?BindedVariable1 5)\" in text)\n self.assertTrue(\"(if (eq ?ratio 0.0 )\" in text)\n self.assertTrue(\"(bind ?BindedVariable1 4 ))\" in text) \n self.assertTrue(\"(assert (ProcessSubStep \" in text)\n self.assertTrue(\"(Instruction \\\"Form the carpet row by row\\\" ) \" in text)\n self.assertTrue(\"(Pattern ?name ) \" in text)\n self.assertTrue(\"(ProcessName Carpeting ) \" in text)\n self.assertTrue(\"(StepNumber ?BindedVariable1 ) \" in text)\n self.assertTrue(\"(ElementId ?id ) \" in text)\n self.assertTrue(\"(ProcessStepId \\\"Lay the tiles\\\" ) \" in text)",
"def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc",
"def parse(self, line):\n # BEGIN_YOUR_CODE\n line = line.strip('\\n')\n orig_line = line\n\n line = line.split(' ')\n n = len(line)\n\n # initialize log_probs and backpointer\n log_probs = {}\n backpointer = {}\n for i in range(n):\n for j in range(i, n):\n log_probs[(i, j)] = {}\n backpointer[(i, j)] = {}\n for A in self.nonterminal:\n log_probs[(i, j)][A] = -float('inf')\n backpointer[(i, j)][A] = (0, [None])\n\n # fill terminal rules\n for i in range(n):\n for rule in self.from_rhs((line[i],)):\n A = rule.lhs\n new_prob = rule.log_prob\n if new_prob > log_probs[(i, i)][A]:\n log_probs[(i, i)][A] = new_prob\n backpointer[(i, i)][A] = (1, [rule])\n\n # main loop\n binary_filter = lambda rule: len(rule.rhs) == 2\n for l in range(1, n+1):\n for i in range(n-l+1):\n j = i+l-1\n for k in range(i, j):\n for rule in filter(binary_filter, self.rules):\n A = rule.lhs\n B = rule.rhs[0]\n C = rule.rhs[1]\n new_prob = rule.log_prob + log_probs[(i, k)][B] + log_probs[(k+1, j)][C]\n if new_prob > log_probs[(i, j)][A]:\n log_probs[(i, j)][A] = new_prob\n backpointer[(i, j)][A] = (k-i+1, [rule])\n\n found = True\n while found:\n found = False\n for A in log_probs[(i, j)].keys():\n for rule in self.from_rhs((A,)):\n B = rule.lhs\n new_prob = rule.log_prob + log_probs[(i, j)][A]\n if new_prob > log_probs[(i, j)][B]:\n log_probs[(i, j)][B] = new_prob\n backpointer[(i, j)][B] = (j-i+1, [rule])\n found = True\n\n self.print_parse_result(orig_line, log_probs, backpointer, n)\n # END_YOUR_CODE",
"def parse_amie(line: str, relation_to_id: Dict[URIRef, int]) -> Optional['Rule']:\n # extract fields from tsv-formatted AMIE rule\n cells = line.split(\"\\t\")\n rule_string = cells[0]\n std_confidence = float(cells[2].strip())\n pca_confidence = float(cells[3].strip())\n\n # split rule into premise and conclusion\n assert \"=>\" in rule_string, \"Rule string does not contain \\\"=>\\\" substring!\"\n premise, conclusion = [rule_part.strip() for rule_part in rule_string.split(\"=>\") if rule_part]\n\n # TODO: why this replacement (matches \"?[a-zA-Z0-9_]+<whitespace>+?\" (i.e., relation begins with ?)\n premise = re.sub(\"(\\?\\w+)\\s+\\?\", \"\\g<1>|?\", premise)\n conclusion = re.sub(\"(\\?\\w+)\\s+\\?\", \"\\g<1>|?\", conclusion)\n\n # split premise into single literals (i.e., triples)\n antecedents = []\n for antecedent in premise.split(\"|\"):\n literal = Literal.parse_amie(antecedent, relation_to_id)\n if literal is None:\n return None\n antecedents.append(literal)\n\n # split conclusion into single literals (i.e., triples)\n consequents = []\n for consequent in conclusion.split(\"|\"):\n literal = Literal.parse_amie(consequent, relation_to_id)\n if literal is None:\n return None\n consequents.append(literal)\n\n return Rule(antecedents, consequents, std_confidence, pca_confidence)",
"def build_pcfg(self):\n part = 0 # 0 for grammar, 1 for lexicon\n rule = r'(\\d*\\.\\d*)\\ (.*)->(.*)[\\n]*'\n\n with open(self.grammar_txt) as file:\n for line in file:\n if line == 'Grammar\\n':\n continue\n elif line == 'Lexicon\\n':\n part = 1\n else:\n line = [s for s in re.split(rule, line) if s]\n prob, parent, child = line[0], line[1], line[2]\n if part is 0: # Grammar part\n child = tuple(i for i in child.split())\n self.grammar[parent][child] = Decimal(prob)\n else: # Lexicon part\n self.lexicon[parent][child.lower()] = Decimal(prob)\n # print_nested_dict(self.grammar)\n # print_nested_dict(self.lexicon)\n file.close()",
"def str_to_rule(str_in):\r\n log.debug(\"str_to_rule: \"+str_in.strip())\r\n str_i = str_in.strip().split('#')[0].strip()\r\n if len(str_i)>0:\r\n dic_rule = dict(valid=False,type='para',errors=list(),key=\"\",val=\"\")\r\n if(str_i[0]==\"%\"): # % Parameter str_i\r\n lst_par = str_i[1:].split('=')\r\n lst_par = [par.strip() for par in lst_par]\r\n if lst_par[0] in lst_known_para:\r\n dic_rule['key']=lst_par[0].strip()\r\n dic_rule['val']=lst_par[1].strip()\r\n dic_rule['valid']=True\r\n log.info('Parameter recognised: '+str(dic_rule['key'])+' = '+str(dic_rule['val']))\r\n else:\r\n dic_rule['valid']=False\r\n dic_rule['errors'].append(\"Unrecognised parameter: \"+lst_par[0])\r\n log.warning('#205 > '+str(dic_rule['errors'])+' raw line: '+str_i)\r\n elif(str_i[0]==\":\"): # : Rule str_i\r\n dic_rule = dict(valid=False,type='rule',errors=list(),id=\"\",title=\"\",mode=\"\",data_table=\"\",condition=\"\",action=\"\",act_param=\"\",comment=\"\")\r\n lst_items = str_i[1:].split(\":\")\r\n lst_items = [itm.strip() for itm in lst_items]\r\n if len(lst_items)==8:\r\n dic_rule['id']=lst_items[0]\r\n dic_rule['title']=lst_items[1]\r\n dic_rule['mode']=lst_items[2].upper()\r\n dic_rule['layer']=lst_items[3]\r\n dic_rule['condition']=lst_items[4]\r\n dic_rule['action']=lst_items[5]\r\n dic_rule['act_param']=lst_items[6]\r\n dic_rule['comment']=lst_items[7]\r\n dic_rule = sanity_check(dic_rule)\r\n if not dic_rule['valid']:\r\n log.warning('#203 invalid rule > '+str(dic_rule['errors'])+' raw line: '+str_in)\r\n log.debug('parsed good rule: '+str(dic_rule))\r\n else:\r\n dic_rule['errors'].append(\"Rule string does not contain the correct number of elements - Check that you comment do not contain ':'. Ignoring this rule. \\n\\t\"+str_in.strip()+\"\\n\\t\"+str(len(lst_items))+'\\t'+str(lst_items))\r\n log.warning('#202 '+dic_rule['errors'])\r\n dic_rule['valid']=False\r\n else:\r\n dic_rule['errors'].append(\"Rule string must start with #, % or : But I found: \"+str_in[0]+\" in line (\"+str_in+\")\")\r\n log.warning('#201 '+str(dic_rule['errors'][-1:]))\r\n dic_rule['valid']=False\r\n else: # Empty (or only comments) str_i\r\n return {'type':'null', 'valid':True}\r\n return dic_rule",
"def __init__(self, description):\n self.contents = HashMap()\n for gdl in description:\n if not self.contents.containsKey(key):\n self.contents.put(key, ArrayList())\n self.contents.get(key).add(rule)",
"def make_grammar(parse, mrepr='tokens-and-lemmas'):\n prods = []\n for morph in parse:\n pos, lemma = morph.pos, morph.lemma\n if pos.endswith('WB'):\n pos = pos[:-2]\n leaf = morph.pprint(mrepr)\n # tags with '|' split all non-lexical lemmas from lexical ones\n if '|' in pos:\n superpos, pos = pos.split('|')\n if pos.startswith('INFL'):\n nonterminalpos = 'INFL:%s' % lemma\n nonterms = [Nonterminal(nonterminalpos), Nonterminal(pos[-1])]\n elif pos.endswith('INFL'):\n nonterminalpos = 'INFL:%s' % lemma\n nonterms = [Nonterminal(pos[0]), Nonterminal(nonterminalpos)]\n elif pos.startswith('*'): # it's a prefix\n nonterminalpos = 'PRE:%s' % lemma\n nonterms = nonterminals([nonterminalpos]+list(pos[1:]))\n elif pos.endswith(('*', '*WB')): # it's a suffix\n pos = pos[:pos.find('*')]\n nonterminalpos = 'SUF:%s' % lemma\n nonterms = nonterminals((list(pos)+[nonterminalpos]))\n else: # it's a linking element\n nonterminalpos = 'LE:%s' % lemma\n leidx = pos.find('*')\n nonterms = nonterminals(\n list(pos[:leidx])+[nonterminalpos]+list(pos[leidx+1:]))\n if 'x' in pos:\n prods.append(Production(Nonterminal('x'), [leaf]))\n prods.append(Production(Nonterminal(nonterminalpos), [leaf]))\n if nonterms:\n prods.append(Production(Nonterminal(superpos), nonterms))\n else:\n prods.append(Production(Nonterminal(pos), [leaf]))\n return prods",
"def from_string(s):\n r_rule = re.compile(\"^(\\w+): (.*)$\")\n try:\n parent_tag, rules_string = s.split(\" -> \")\n rules = []\n for i in rules_string.split(\",\"):\n optional = i.strip().startswith(\"(\")\n match = r_rule.match(i.strip().strip(\"()\"))\n assert match\n tag, rule = match.groups()\n rules.append(\n {\"optional\": optional, \"tag\": tag, \"rule\": rule})\n return Grammar(parent_tag, rules)\n except (ValueError, AssertionError):\n raise Exception(\"Can not parse.\")",
"def makegrammar (self, tokenizedgrammar, grammartokens) :\n\t\tngp = SequentialParser (tokenizedgrammar, grammartokens) #ngp for naive grammar parser\n\n\t\tngp.parse ()\n\n\t\tself.production_rules = ngp.production_rules\n\t\tself.tokens = ngp.tokens\n\t\tself.labels = ngp.labels\n\t\tself.strnodes = ngp.strnodes\n\n\t\tself.keeper = odict() #ngp.keeper\n\t\tfor k, val in ngp.keeper.items() :\n\t\t\tself.keeper[k] = [v.val if type(v) != str else v for v in val]\n\t\t\tself.keeper[k] = list(set(self.keeper[k]))\n\n\n\t\tself = eliminatedoubles (self)\n\t\t#gramtest = checkproductionrules(self.production_rules) #is fuckedup\n\t\t#return gramtest\n\t\treturn []",
"def __init__(self, rules, lib_name=None):\n if len(rules) < 1:\n raise PegvmException(\"Cannot create a grammar with no rules!\")\n if \"EOI\" in rules:\n raise PegvmException(\"Invalid rule name: 'EOI'\")\n\n self.lib_name = lib_name\n self.lib = imp.load_source('lib', self.lib_name+'.py') if self.lib_name != None else None\n self.top_rule = rules[0]\n self.rules = rules\n self.rule_dict = {}\n for rule in rules:\n rule.set_grammar(self)\n self.rule_dict[rule.name] = rule\n self.rule_dict[\"EOI\"] = EOI([])",
"def parseFile(self, filename):\n grammarFlag = True\n with open(filename, 'r') as f:\n inlines = [line.rstrip() for line in f.readlines()]\n lines = [line for line in inlines if line and line[0] != \"#\"] # eliminate blank line\n for line in lines:\n words = line.split()\n key = words[1]\n rule = [float(words[0])]\n for w in words[2:]:\n if w == \"#\": break # skip comments\n rule.append(w)\n self.grammar[key].append(rule)",
"def _format_description(parser):\n for line in statemachine.string2lines(\n parser.description, tab_width=4, convert_whitespace=True):\n yield line",
"def convert_to_lf(input: str, output_1: str, output_2: str = None):\n\n commands = {'predicates': ['jump', 'run', 'look', 'turn', 'walk'],\n 'directions': ['right', 'left'],\n 'manners': ['around', 'opposite'],\n 'connectives': ['and', 'after'],\n 'repetitions': ['twice', 'thrice']}\n\n examples_parsed = []\n\n all_possible_tokens = []\n for tokens in commands.values():\n all_possible_tokens += tokens\n with open(input, 'r') as f:\n for row in f:\n connective = None\n question = row.split('OUT:')[0].replace('IN:', '').strip()\n denotation = row.split('OUT:')[1].strip()\n parts = [question]\n for token in parts[0].split(' '):\n assert token in all_possible_tokens\n for connective_candidate in commands['connectives']:\n parts = parts[0].split(connective_candidate)\n if len(parts) > 1:\n connective = connective_candidate\n break\n inner_programs = []\n for i, part in enumerate(parts):\n inner_programs.append(get_inner_program(part.split(' '), commands))\n if not connective:\n assert len(inner_programs) == 1\n program = inner_programs[0]\n else:\n assert len(inner_programs) == 2\n program = '{} ( {} , {} )'.format(PREFIX+connective+SUFFIX, inner_programs[0],\n inner_programs[1])\n program = program.replace(' ', ' ')\n examples_parsed.append({'question': question, 'program': program, 'answer': denotation})\n if output_2 is not None: # take 20% for dev\n random.shuffle(examples_parsed)\n train_size = math.ceil(0.8 * len(examples_parsed))\n with open(output_1, 'w') as f_1:\n with open(output_2, 'w') as f_2:\n for i, ex in enumerate(examples_parsed):\n if i < train_size:\n json.dump(ex, f_1)\n f_1.write('\\n')\n else:\n json.dump(ex, f_2)\n f_2.write('\\n')\n else:\n with open(output_1, 'w') as f_1:\n for i, ex in enumerate(examples_parsed):\n json.dump(ex, f_1)\n f_1.write('\\n')"
]
| [
"0.6861162",
"0.64110154",
"0.6409674",
"0.6297929",
"0.61312383",
"0.5980318",
"0.5977484",
"0.5864739",
"0.5814279",
"0.5779114",
"0.57233554",
"0.5705856",
"0.57030374",
"0.5701753",
"0.5641272",
"0.5619116",
"0.56173664",
"0.55892825",
"0.5587026",
"0.55722344",
"0.55215514",
"0.5520074",
"0.5475177",
"0.54745024",
"0.5454612",
"0.54462045",
"0.54381746",
"0.5401832",
"0.53758675",
"0.5361818"
]
| 0.71617424 | 0 |
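A runnable sketch of the grammar() record above. The record's code calls a `split` helper it does not define; the version supplied here follows the helper given in the record's first negative, and the sample description is illustrative only.

def split(text, sep=None, maxsplit=-1):
    # Like str.split applied to text, but strips whitespace from each piece.
    return [t.strip() for t in text.strip().split(sep, maxsplit) if t]

def grammar(description, whitespace=r'\s*'):
    G = {' ': whitespace}
    description = description.replace('\t', ' ')  # handle tabs
    for line in split(description, '\n'):
        lhs, rhs = split(line, '=>', 1)
        alternatives = split(rhs, ' | ')
        G[lhs] = tuple(map(split, alternatives))
    return G

G = grammar("""
Exp    => Term [+-] Exp | Term
Term   => Factor [*/] Term | Factor
Factor => Var | Num | [(] Exp [)]
""")
print(G['Exp'])   # (['Term', '[+-]', 'Exp'], ['Term'])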
Set the value of a variable. The variable will be defined in the runtime context of the preprocessor minilanguage. The value is either the result of safely evaluating the value token with `ast.literal_eval`, or the token itself if it cannot be evaluated. | def set(self, identifier, value_token, *, preprocessor=None):
try:
value = ast.literal_eval(value_token)
except (SyntaxError, ValueError):
value = value_token
setattr(self, identifier, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_value(v):\r\n try:\r\n type (eval(v))\r\n except NameError:\r\n return \"\"\r\n return eval(v)",
"def set_value(v):\r\n try:\r\n type (eval(v))\r\n except NameError:\r\n return \"\"\r\n return eval(v)",
"def set_var(parser,token):\n parts =token.split_contents()\n if len(parts) < 4:\n raise template.TemplateSyntaxError(\"'set' tag must be of the form:{% set <var_name>=\n <var_value>%}\")\n return SetVarNode(parts[1],parts[3])",
"def set(self, var, value):\n cmd = '{0}={1};'.format(var, value)\n out = self.eval(cmd)\n if out.find(\"error\") != -1:\n raise TypeError(\"Error executing code in Matlab\\nCODE:\\n\\t{0}\\nMatlab ERROR:\\n\\t{1}\".format(cmd, out))",
"def setvar(parser, token):\n try:\n # split_contents() knows not to split quoted strings.\n tag_name, varname = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError, \"%r tag requires a single argument for variable name\" % token.contents.split()[0]\n\n nodelist = parser.parse(('endsetvar',))\n parser.delete_first_token()\n return SetVariable(varname, nodelist)",
"def set_variable(self, request, context):\n response = SetVariableResponse()\n value = decode(request.value)\n self._delegator.set_variable(request.component, request.variable, value)\n return response",
"def assign_variable(self, name, value):\n return self.set_variable(name, value)",
"def set_variable(self, name, value):\n self.send_to_backend('set', name=name, value=value)\n self.refresh_variable(name)",
"def set(self, value):\n assert (not is_sequence_like(value)) and (not is_dict_like(value)), 'the value must be an atomic primitive'\n token_index = self._value_token_index()\n self._tokens[token_index] = py2toml.create_primitive_token(value)",
"def set_variable(self, name, value):\n if name not in self._variables:\n logging.warning(\"Pipeline variable '%s' was not initialized\", name)\n self._variables[name].update({'value': value})\n return self",
"def set_assignment(self, var, value):\n self.variable_to_value[var] = value",
"def set_variable_value(self, var, value):\n \n namespace = self.first_namespace_that_binds_the_var(var)\n if namespace is None:\n raise LookupError(f'cannot set the variable \"{var}\" to the value {value}: '\n 'the variable is not bound in the current environment')\n namespace[var] = value",
"def set_variable(self, name, value):\n if self._scalamagic and (not name.startswith(\"_i\")):\n self.scala_interpreter.bind(name, value)\n else:\n self.log.debug('Not setting variable %s', name)",
"def assign_variable(executor, variable, value):\n variable = variable.replace(\" \", \"\")\n # TODO Should move parsing of this to ParsedStatementLet.\n # TODO Need to handle N-dimensional array element assignment.\n i = variable.find(\"(\")\n if i != -1:\n # Array reference\n j = variable.find(\")\", i+1)\n if j == -1:\n raise BasicSyntaxError(F\"Missing ) in in array assignment to {variable}\")\n if i+1 == j:\n raise BasicSyntaxError(F\"Missing array subscript in assignment to {variable}\")\n\n subscripts = variable[i+1:j].split(\",\")\n variable = variable[:i]\n is_valid_identifier(variable)\n subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts]\n executor.put_symbol_element(variable, value, subscripts)\n else:\n is_valid_identifier(variable)\n executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None)",
"def replace_variable(self, lexeme, variable_value):\r\n\r\n lexeme.replace(\r\n self.variable_lexeme_replace_type,\r\n variable_value\r\n )",
"def set(self, key, value):\n if (\n key in self.variables and\n type(value).__name__ == self.variables[key]['type']\n ):\n self.variables[key]['value'] = value\n else:\n raise ValueError(\"Bad key or wrong variable type\")",
"def f_setvar(self, name, expr):\r\n self.locals_ptr[name] = self.eval(expr, self.locals_ptr)\r\n return \"\"",
"def define_variable(self, var, value):\n self.namespace[var] = value",
"def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)",
"def token(self, value):\r\n self._token = value",
"def assign(self, var, value):\n\t\tself._root = self._insert(self._root, var, value)",
"def setVariable(self, *args):\n return _libsbml.Rule_setVariable(self, *args)",
"def set_variable(self, name, value):\n # Note that \":=\" is used so that we can control the behavior for\n # both Makefile and CMake variables similarly.\n self.write_line(name + \" := \" + value)",
"def set(self, key, value):\n if value is not None:\n self.vars[key] = value",
"def set(self, key, value):\n self.data[key] = value\n logger.debug('Setting value \"%s\" for variable \"%s\"', value, key)",
"def define_var(self, var, value):\n self.binding[var] = value",
"def use_variable(self, v):\n self.resolve(v)",
"def var(parser, token):\n # This version uses a regular expression to parse tag contents.\n try:\n # Splitting by None == splitting by spaces.\n tag_name, arg = token.contents.split(None, 1)\n except ValueError:\n raise template.TemplateSyntaxError, \"%r needs arguments\" % token.contents.split()[0]\n m = re.search(r'(\\S+) is (.+)', arg)\n if not m:\n raise template.TemplateSyntaxError, \"%r tag had invalid arguments\" % tag_name\n var_name, var_value = m.groups()\n return VarNode(var_name, var_value)",
"def set_value(self, value: ScalarType) -> None:\n if isinstance(value, bool):\n value_str = 'true' if value else 'false'\n else:\n value_str = str(value)\n start_mark = self.yaml_node.start_mark\n end_mark = self.yaml_node.end_mark\n # If we're of a class type, then we want to keep that tag so that the\n # correct Constructor is called. If we're a built-in type, set the tag\n # to the appropriate YAML tag.\n tag = self.yaml_node.tag\n if tag.startswith('tag:yaml.org,2002:'):\n tag = scalar_type_to_tag[type(value)]\n new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)\n self.yaml_node = new_node",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)"
]
| [
"0.66423774",
"0.66423774",
"0.6500075",
"0.6474883",
"0.6296716",
"0.62630725",
"0.6242456",
"0.60815567",
"0.60435575",
"0.5987802",
"0.59656864",
"0.59643215",
"0.5951412",
"0.5936918",
"0.58330405",
"0.57996327",
"0.5723935",
"0.57217634",
"0.56929225",
"0.5661026",
"0.5657384",
"0.56193364",
"0.5579898",
"0.5577633",
"0.55509394",
"0.55412924",
"0.5536327",
"0.5474809",
"0.5450019",
"0.5445114"
]
| 0.76264775 | 0 |
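To make the literal_eval fallback in the set() record above concrete, a standalone sketch follows; the preprocessor mini-language context is reduced here to a bare holder class, which is a simplification, not the original class.

import ast

class Variables:
    def set(self, identifier, value_token):
        try:
            value = ast.literal_eval(value_token)
        except (SyntaxError, ValueError):
            value = value_token  # not a Python literal: keep the raw token
        setattr(self, identifier, value)

v = Variables()
v.set("count", "42")      # evaluates to the int 42
v.set("flag", "True")     # evaluates to the bool True
v.set("name", "release")  # not a literal: stays the str "release"
print(type(v.count), type(v.flag), type(v.name))
# <class 'int'> <class 'bool'> <class 'str'>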
r""" Interpret additional tokens if a condition is true. Note This keyword can be written as ``if`` (instead of ``if_``) in the preprocessor directives. | def if_(self, condition_token, keyword_token, *tokens, preprocessor=None):
condition = self._get_token_value(condition_token)
if not isinstance(condition, bool):
raise DoxhooksTypeError(condition, condition_token, "bool")
if condition:
self.interpret(keyword_token, *tokens, preprocessor=preprocessor) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)",
"def if_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"if\")\n expr = self.tokens[self.pos:]\n self.pos = len(self.tokens)\n\n return IfNode(expr)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid if directive.\")",
"def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? \")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)",
"def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)",
"def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()",
"def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)",
"def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()",
"def conditional(self) -> global___Statement.Conditional:",
"def compile_if(self) -> None:\n self._consume('if')\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n\n end_lbl = f'IF_END_{self._if_count}'\n false_lbl = f'IF_FALSE_{self._if_count}'\n self._if_count += 1\n\n self._consume('{')\n self.writer.write_if(false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(end_lbl)\n self.writer.write_label(false_lbl)\n\n self._consume('}')\n\n if self._get_current_token() == 'else':\n self._consume('else')\n self._consume('{')\n self.compile_statements()\n self._consume('}')\n\n self.writer.write_label(end_lbl)",
"def newif(line):\n if not line.startswith(\"\\\\newif\"):\n return False\n pieces = line.split(\"\\\\\")\n if len(pieces) != 4 or pieces[0] != \"\" or pieces[1] != \"newif\":\n print(\"%Wrong number of pieces: \"+line)\n return False\n if not pieces[2].startswith(\"if\"):\n print(\"%Missing if: \"+line)\n return False\n name = pieces[2][2:]\n if not pieces[3].startswith(name):\n print(\"%Name missing: \"+line)\n return False\n value = pieces[3][len(name):]\n if not value in truth:\n print(\"Misunderstood truth value: \"+line)\n return False\n conditionals[\"\\\\if\"+name] = truth[value]\n return True",
"def condition(self) -> global___Expression:",
"def condition(self) -> global___Expression:",
"def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)",
"def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()",
"def convert_if(self, condition, if_kw):\n\n # Run super definition\n condition = super().convert_if(condition)\n\n # Create if template\n if_template = \"{if_kw} {cond}:\" if condition else \"{if_kw}:\"\n\n # Convert if keyword from standard to python\n if if_kw == \"else if\":\n if_kw = \"elif\"\n\n # Replace logical operators\n condition = self.replace_logical_ops(condition, direction=\"from\")\n\n # Return converted if statement\n return [if_template.format(if_kw=if_kw, cond=condition)], []",
"def _analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)",
"def visitIfElse(self, ctx):\n # type: (RelayParser.IfElseContext) -> expr.If\n cond = self.visit(ctx.expr())\n\n self.enter_var_scope()\n true_branch = self.visit(ctx.body(0))\n self.exit_var_scope()\n\n self.enter_var_scope()\n false_branch = self.visit(ctx.body(1))\n self.exit_var_scope()\n\n return expr.If(cond, true_branch, false_branch)",
"def parse_if_cmd(self, line):\n line = re.sub(\"^if *\", \"\", line)\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n # Check all variables have been declared\n any_vars = [i for i in re.findall(IN_STR_VAR_REGEX, statement)]\n # Get the variables declared\n _vars = []\n for var in any_vars:\n _Var = getattr(self, var.strip('$'))\n if type(_Var) == inp_types.Variable: _vars.append(_Var.data)\n else: _vars.append(_Var)\n\n for var_name, var_val in zip(any_vars, _vars):\n statement = statement.replace(var_name, str(var_val))\n\n # Evaluate the if statement\n try:\n var_container = {}\n exec(f\"val = {statement}\", var_container)\n val = var_container['val']\n except Exception as e:\n self.print_error(\"Couldn't parse the if statement\\n\\nError:\"\n + str(e))\n\n end_line = self.get_end_brace()\n\n self.line_num += 1\n if val is False:\n self.line_num = end_line",
"def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"",
"def __parse_conditional(self, buffer):\n\t\tret = []\n\t\t\n\t\twhile True:\n\t\t\tcondition = Condition(self.__read_until(buffer, \"[\"))\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tvalue = self.__read_block(buffer, startchr=\"[\", endchr=\"]\")\n\t\t\tvalue = SpellString(value).format(self.obj, proxy=self.proxy)\n\t\t\tret.append((condition, value))\n\t\t\tif condition.is_else():\n\t\t\t\tbreak\n\t\t\n\t\treturn ret",
"def _if_node(self):\n # `cond` returns a tensor that contains boolean values. We add a `min`\n # operator to checks if there is any false value. If so, this condition\n # doesn't not hold.\n cond = tvm.relay.op.min(self.cond)\n return tvm.relay.If(cond, self.true_branch, self.false_branch)",
"def get_if_condition(self, file, i):\n\n # Check if 'if function' is to run main function of program\n if re.match(\"if __name__ == [\\\"']__main__[\\\"']:\", file[i]) and \\\n re.match(r\"\\s*main\\(\\)\", file[i + 1]):\n\n # If yes, return None\n return \"omit\", 2, \n\n # Run super definition\n line = super().get_if_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)\n line, multi_statement = line[0], line[1]\n\n # Set if keyword for back translation\n ln_split = line.split(\" \")\n if ln_split[0] not in [\"elif\", \"else\"]:\n if_kw = \"if\"\n else:\n if_kw, line = ln_split[0], \" \".join(ln_split[1:]).strip()\n\n # Replace 'elif' with standard\n if if_kw == \"elif\":\n if_kw = \"else if\"\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Check if multiple statements are declared in one line\n if multi_statement.strip():\n start += multi_statement.split(\";\")\n\n # Return if condition\n return line, if_kw, start, end",
"def compile_if(self):\n\n\t\txml = '<ifStatement>\\n' + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\tself.outfile.write('</statements>\\n' + self.tokenizer.symbol())\n\n\t\tif self.tokenizer.get_token() == 'else':\n\t\t\tself.compile_else()\n\n\t\tself.outfile.write('</ifStatement>\\n')",
"def conditional_value(self) -> global___Expression.ConditionalOperator:",
"def embedCondition(self, c, string):\n ifdef = endif = \"\";\n condition = str(c.dependsOn())\n if condition == \"<empty>\": return string\n else:\n return \"#if %s\\n%s#endif // %s\\n\" % (condition, string, condition)",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def ParseCondition(self, condition):\n cl = condition.lower()\n if cl in OS_CONDITIONS:\n self.os_conditions.append(cl)\n elif cl in BROWSER_TYPE_CONDITIONS:\n self.browser_conditions.append(cl)\n elif cl in ASAN_CONDITIONS:\n self.asan_conditions.append(cl)\n else:\n raise ValueError('Unknown expectation condition: \"%s\"' % cl)"
]
| [
"0.6379466",
"0.6259943",
"0.61396646",
"0.61340076",
"0.60854274",
"0.6037291",
"0.60143036",
"0.60072577",
"0.6006162",
"0.5856047",
"0.58316565",
"0.58316565",
"0.57916826",
"0.577586",
"0.5679327",
"0.5647371",
"0.5636845",
"0.56265545",
"0.56044257",
"0.5583414",
"0.55766565",
"0.5573644",
"0.55224556",
"0.5518615",
"0.54879606",
"0.54862607",
"0.54862607",
"0.54862607",
"0.54862607",
"0.5439408"
]
| 0.729511 | 0 |
Modify a context to allow lowercase boolean representations. | def lowercase_booleans(context_class):
    context_class._convert_bool_to_str = _convert_to_lowercase_str
    return context_class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def startcase_booleans(context_class):\n context_class._convert_bool_to_str = _convert_to_str\n return context_class",
"def _bool_encode(self, d):\n for k, v in d.items():\n if isinstance(v, bool):\n d[k] = str(v).lower()\n \n return d",
"def set_case_sensitive(self, v):\n self._case_sensitive = bool(v)",
"def _set_bool(name, value, context):\n if name in os.environ:\n envval = os.environ.get(name).lower()\n if envval in [\"1\", \"true\", \"y\", \"yes\"]:\n context[name] = True\n elif envval in [\"0\", \"false\", \"n\", \"no\"]:\n context[name] = False\n else:\n raise ValueError(f\"{name} is a boolean, cannot match '{os.environ[name]}'\")\n\n _set_default(name, value, context)",
"def booleanize(text):\n ltext = text.lower()\n if ltext == 'true':\n booleanized = True\n elif ltext == 'false':\n booleanized = False\n else:\n raise ValueError('A monk asked: Is \"{}\" true or false.'.format(text))\n return booleanized",
"def str2bool(self, v):\n \tprint('Entering conversion function')\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")",
"def caseSensitive(self):\n return self.__caseSensitive",
"def _str2bool(self, v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")",
"def isLowercase(self, config):\n\t\treturn self.LOWERCASE & config",
"def boolstr(value: bool) -> str:\n return str(value).lower()",
"def boolify(x):\r\n if isinstance(x, str):\r\n x = x.lower()\r\n try:\r\n return _BOOLIFY_DICT[x]\r\n except KeyError as e:\r\n raise ValueError('Can\\'t boolify value: %r' % x) from None",
"def case_sensitive(self):\n\n return True",
"def cast(self, value: Any) -> Any:\n if value is None:\n return False\n if isinstance(value, bool):\n return value\n strvalue = str(value).lower()\n if strvalue in ['1', 't', 'true']:\n return True\n elif strvalue in ['', '0', 'f', 'false']:\n return False\n raise err.InvalidArgumentError(\"not a Boolean '{}'\".format(value))",
"def Bool(arg):\n return arg.lower() in ('y', 'true', 't', '1')",
"def str2bool(self, val):\n return val.lower() in ('true','yes','t',1)",
"def get_string(self):\n boolean_value_string = str(bool(self._boolean_value)).lower()\n return BOOLEAN_TEMPLATE.substitute(\n boolean_name = str(self._boolean_name),\n boolean_value = boolean_value_string)",
"def str2bool(v) -> bool:\n\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')",
"def setTrue(self):\n self.cond = CT.TRUE\n self.left = self.right = None\n self.z3 = BoolSort().cast(True)\n self.cleaned = self.Z3Simplified = self.customSimplified = self.checked = True\n self.customSimplifiedValue = CE.TRUE",
"def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n raise argparse.ArgumentTypeError('Boolean value expected.')",
"def _text2bool(val):\n lval = val.lower()\n if lval in __true_strings: return True\n if lval in __false_strings: return False\n raise ValueError(\"Invalid value for boolean option: %s\" % val)",
"def clean_value(self, value):\n if isinstance(value, str):\n return value.lower() in ('1', 'true')\n\n return value in (1, True)",
"def setCaseSensitive(self, state):\n self.__caseSensitive = state",
"def str2bool(v):\n if v.lower() == 'true':\n return True\n elif v.lower() == 'false':\n return False\n raise argparse.ArgumentTypeError('Boolean value expected.')",
"def str2bool(v):\n\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise ArgumentTypeError('Boolean value expected.')",
"def preprocess_bools(args):\n for arg in args:\n if type(args[arg]) == bool:\n args[arg] = int(args[arg])\n return args",
"def variable_boolean(self, value):\n\n text_value = to_text(value)\n text_value = text_value.lower()\n\n if text_value == 'true' or text_value == 'false':\n return True\n\n return False",
"def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')",
"def set_boolean(x):\n\n if x:\n return \"True\"\n else:\n return \"False\"",
"def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')",
"def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
]
| [
"0.7548539",
"0.63619083",
"0.6342019",
"0.6148815",
"0.61256915",
"0.5853296",
"0.5798188",
"0.57786196",
"0.5686606",
"0.5661465",
"0.5574734",
"0.5535997",
"0.5514914",
"0.55106956",
"0.5468126",
"0.5459818",
"0.54399335",
"0.5392206",
"0.5390457",
"0.5377692",
"0.5372112",
"0.5345961",
"0.53293777",
"0.53138775",
"0.53113675",
"0.5305645",
"0.52966076",
"0.5293462",
"0.52810895",
"0.52810895"
]
| 0.83106977 | 0 |