query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
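Each row below follows this schema: a natural-language `query`, the matching `document` (the positive code snippet), and 30 scored hard negatives. A minimal sketch, assuming the Hugging Face `datasets` library and a placeholder dataset path (the real identifier is not given here), of loading and unpacking one row:

```python
from datasets import load_dataset

# "user/code-retrieval-triplets" is a placeholder path, not the real dataset id.
ds = load_dataset("user/code-retrieval-triplets", split="train")

for row in ds:
    query = row["query"]          # e.g. "Test case for get_host"
    document = row["document"]    # the positive code snippet
    negatives = row["negatives"]  # 30 near-miss code snippets
    neg_scores = [float(s) for s in row["negative_scores"]]  # stored as strings
    doc_score = float(row["document_score"])
    doc_rank = int(row["document_rank"])  # two observed classes; 0 in the rows shown
    assert len(negatives) == len(neg_scores) == 30
```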
Test case for get_file_object | def test_get_file_object(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Open(self, file_object):",
"def testGetFileObject(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n file_object = file_entry.GetFileObject()\n self.assertIsNotNone(file_object)\n\n self.assertEqual(file_object.get_size(), 22)\n\n file_object = file_entry.GetFileObject(data_stream_name='bogus')\n self.assertIsNone(file_object)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n file_object = file_entry.GetFileObject()\n self.assertIsNone(file_object)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n file_object = file_entry.GetFileObject(data_stream_name='rsrc')\n self.assertIsNotNone(file_object)\n\n self.assertEqual(file_object.get_size(), 17)",
"def test_get_file_content(self):\n pass",
"def test_get_object_to_file(self):\n err = None\n try:\n response = self.bos.put_object_from_string(self.BUCKET, self.KEY, \"This is a string.\")\n except BceServerError as e:\n err = e\n finally:\n self.assertIsNone(err)\n self.check_headers(response)\n\n err = None\n try:\n response = self.bos.get_object_to_file(self.BUCKET, self.KEY, b\"Filename\")\n except BceServerError as e:\n err = e\n finally:\n self.assertIsNone(err)\n os.remove(\"Filename\")\n\n self.check_headers(response)\n self.assertEqual(response.metadata.etag, '13562b471182311b6eea8d241103e8f0')",
"def _get_file_object(inputfile=None):\n if type(inputfile) == str:\n return open(inputfile, 'r')\n return inputfile",
"def test_file_field():",
"def get_file_object(self):\n try:\n # FieldFile.open() and File.open() don't return file objects, so\n # accessing it directly\n return self.datafile.file.file # FileStoreItem.FieldFile.File.file\n except ValueError as exc:\n logger.error(\"Error opening %s: %s\", self.datafile, exc)\n return None",
"def test_kyc_get_file(self):\n pass",
"def test_get_object_link_file(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n url = reverse(\n 'filesfolders:file_serve',\n kwargs={'file': self.file.sodar_uuid, 'file_name': self.file.name},\n )\n ret = plugin.get_object_link('File', self.file.sodar_uuid)\n self.assertEqual(ret['url'], url)\n self.assertEqual(ret['label'], self.file.name)\n self.assertEqual(ret['blank'], True)",
"def test_get_file_accessors(self):\n pass",
"def read(self, fileobj):\n raise NotImplementedError",
"def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r",
"def test_get_file(self):\n django_file = None\n \n with open(TEST_AVATAR_PATH, 'rb') as avatar:\n self.user.avatar_tmp = File(avatar)\n self.user.save()\n\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n archive.add_model_file(self.user.avatar_tmp)\n\n django_file = archive.get_file()\n archive_path = archive.file_path\n\n self.assertIsNotNone(archive_path)\n self.assertEqual(django_file.name, archive.file_path)\n self.assertFalse(django_file.closed)\n\n self.assertIsNone(archive.file)\n self.assertIsNone(archive.file_path)\n self.assertTrue(django_file.closed)",
"def testGetFile(self):\n try:\n remoteLocator = self.__pathPdbxDictionaryFile\n fn = self.__fileU.getFileName(remoteLocator)\n # _, fn = os.path.split(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n ok = self.__fileU.remove(lPath)\n self.assertTrue(ok)\n dPath = os.path.join(self.__workPath, \"tdir\")\n ok = self.__fileU.mkdir(dPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(dPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(\";lakdjf\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed",
"def get_file_object(file_name, path):\n os.chdir(path)\n info = os.stat(file_name)\n\n time_format = \"%a %b %d %H:%M:%S %Y\"\n file_mod_date = time.ctime(info.st_mtime)\n file_mod_date = datetime.strptime(file_mod_date, time_format)\n\n file_size = str(info.st_size)\n\n file_type = \"folder\" if os.path.isdir(f\"{path}/{file_name}\") else \"file\"\n\n name, path, size, ftype, mod_date = file_name, path, file_size, file_type, file_mod_date\n\n file = File(name, path, size, ftype, mod_date)\n\n return file",
"def read_file(string_object):\n try:\n return open(string_object,\"r\")\n except FileNotFoundError:\n return None",
"def get_file_obj(self, file):\n repository = \"{}/{}\".format(self.org, self.repo)\n ghrepo = self.github.get_repo(repository)\n obj = ghrepo.get_contents(file)\n return obj",
"def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now",
"def test_resource_user_resource_get_avatar_file_get(self):\n pass",
"def test_open_file_entity(self):\n virtpath = self.path_translator.split_virtual_path(\n \"/test/search1/rien_12345\")\n self.assertTrue(self.path_translator.is_file_entity(virtpath))\n ftp_file = self.path_translator.open_cw_file(virtpath)\n expected_file_content = \"nothing in 12345\"\n self.assertEqual(expected_file_content,\n ftp_file.readChunk(0, -1))\n self.assertEqual({\n \"size\": len(expected_file_content),\n \"uid\": 0,\n \"gid\": 0,\n \"mtime\": 0,\n \"atime\": 0,\n \"permissions\": self.path_translator.file_perm},\n ftp_file.getAttrs())\n self.assertTrue(hasattr(ftp_file, \"close\"))\n ftp_file.close()",
"def test_s3_gets_object_content(self):\n mock_s3 = Mock()\n mock_s3_object = Mock()\n s3_response = {'Body': mock_s3_object}\n mock_s3_object.read.return_value = \"file content\"\n mock_s3.get_object.return_value = s3_response\n\n s3_bucket = S3Bucket('bucket_name', s3_client=mock_s3)\n assert s3_bucket.get_content('/file.text') == \\\n 'file content'",
"def test_other_types(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n self.assertNotEqual(1, qs[0])\n self.assertNotEqual(NotImplementedError, qs[0])\n\n class MockFileObject:\n path = qs[0].path\n\n self.assertNotEqual(MockFileObject(), qs[0])",
"def test_put_object_from_file(self):\n self.get_file(20)\n response = self.bos.put_object_from_file(self.BUCKET, self.KEY, self.FILENAME)\n self.check_headers(response, [\"etag\"])",
"def get_file(object_name: str, **kwargs) -> HTTPResponse:\n data = client.get_object(DATASETS_BUCKET, object_name, **kwargs)\n return data",
"def fileobj(path_or_file, mode='r'):\n if isinstance(path_or_file, basestring):\n try:\n return open(path_or_file, mode)\n except:\n return closing(StringIO())\n else:\n return closing(path_or_file)",
"def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))",
"def isFileObject(fileObj):\n if sys.version_info[0] == 2:\n return isinstance(fileObj, file)\n else:\n # for python 3:\n # has read() method for:\n # io.IOBase\n # io.BytesIO\n # io.StringIO\n # io.RawIOBase\n return hasattr(fileObj, 'read')",
"def test_fileobj(self, ext, dtype):\n sample_rate = 16000\n num_frames = 3 * sample_rate\n num_channels = 2\n with self.assertRaisesRegex(ValueError, \"SoX backend does not support reading\"):\n self._query_fileobj(ext, dtype, sample_rate, num_channels, num_frames)",
"def fileOpen(filePath,fileType):\n if os.path.exists(filePath) and os.path.getsize(filePath) > 0:\n print \"Retrieving file:%s\" %filePath\n if fileType.lower() == \"xl\":\n fileObject = xlrd.open_workbook(filePath)\n else:\n with open(filePath, 'r') as FH:\n if fileType.lower() == \"json\":\n fileObject = json.load(FH) \n elif fileType.lower() == \"txt\":\n fileObject = FH.readlines()\n elif fileType.lower() == \"csv\":\n file_data = csv.reader(FH)\n fileObject = output = list(file_data)\n elif fileType.lower() == \"j2\":\n fileObject = Template(FH.read())\n else:\n print \"Invalid fileType\"\n fileObject = False\n return fileObject\n else:\n print \"File does not exist or is empty: %s\" %filePath\n return False"
] | [
"0.7580879",
"0.7461194",
"0.7427491",
"0.7276669",
"0.7233623",
"0.7066448",
"0.7036892",
"0.7000198",
"0.6970483",
"0.6954348",
"0.67913127",
"0.677296",
"0.6692556",
"0.66749793",
"0.66078466",
"0.6588215",
"0.65780336",
"0.65684533",
"0.6526653",
"0.646957",
"0.6447769",
"0.643879",
"0.64252496",
"0.64070886",
"0.6373726",
"0.63656026",
"0.63518775",
"0.63144064",
"0.6313706",
"0.6286359"
] | 0.92290837 | 0 |
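The `objective` metadata in every row flags `(query, document, negatives)` as a triplet objective: the query acts as the anchor, the document as the positive, and each negative as a contrastive example. A minimal sketch of that objective with PyTorch's `TripletMarginLoss`; the toy `encode` function, the margin value, and the abbreviated row are illustrative stand-ins, not part of the dataset:

```python
import torch
import torch.nn.functional as F

def encode(texts, dim=64):
    """Toy deterministic embedding (character-count features) standing in for a real encoder."""
    out = torch.zeros(len(texts), dim)
    for i, text in enumerate(texts):
        for ch in text:
            out[i, ord(ch) % dim] += 1.0
    return F.normalize(out, dim=-1)

row = {  # abbreviated version of the first row above
    "query": "Test case for get_file_object",
    "document": "def test_get_file_object(self):\n    pass",
    "negatives": ["def Open(self, file_object):", "def test_get_file_content(self):\n    pass"],
}

loss_fn = torch.nn.TripletMarginLoss(margin=0.2)  # margin chosen for illustration

anchor = encode([row["query"]])       # shape (1, dim)
positive = encode([row["document"]])  # shape (1, dim)
negatives = encode(row["negatives"])  # shape (N, dim)

# One (anchor, positive, negative) term per negative, averaged by the loss.
loss = loss_fn(anchor.expand_as(negatives), positive.expand_as(negatives), negatives)
print(float(loss))
```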
Test case for get_host | def test_get_host(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getHost():",
"def getHost():",
"def test_get_host_access(self):\n pass",
"def getHostInfo():",
"def test_host(self):\n url = create_url(host=\"www.example.com\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com\")",
"def checkGetHostByName(self, result):\n self.assertEquals(result, '127.0.0.1')",
"def getRemoteHost():",
"def test_getHostByName(self):\n d = client.getHostByName(self.ghbntest)\n d.addCallback(self.checkGetHostByName)\n return d",
"def test_address_host(self):\n url = create_url(address=\"www.example.com\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com\")",
"def get_host(name):\n raise NotImplementedError('derived class should overload me')",
"def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)",
"def get_host(self):\r\n return self.host",
"def GetServerHost():\n return GetHostName(True)",
"def test_host_path(self):\n url = create_url(\n host=\"www.example.com\", path=\"path/to/resource\", scheme_no_ssl=\"http\"\n )\n self.assertEqual(url, \"http://www.example.com/path/to/resource\")",
"def test_get_current_request_hostname(self):\r\n assert_is_none(get_current_request_hostname())",
"def test_safe_get_host(self):\r\n settings.SITE_NAME = 'siteName.com'\r\n factory = RequestFactory()\r\n request = factory.request()\r\n request.META['HTTP_HOST'] = 'www.userProvidedHost.com'\r\n # If ALLOWED_HOSTS is not set properly, safe_get_host should return SITE_NAME\r\n settings.ALLOWED_HOSTS = None\r\n self.assertEqual(safe_get_host(request), \"siteName.com\")\r\n settings.ALLOWED_HOSTS = [\"*\"]\r\n self.assertEqual(safe_get_host(request), \"siteName.com\")\r\n settings.ALLOWED_HOSTS = [\"foo.com\", \"*\"]\r\n self.assertEqual(safe_get_host(request), \"siteName.com\")\r\n\r\n # If ALLOWED_HOSTS is set properly, and the host is valid, we just return the user-provided host\r\n settings.ALLOWED_HOSTS = [request.META['HTTP_HOST']]\r\n self.assertEqual(safe_get_host(request), request.META['HTTP_HOST'])\r\n\r\n # If ALLOWED_HOSTS is set properly but the host is invalid, we should get a SuspiciousOperation\r\n settings.ALLOWED_HOSTS = [\"the_valid_website.com\"]\r\n with self.assertRaises(SuspiciousOperation):\r\n safe_get_host(request)",
"def test_hostname_value(self):\n \n hostname = get_hostname()\n \n # Check to make sure the hostname is \"tjw-imac.grid.labs\"\n self.assertEqual(hostname, 'tjw-imac.grid.labs')",
"def test(cls, hostname):\n pass",
"def test_get_current_request_hostname(self):\n assert get_current_request_hostname() is None",
"def test_host_port(self):\n url = create_url(host=\"www.example.com\", port=8000, scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com:8000\")",
"def get_host(self):\n return self.host",
"def test_address_host_port(self):\n url = create_url(address=\"www.example.com:8000\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com:8000\")",
"def test_download_host(self):\n pass",
"def getHost(self): #$NON-NLS-1$\r",
"def host(self) :\n\t\ttry :\n\t\t\treturn self._host\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_get_internal_host(matrix):\n matrix.charm_config[\"prefer-internal-ip\"] = True\n matrix.charm_config[\"prefer-internal-host\"] = True\n assert matrix.get_internal_host() == \"10.10.10.10\"\n matrix.charm_config[\"prefer-internal-ip\"] = False\n assert matrix.get_internal_host() == \"mock.fqdn\"",
"def test_host_header_no_port_in_uri(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)",
"def is_host(self):\n return self.host",
"def getHost(self):\n\n\t\treturn HOST",
"def hostname_get():\n try:\n return json_response.success({'hostname': hostname.determine()})\n except hostname.Error as e:\n return json_response.error(str(e)), 200"
] | [
"0.83691925",
"0.83691925",
"0.8170636",
"0.7675683",
"0.7643619",
"0.7493968",
"0.7489131",
"0.72870094",
"0.71952164",
"0.71942747",
"0.71517736",
"0.71448296",
"0.7126831",
"0.71263605",
"0.70717627",
"0.70696527",
"0.70255715",
"0.69447917",
"0.69007003",
"0.688928",
"0.6860277",
"0.685945",
"0.6822105",
"0.68219006",
"0.677096",
"0.6762122",
"0.674532",
"0.6720554",
"0.66919315",
"0.66789895"
] | 0.9202221 | 0 |
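Negatives arrive sorted by score (descending in the lists above), and the positive carries its own `document_score`. One plausible use of these fields, stated here as an assumption rather than something the data documents, is to drop candidate negatives scoring nearly as high as the positive, since such candidates are often false negatives:

```python
def filter_hard_negatives(row, max_ratio=0.95):
    """Keep negatives scoring below max_ratio * document_score.
    The 0.95 cutoff is illustrative, not taken from the dataset."""
    ceiling = max_ratio * float(row["document_score"])
    return [
        neg
        for neg, score in zip(row["negatives"], row["negative_scores"])
        if float(score) < ceiling
    ]

row = {  # abbreviated version of the row above
    "document_score": "0.9202221",
    "negatives": ["def getHost():", "def test_get_host_access(self):\n    pass"],
    "negative_scores": ["0.83691925", "0.8170636"],
}

print(filter_hard_negatives(row))  # both kept: 0.837 and 0.817 fall below 0.95 * 0.920 ≈ 0.874
```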
Test case for get_host_access | def test_get_host_access(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host(self):\n pass",
"def getHostInfo():",
"def access():",
"def getHost():",
"def getHost():",
"def test_perform_host_action(self):\n pass",
"def test_safe_get_host(self):\r\n settings.SITE_NAME = 'siteName.com'\r\n factory = RequestFactory()\r\n request = factory.request()\r\n request.META['HTTP_HOST'] = 'www.userProvidedHost.com'\r\n # If ALLOWED_HOSTS is not set properly, safe_get_host should return SITE_NAME\r\n settings.ALLOWED_HOSTS = None\r\n self.assertEqual(safe_get_host(request), \"siteName.com\")\r\n settings.ALLOWED_HOSTS = [\"*\"]\r\n self.assertEqual(safe_get_host(request), \"siteName.com\")\r\n settings.ALLOWED_HOSTS = [\"foo.com\", \"*\"]\r\n self.assertEqual(safe_get_host(request), \"siteName.com\")\r\n\r\n # If ALLOWED_HOSTS is set properly, and the host is valid, we just return the user-provided host\r\n settings.ALLOWED_HOSTS = [request.META['HTTP_HOST']]\r\n self.assertEqual(safe_get_host(request), request.META['HTTP_HOST'])\r\n\r\n # If ALLOWED_HOSTS is set properly but the host is invalid, we should get a SuspiciousOperation\r\n settings.ALLOWED_HOSTS = [\"the_valid_website.com\"]\r\n with self.assertRaises(SuspiciousOperation):\r\n safe_get_host(request)",
"def checkGetHostByName(self, result):\n self.assertEquals(result, '127.0.0.1')",
"def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?",
"def getRemoteHost():",
"def test_default_host_http_required(self):\n client = self.base_scenario(\n frang_config=\"\", requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"]\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def test_host_header_as_ip(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def getHost(theurl,host):\n\tglobal username,password\n\tif (username == None):\n\t\tusername = raw_input(\"Enter username : \")\n\tif (password == None):\n\t\tpassword = getpass.getpass(\"Enter your password for user [\"+username+\"] :\")\n\n\t\n\traw_params = {'cata_query' : 'list host with host_fqdn =\"'+host+'%\" tag h join via has_netif to netif,netvif, tag n result order n.if_ipv4addr print h.host_fqdn,n.if_name,n.if_ipv4addr','sidenav' : 'query','tabnav' : 'main', 'RUN' : 'run'}\n\tparams = urllib.urlencode(raw_params)\n\treq = urllib2.Request(theurl,params)\n\n\tif options.verbose:\n\t\tprint \"Using url : \" +theurl\n\t\tprint 'Using query params : ' + raw_params['cata_query']\n\t\tprint \"#################################################\"\n\ttry:\n\t handle = urllib2.urlopen(req)\n\texcept IOError, e:\n\t # here we *want* to fail\n\t pass\n\telse:\n\t # If we don't fail then the page isn't protected\n\t print \"This page isn't protected by authentication.\"\n\t sys.exit(1)\n\n\tif not hasattr(e, 'code') or e.code != 401:\n\t # we got an error - but not a 401 error\n\t print \"This page isn't protected by authentication.\"\n\t print 'But we failed for another reason.'\n\t sys.exit(1)\n\n\tauthline = e.headers['www-authenticate']\n\t# this gets the www-authenticate line from the headers\n\t# which has the authentication scheme and realm in it\n\t\n\n\tauthobj = re.compile(\n\t r'''(?:\\s*www-authenticate\\s*:)?\\s*(\\w*)\\s+realm=['\"]([^'\"]+)['\"]''',\n\t re.IGNORECASE)\n\t# this regular expression is used to extract scheme and realm\n\tmatchobj = authobj.match(authline)\n\n\tif not matchobj:\n\t # if the authline isn't matched by the regular expression\n\t # then something is wrong\n\t print 'The authentication header is badly formed.'\n\t print authline\n \t sys.exit(1)\n\n\tscheme = matchobj.group(1)\n\trealm = matchobj.group(2)\n\t# here we've extracted the scheme\n\t# and the realm from the header\n\tif scheme.lower() != 'basic':\n\t print 'This example only works with BASIC authentication.'\n \t sys.exit(1)\n\n\tbase64string = base64.encodestring(\n '%s:%s' % (username, password))[:-1]\n\tauthheader = \"Basic %s\" % base64string\n\treq.add_header(\"Authorization\", authheader)\n\ttry:\n\t handle = urllib2.urlopen(req)\n\texcept IOError, e:\n\t # here we shouldn't fail if the username/password is right\n\t print \"It looks like the username or password is wrong.\"\n\t sys.exit(1)\n\tthepage = handle.read()\n\ttree = html.fromstring(thepage)\n\ttr_nodes = tree.xpath('//table[@class=\"olist\"]/tr')\n\ttd_content = [[td.text for td in tr.xpath('td')] for tr in tr_nodes[0:]]\n\t\n\tif DEBUG:\n\t\tprint td_content\n\n\tfor content in sorted(td_content,key=lambda ip:ip[2]):\n\t\t_host_fqdn = content[0]\n\t\t_host = _host_fqdn.split('.',1)[0]\n\t\t_domain = _host_fqdn.split('.',1)[1]\n\t\t_eth = content[1]\n\t\t_ip = content[2]\n\t\treturn _host_fqdn",
"def request_access(self):\n pass",
"def is_host_accessible(self):\n return self._host_array is not None",
"def _hostOK(self, host):\n if os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True",
"def test_verify_ssh_access_with_root_works(driver):",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)",
"def test_host_header_mismath_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)",
"def usage(self, host):",
"def test_host_header_as_ip6(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: [20:11:abb::1]:80\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def test_tenant_external_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.domain.domain)\n self.assertEqual(response.status_code, 200)",
"def test_get_access_information(self):\n\n self.assertEqual(account.get_access_information('123'), account.INVALID_CODE_ERR_MSG)",
"def test_hosts_file(host):\n hosts_file = host.file('/etc/hosts')\n assert hosts_file.exists\n assert hosts_file.user == 'root'\n assert hosts_file.group == 'root'",
"def get_server_access(server_name):\n qibuild_cfg = qibuild.config.QiBuildConfig()\n qibuild_cfg.read(create_if_missing=True)\n access = qibuild_cfg.get_server_access(server_name)\n return access",
"def test_download_host(self):\n pass",
"def test_getHostByName(self):\n d = client.getHostByName(self.ghbntest)\n d.addCallback(self.checkGetHostByName)\n return d",
"def test_get_public_guest_access(self):\n self.project.public_guest_access = True\n self.project.save()\n user_new = self.make_user('user_new')\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )",
"def is_accessible(self):\n if self._is_accessible:\n return self._is_accessible\n\n check_host_cmd = '/usr/rift/bin/ssh_root {ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no ls > /dev/null'\n rc = subprocess.call(check_host_cmd.format(ip=self._ip), shell=True)\n logger.info(\"Checking if {} is accessible\".format(self._ip))\n\n\n\n if rc != 0:\n return False\n\n self._is_accessible = True\n return self._is_accessible"
] | [
"0.73861194",
"0.6704132",
"0.6600314",
"0.6589656",
"0.6589656",
"0.65601325",
"0.6264574",
"0.6159319",
"0.6117409",
"0.6116472",
"0.6080605",
"0.605468",
"0.60324967",
"0.60264456",
"0.6003083",
"0.591241",
"0.5909089",
"0.59026635",
"0.58959866",
"0.5892609",
"0.5866478",
"0.58486116",
"0.5847851",
"0.58209026",
"0.58098584",
"0.5798304",
"0.57950443",
"0.5774337",
"0.57482994",
"0.57364225"
] | 0.9290242 | 0 |
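Because every row pairs one positive with 30 negatives, a common preprocessing step (again an assumption about usage, not stated in the data) is to expand each row into one `(anchor, positive, negative)` triple per negative before batching:

```python
from typing import Iterable, Tuple

def expand_row(row: dict) -> Iterable[Tuple[str, str, str]]:
    """Yield one (anchor, positive, negative) training triple per listed negative."""
    for negative in row["negatives"]:
        yield row["query"], row["document"], negative

row = {  # abbreviated version of the row above
    "query": "Test case for get_host_access",
    "document": "def test_get_host_access(self):\n    pass",
    "negatives": ["def test_get_host(self):\n    pass", "def getHostInfo():"],
}

triples = list(expand_row(row))
assert len(triples) == len(row["negatives"])  # 30 per row in the full data
```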
Test case for get_host_configuration_metrics | def test_get_host_configuration_metrics(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host_configuration_metrics1(self):\n pass",
"def get_config_metrics():\n\n metrics = {'disk_usage': 'YES',\n 'cpu_percent': 'YES',\n 'memory_info': 'YES',\n 'cpu_stats': 'YES'}\n\n return metrics",
"def test_get_deployment_metric(self):\n pass",
"def test_metrics_server(self):\n validate_metrics_server()",
"def test_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(self.log_file.name, NAGIOS_TEST_HOST_TEMPLATE),\n host_perf=True,\n tags=CUSTOM_TAGS,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n # Write content to log file and run check\n self._write_log('\\t'.join(self.HOST_LOG_DATA))\n nagios.check(config['instances'][0])\n\n # Test metric\n for metric_data in self.HOST_LOG_SERVICEPERFDATA:\n name, info = metric_data.split(\"=\")\n metric_name = \"nagios.host.\" + name\n\n values = info.split(\";\")\n\n index = values[0].find(\"ms\") if values[0].find(\"ms\") != -1 else values[0].find(\"%\")\n index = len(values[0]) - index\n value = float(values[0][:-index])\n expected_tags = ['unit:' + values[0][-index:]] + CUSTOM_TAGS\n if len(values) == 4:\n expected_tags.append('warn:' + values[1])\n expected_tags.append('crit:' + values[2])\n expected_tags.append('min:' + values[3])\n\n aggregator.assert_metric(metric_name, value=value, tags=expected_tags, count=1)\n\n aggregator.assert_all_metrics_covered()",
"def test_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(self.log_file.name, NAGIOS_TEST_HOST_TEMPLATE),\n host_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n # Write content to log file and run check\n self._write_log('\\t'.join(self.HOST_LOG_DATA))\n nagios.check(config['instances'][0])\n\n # Test metric\n for metric_data in self.HOST_LOG_SERVICEPERFDATA:\n name, info = metric_data.split(\"=\")\n metric_name = \"nagios.host.\" + name\n\n values = info.split(\";\")\n\n index = values[0].find(\"ms\") if values[0].find(\"ms\") != -1 else values[0].find(\"%\")\n index = len(values[0]) - index\n value = float(values[0][:-index])\n expected_tags = ['unit:' + values[0][-index:]]\n if len(values) == 4:\n expected_tags.append('warn:' + values[1])\n expected_tags.append('crit:' + values[2])\n expected_tags.append('min:' + values[3])\n\n aggregator.assert_metric(metric_name, value=value, tags=expected_tags, count=1)\n\n aggregator.assert_all_metrics_covered()",
"def test_get_derived_metric(self):\n pass",
"def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)",
"def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_bytes(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()",
"def test_get_all_derived_metrics(self):\n pass",
"def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_string(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()",
"def test_metrics(client):\n response = client.get(\"/metrics\")\n assert response.status_code == 200",
"def test_get_virtual_machine_count_metrics1(self):\n pass",
"def test_get_virtual_machine_count_metrics(self):\n pass",
"def testGetHostConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs('postsubmit')\n self.assertEqual(2, len(hosts))\n self.assertEqual('atc', hosts[0].lab_name)\n self.assertEqual('postsubmit1.atc.google.com', hosts[0].hostname)\n self.assertEqual('lab_user1', hosts[0].host_login_name)\n self.assertEqual('postsubmit', hosts[0].cluster_name)\n self.assertEqual('ramdisk-host-config.xml', hosts[0].tf_global_config_path)\n self.assertEqual('tfc_url', hosts[0].control_server_url)\n self.assertEqual(['mdb-group:some_owner', 'foo', 'bar'], hosts[0].owners)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n hosts[0].docker_image)\n self.assertEqual('docker_server_2', hosts[0].docker_server)\n self.assertEqual('postsubmit2.atc.google.com', hosts[1].hostname)\n hosts = pool.GetHostConfigs('crystalball-power')\n self.assertEqual(2, len(hosts))\n self.assertEqual('atc', hosts[0].lab_name)\n self.assertEqual('lab_docker_image', hosts[0].docker_image)\n self.assertEqual('docker_server_1', hosts[0].docker_server)\n self.assertEqual('cp1.atc.google.com', hosts[0].hostname)\n self.assertEqual(\n ['--device-cgroup-rule', '\"c 188:* rwm\"'],\n hosts[0].extra_docker_args)\n self.assertEqual('cp2.atc.google.com', hosts[1].hostname)",
"def test_result_fields_with_metrics(cbcsdk_mock):\n api = cbcsdk_mock.api\n result = Result(api, initial_data=GET_RUN_RESULTS_RESP_1)\n metrics = result.metrics_\n assert metrics._info == {\"cpu\": 24.3, \"memory\": 8.0}",
"async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()",
"def fusion_api_get_metrics_configuration(self, api=None, headers=None):\n return self.metrics.get(api=api, headers=headers, param='/configuration')",
"def test_get_host(self):\n pass",
"def test_metrics(self):\n self.assertIsInstance(self.analytics.suites[testReportSuite].metrics, omniture.utils.AddressableList)",
"def get_host_stats(self, refresh=False):",
"def get_evaluation_metric(config, logger, device):\r\n metrics = config['eval']['metrics']\r\n if not isinstance(metrics, list):\r\n metrics = [metrics]\r\n curves = config['eval'].get('curves', list())\r\n threshold = config['eval'].get('probability_threshold', 0.5)\r\n num_classes = config['model']['num_classes']\r\n\r\n metrics_dict = {}\r\n curves_dict = {}\r\n cm = _ConfusionMatrix(num_classes=num_classes, threshold=threshold).to(device)\r\n cm_per_target = None\r\n cms = None\r\n cms_per_target = None\r\n if any([item.startswith('per_target_') for item in metrics]):\r\n cm_per_target = _ConfusionMatrix(num_classes=num_classes, threshold=threshold).to(device)\r\n if any(['auc' in item for item in metrics]) or any(['ap' in item for item in metrics]) \\\r\n or any(['pr' in item for item in curves]) or any(['pr' in item for item in curves]):\r\n thresholds_type = config['eval'].get('thresholds_type', 'logspace')\r\n if thresholds_type == 'logspace':\r\n thresholds = ((np.logspace(0, 1, config['eval']['num_thresholds'] + 2) - 1) / 9)[1: -1]\r\n elif thresholds_type == 'logspace_pro':\r\n thresholds = ((np.logspace(0, 1, config['eval']['num_thresholds'] + 2, base=100) - 1) / 99)[1: -1]\r\n elif thresholds_type == 'linspace':\r\n thresholds = np.linspace(0.0, 1.0, config['eval']['num_thresholds'] + 2)[1: -1]\r\n elif thresholds_type == 'uline':\r\n thresholds = (((np.logspace(0, 1, config['eval']['num_thresholds'] // 2 + 2,\r\n base=10000000000) - 1) / 9999999999)[1: -1]) / 2\r\n if config['eval']['num_thresholds'] % 2 == 1:\r\n thresholds = np.append(thresholds, 0.5)\r\n for i in range(config['eval']['num_thresholds'] // 2 - 1, -1, -1):\r\n thresholds = np.append(thresholds, 1.0 - thresholds[i])\r\n else:\r\n logger.critical('thresholds_type is not supported: %s' % thresholds_type)\r\n exit(1)\r\n cms = [_ConfusionMatrix(num_classes, t, True).to(device) for t in thresholds]\r\n cms_per_target = None\r\n if any([item.startswith('per_target_') for item in curves]) or any(\r\n item.startswith('per_target_') for item in metrics):\r\n cms_per_target = [_ConfusionMatrix(num_classes, t, True).to(device) for t in thresholds]\r\n update_flags = [True, True, True, True] # single, multiple, per_target_single, per_target_multiple\r\n for metric_name in metrics:\r\n if metric_name.startswith('per_target_'):\r\n callback_fn = per_target_transform\r\n update_flag_id = 2\r\n used_cm = cm_per_target\r\n used_cms = cms_per_target\r\n else:\r\n callback_fn = None\r\n update_flag_id = 0\r\n used_cm = cm\r\n used_cms = cms\r\n if metric_name.endswith('tp'):\r\n metrics_dict[metric_name] = TruePositive(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('fp'):\r\n metrics_dict[metric_name] = FalsePositive(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('fn'):\r\n metrics_dict[metric_name] = FalseNegative(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('tn'):\r\n metrics_dict[metric_name] = TrueNegative(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('precision'):\r\n metrics_dict[metric_name] = Precision(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(\r\n 
device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('recall'):\r\n metrics_dict[metric_name] = Recall(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('sensitivity'):\r\n metrics_dict[metric_name] = Sensitivity(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('specificity'):\r\n metrics_dict[metric_name] = Specificity(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('dsc'):\r\n metrics_dict[metric_name] = DSC(used_cm, update=update_flags[update_flag_id], callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('auc'):\r\n metrics_dict[metric_name] = AUC(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id + 1] = False\r\n elif metric_name.endswith('ap'):\r\n metrics_dict[metric_name] = AP(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id + 1] = False\r\n elif metric_name.endswith('hd95'):\r\n metrics_dict[metric_name] = HD95(threshold).to(device)\r\n else:\r\n logger.error('Unrecognized metric: %s' % metric_name)\r\n continue\r\n for curve_name in curves:\r\n if curve_name.startswith('per_target_'):\r\n callback_fn = per_target_transform\r\n update_flag_id = 2\r\n used_cm = cm_per_target\r\n used_cms = cms_per_target\r\n else:\r\n callback_fn = None\r\n update_flag_id = 0\r\n used_cm = cm\r\n used_cms = cms\r\n if curve_name.endswith('roc'):\r\n if curve_name.replace('roc', 'auc') not in metrics_dict:\r\n logger.warning('%s not in metrics but %s in curves. Adding %s to metrics'\r\n % (curve_name.replace('roc', 'auc'), curve_name, curve_name.replace('roc', 'auc')))\r\n metrics_dict[curve_name.replace('roc', 'auc')] = AUC(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn)\r\n update_flags[update_flag_id + 1] = False\r\n curves_dict[curve_name] = metrics_dict[curve_name.replace('roc', 'auc')]\r\n elif curve_name.endswith('pr'):\r\n if curve_name.replace('pr', 'ap') not in metrics_dict:\r\n logger.warning('%s not in metrics but %s in curves. Adding %s to metrics'\r\n % (curve_name.replace('pr', 'ap'), curve_name, curve_name.replace('pr', 'ap')))\r\n metrics_dict[curve_name.replace('pr', 'ap')] = AP(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn)\r\n update_flags[update_flag_id + 1] = False\r\n curves_dict[curve_name] = metrics_dict[curve_name.replace('pr', 'ap')]\r\n if len(metrics_dict) == 0:\r\n logger.critical('No metric is added')\r\n exit(1)\r\n return metrics_dict, curves_dict",
"def _get_metric_config(self, config):\n metric_config = dict()\n metric_config['include_metrics'] = config.get('include_metrics', {})\n metric_config['exclude_metrics'] = config.get('exclude_metrics', {})\n return metric_config",
"async def test_full_config(hass, mock_client):\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED",
"def test_api_build_metrics_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.MetricsRequest()\n path, method = default_api.api_build_metrics_get(params)\n self.assertEqual(path, '/api/metrics/builds')\n self.assertEqual(method, 'GET')",
"def test_health_get(self):\n pass",
"def testGetHostConfigs_all(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs()\n self.assertEqual(6, len(hosts))",
"def test_load_metrics_help_nc_params(self) -> None:\n result = load_help_nc_params(\"metrics\")\n self.assertIs(type(result), dict)\n self.assertIsNot(result, {})",
"def testGetHostConfigs(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs('cluster1')\n self.assertEqual(3, len(hosts))\n self.assertEqual('lab1', hosts[0].lab_name)\n self.assertEqual('host1', hosts[0].hostname)\n self.assertEqual('user1', hosts[0].host_login_name)\n self.assertEqual('cluster1', hosts[0].cluster_name)\n self.assertEqual('path/to/config.xml', hosts[0].tf_global_config_path)\n self.assertEqual('tfc_url', hosts[0].control_server_url)\n self.assertCountEqual(['lab_user1', 'user1', 'user2'], hosts[0].owners)\n self.assertTrue(hosts[0].graceful_shutdown)\n self.assertTrue(hosts[0].enable_stackdriver)\n self.assertTrue(hosts[0].enable_autoupdate)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n hosts[0].docker_image)\n self.assertEqual('docker_server_2', hosts[0].docker_server)\n self.assertEqual(\n ['--arg1', 'value1', '--arg2', 'value2'],\n hosts[0].extra_docker_args)\n self.assertEqual('host2', hosts[1].hostname)\n self.assertEqual('user1', hosts[1].host_login_name)\n self.assertEqual('cluster1', hosts[1].cluster_name)\n self.assertEqual('path/to/config.xml', hosts[1].tf_global_config_path)\n self.assertEqual('tfc_url', hosts[1].control_server_url)\n self.assertCountEqual(['lab_user1', 'user1', 'user2'], hosts[1].owners)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n hosts[1].docker_image)\n self.assertEqual('docker_server_2', hosts[1].docker_server)\n self.assertEqual(['--arg1', 'value1'], hosts[1].extra_docker_args)\n self.assertEqual('host3', hosts[2].hostname)\n self.assertEqual('user1', hosts[2].host_login_name)\n self.assertEqual('cluster1', hosts[2].cluster_name)\n self.assertEqual('path/to/new/config.xml', hosts[2].tf_global_config_path)\n self.assertCountEqual(['lab_user1', 'user1', 'user2'], hosts[2].owners)\n self.assertEqual('tfc_url', hosts[2].control_server_url)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:canary',\n hosts[2].docker_image)\n self.assertEqual('docker_server_3', hosts[2].docker_server)\n hosts = pool.GetHostConfigs('cluster2')\n self.assertEqual(2, len(hosts))\n self.assertEqual('lab1', hosts[0].lab_name)\n self.assertEqual('lab_user1', hosts[0].host_login_name)\n self.assertEqual('tfc_control_server_url', hosts[0].control_server_url)\n self.assertEqual('lab_docker_image', hosts[0].docker_image)\n self.assertEqual('docker_server_1', hosts[0].docker_server)\n self.assertTrue(hosts[0].enable_stackdriver)\n self.assertTrue(hosts[0].enable_autoupdate)",
"def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder"
] | [
"0.9346211",
"0.6972427",
"0.6909013",
"0.6469852",
"0.6434656",
"0.64344317",
"0.62992746",
"0.6273626",
"0.6251684",
"0.6232934",
"0.6228584",
"0.62276554",
"0.61592716",
"0.6148309",
"0.61010396",
"0.60895073",
"0.6073374",
"0.5999785",
"0.59587926",
"0.59502757",
"0.5926723",
"0.5889717",
"0.5877618",
"0.5857148",
"0.5856251",
"0.5846883",
"0.58328444",
"0.57985187",
"0.5786721",
"0.57815933"
] | 0.94905114 | 0 |
Test case for get_host_configuration_metrics1 | def test_get_host_configuration_metrics1(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host_configuration_metrics(self):\n pass",
"def get_config_metrics():\n\n metrics = {'disk_usage': 'YES',\n 'cpu_percent': 'YES',\n 'memory_info': 'YES',\n 'cpu_stats': 'YES'}\n\n return metrics",
"def test_get_deployment_metric(self):\n pass",
"def test_metrics_server(self):\n validate_metrics_server()",
"def test_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(self.log_file.name, NAGIOS_TEST_HOST_TEMPLATE),\n host_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n # Write content to log file and run check\n self._write_log('\\t'.join(self.HOST_LOG_DATA))\n nagios.check(config['instances'][0])\n\n # Test metric\n for metric_data in self.HOST_LOG_SERVICEPERFDATA:\n name, info = metric_data.split(\"=\")\n metric_name = \"nagios.host.\" + name\n\n values = info.split(\";\")\n\n index = values[0].find(\"ms\") if values[0].find(\"ms\") != -1 else values[0].find(\"%\")\n index = len(values[0]) - index\n value = float(values[0][:-index])\n expected_tags = ['unit:' + values[0][-index:]]\n if len(values) == 4:\n expected_tags.append('warn:' + values[1])\n expected_tags.append('crit:' + values[2])\n expected_tags.append('min:' + values[3])\n\n aggregator.assert_metric(metric_name, value=value, tags=expected_tags, count=1)\n\n aggregator.assert_all_metrics_covered()",
"def test_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(self.log_file.name, NAGIOS_TEST_HOST_TEMPLATE),\n host_perf=True,\n tags=CUSTOM_TAGS,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n # Write content to log file and run check\n self._write_log('\\t'.join(self.HOST_LOG_DATA))\n nagios.check(config['instances'][0])\n\n # Test metric\n for metric_data in self.HOST_LOG_SERVICEPERFDATA:\n name, info = metric_data.split(\"=\")\n metric_name = \"nagios.host.\" + name\n\n values = info.split(\";\")\n\n index = values[0].find(\"ms\") if values[0].find(\"ms\") != -1 else values[0].find(\"%\")\n index = len(values[0]) - index\n value = float(values[0][:-index])\n expected_tags = ['unit:' + values[0][-index:]] + CUSTOM_TAGS\n if len(values) == 4:\n expected_tags.append('warn:' + values[1])\n expected_tags.append('crit:' + values[2])\n expected_tags.append('min:' + values[3])\n\n aggregator.assert_metric(metric_name, value=value, tags=expected_tags, count=1)\n\n aggregator.assert_all_metrics_covered()",
"def fusion_api_get_metrics_configuration(self, api=None, headers=None):\n return self.metrics.get(api=api, headers=headers, param='/configuration')",
"def test_get_virtual_machine_count_metrics1(self):\n pass",
"def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)",
"def test_get_derived_metric(self):\n pass",
"def testGetHostConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs('postsubmit')\n self.assertEqual(2, len(hosts))\n self.assertEqual('atc', hosts[0].lab_name)\n self.assertEqual('postsubmit1.atc.google.com', hosts[0].hostname)\n self.assertEqual('lab_user1', hosts[0].host_login_name)\n self.assertEqual('postsubmit', hosts[0].cluster_name)\n self.assertEqual('ramdisk-host-config.xml', hosts[0].tf_global_config_path)\n self.assertEqual('tfc_url', hosts[0].control_server_url)\n self.assertEqual(['mdb-group:some_owner', 'foo', 'bar'], hosts[0].owners)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n hosts[0].docker_image)\n self.assertEqual('docker_server_2', hosts[0].docker_server)\n self.assertEqual('postsubmit2.atc.google.com', hosts[1].hostname)\n hosts = pool.GetHostConfigs('crystalball-power')\n self.assertEqual(2, len(hosts))\n self.assertEqual('atc', hosts[0].lab_name)\n self.assertEqual('lab_docker_image', hosts[0].docker_image)\n self.assertEqual('docker_server_1', hosts[0].docker_server)\n self.assertEqual('cp1.atc.google.com', hosts[0].hostname)\n self.assertEqual(\n ['--device-cgroup-rule', '\"c 188:* rwm\"'],\n hosts[0].extra_docker_args)\n self.assertEqual('cp2.atc.google.com', hosts[1].hostname)",
"async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()",
"def test_get_all_derived_metrics(self):\n pass",
"def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_bytes(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()",
"def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_string(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()",
"def test_metrics(client):\n response = client.get(\"/metrics\")\n assert response.status_code == 200",
"def test_result_fields_with_metrics(cbcsdk_mock):\n api = cbcsdk_mock.api\n result = Result(api, initial_data=GET_RUN_RESULTS_RESP_1)\n metrics = result.metrics_\n assert metrics._info == {\"cpu\": 24.3, \"memory\": 8.0}",
"def testGetHostConfigs(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs('cluster1')\n self.assertEqual(3, len(hosts))\n self.assertEqual('lab1', hosts[0].lab_name)\n self.assertEqual('host1', hosts[0].hostname)\n self.assertEqual('user1', hosts[0].host_login_name)\n self.assertEqual('cluster1', hosts[0].cluster_name)\n self.assertEqual('path/to/config.xml', hosts[0].tf_global_config_path)\n self.assertEqual('tfc_url', hosts[0].control_server_url)\n self.assertCountEqual(['lab_user1', 'user1', 'user2'], hosts[0].owners)\n self.assertTrue(hosts[0].graceful_shutdown)\n self.assertTrue(hosts[0].enable_stackdriver)\n self.assertTrue(hosts[0].enable_autoupdate)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n hosts[0].docker_image)\n self.assertEqual('docker_server_2', hosts[0].docker_server)\n self.assertEqual(\n ['--arg1', 'value1', '--arg2', 'value2'],\n hosts[0].extra_docker_args)\n self.assertEqual('host2', hosts[1].hostname)\n self.assertEqual('user1', hosts[1].host_login_name)\n self.assertEqual('cluster1', hosts[1].cluster_name)\n self.assertEqual('path/to/config.xml', hosts[1].tf_global_config_path)\n self.assertEqual('tfc_url', hosts[1].control_server_url)\n self.assertCountEqual(['lab_user1', 'user1', 'user2'], hosts[1].owners)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n hosts[1].docker_image)\n self.assertEqual('docker_server_2', hosts[1].docker_server)\n self.assertEqual(['--arg1', 'value1'], hosts[1].extra_docker_args)\n self.assertEqual('host3', hosts[2].hostname)\n self.assertEqual('user1', hosts[2].host_login_name)\n self.assertEqual('cluster1', hosts[2].cluster_name)\n self.assertEqual('path/to/new/config.xml', hosts[2].tf_global_config_path)\n self.assertCountEqual(['lab_user1', 'user1', 'user2'], hosts[2].owners)\n self.assertEqual('tfc_url', hosts[2].control_server_url)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:canary',\n hosts[2].docker_image)\n self.assertEqual('docker_server_3', hosts[2].docker_server)\n hosts = pool.GetHostConfigs('cluster2')\n self.assertEqual(2, len(hosts))\n self.assertEqual('lab1', hosts[0].lab_name)\n self.assertEqual('lab_user1', hosts[0].host_login_name)\n self.assertEqual('tfc_control_server_url', hosts[0].control_server_url)\n self.assertEqual('lab_docker_image', hosts[0].docker_image)\n self.assertEqual('docker_server_1', hosts[0].docker_server)\n self.assertTrue(hosts[0].enable_stackdriver)\n self.assertTrue(hosts[0].enable_autoupdate)",
"def _get_metric_config(self, config):\n metric_config = dict()\n metric_config['include_metrics'] = config.get('include_metrics', {})\n metric_config['exclude_metrics'] = config.get('exclude_metrics', {})\n return metric_config",
"def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder",
"def test_get_virtual_machine_count_metrics(self):\n pass",
"def testGetHostConfigs_all(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs()\n self.assertEqual(6, len(hosts))",
"def test_load_metrics_help_nc_params(self) -> None:\n result = load_help_nc_params(\"metrics\")\n self.assertIs(type(result), dict)\n self.assertIsNot(result, {})",
"def test_get_hyperflex_config_result_by_moid(self):\n pass",
"def get_evaluation_metric(config, logger, device):\r\n metrics = config['eval']['metrics']\r\n if not isinstance(metrics, list):\r\n metrics = [metrics]\r\n curves = config['eval'].get('curves', list())\r\n threshold = config['eval'].get('probability_threshold', 0.5)\r\n num_classes = config['model']['num_classes']\r\n\r\n metrics_dict = {}\r\n curves_dict = {}\r\n cm = _ConfusionMatrix(num_classes=num_classes, threshold=threshold).to(device)\r\n cm_per_target = None\r\n cms = None\r\n cms_per_target = None\r\n if any([item.startswith('per_target_') for item in metrics]):\r\n cm_per_target = _ConfusionMatrix(num_classes=num_classes, threshold=threshold).to(device)\r\n if any(['auc' in item for item in metrics]) or any(['ap' in item for item in metrics]) \\\r\n or any(['pr' in item for item in curves]) or any(['pr' in item for item in curves]):\r\n thresholds_type = config['eval'].get('thresholds_type', 'logspace')\r\n if thresholds_type == 'logspace':\r\n thresholds = ((np.logspace(0, 1, config['eval']['num_thresholds'] + 2) - 1) / 9)[1: -1]\r\n elif thresholds_type == 'logspace_pro':\r\n thresholds = ((np.logspace(0, 1, config['eval']['num_thresholds'] + 2, base=100) - 1) / 99)[1: -1]\r\n elif thresholds_type == 'linspace':\r\n thresholds = np.linspace(0.0, 1.0, config['eval']['num_thresholds'] + 2)[1: -1]\r\n elif thresholds_type == 'uline':\r\n thresholds = (((np.logspace(0, 1, config['eval']['num_thresholds'] // 2 + 2,\r\n base=10000000000) - 1) / 9999999999)[1: -1]) / 2\r\n if config['eval']['num_thresholds'] % 2 == 1:\r\n thresholds = np.append(thresholds, 0.5)\r\n for i in range(config['eval']['num_thresholds'] // 2 - 1, -1, -1):\r\n thresholds = np.append(thresholds, 1.0 - thresholds[i])\r\n else:\r\n logger.critical('thresholds_type is not supported: %s' % thresholds_type)\r\n exit(1)\r\n cms = [_ConfusionMatrix(num_classes, t, True).to(device) for t in thresholds]\r\n cms_per_target = None\r\n if any([item.startswith('per_target_') for item in curves]) or any(\r\n item.startswith('per_target_') for item in metrics):\r\n cms_per_target = [_ConfusionMatrix(num_classes, t, True).to(device) for t in thresholds]\r\n update_flags = [True, True, True, True] # single, multiple, per_target_single, per_target_multiple\r\n for metric_name in metrics:\r\n if metric_name.startswith('per_target_'):\r\n callback_fn = per_target_transform\r\n update_flag_id = 2\r\n used_cm = cm_per_target\r\n used_cms = cms_per_target\r\n else:\r\n callback_fn = None\r\n update_flag_id = 0\r\n used_cm = cm\r\n used_cms = cms\r\n if metric_name.endswith('tp'):\r\n metrics_dict[metric_name] = TruePositive(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('fp'):\r\n metrics_dict[metric_name] = FalsePositive(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('fn'):\r\n metrics_dict[metric_name] = FalseNegative(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('tn'):\r\n metrics_dict[metric_name] = TrueNegative(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('precision'):\r\n metrics_dict[metric_name] = Precision(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(\r\n 
device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('recall'):\r\n metrics_dict[metric_name] = Recall(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('sensitivity'):\r\n metrics_dict[metric_name] = Sensitivity(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('specificity'):\r\n metrics_dict[metric_name] = Specificity(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('dsc'):\r\n metrics_dict[metric_name] = DSC(used_cm, update=update_flags[update_flag_id], callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('auc'):\r\n metrics_dict[metric_name] = AUC(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id + 1] = False\r\n elif metric_name.endswith('ap'):\r\n metrics_dict[metric_name] = AP(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id + 1] = False\r\n elif metric_name.endswith('hd95'):\r\n metrics_dict[metric_name] = HD95(threshold).to(device)\r\n else:\r\n logger.error('Unrecognized metric: %s' % metric_name)\r\n continue\r\n for curve_name in curves:\r\n if curve_name.startswith('per_target_'):\r\n callback_fn = per_target_transform\r\n update_flag_id = 2\r\n used_cm = cm_per_target\r\n used_cms = cms_per_target\r\n else:\r\n callback_fn = None\r\n update_flag_id = 0\r\n used_cm = cm\r\n used_cms = cms\r\n if curve_name.endswith('roc'):\r\n if curve_name.replace('roc', 'auc') not in metrics_dict:\r\n logger.warning('%s not in metrics but %s in curves. Adding %s to metrics'\r\n % (curve_name.replace('roc', 'auc'), curve_name, curve_name.replace('roc', 'auc')))\r\n metrics_dict[curve_name.replace('roc', 'auc')] = AUC(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn)\r\n update_flags[update_flag_id + 1] = False\r\n curves_dict[curve_name] = metrics_dict[curve_name.replace('roc', 'auc')]\r\n elif curve_name.endswith('pr'):\r\n if curve_name.replace('pr', 'ap') not in metrics_dict:\r\n logger.warning('%s not in metrics but %s in curves. Adding %s to metrics'\r\n % (curve_name.replace('pr', 'ap'), curve_name, curve_name.replace('pr', 'ap')))\r\n metrics_dict[curve_name.replace('pr', 'ap')] = AP(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn)\r\n update_flags[update_flag_id + 1] = False\r\n curves_dict[curve_name] = metrics_dict[curve_name.replace('pr', 'ap')]\r\n if len(metrics_dict) == 0:\r\n logger.critical('No metric is added')\r\n exit(1)\r\n return metrics_dict, curves_dict",
"async def test_full_config(hass, mock_client):\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED",
"def test_metrics(self):\n self.assertIsInstance(self.analytics.suites[testReportSuite].metrics, omniture.utils.AddressableList)",
"def test_api_build_metrics_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.MetricsRequest()\n path, method = default_api.api_build_metrics_get(params)\n self.assertEqual(path, '/api/metrics/builds')\n self.assertEqual(method, 'GET')",
"def test_get_host(self):\n pass",
"def getConfigs(self, host):\n raise \"not implemented\""
] | [
"0.9261441",
"0.69093883",
"0.66357327",
"0.6320196",
"0.61783504",
"0.61759925",
"0.6166086",
"0.60826665",
"0.6074115",
"0.6073981",
"0.60514337",
"0.60396063",
"0.60320044",
"0.6024438",
"0.6004357",
"0.5943179",
"0.59247345",
"0.5835556",
"0.5833599",
"0.5822436",
"0.58194226",
"0.58163667",
"0.58161587",
"0.5816018",
"0.5799027",
"0.57934535",
"0.5770916",
"0.57486796",
"0.5717763",
"0.5695316"
] | 0.9500964 | 0 |
Test case for get_network | def test_get_network(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_networking_project_network_get(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_get_default_network(self):\n pass",
"def test_networking_project_network_service_get(self):\n pass",
"def test_add_network(self):\n pass",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_create_network():\n _network = Network()",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def test_register_network(self):\n pass",
"def test_networking_project_network_event_get(self):\n pass",
"def test_networking_project_network_tag_get(self):\n pass",
"def test_get_lab_network_by_name(\n self, authenticated_client, lab_path, test_network, test_network_data\n ):\n resp = authenticated_client.api.get_lab_network(lab_path, test_network)\n assert resp[\"data\"][\"name\"] == test_network_data[\"name\"]",
"def test_read_cluster_network(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)",
"def test_support_NETWORK(self):\n self.assertEqual(self._parseFeature(\"NETWORK\", \"IRCNet\"), \"IRCNet\")",
"def show_network(self, network, **_params):\r\n return self.get(self.network_path % (network), params=_params)",
"def test_api_use_web_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/web-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def _build_network(self):\n pass",
"def get_network(self):\n return self._network",
"def test_03(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_03\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"3-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network,\n destination=Address(\"1:*\"),\n ).doc(\"3-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"3-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"3-2-1\") \\\n .success()\n\n # network 2 sees nothing\n tnet.sniffer2.start_state.doc(\"3-3-0\") \\\n .timeout(10).doc(\"3-3-1\") \\\n .success()\n\n # network 3 sees nothing\n tnet.sniffer3.start_state.doc(\"3-4-0\") \\\n .timeout(10).doc(\"3-4-1\") \\\n .success()\n\n # run the group\n tnet.run()",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_02(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_02\")\n\n # create a network\n tnet = TNetwork()\n\n # extract the adapter to network 1\n net_1_adapter = tnet.iut.nsap.adapters[1]\n if _debug: TestIAmRouterToNetwork._debug(\" - net_1_adapter: %r\", net_1_adapter)\n\n # test device sends request\n tnet.iut.start_state.doc(\"2-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network, adapter=net_1_adapter).doc(\"2-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"2-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"2-2-1\") \\\n .success()\n\n # network 2 sees nothing\n tnet.sniffer2.start_state.doc(\"2-3-0\") \\\n .timeout(10).doc(\"2-3-1\") \\\n .success()\n\n # network 3 sees nothing\n tnet.sniffer3.start_state.doc(\"2-4-0\") \\\n .timeout(10).doc(\"2-4-1\") \\\n .success()\n\n # run the group\n tnet.run()",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def test_networking_project_network_update(self):\n pass",
"def test_api_use_royal_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/royal-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_04(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_04\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"4-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network, network=1).doc(\"4-1-1\") \\\n .success()\n\n # network 1 sees nothing\n tnet.sniffer1.start_state.doc(\"4-2-0\") \\\n .timeout(10).doc(\"4-2-1\") \\\n .success()\n\n # network 2 sees router to network 1\n tnet.sniffer2.start_state.doc(\"4-3-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1],\n ).doc(\"4-3-1\") \\\n .success()\n\n # network 3 sees router to network 1\n tnet.sniffer3.start_state.doc(\"4-4-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1],\n ).doc(\"4-4-1\") \\\n .success()\n\n # run the group\n tnet.run()",
"def test_delete_network(self):\n pass",
"def test_create_cluster_network(self):\n pass"
] | [
"0.85412544",
"0.82629603",
"0.8242901",
"0.77703416",
"0.7688053",
"0.76391965",
"0.7559011",
"0.7338006",
"0.7306965",
"0.7251807",
"0.7096172",
"0.70703405",
"0.7001781",
"0.69212246",
"0.6829163",
"0.68044156",
"0.6790233",
"0.67613745",
"0.6760504",
"0.6757719",
"0.6749192",
"0.67017144",
"0.6646815",
"0.6612994",
"0.6588925",
"0.6584048",
"0.6580782",
"0.6577719",
"0.6565161",
"0.65148515"
] | 0.9362557 | 0 |
Test case for get_networks | def test_get_networks(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_aws_service_api_networks_get(self):\n pass",
"def test_get_network(self):\n pass",
"def test_networking_project_network_get(self):\n pass",
"def test_get_unregistered_networks(self):\n pass",
"def test_networking_project_network_list(self):\n pass",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def show_networks():\n return get_networks()",
"def test_get_default_network(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None",
"def test_add_network(self):\n pass",
"def getNets(self):\n\t\treturn NetLoader.listNetworks()",
"def test_networking_project_network_service_get(self):\n pass",
"def test_list_cluster_network(self):\n pass",
"def test_external_networks(self):\n network_list = self.neutron_operations.find_networks(router_external=True)\n self.assertNotEqual(len(network_list), 0, \"No external networks found\")",
"def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()",
"def test_retrieve_networks(site):\n models.Attribute.objects.create(\n site=site, resource_name='Network', name='test'\n )\n\n net_8 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/8', attributes={'test': 'foo'}\n )\n net_24 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/24', attributes={'test': 'bar'}\n )\n net_25 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/25', attributes={'test': 'baz'}\n )\n ip = models.Network.objects.create(\n site=site, cidr=u'10.0.0.1/32'\n )\n\n # root=True\n assert list(site.networks.filter(parent_id=None)) == [net_8]\n\n # include_networks=True, include_ips=Fals\n assert list(site.networks.filter(is_ip=False)) == [net_8, net_24, net_25]\n\n # include_networks=False, include_ips=False\n assert list(site.networks.none()) == []\n\n # include_networks=True, include_ips=True\n assert list(site.networks.all()) == [net_8, net_24, net_25, ip]\n\n # include_networks=False, include_ips=True\n assert list(site.networks.filter(is_ip=True)) == [ip]\n\n # Filter by attribute\n assert list(site.networks.by_attribute(None, 'foo')) == []\n assert list(site.networks.by_attribute('test', 'foo')) == [net_8]\n\n # Get by address\n assert site.networks.get_by_address(u'10.0.0.0/8') == net_8\n\n #\n # .get_closest_parent()\n #\n # Closest parent for non-existent 10.0.0.128/32 network should be /24\n assert site.networks.get_closest_parent(u'10.0.0.128/32') == net_24\n\n # Closest parent for non-existent 10.0.0.2/32 network should be /25\n assert site.networks.get_closest_parent(u'10.0.0.2/32') == net_25\n\n # Matching ip with shorter prefix_length should not match\n with pytest.raises(models.Network.DoesNotExist):\n site.networks.get_closest_parent(u'10.0.0.2/32', prefix_length=27)\n\n # Non-existent closest parent should error\n with pytest.raises(models.Network.DoesNotExist):\n site.networks.get_closest_parent(u'1.0.0.2/32')\n\n # Invalid prefix_length\n with pytest.raises(exc.ValidationError):\n site.networks.get_closest_parent(u'10.0.0.2/32', prefix_length='shoe')\n\n # Invalid CIDR\n with pytest.raises(exc.ValidationError):\n site.networks.get_closest_parent(u'1')",
"def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def test_networking_project_network_create(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def list_networks(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('networks', self.networks_path, retrieve_all,\r\n **_params)",
"def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")",
"def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % (node),None)\n return data",
"def print_networks(self):\n print('Networks initialized')\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('[Network {0}] Total number of parameters : {1:.3f} M'.format(name, num_params / 1e6))",
"def _test_network_list_paged(\n self, filter_params, expected_networks, page_data,\n source_networks=None, **extra_kwargs):\n filter_params = filter_params or {}\n sort_dir = page_data['sort_dir']\n # invert sort_dir for calls\n sort_dir = 'asc' if sort_dir == 'desc' else 'desc'\n call_args = {'single_page': True, 'limit': 21, 'sort_key': 'id',\n 'sort_dir': sort_dir}\n\n return_values = []\n all_networks = (self.networks.list() if source_networks is None\n else source_networks)\n\n expected_calls = []\n\n params = filter_params.copy()\n params.update(call_args)\n if page_data.get('marker_id'):\n params.update({'marker': page_data.get('marker_id')})\n extra_kwargs.update({'marker': page_data.get('marker_id')})\n return_values.append(all_networks[0:21])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n\n self.mock_network_list.side_effect = return_values\n\n extra_kwargs.update(filter_params)\n ret_val, has_more_data, has_prev_data = api.neutron.network_list_paged(\n self.request, page_data, **extra_kwargs)\n self.mock_network_list.assert_has_calls(expected_calls)\n self.assertEqual(set(n.id for n in expected_networks),\n set(n.id for n in ret_val))\n self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,\n [n.id for n in ret_val])\n return ret_val, has_more_data, has_prev_data",
"def networks(self) -> Sequence['outputs.NetworkConfigResponse']:\n return pulumi.get(self, \"networks\")",
"def test_create_network():\n _network = Network()",
"def test_networking_project_network_tag_get(self):\n pass"
] | [
"0.86475784",
"0.8380495",
"0.77561074",
"0.7518428",
"0.7511001",
"0.7463857",
"0.72843784",
"0.7266594",
"0.7242535",
"0.7191487",
"0.7143435",
"0.7140653",
"0.7107686",
"0.69693047",
"0.69627374",
"0.69444",
"0.69138145",
"0.68920267",
"0.6867035",
"0.6854733",
"0.67448276",
"0.6739571",
"0.67357296",
"0.6715929",
"0.6690356",
"0.66836435",
"0.6659929",
"0.6652618",
"0.6612549",
"0.66053766"
] | 0.9308238 | 0 |
Test case for get_pending_users | def test_get_pending_users(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_users_eligible_for_fist_notification_with_no_result(self):\n # Given:\n self.batch_setup()\n # When:\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(0, len(users))\n self.assertNotIn(self.user_0, users)\n self.assertNotIn(self.user_2, users)\n self.assertNotIn(self.user_1, users)\n self.assertNotIn(self.user_3, users)",
"def test_list_inactive_users(self):\r\n # for now just make sure we can get a 200 call on it.\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/accounts/inactive',\r\n params=params,\r\n status=200)\r\n # by default we shouldn't have any inactive users\r\n data = json.loads(res.body)\r\n users = [u for u in data['users']]\r\n for u in users:\r\n self.assertEqual(0, u['invite_ct'], \"Count should be 0 to start.\")",
"def test_get_users_eligible_for_second_notification_with_no_result(self):\n # Given:\n self.batch_setup()\n # When:\n response = self.client.get(\"/api/batch/account/users/eligible-for-second-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(0, len(users))\n self.assertNotIn(self.user_0, users)\n self.assertNotIn(self.user_2, users)\n self.assertNotIn(self.user_1, users)\n self.assertNotIn(self.user_3, users)",
"def test_get_total_users_get(self):\n pass",
"def test_get_users_eligible_for_third_notification_with_no_result(self):\n # Given:\n self.batch_setup()\n # When:\n response = self.client.get(\"/api/batch/account/users/eligible-for-third-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(0, len(users))\n self.assertNotIn(self.user_0, users)\n self.assertNotIn(self.user_2, users)\n self.assertNotIn(self.user_1, users)\n self.assertNotIn(self.user_3, users)",
"def test_get_users_eligible_for_fist_notification(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_24_months_ago = datetime.utcnow() - timedelta(days=750)\n criteria = {\"last_login_date\": _datetime_24_months_ago}\n criteria_one = {\"account_creation_date\": _datetime_24_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_1, criteria_one)\n self.update_test_data(self.user_3, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(4, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertIn(self.user_1, users)\n self.assertIn(self.user_3, users)",
"def test_get_users(self):\n pass",
"def test_users_get(self):\n pass",
"def test_users_get(self):\n pass",
"def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])",
"def test_find_pending_invitations_by_user(session): # pylint:disable=unused-argument\n invitation = factory_invitation_model(session=session, status='PENDING')\n session.add(invitation)\n session.commit()\n\n retrieved_invitation = InvitationModel.find_pending_invitations_by_user(invitation.sender_id)\n assert len(retrieved_invitation) == 1\n assert retrieved_invitation[0].recipient_email == invitation.recipient_email",
"def get_pending_friends(cu_id):\n users = db.session.execute(\n \"\"\"select fr.user_1_id, u.username, u.firstname, u.lastname\n from friend_request as fr inner join userm as u on fr.user_1_id = u.id \n where fr.user_2_id = :cu_id\n and fr.approved is NULL\"\"\",\n {\"cu_id\": cu_id}\n )\n return users",
"def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids",
"def test_get_users_eligible_for_fist_notification_with_last_login_null(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_24_months_ago = datetime.utcnow() - timedelta(days=730)\n criteria_one = {\"account_creation_date\": _datetime_24_months_ago}\n criteria = {\"last_login_date\": None}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_0, criteria_one)\n self.update_test_data(self.user_2, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(2, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertNotIn(self.user_1, users)\n self.assertNotIn(self.user_3, users)",
"def get_pending_registration_requests(self,user,site):\n\n return self.filter(project=site,\n user=user,\n status=RegistrationRequest.PENDING)",
"def test_get_users_eligible_for_second_notification(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_30_months_ago = datetime.utcnow() - timedelta(days=1064)\n criteria = {\"last_login_date\": _datetime_30_months_ago}\n criteria_one = {\"account_creation_date\": _datetime_30_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_1, criteria_one)\n self.update_test_data(self.user_3, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-second-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(4, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertIn(self.user_1, users)\n self.assertIn(self.user_3, users)",
"def get_all_users():",
"def getInterestedUsers():",
"def getResponsibleUsers():",
"def users_groups_pending():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n\n # Query user's pending project requests\n project_requests = get_user_pending_project_requests(unix_name)\n project_requests = [\n project_request\n for project_request in project_requests\n if session[\"url_host\"][\"unix_name\"] in project_request[\"name\"]\n ]\n # Check user status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n return render_template(\n \"users_groups_pending.html\",\n project_requests=project_requests,\n user_status=user_status,\n )",
"def test_get_users_eligible_for_fist_notification_with_last_login_not_null(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_24_months_ago = datetime.utcnow() - timedelta(days=730)\n criteria = {\"last_login_date\": _datetime_24_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(2, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertNotIn(self.user_1, users)\n self.assertNotIn(self.user_3, users)",
"def test_get_users_eligible_for_third_notification(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_35_months_ago = datetime.utcnow() - timedelta(days=1069)\n criteria = {\"last_login_date\": _datetime_35_months_ago}\n criteria_one = {\"account_creation_date\": _datetime_35_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_1, criteria_one)\n self.update_test_data(self.user_3, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-third-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = response.get_json()\n self.assertEqual(4, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertIn(self.user_1, users)\n self.assertIn(self.user_3, users)",
"def test_resource_user_resource_find_users_get(self):\n pass",
"def test_api_user_get(self):\n pass",
"def test_get_users(self):\n print('(' + self.test_get_users.__name__+')',\n self.test_get_users.__doc__)\n users = self.connection.get_users()\n # Check we get right size of users table\n self.assertEqual(len(users), INITIAL_USERS_COUNT)\n # check PATIENT and DOCTOR data with users object we got\n for user in users:\n if user['username'] == PATIENT_USERNAME:\n self.assertDictContainsSubset(user, PATIENT['public_profile'])\n elif user['username'] == DOCTOR_USERNAME:\n self.assertDictContainsSubset(user, DOCTOR['public_profile'])",
"def test_list_pending_template_subscriptions(self):\n pass",
"def test_get_all_users(self):\n api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n users = [user.getUserName() for user in api.user.get_users()]\n\n self.assertEqual(users, ['chuck', TEST_USER_NAME])",
"def test_resource_user_resource_get_user_get(self):\n pass",
"def test_groups_group_users_get(self):\n pass",
"def test_groups_group_users_get(self):\n pass"
] | [
"0.702112",
"0.67669266",
"0.67535657",
"0.6745148",
"0.6674903",
"0.6654138",
"0.6634914",
"0.6570787",
"0.6570787",
"0.6568846",
"0.6496768",
"0.638089",
"0.63309",
"0.63273996",
"0.63139266",
"0.63125527",
"0.63123757",
"0.6269257",
"0.6266714",
"0.6230968",
"0.6192803",
"0.61892176",
"0.61432654",
"0.6121558",
"0.6101281",
"0.60954416",
"0.60857964",
"0.60833603",
"0.6061685",
"0.6061685"
] | 0.9332466 | 0 |
Test case for get_project | def test_get_project(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_projects(self):\n pass",
"def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)",
"def test_read_project(self):\n pass",
"def test_read_project(self):\n pass",
"def test_list_project(self):\n pass",
"def test_list_project_request(self):\n pass",
"def test_get_project(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n response = self.request_knox(url)\n\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'title': self.project.title,\n 'type': self.project.type,\n 'parent': str(self.category.sodar_uuid),\n 'description': self.project.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': True,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n },\n str(self.owner_as.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as.sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n }\n self.assertEqual(response_data, expected)",
"def test_create_project_request(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_find_project(self):\n result = Project.objects.find(\n ['test'], project_type=PROJECT_TYPE_PROJECT\n )\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.project)",
"def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)",
"def get_project(self):\n raise NotImplementedError(\"get_project is not implemented\")",
"def test_add_project(self):\n pass",
"def get_project(arn=None):\n pass",
"def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)",
"def project():",
"def project():",
"def project():",
"def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)",
"def GetProject(args):\n return args.project or properties.VALUES.core.project.GetOrFail()",
"def test_demo_project_call(self):\n resp = DemoAivenStorage(os.environ[\"AIVEN_API_URL\"],\n os.environ[\"AIVEN_TOKEN\"]).get_project_names()\n assert isinstance(resp, list)\n assert len(resp) == 1\n assert 'romainducarrouge-31f2' in resp",
"def get_project(self, project_name):\n raise self._get_notimplementederror(\"get_project\")",
"def test_get_project_id_from_name(self, mock_get):\n mock_get.return_value = {'projectid': 'P100000'}\n self.assertEqual(self.project_id, \n get_project_id_from_name(self.project_name))",
"def test_project_detail(self):\n rv = self.app.get(\"/Assignment0\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"2015-02-04 21:57:12.156363\", rv.data)\n self.assertIn(\"221\", rv.data)\n self.assertIn(\"commit assignment0\", rv.data)\n\n self.assertIn(\"Assignment0/Procfile\", rv.data)\n self.assertIn(\"Assignment0/README.md\", rv.data)",
"def test_retrieve_project(self):\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'STRING',\n 'value': self.project_str_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)"
] | [
"0.84153426",
"0.8268446",
"0.8254282",
"0.8254282",
"0.7976006",
"0.7739771",
"0.7720523",
"0.7649945",
"0.76177305",
"0.76177305",
"0.76177305",
"0.7544601",
"0.7544601",
"0.7538402",
"0.7496717",
"0.7475971",
"0.7425998",
"0.7409784",
"0.7373276",
"0.73607486",
"0.7268182",
"0.7268182",
"0.7268182",
"0.72537357",
"0.7208701",
"0.7139335",
"0.7109974",
"0.7069893",
"0.7058777",
"0.7039981"
] | 0.94270986 | 0 |
Test case for get_project_virt_realms | def test_get_project_virt_realms(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_get_virtualization_realms(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_set_project_default_virtualization_realm(self):\n pass",
"def test_register_virtualization_realm(self):\n pass",
"def test_enable_virt_realm_remote_access(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def test_update_virt_realm(self):\n pass",
"def test_show_vcs_resources(mgmt_session):\n vcs_resource = rift.vcs.vcs.VcsResource(mgmt_session)\n vcs_resource_info = None\n\n # Get vcs resources\n vcs_resource_info = vcs_resource.get_vcs_resource()\n\n # Verify there are VM entries in the vcs resource info container\n vms = [vm for vm in vcs_resource_info.vm]\n if len(vms) == 0:\n raise AssertionError(\"No entries found in vcs resource info\")",
"def test_ipam_vrfs_list(self):\n pass",
"def init_cloud_virtual_resources():\n test_cldvirt_resources = []\n\n # add info to list in memory, one by one, following signature values\n cldvirtres_ID = 1\n cldvirtres_name = \"nova-compute-1\"\n cldvirtres_info = \"nova VM in Arm pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 2\n cldvirtres_name = \"nova-compute-2\"\n cldvirtres_info = \"nova VM in LaaS\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [2,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 3\n cldvirtres_name = \"nova-compute-3\"\n cldvirtres_info = \"nova VM in x86 pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n\n # write list to binary file\n write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)\n\n return test_cldvirt_resources",
"def test_get_project(self):\n pass",
"def test_update_virt_realm_remote_access_config(self):\n pass",
"def test_get_virtual_accounts(self):\n pass",
"def test_add_multiple_pis_simultaneously_to_vpg_check_reallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 6\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 3\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 2 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[0:2]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, 
[0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 2\n # Attach 2 PIs from PR1 to VPG-2\n vpg_name = vpg_names[1]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[2:4]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [1, 1])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 3\n # Deattach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.del_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 4\n # Attach 2 PIs from PR1 to VPG-3\n vpg_name = vpg_names[2]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[4:6]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n 
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 5\n # Attach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.add_physical_interface(pi_obj)\n self._vnc_lib.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [2, 2])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 3)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1, 2])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])",
"def test_get_projects(self):\n pass",
"def test_list_project(self):\n pass",
"def test_get_all_virtualservices(self,setup_suite):\n _, resp = get('virtualservice')\n vs_obj_list = resp['results']\n for vs_obj in vs_obj_list:\n logger.info(\" >>> VS Name: %s <<<\" % vs_obj['name'])",
"def test_get_virtual_account_clients(self):\n pass",
"def test_get_cloud_resources(self):\n pass",
"def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)",
"def test_virtualservice_get(self):\n pass"
] | [
"0.79547757",
"0.7938157",
"0.7327878",
"0.7324844",
"0.7058304",
"0.68130153",
"0.64632165",
"0.63006413",
"0.615659",
"0.60483974",
"0.6028249",
"0.59369504",
"0.59081393",
"0.5822455",
"0.57989305",
"0.5722813",
"0.56844753",
"0.5654413",
"0.56344986",
"0.5625462",
"0.55771405",
"0.5506954",
"0.54876566",
"0.5472437",
"0.54688156",
"0.5457958",
"0.54467005",
"0.54378736",
"0.54320735",
"0.54299843"
] | 0.9593597 | 0 |
Test case for get_projects | def test_get_projects(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_get_project(self):\n pass",
"def test_list_project(self):\n pass",
"def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_list_project_request(self):\n pass",
"def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)",
"def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name",
"def test_get_projects_expanded(self):\n pass",
"def test_get_projects(client, session, models, tokens):\n response = client.get(\n \"/projects\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200\n assert len(response.json) > 0",
"def test_read_project(self):\n pass",
"def test_read_project(self):\n pass",
"def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)",
"def test_projects_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_project_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project',\n json=expected_response,\n status=200\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/project')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/project'\n assert \"MY-PROJECT-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response",
"def test_project_list(self):\n rv = self.app.get(\"/\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"Assignment1.0\", rv.data)\n self.assertIn(\"Assignment2.0\", rv.data)",
"def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())",
"def test_projects_id_get(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=56),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs",
"def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)",
"def test_get_project(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n response = self.request_knox(url)\n\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'title': self.project.title,\n 'type': self.project.type,\n 'parent': str(self.category.sodar_uuid),\n 'description': self.project.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': True,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n },\n str(self.owner_as.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as.sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n }\n self.assertEqual(response_data, expected)",
"def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)",
"def test_create_project_request(self):\n pass",
"def test_project_view(self):\n response = self.client.get('/projects/')\n self.assertEqual(response.status_code, 200)",
"def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)",
"def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)",
"def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects",
"def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()"
] | [
"0.8629419",
"0.8629419",
"0.85190177",
"0.8330099",
"0.8318978",
"0.83128667",
"0.8309023",
"0.82416004",
"0.8237626",
"0.807021",
"0.7747111",
"0.76708764",
"0.76370007",
"0.76370007",
"0.7572887",
"0.7512718",
"0.74851847",
"0.7437206",
"0.7353582",
"0.73048246",
"0.72927946",
"0.7272464",
"0.72511315",
"0.7194709",
"0.7164287",
"0.7116919",
"0.709441",
"0.7080594",
"0.7068021",
"0.7056202"
] | 0.9264087 | 0 |
Test case for get_projects_expanded | def test_get_projects_expanded(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_get_project(self):\n pass",
"def test_list_project(self):\n pass",
"def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def test_list_project_request(self):\n pass",
"def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name",
"def test_read_project(self):\n pass",
"def test_read_project(self):\n pass",
"def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)",
"def test_get_deployments_expanded(self):\n pass",
"def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)",
"def test_get_test_assets_expanded(self):\n pass",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_get_scenarios_expanded(self):\n pass",
"def test_get_depth_project(self):\n self.assertEqual(self.project.get_depth(), 1)",
"def test_get_children_project(self):\n children = self.project.get_children()\n self.assertEqual(children.count(), 0)",
"def test_replace_project(self):\n pass",
"def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)",
"def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def test_get_software_set_expanded(self):\n pass",
"def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs",
"def project_grp():\n pass",
"def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)",
"def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())",
"def test_get_systems_expanded(self):\n pass",
"def test_get_container_assets_expanded(self):\n pass",
"def _page_projects(self):\n return self._open(self.app.page_projects)",
"def test_demo_project_call(self):\n resp = DemoAivenStorage(os.environ[\"AIVEN_API_URL\"],\n os.environ[\"AIVEN_TOKEN\"]).get_project_names()\n assert isinstance(resp, list)\n assert len(resp) == 1\n assert 'romainducarrouge-31f2' in resp"
] | [
"0.7080929",
"0.67166996",
"0.67166996",
"0.6607443",
"0.6550806",
"0.63488877",
"0.6285188",
"0.62777454",
"0.62764364",
"0.62764364",
"0.61782056",
"0.617426",
"0.60859096",
"0.6053304",
"0.6003903",
"0.5992418",
"0.5978342",
"0.5841123",
"0.58395314",
"0.5798718",
"0.5794759",
"0.5792088",
"0.5784538",
"0.578427",
"0.57474643",
"0.5739762",
"0.57368517",
"0.56957614",
"0.5679569",
"0.5660083"
] | 0.94688165 | 0 |
Test case for get_revision | def test_get_revision(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def revision():\n pass",
"def test_revision_with_additional_parameters(self, mock_get_entity): # pylint: disable=C0103\n mock_get_entity.return_value = self.content_row_response\n\n revision = 'TestRevision'\n url = '/?index_key=123&{0}={1}&revision=456'.format(\n TestConfig.REVISION_PARAMETER,\n revision\n )\n response = self.client.get(url)\n\n row_key = '{0}:{1}'.format(TestConfig.APP_NAME, revision)\n mock_get_entity.assert_called_once_with(\n TestConfig.AZURE_STORAGE_TABLE,\n TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,\n row_key\n )\n self.assertEqual(response.data, self.content_row_response.content)\n self.assertEqual(response.status_code, 200)",
"def test_parse_diff_revision(self):\n self.assertEqual(\n self.tool.parse_diff_revision(filename=b'doc/readme',\n revision=b'bf544ea'),\n (b'doc/readme', b'bf544ea'))\n self.assertEqual(\n self.tool.parse_diff_revision(filename=b'/dev/null',\n revision=b'bf544ea'),\n (b'/dev/null', PRE_CREATION))\n self.assertEqual(\n self.tool.parse_diff_revision(filename=b'/dev/null',\n revision=b'0000000'),\n (b'/dev/null', PRE_CREATION))",
"def get_revision(self) -> str:\n raise NotImplementedError",
"def test_get_file_with_git_and_revision(self):\n self._test_get_file(\n tool_name='Git',\n revision='123',\n base_commit_id=None,\n expected_revision='123')",
"def get_revision(self, revision_id: int):\n return self.phab.differential.query(ids=[revision_id])[0]",
"def test_get_file_with_svn_and_revision(self):\n self._test_get_file(\n tool_name='Subversion',\n revision='123',\n base_commit_id=None,\n expected_revision='123')",
"def svn_client_commit_info_t_revision_get(svn_client_commit_info_t_self): # real signature unknown; restored from __doc__\n pass",
"def test_get_version(self):\n pass",
"def svn_client_commit_item_t_revision_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n pass",
"def svn_info_t_rev_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass",
"def get_revision(self, ehr_record, version):\n return self.version_manager.get_revision(ehr_record.record_id, version)",
"def test_root_valid_revision(self, mock_get_entity):\n mock_get_entity.return_value = self.content_row_response\n\n revision = 'TestRevision'\n url = '/?{0}={1}'.format(TestConfig.REVISION_PARAMETER, revision)\n response = self.client.get(url)\n\n row_key = '{0}:{1}'.format(TestConfig.APP_NAME, revision)\n mock_get_entity.assert_called_once_with(\n TestConfig.AZURE_STORAGE_TABLE,\n TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,\n row_key\n )\n self.assertEqual(response.data, self.content_row_response.content)\n self.assertEqual(response.status_code, 200)",
"def get_revision(vcs, reporoot, dirtyfunc=lambda rev, **kw: rev + '*'):\n revinfo = get_raw_revision(vcs, reporoot)\n if not revinfo:\n return None\n\n rev, dirty = revinfo\n return dirtyfunc(rev, repo=reporoot, vcs=vcs) if dirty and dirtyfunc is not None else rev",
"def revision(cls, ref=None):\n # TODO\n raise NotImplementedError",
"def get_revision(self):\n try:\n info = self.client.info(self.datastore)\n self.revision = info.revision\n self.svn_root = info.url\n return str(self.revision.number)\n except:\n self.logger.error(\"Svn2: Failed to get revision\", exc_info=1)\n self.revision = None\n return str(-1)",
"def svn_client_commit_item2_t_revision_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n pass",
"def test_contains_revision(self):\n\n # Note that query logic is tested separately by integration tests. This\n # test just checks that the function maps inputs to outputs as expected.\n\n mock_connection = MagicMock()\n mock_cursor = mock_connection.cursor()\n database = Database(mock_connection)\n\n with self.subTest(name='new revision'):\n mock_cursor.__iter__.return_value = [(0,)]\n\n result = database.contains_revision(sentinel.revision)\n\n # compare with boolean literal to test the type cast\n self.assertIs(result, False)\n query_values = mock_cursor.execute.call_args[0][-1]\n self.assertEqual(query_values, (sentinel.revision,))\n\n with self.subTest(name='old revision'):\n mock_cursor.__iter__.return_value = [(1,)]\n\n result = database.contains_revision(sentinel.revision)\n\n # compare with boolean literal to test the type cast\n self.assertIs(result, True)\n query_values = mock_cursor.execute.call_args[0][-1]\n self.assertEqual(query_values, (sentinel.revision,))",
"def provide_git_revision(cls):\n version = str(VERSION)\n git_revision = str(GIT_REVISION)\n git_date = str(GIT_DATE)\n if os.path.exists(\".git\"):\n from subprocess import check_output\n command = 'git describe --tags --long --dirty'\n version_string = check_output(command.split()).decode('utf-8').strip()\n if version_string != 'fatal: No names found, cannot describe anything.':\n # git describe -> tag-commits-sha-dirty\n version_string = version_string.replace('-dirty', '')\n version_string = version_string.lstrip('v')\n parts = version_string.split('-')\n parts_len = len(parts)\n # only tag or git sha\n if parts_len == 1:\n if cls.is_git_sha(parts[0]):\n git_revision = parts[0]\n git_revision = git_revision.lstrip('g')\n else:\n version = parts[0]\n if parts_len == 2:\n version = parts[0]\n git_revision = cls.get_git_revision(parts[1])\n if parts_len > 2:\n # git sha\n git_revision = cls.get_git_revision(parts[-1])\n # commits after given tag\n commits = cls.get_commits_count(parts[-2])\n # version based on tag\n version = ''.join(parts[:-1])\n if commits is not None:\n version = ''.join(parts[:-2])\n # normalize rc to rcN for PEP 440 compatibility\n version = version.lower()\n if version.endswith('rc'):\n version += '0'\n else:\n cls.logger.warning(\"Git describe command failed for current git repository\")\n git_date = cls.get_git_date(git_revision)\n else:\n from pkg_resources import get_distribution\n try:\n version, git_revision = get_distribution(\"hivemind\").version.split(\"+\")\n except:\n cls.logger.warning(\"Unable to get version and git revision from package data\")\n cls._save_version_file(version, git_revision, git_date)\n return version, git_revision",
"def test_get_url_on_diff_viewer_revision(self) -> None:\n self.assertEqual(\n self.action.get_url(context=self._create_request_context(\n url_name='view-diff-revision')),\n 'raw/')",
"def revision(self):\n return self._revision",
"def revision(self):\n return self._revision",
"def get_document_revision(draft):\n rev = draft.Properties.Item[\"ProjectInformation\"][\"Revision\"].Value\n return int(rev)",
"def get_revision(self) -> str:\n try:\n return self.cmd.rev_parse(verify=True, args=\"HEAD\", check_returncode=True)\n except exc.CommandError:\n return \"initial\"",
"def getRevisionNumber(self):\n return self.getDocumentedObject().getRevision()",
"def revision(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"revision\")",
"def get_revision(ledger_name, document_id, block_address, digest_tip_address):\n result = qldb_client.get_revision(Name=ledger_name, BlockAddress=block_address, DocumentId=document_id,\n DigestTipAddress=digest_tip_address)\n return result",
"def test_item_revision_history(testapp, registry):\n objv1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n objv2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n objv3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n item_uuid = testapp.post_json('/embedding-tests', objv1, status=201).json['@graph'][0]['uuid']\n testapp.patch_json('/' + item_uuid, objv2, status=200)\n testapp.patch_json('/' + item_uuid, objv3, status=200)\n\n # now get revision history\n revisions = testapp.get('/' + item_uuid + '/@@revision-history').json['revisions']\n assert len(revisions) == 3 # we made 3 edits\n\n # lets make some more\n testapp.patch_json('/' + item_uuid, objv2, status=200)\n testapp.patch_json('/' + item_uuid, objv1, status=200)\n revisions = testapp.get('/' + item_uuid + '/@@revision-history').json['revisions']\n assert len(revisions) == 5 # now we made 5 edits\n # they should be ordered by sid, recall the patch order above\n for patched_metadata, revision in zip([objv1, objv2, objv3, objv2, objv1], revisions):\n assert revision['title'] == patched_metadata['title']",
"def needs_revision(self, text):\n match = re.search(\"rev: ([\\\\d]+)-([\\\\d]+)-([\\\\d]+)$\", text)\n\n if match is None:\n return\n\n month, day, year = match.groups()\n return date(int(year), int(month), int(day))",
"def getRepoRev(self, path):\r\n\r\n if self.verbose:\r\n print(\"INFO : Getting info in {}\".format(path))\r\n\r\n rev = None\r\n with workInDirectory(path):\r\n\r\n rev_cmd_args = ['git', 'rev-parse', 'HEAD']\r\n\r\n if self.verbose:\r\n print(\"INFO : Running command : {}\".format(\" \".join(rev_cmd_args)))\r\n\r\n rev = SubProcessUtility.runCommand(rev_cmd_args)\r\n\r\n if rev == None:\r\n print(\"Unable to get revision for {}, make sure config is correct\".format(path))\r\n\r\n return rev"
] | [
"0.7727598",
"0.7206311",
"0.70790094",
"0.70218754",
"0.69879144",
"0.6979561",
"0.6933143",
"0.6873911",
"0.68099153",
"0.68093705",
"0.6794752",
"0.6793481",
"0.67908335",
"0.6771646",
"0.67625374",
"0.67578036",
"0.6699831",
"0.66189593",
"0.6586928",
"0.65060014",
"0.64739597",
"0.64739597",
"0.64553356",
"0.6449481",
"0.6448635",
"0.6426194",
"0.63934934",
"0.635665",
"0.63316905",
"0.63114434"
] | 0.94098747 | 0 |
Test case for get_root_html | def test_get_root_html(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_root_html1(self):\n pass",
"def test_get_root_html2(self):\n pass",
"def test_get_root_html3(self):\n pass",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_layout_root(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert 'SpaceRocks' in html.find(\"title\").text",
"def get_root():\r\n return render_template(\"index.html\"), 200",
"def test_html_output(self):\n pass",
"def test_root(self):\n response = self.app.test_client().get('/test/')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/index.html')",
"def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))",
"def test_gettesttools_html(self):\n pass",
"def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))",
"def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")",
"def test_get_index_html(self):\n response = self.setup_get_html_test('/api/index')\n self.assertEqual(response.status_code, 200)",
"def test_root01(self):\n result = self.init_test_app().get('/')\n self.assertEqual(\n loads(result.data), {\n '_links': [{\n 'rel': 'pollination',\n 'href': '/pollination'\n }, {\n 'rel': 'tester-ui',\n 'href': '/tester'\n }, {\n 'href': \"/estimate-runtime\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"estimate\"\n }, {\n 'href': \"/reveg-curve.png\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"reveg-curve\"\n }]\n })",
"def test_cms_plugins_htmlsitemap_no_root_page(self):\n self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n <li><a href=\"/en/sitemap/\">Sitemap</a></li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_error_html_using_get(self):\n pass",
"def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertIn(\n b'<h1>42 Coffee Cups Test Assignment</h1>',\n response.content)",
"def test_get_services_html(self):\n pass",
"def test_root(self):\n\n with self.client:\n result = self.client.get('/', follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"col-2\">Users</h1>', result.data)",
"def test_cms_plugins_htmlsitemap_root_page_include(self):\n _root, parent_page, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=parent_page,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def testHTML(self):\n\n html = self.E.html()",
"def test_get_monitor_content_html(self):\n response = self.setup_get_html_test('/monitor')\n self.assertEqual(response.data, \"OK\")",
"def root():\n return render_template('root.html')",
"def get_html(self):\r\n return u'This is supposed to be test html.'",
"def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)",
"def test_htmldir(self):\n self.chck_triple('htmldir')",
"def test_cms_plugins_htmlsitemap_root_page_exclude(self):\n _root, parent_page, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=parent_page,\n include_root_page=False,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </div>\n \"\"\",\n )"
] | [
"0.9208287",
"0.9183189",
"0.8821761",
"0.71576107",
"0.71576107",
"0.69408536",
"0.67095774",
"0.66896033",
"0.66644514",
"0.6587587",
"0.6564367",
"0.6535696",
"0.65011555",
"0.64619464",
"0.64505744",
"0.64086664",
"0.63938594",
"0.6381569",
"0.6348248",
"0.6347358",
"0.63284767",
"0.6315148",
"0.63002574",
"0.6254653",
"0.6197712",
"0.61934793",
"0.6190255",
"0.6189446",
"0.6140963",
"0.61223984"
] | 0.93789506 | 0 |
Test case for get_root_html1 | def test_get_root_html1(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_root_html2(self):\n pass",
"def test_get_root_html3(self):\n pass",
"def test_get_root_html(self):\n pass",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_layout_root(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert 'SpaceRocks' in html.find(\"title\").text",
"def get_root():\r\n return render_template(\"index.html\"), 200",
"def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))",
"def test_html_output(self):\n pass",
"def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")",
"def test_root(self):\n response = self.app.test_client().get('/test/')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/index.html')",
"def test_root01(self):\n result = self.init_test_app().get('/')\n self.assertEqual(\n loads(result.data), {\n '_links': [{\n 'rel': 'pollination',\n 'href': '/pollination'\n }, {\n 'rel': 'tester-ui',\n 'href': '/tester'\n }, {\n 'href': \"/estimate-runtime\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"estimate\"\n }, {\n 'href': \"/reveg-curve.png\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"reveg-curve\"\n }]\n })",
"def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertIn(\n b'<h1>42 Coffee Cups Test Assignment</h1>',\n response.content)",
"def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))",
"def test_gettesttools_html(self):\n pass",
"def test_root(self):\n\n with self.client:\n result = self.client.get('/', follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"col-2\">Users</h1>', result.data)",
"def testHTML(self):\n\n html = self.E.html()",
"def root():\n return render_template('root.html')",
"def get_root(self) -> object:",
"def test_get_index_html(self):\n response = self.setup_get_html_test('/api/index')\n self.assertEqual(response.status_code, 200)",
"def test_tester01(self):\n result = self.init_test_app().get('/tester')\n self.assertEqual(result.data[0:33],\n b'<!DOCTYPE html>\\n<html lang=\"en\">\\n')",
"def test_error_html_using_get(self):\n pass",
"def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)",
"def test_get_services_html(self):\n pass",
"def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)",
"def test_cms_plugins_htmlsitemap_root_page_include(self):\n _root, parent_page, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=parent_page,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_cms_plugins_htmlsitemap_no_root_page(self):\n self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n <li><a href=\"/en/sitemap/\">Sitemap</a></li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_index_layout(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert 'SPACE' in html.find(\"h1\").text",
"def test_htmldir(self):\n self.chck_triple('htmldir')",
"def test_no_breadcrumbs(app: Sphinx) -> None:\n app.build()\n tree = parse_html(app.outdir / \"another.html\")\n nav = tree(\"nav\", attrs={\"aria-label\": \"breadcrumbs\"})\n assert len(nav) == 0"
] | [
"0.91706455",
"0.89792466",
"0.89171356",
"0.69257957",
"0.69257957",
"0.6632274",
"0.64663285",
"0.6308058",
"0.6271014",
"0.6236074",
"0.61630845",
"0.614734",
"0.61308604",
"0.6107725",
"0.6089825",
"0.60671157",
"0.6037479",
"0.6028422",
"0.60110754",
"0.60108376",
"0.59631133",
"0.59074646",
"0.5903833",
"0.590282",
"0.588848",
"0.5885315",
"0.5878052",
"0.585882",
"0.5800073",
"0.57940155"
] | 0.93548894 | 0 |
Test case for get_root_html2 | def test_get_root_html2(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_root_html1(self):\n pass",
"def test_get_root_html3(self):\n pass",
"def test_get_root_html(self):\n pass",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_html_output(self):\n pass",
"def test_gettesttools_html(self):\n pass",
"def test_layout_root(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert 'SpaceRocks' in html.find(\"title\").text",
"def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")",
"def get_root():\r\n return render_template(\"index.html\"), 200",
"def test_error_html_using_get(self):\n pass",
"def test_get_from_html(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"nested_folder\",\n \"another.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_nested_method()\n\n expected = textwrap.dedent(\n '''\\\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter",
"def test_get_services_html(self):\n pass",
"def get_root(self) -> object:",
"def testHTML(self):\n\n html = self.E.html()",
"def test_root01(self):\n result = self.init_test_app().get('/')\n self.assertEqual(\n loads(result.data), {\n '_links': [{\n 'rel': 'pollination',\n 'href': '/pollination'\n }, {\n 'rel': 'tester-ui',\n 'href': '/tester'\n }, {\n 'href': \"/estimate-runtime\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"estimate\"\n }, {\n 'href': \"/reveg-curve.png\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"reveg-curve\"\n }]\n })",
"def get_html(self):\r\n pass",
"def test_cms_plugins_htmlsitemap_no_root_page(self):\n self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n <li><a href=\"/en/sitemap/\">Sitemap</a></li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))",
"def test_cms_plugins_htmlsitemap_root_page_include_max_depth(self):\n root_page, _parent, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=root_page,\n max_depth=2,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a></li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_parseHtml(self):\n dom = lunchr.parseHtml(self.html)\n self.assertTrue(isinstance(dom, xml.dom.minidom.Document))",
"def test_cms_plugins_htmlsitemap_no_root_page_max_depth(self):\n self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n max_depth=2,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a></li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n <li><a href=\"/en/sitemap/\">Sitemap</a></li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_cms_plugins_htmlsitemap_root_page_include(self):\n _root, parent_page, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=parent_page,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_get_html(self):\r\n _html = self.peer_grading.get_html()",
"def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)",
"def test_get_index_html(self):\n response = self.setup_get_html_test('/api/index')\n self.assertEqual(response.status_code, 200)",
"def test_htmldir(self):\n self.chck_triple('htmldir')",
"def test_no_breadcrumbs(app: Sphinx) -> None:\n app.build()\n tree = parse_html(app.outdir / \"another.html\")\n nav = tree(\"nav\", attrs={\"aria-label\": \"breadcrumbs\"})\n assert len(nav) == 0",
"def test_root(self):\n response = self.app.test_client().get('/test/')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/index.html')",
"def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))"
] | [
"0.9036053",
"0.8845179",
"0.8749008",
"0.67276555",
"0.67276555",
"0.6346344",
"0.62273747",
"0.6178673",
"0.6021169",
"0.5989284",
"0.5977305",
"0.59632355",
"0.59604466",
"0.5943587",
"0.5923321",
"0.5895129",
"0.5886565",
"0.5867366",
"0.5842231",
"0.58347493",
"0.5797675",
"0.57816315",
"0.5778594",
"0.57511175",
"0.5725702",
"0.5721901",
"0.567661",
"0.5671178",
"0.56478894",
"0.5637802"
] | 0.93711907 | 0 |
Test case for get_root_html3 | def test_get_root_html3(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_root_html1(self):\n pass",
"def test_get_root_html2(self):\n pass",
"def test_get_root_html(self):\n pass",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_layout_root(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert 'SpaceRocks' in html.find(\"title\").text",
"def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))",
"def test_root01(self):\n result = self.init_test_app().get('/')\n self.assertEqual(\n loads(result.data), {\n '_links': [{\n 'rel': 'pollination',\n 'href': '/pollination'\n }, {\n 'rel': 'tester-ui',\n 'href': '/tester'\n }, {\n 'href': \"/estimate-runtime\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"estimate\"\n }, {\n 'href': \"/reveg-curve.png\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"reveg-curve\"\n }]\n })",
"def get_root(self) -> object:",
"def get_root():\r\n return render_template(\"index.html\"), 200",
"def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")",
"def test_root(self):\n response = self.app.test_client().get('/test/')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/index.html')",
"def test_html_output(self):\n pass",
"def test_root(self):\n\n with self.client:\n result = self.client.get('/', follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"col-2\">Users</h1>', result.data)",
"def test_htmldir(self):\n self.chck_triple('htmldir')",
"def test_validate_wc3(self):\r\n assert self.wc2_tree != 0",
"def test_cms_plugins_htmlsitemap_root_page_include(self):\n _root, parent_page, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=parent_page,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n </ul>\n </div>\n \"\"\",\n )",
"def root():\n return render_template('root.html')",
"def test_si_sample_html_partial(self):\n sample = load_sample('si-game.sample.html')\n doc = Document('http://sportsillustrated.cnn.com/baseball/mlb/gameflash/2012/04/16/40630_preview.html',\n sample)\n res = doc.get_clean_article()\n self.assertEqual('<div><div class=\"', res[0:17])",
"def test_error_html_using_get(self):\n pass",
"def test_gettesttools_html(self):\n pass",
"def url_root():\n return \"OK\"",
"def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))",
"def get_root(self):\n return self._root",
"def test_cms_plugins_htmlsitemap_root_page_include_max_depth(self):\n root_page, _parent, _page = self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n root_page=root_page,\n max_depth=2,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a></li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n </ul>\n </div>\n \"\"\",\n )",
"def testHTML(self):\n\n html = self.E.html()",
"def test_get_html(self):\r\n _html = self.peer_grading.get_html()",
"def get_html_local():\n import os\n TESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), 'html.html')\n with open(TESTDATA_FILENAME, 'r') as html_file:\n testdata = html_file.read()\n return testdata",
"def test_cms_plugins_htmlsitemap_no_root_page(self):\n self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n placeholder = Placeholder.objects.create(slot=\"maincontent\")\n page.placeholders.add(placeholder)\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n <li><a href=\"/en/sitemap/\">Sitemap</a></li>\n </ul>\n </div>\n \"\"\",\n )",
"def test_get_index_html(self):\n response = self.setup_get_html_test('/api/index')\n self.assertEqual(response.status_code, 200)"
] | [
"0.8423391",
"0.83745444",
"0.80464387",
"0.6529371",
"0.6529371",
"0.6125781",
"0.584347",
"0.58388925",
"0.5734809",
"0.572631",
"0.55734205",
"0.55210054",
"0.54271966",
"0.541828",
"0.54173034",
"0.53728527",
"0.535391",
"0.53508776",
"0.53465164",
"0.533352",
"0.5310434",
"0.53053254",
"0.5288737",
"0.52736014",
"0.5247925",
"0.52421945",
"0.518635",
"0.5160071",
"0.51593095",
"0.51584893"
] | 0.93749547 | 0 |
Test case for get_scenario | def test_get_scenario(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_scenarios(self):\n pass",
"def test_create_scenario(self):\n pass",
"def test_create_scenario1(self):\n pass",
"def _run_scenario(self, cls, method_name, context, args, config):",
"def run_scenario(self, run, run_id):\n\n raise NotImplementedError",
"def test_scenario(self):\n scenario_ids = list(scenarios.get_scenarios().keys())\n\n for scenario_id in scenario_ids:\n url = reverse('workbench_show_scenario', kwargs={'scenario_id': scenario_id})\n client = Client()\n response = client.get(url, follow=True)\n assert response.status_code == 200, scenario_id\n\n # Be sure we got the whole scenario. Again, we can't know what to expect\n # here, but at the very least, if there are verticals, they should not be\n # empty. That would be a sign that some data wasn't loaded properly while\n # rendering the scenario.\n html = lxml.html.fromstring(response.content)\n for vertical_tag in html.xpath('//div[@class=\"vertical\"]'):\n # No vertical tag should be empty.\n assert list(vertical_tag), u\"Scenario {}: Empty <vertical> shouldn't happen!\".format(scenario_id)",
"def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"",
"def test_2nd_scenario():\n start_entered_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n return \"ok\"",
"def test_get_scenarios_expanded(self):\n pass",
"def test_pytest_bdd_scenario_with_failed_step(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == -1\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[3].name == \"then\"\n assert spans[3].get_tag(ERROR_MSG)",
"def test_get_run(self):\n pass",
"def test_pytest_bdd_scenario_with_parameters(self):\n self.testdir.makefile(\n \".feature\",\n parameters=\"\"\"\n Feature: Parameters\n Scenario: Passing scenario\n Given I have 0 bars\n When I eat it\n Then I have -1 bars\n\n Scenario: Failing scenario\n Given I have 2 bars\n When I eat it\n Then I have 0 bar\n\n Scenario: Failing converter\n Given I have no bar\n \"\"\",\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenarios, given, then, when, parsers\n\n scenarios(\"parameters.feature\")\n\n BAR = None\n\n @given(parsers.re(\"^I have (?P<bars>[^ ]+) bar$\")) # loose regex\n def have_simple(bars):\n global BAR\n BAR = bars\n\n @given(parsers.re(\"^I have (?P<bars>\\\\d+) bars$\"), converters=dict(bars=int))\n def have(bars):\n global BAR\n BAR = bars\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(parsers.parse(\"I have {bars:d} bar\"))\n def check_parse(bars):\n assert BAR == bars\n\n @then(parsers.cfparse(\"I have {bars:d} bars\"))\n def check_cfparse(bars):\n assert BAR == bars\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 13 # 3 scenarios + 7 steps + 1 module\n assert json.loads(spans[1].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[3].get_tag(test.PARAMETERS)) == {\"bars\": -1}\n assert json.loads(spans[5].get_tag(test.PARAMETERS)) == {\"bars\": 2}\n assert json.loads(spans[7].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[9].get_tag(test.PARAMETERS)) == {\"bars\": \"no\"}",
"def test_virtualmachineconsole_scenario1(self):\n call_scenario1(self)",
"def test_feature_get_scenario_as_item(scenario_sentences, needle_scenario, expected_scenario, mocker):\n # given\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n # add Scenarios to Feature\n for sentence in scenario_sentences:\n feature.scenarios.append(mocker.MagicMock(sentence=sentence))\n\n # when\n actual_scenario = feature[needle_scenario]\n\n # then\n if expected_scenario is None:\n assert actual_scenario is None\n else:\n assert actual_scenario.sentence == expected_scenario",
"def test_get_goal(self):\n pass",
"def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])",
"def test_get_game(self):\n pass",
"def __call__(self):\n yaml_dict = self._val\n scenario = yaml_dict.get(\"scenario\")\n if not scenario:\n raise ScenarioFileInvalid(\n \"scenario.yml is invalid. 'scenario:' key does not exist, or 'scenario:' key exists but content under 'scenario:' key does not exist.\" # noqa\n )",
"def run_test(scenario_data):\r\n# Random choice between \"successful login\" and \"login failed\", which should simulate an a \"run_test\" test.\r\n\ttest_result = random.choice([\"successful login\", \"login failed\"])\r\n\treturn test_result",
"def scenario(self):\n scenario = []\n for action in self.application_tree['scenario']:\n scenario.append(action)\n return scenario",
"def describe_scenario(self, scenario, show_status=None):\n assert isinstance(scenario, model.Scenario)\n # -- OPEN ISSUE: Does this work w/ ScenarioOutline, too ?!?\n if show_status is None:\n show_status = self.show_status\n prefix = self.prefix\n text = u'{0}{1}: {2}\\n'.format(prefix, scenario.keyword, scenario.name)\n prefix += u' '\n for step in scenario.all_steps:\n text += self.describe_step(step, prefix=prefix,\n show_status=show_status)\n return text",
"def test_get(self):\n self.assertEqual(self.tester.get('SEASON_ENVIRONMENT'), 'winter')\n self.assertEqual(self.tester.get('depth'), 0.15)",
"def TestOneStep(self):\n pass",
"def test_all_scenarios(self):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 200\n html = lxml.html.fromstring(response.content)\n a_tags = list(html.xpath('//a'))\n\n # Load the loaded_scenarios from the classes.\n loaded_scenarios = list(scenarios.get_scenarios().values())\n\n # We should have an <a> tag for each scenario.\n assert_equals(len(a_tags), len(loaded_scenarios))\n\n # We should have at least one scenario with a vertical tag, since we use\n # empty verticals as our canary in the coal mine that something has gone\n # horribly wrong with loading the loaded_scenarios.\n assert any(\"<vertical_demo>\" in scen.xml for scen in loaded_scenarios)\n\n # Since we are claiming in try_scenario that no vertical is empty, let's\n # eliminate the possibility that a scenario has an actual empty vertical.\n assert all(\"<vertical_demo></vertical_demo>\" not in scen.xml for scen in loaded_scenarios)\n assert all(\"<vertical_demo/>\" not in scen.xml for scen in loaded_scenarios)",
"def test_get_waivers(league):\n pass",
"def create_scenarios(self, params, num_scenarios, random_seed):\n return None",
"def run_scenario(self):\n self.initialize_random_map()\n self.visualize_environment('initial')\n self.get_tower_target_coverages()\n self.solve_environment()\n self.visualize_environment('solved')",
"def test_pytest_bdd_with_missing_step_implementation(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 4\n assert spans[0].get_tag(ERROR_MSG)",
"def get_scenarios(experiments):\n return {exp.scenario for exp in experiments}",
"def pytest_cmdline_main(config):\n try:\n if len(config.option.scenarios) == 0:\n print(\"Available scenarios:\")\n for scenario in Scenario.scenarios.values():\n print(f\" {scenario.name} - {scenario.description}\")\n return 0\n except:\n pass\n\n return None"
] | [
"0.816424",
"0.76843756",
"0.7293048",
"0.71587723",
"0.689421",
"0.67790264",
"0.66742986",
"0.6638238",
"0.65570897",
"0.6419138",
"0.6400768",
"0.6324439",
"0.63148576",
"0.62755543",
"0.62335944",
"0.61972404",
"0.617291",
"0.6166616",
"0.61639094",
"0.61572576",
"0.60758764",
"0.6072418",
"0.59774965",
"0.5974758",
"0.5897099",
"0.58965164",
"0.5894384",
"0.58932513",
"0.58813894",
"0.5864217"
] | 0.9188539 | 0 |
Test case for get_scenarios | def test_get_scenarios(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_scenario(self):\n pass",
"def test_get_scenarios_expanded(self):\n pass",
"def test_create_scenario(self):\n pass",
"def create_scenarios(self, params, num_scenarios, random_seed):\n return None",
"def test_create_scenario1(self):\n pass",
"def test_all_scenarios(self):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 200\n html = lxml.html.fromstring(response.content)\n a_tags = list(html.xpath('//a'))\n\n # Load the loaded_scenarios from the classes.\n loaded_scenarios = list(scenarios.get_scenarios().values())\n\n # We should have an <a> tag for each scenario.\n assert_equals(len(a_tags), len(loaded_scenarios))\n\n # We should have at least one scenario with a vertical tag, since we use\n # empty verticals as our canary in the coal mine that something has gone\n # horribly wrong with loading the loaded_scenarios.\n assert any(\"<vertical_demo>\" in scen.xml for scen in loaded_scenarios)\n\n # Since we are claiming in try_scenario that no vertical is empty, let's\n # eliminate the possibility that a scenario has an actual empty vertical.\n assert all(\"<vertical_demo></vertical_demo>\" not in scen.xml for scen in loaded_scenarios)\n assert all(\"<vertical_demo/>\" not in scen.xml for scen in loaded_scenarios)",
"def test_feature_all_scenarios(mocker):\n # given\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n # add regular Scenarios to Feature\n feature.scenarios.extend([mocker.MagicMock(id=1), mocker.MagicMock(id=2)])\n # add Scenario Outline to Feature\n feature.scenarios.append(\n mocker.MagicMock(\n spec=ScenarioOutline,\n id=3,\n scenarios=[mocker.MagicMock(id=4), mocker.MagicMock(id=5)],\n )\n )\n # add Scenario Loop to Feature\n feature.scenarios.append(\n mocker.MagicMock(\n spec=ScenarioLoop,\n id=6,\n scenarios=[mocker.MagicMock(id=7), mocker.MagicMock(id=8)],\n )\n )\n\n # when\n all_scenarios = feature.all_scenarios\n\n # then\n assert len(all_scenarios) == 8\n assert all_scenarios[0].id == 1\n assert all_scenarios[1].id == 2\n assert all_scenarios[2].id == 3\n assert all_scenarios[3].id == 4\n assert all_scenarios[4].id == 5\n assert all_scenarios[5].id == 6\n assert all_scenarios[6].id == 7\n assert all_scenarios[7].id == 8",
"def test_scenario(self):\n scenario_ids = list(scenarios.get_scenarios().keys())\n\n for scenario_id in scenario_ids:\n url = reverse('workbench_show_scenario', kwargs={'scenario_id': scenario_id})\n client = Client()\n response = client.get(url, follow=True)\n assert response.status_code == 200, scenario_id\n\n # Be sure we got the whole scenario. Again, we can't know what to expect\n # here, but at the very least, if there are verticals, they should not be\n # empty. That would be a sign that some data wasn't loaded properly while\n # rendering the scenario.\n html = lxml.html.fromstring(response.content)\n for vertical_tag in html.xpath('//div[@class=\"vertical\"]'):\n # No vertical tag should be empty.\n assert list(vertical_tag), u\"Scenario {}: Empty <vertical> shouldn't happen!\".format(scenario_id)",
"def test_get_run(self):\n pass",
"def get_scenarios(experiments):\n return {exp.scenario for exp in experiments}",
"def tests():",
"def _run_scenario(self, cls, method_name, context, args, config):",
"def getTestResults():",
"def test_feature_get_scenario_as_item(scenario_sentences, needle_scenario, expected_scenario, mocker):\n # given\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n # add Scenarios to Feature\n for sentence in scenario_sentences:\n feature.scenarios.append(mocker.MagicMock(sentence=sentence))\n\n # when\n actual_scenario = feature[needle_scenario]\n\n # then\n if expected_scenario is None:\n assert actual_scenario is None\n else:\n assert actual_scenario.sentence == expected_scenario",
"def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])",
"def test_workflows_get(self):\n pass",
"def test_workflows_get(self):\n pass",
"def test_get_goals(self):\n pass",
"def workbench_scenarios():\n\t\treturn [(\"TCXBlock\",\n\t\t\"\"\"\n\t\t<tournamentcreator/>\n\t\t\"\"\"),\n\t\t]",
"def test_list_runs(self):\n pass",
"def pytest_generate_tests(metafunc):\n\n # test is setup or teardown - parametrize to all scenarios\n if metafunc.function.__name__ in [\"test_setup\", \"test_teardown\"]:\n metafunc.parametrize(\n \"scenario\", Scenario.scenarios.values())\n\n # parameterize test for each scenario it is included in\n else:\n metafunc.parametrize(\n \"scenario\", metafunc.cls._scenarios)",
"def test_pytest_bdd_scenario_with_parameters(self):\n self.testdir.makefile(\n \".feature\",\n parameters=\"\"\"\n Feature: Parameters\n Scenario: Passing scenario\n Given I have 0 bars\n When I eat it\n Then I have -1 bars\n\n Scenario: Failing scenario\n Given I have 2 bars\n When I eat it\n Then I have 0 bar\n\n Scenario: Failing converter\n Given I have no bar\n \"\"\",\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenarios, given, then, when, parsers\n\n scenarios(\"parameters.feature\")\n\n BAR = None\n\n @given(parsers.re(\"^I have (?P<bars>[^ ]+) bar$\")) # loose regex\n def have_simple(bars):\n global BAR\n BAR = bars\n\n @given(parsers.re(\"^I have (?P<bars>\\\\d+) bars$\"), converters=dict(bars=int))\n def have(bars):\n global BAR\n BAR = bars\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(parsers.parse(\"I have {bars:d} bar\"))\n def check_parse(bars):\n assert BAR == bars\n\n @then(parsers.cfparse(\"I have {bars:d} bars\"))\n def check_cfparse(bars):\n assert BAR == bars\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 13 # 3 scenarios + 7 steps + 1 module\n assert json.loads(spans[1].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[3].get_tag(test.PARAMETERS)) == {\"bars\": -1}\n assert json.loads(spans[5].get_tag(test.PARAMETERS)) == {\"bars\": 2}\n assert json.loads(spans[7].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[9].get_tag(test.PARAMETERS)) == {\"bars\": \"no\"}",
"def test_get_results(self):\n pass",
"def runtest(self):",
"def test_generate_all_testing(self):\n pass",
"def runTests(self):\n \n pass",
"def TestOneStep(self):\n pass",
"def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"",
"def spec_tests():\n pass",
"def test_customize_test_loads(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n response = self.app.test_client().get('/test/3')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')\n regression_tests = RegressionTest.query.all()\n self.assertIn(regression_tests[1].command, str(response.data))\n self.assertNotIn(regression_tests[0].command, str(response.data))"
] | [
"0.84538424",
"0.78753704",
"0.72910845",
"0.7237869",
"0.7178813",
"0.7143964",
"0.698214",
"0.6823275",
"0.67113143",
"0.66249156",
"0.6610945",
"0.6498361",
"0.6365719",
"0.634012",
"0.633731",
"0.62387687",
"0.62387687",
"0.62383527",
"0.62377405",
"0.6193853",
"0.6179586",
"0.6172407",
"0.61593455",
"0.6132078",
"0.6126963",
"0.6111624",
"0.6108071",
"0.6106658",
"0.61026835",
"0.6102027"
] | 0.9397413 | 0 |
Test case for get_scenarios_expanded | def test_get_scenarios_expanded(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_scenarios(self):\n pass",
"def test_get_scenario(self):\n pass",
"def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])",
"def test_feature_all_scenarios(mocker):\n # given\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n # add regular Scenarios to Feature\n feature.scenarios.extend([mocker.MagicMock(id=1), mocker.MagicMock(id=2)])\n # add Scenario Outline to Feature\n feature.scenarios.append(\n mocker.MagicMock(\n spec=ScenarioOutline,\n id=3,\n scenarios=[mocker.MagicMock(id=4), mocker.MagicMock(id=5)],\n )\n )\n # add Scenario Loop to Feature\n feature.scenarios.append(\n mocker.MagicMock(\n spec=ScenarioLoop,\n id=6,\n scenarios=[mocker.MagicMock(id=7), mocker.MagicMock(id=8)],\n )\n )\n\n # when\n all_scenarios = feature.all_scenarios\n\n # then\n assert len(all_scenarios) == 8\n assert all_scenarios[0].id == 1\n assert all_scenarios[1].id == 2\n assert all_scenarios[2].id == 3\n assert all_scenarios[3].id == 4\n assert all_scenarios[4].id == 5\n assert all_scenarios[5].id == 6\n assert all_scenarios[6].id == 7\n assert all_scenarios[7].id == 8",
"def test_get_projects_expanded(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def workbench_scenarios():\n return [\n (\"SummaryXBlock\",\n \"\"\"<summary/>\n \"\"\"),\n (\"Multiple SummaryXBlock\",\n \"\"\"<vertical_demo>\n <summary/>\n <summary/>\n <summary/>\n </vertical_demo>\n \"\"\"),\n ]",
"def test_all_scenarios(self):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 200\n html = lxml.html.fromstring(response.content)\n a_tags = list(html.xpath('//a'))\n\n # Load the loaded_scenarios from the classes.\n loaded_scenarios = list(scenarios.get_scenarios().values())\n\n # We should have an <a> tag for each scenario.\n assert_equals(len(a_tags), len(loaded_scenarios))\n\n # We should have at least one scenario with a vertical tag, since we use\n # empty verticals as our canary in the coal mine that something has gone\n # horribly wrong with loading the loaded_scenarios.\n assert any(\"<vertical_demo>\" in scen.xml for scen in loaded_scenarios)\n\n # Since we are claiming in try_scenario that no vertical is empty, let's\n # eliminate the possibility that a scenario has an actual empty vertical.\n assert all(\"<vertical_demo></vertical_demo>\" not in scen.xml for scen in loaded_scenarios)\n assert all(\"<vertical_demo/>\" not in scen.xml for scen in loaded_scenarios)",
"def workbench_scenarios(): \n return [\n (\"TermsXBlock\",\n \"\"\"<terms/>\n \"\"\"),\n (\"Multiple TermsXBlock\",\n \"\"\"<vertical_demo>\n <terms/>\n <terms/>\n <terms/>\n </vertical_demo>\n \"\"\"),\n ]",
"def workbench_scenarios():\n return [\n (\"HL rubric text XBlock\",\n \"\"\"<hl_rubric_text/>\n \"\"\"),\n\n ]",
"def get_scenarios(experiments):\n return {exp.scenario for exp in experiments}",
"def test_get_analyzed_recipe_instructions(self):\n pass",
"def workbench_scenarios():\n return [\n (\"MyXBlock\",\n \"\"\"<myxblock/>\n \"\"\"),\n (\"Multiple MyXBlock\",\n \"\"\"<vertical_demo>\n <myxblock/>\n <myxblock/>\n <myxblock/>\n </vertical_demo>\n \"\"\"),\n ]",
"def test_analyze_recipe_instructions(self):\n pass",
"def workbench_scenarios():\n return [\n (\"simstudentXBlock\",\n \"\"\"<vertical_demo>\n <simstudent/>\n </vertical_demo>\n \"\"\"),\n ]",
"def test_get_systems_expanded(self):\n pass",
"def create_scenarios(self, params, num_scenarios, random_seed):\n return None",
"def test_scenario(self):\n scenario_ids = list(scenarios.get_scenarios().keys())\n\n for scenario_id in scenario_ids:\n url = reverse('workbench_show_scenario', kwargs={'scenario_id': scenario_id})\n client = Client()\n response = client.get(url, follow=True)\n assert response.status_code == 200, scenario_id\n\n # Be sure we got the whole scenario. Again, we can't know what to expect\n # here, but at the very least, if there are verticals, they should not be\n # empty. That would be a sign that some data wasn't loaded properly while\n # rendering the scenario.\n html = lxml.html.fromstring(response.content)\n for vertical_tag in html.xpath('//div[@class=\"vertical\"]'):\n # No vertical tag should be empty.\n assert list(vertical_tag), u\"Scenario {}: Empty <vertical> shouldn't happen!\".format(scenario_id)",
"def test_create_scenario1(self):\n pass",
"def test_series_in_features(self):\n assert parse_command({'test{{A,B}}': {'depends_on': 'name{{A,B}}'}}) == [\n ('testA', {'depends_on': 'nameA'}), ('testB', {'depends_on': 'nameB'})]",
"def workbench_scenarios():\n return [\n (\"Oppia Embedding\",\n \"\"\"<vertical_demo>\n <oppia oppiaid=\"0\" src=\"https://www.oppia.org\" width=\"700\" />\n </vertical_demo>\n \"\"\"),\n ]",
"def workbench_scenarios():\n return [\n (\"filethumbs\",\n \"\"\"\\\n <vertical_demo>\n <filethumbs/>\n <filethumbs/>\n <filethumbs/>\n </vertical_demo>\n \"\"\")\n ]",
"def test_expand_experiments():\n template_script = get_template_script()\n experiment_systems = utils.CombinatorialLeaf(['explicit-system', 'implicit-system', 'hydration-system'])\n template_script['experiments']['system'] = experiment_systems\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=1, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 2\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=2, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 1",
"def workbench_scenarios():\n return [\n (\"KVXBlock\",\n \"\"\"<vertical_demo>\n <kvxblock/>\n </vertical_demo>\n \"\"\"),\n ]",
"def workbench_scenarios():\n return [\n (\"QnetXBlock\",\n \"\"\"<qnet/>\n \"\"\"),\n (\"Multiple QnetXBlock\",\n \"\"\"<vertical_demo>\n <qnet/>\n <qnet/>\n <qnet/>\n </vertical_demo>\n \"\"\"),\n ]",
"def test_pytest_bdd_scenario_with_parameters(self):\n self.testdir.makefile(\n \".feature\",\n parameters=\"\"\"\n Feature: Parameters\n Scenario: Passing scenario\n Given I have 0 bars\n When I eat it\n Then I have -1 bars\n\n Scenario: Failing scenario\n Given I have 2 bars\n When I eat it\n Then I have 0 bar\n\n Scenario: Failing converter\n Given I have no bar\n \"\"\",\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenarios, given, then, when, parsers\n\n scenarios(\"parameters.feature\")\n\n BAR = None\n\n @given(parsers.re(\"^I have (?P<bars>[^ ]+) bar$\")) # loose regex\n def have_simple(bars):\n global BAR\n BAR = bars\n\n @given(parsers.re(\"^I have (?P<bars>\\\\d+) bars$\"), converters=dict(bars=int))\n def have(bars):\n global BAR\n BAR = bars\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(parsers.parse(\"I have {bars:d} bar\"))\n def check_parse(bars):\n assert BAR == bars\n\n @then(parsers.cfparse(\"I have {bars:d} bars\"))\n def check_cfparse(bars):\n assert BAR == bars\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 13 # 3 scenarios + 7 steps + 1 module\n assert json.loads(spans[1].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[3].get_tag(test.PARAMETERS)) == {\"bars\": -1}\n assert json.loads(spans[5].get_tag(test.PARAMETERS)) == {\"bars\": 2}\n assert json.loads(spans[7].get_tag(test.PARAMETERS)) == {\"bars\": 0}\n assert json.loads(spans[9].get_tag(test.PARAMETERS)) == {\"bars\": \"no\"}",
"def expand_tasks_with_samples( # pylint: disable=R0913,R0914\n self,\n dag,\n chain_,\n samples,\n labels,\n task_type,\n adapter_config,\n level_max_dirs,\n):\n LOG.debug(f\"expand_tasks_with_samples called with chain,{chain_}\\n\")\n # Figure out how many directories there are, make a glob string\n directory_sizes = uniform_directories(len(samples), bundle_size=1, level_max_dirs=level_max_dirs)\n\n glob_path = \"*/\" * len(directory_sizes)\n\n LOG.debug(\"creating sample_index\")\n # Write a hierarchy to get the all paths string\n sample_index = create_hierarchy(\n len(samples),\n bundle_size=1,\n directory_sizes=directory_sizes,\n root=\"\",\n n_digits=len(str(level_max_dirs)),\n )\n\n LOG.debug(\"creating sample_paths\")\n sample_paths = sample_index.make_directory_string()\n\n LOG.debug(\"assembling steps\")\n # the steps in the chain\n steps = [dag.step(name) for name in chain_]\n\n # sub in globs prior to expansion\n # sub the glob command\n steps = [\n step.clone_changing_workspace_and_cmd(cmd_replacement_pairs=parameter_substitutions_for_cmd(glob_path, sample_paths))\n for step in steps\n ]\n\n # workspaces = [step.get_workspace() for step in steps]\n # LOG.debug(f\"workspaces : {workspaces}\")\n\n needs_expansion = is_chain_expandable(steps, labels)\n\n LOG.debug(f\"needs_expansion {needs_expansion}\")\n\n if needs_expansion:\n # prepare_chain_workspace(sample_index, steps)\n sample_index.name = \"\"\n LOG.debug(\"queuing merlin expansion tasks\")\n found_tasks = False\n conditions = [\n lambda c: c.is_great_grandparent_of_leaf,\n lambda c: c.is_grandparent_of_leaf,\n lambda c: c.is_parent_of_leaf,\n lambda c: c.is_leaf,\n ]\n for condition in conditions:\n if not found_tasks:\n for next_index_path, next_index in sample_index.traverse(conditional=condition):\n LOG.info(\n f\"generating next step for range {next_index.min}:{next_index.max} {next_index.max-next_index.min}\"\n )\n next_index.name = next_index_path\n\n sig = add_merlin_expanded_chain_to_chord.s(\n task_type,\n steps,\n samples[next_index.min : next_index.max],\n labels,\n next_index,\n adapter_config,\n next_index.min,\n )\n sig.set(queue=steps[0].get_task_queue())\n\n if self.request.is_eager:\n sig.delay()\n else:\n LOG.info(f\"queuing expansion task {next_index.min}:{next_index.max}\")\n self.add_to_chord(sig, lazy=False)\n LOG.info(f\"merlin expansion task {next_index.min}:{next_index.max} queued\")\n found_tasks = True\n else:\n LOG.debug(\"queuing simple chain task\")\n add_simple_chain_to_chord(self, task_type, steps, adapter_config)\n LOG.debug(\"simple chain task queued\")",
"def test_summarize_recipe(self):\n pass",
"def test_make_macrobes(self):\n basic_test_runner(self, 'macrobes')",
"def test_feature_get_scenario_as_item(scenario_sentences, needle_scenario, expected_scenario, mocker):\n # given\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n # add Scenarios to Feature\n for sentence in scenario_sentences:\n feature.scenarios.append(mocker.MagicMock(sentence=sentence))\n\n # when\n actual_scenario = feature[needle_scenario]\n\n # then\n if expected_scenario is None:\n assert actual_scenario is None\n else:\n assert actual_scenario.sentence == expected_scenario"
] | [
"0.7543995",
"0.64511335",
"0.6390361",
"0.6379775",
"0.6358714",
"0.6279671",
"0.6239349",
"0.6005463",
"0.58541197",
"0.58301437",
"0.5812175",
"0.5786495",
"0.57417274",
"0.57320136",
"0.57243",
"0.57126313",
"0.57055986",
"0.56548434",
"0.56532776",
"0.5651143",
"0.56251967",
"0.56123245",
"0.56119525",
"0.56082344",
"0.5607833",
"0.55979455",
"0.55876166",
"0.55833876",
"0.5581108",
"0.55704594"
] | 0.9469915 | 0 |
Test case for get_service_string | def test_get_service_string(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_service(self):",
"def getServiceName(self) -> str:\n ...",
"def test_get_virtual_service(self):\n pass",
"def test_virtualservice_get(self):\n pass",
"def _get_service_type(service):\n\n return service.split(':')[3]",
"def test_unknown_service(self):\n raise NotImplementedError # FIXME",
"def _get_service(self, service_name):\n if self._service:\n return self._service\n res = self._cc.services().get_by_name(service_name, name='label')\n self._service = res.resource\n return self._service",
"def test_get_services_html(self):\n pass",
"def get_ExistingTestServiceInfo(test_case, # type: AnyMagpieTestCaseType\n override_service_name=null, # type: Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> JSON\n app_or_url = get_app_or_url(test_case)\n svc_name = override_service_name if override_service_name is not null else test_case.test_service_name\n path = \"/services/{svc}\".format(svc=svc_name)\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n json_body = get_json_body(resp)\n svc_getter = \"service\"\n if TestVersion(test_case.version) < TestVersion(\"0.9.1\"):\n svc_getter = svc_name\n return json_body[svc_getter]",
"def service(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service\")",
"def test_service(self):\n report = self.api.ping()\n self.assertTrue(\"name\" in report)\n self.assertTrue(\"version\" in report)\n\n pkg = pkg_resources.get_distribution(\"pp-jsonpcrud-service\")\n\n self.assertEquals(report['name'], 'pp-jsonpcrud-service')\n self.assertEquals(report['version'], pkg.version)\n\n rc = self.api.do(\"get\")\n self.assertEquals(rc['msg'], \"GET: OK\")\n\n rc = self.api.do(\"post\", a=1)\n self.assertEquals(rc['msg'], \"POST: OK\")\n\n rc = self.api.do(\"put\", a=1)\n self.assertEquals(rc['msg'], \"PUT: OK\")\n\n rc = self.api.do(\"delete\", a=1)\n self.assertEquals(rc['msg'], \"DELETE: OK\")",
"def test_with_service_name(self):\n exc = RemoteIntegrationException(\n u\"Unreliable Service\", \n u\"I just can't handle your request right now.\"\n )\n\n # Since only the service name is provided, there are no details to\n # elide in the non-debug version of a problem detail document.\n debug_detail = exc.document_detail(debug=True)\n other_detail = exc.document_detail(debug=False)\n eq_(debug_detail, other_detail)\n\n eq_(u'The server tried to access Unreliable Service but the third-party service experienced an error.',\n debug_detail\n )",
"def get_from_service(self, service_name: str, key: str) -> Any:\n service_list = [\n self.data[\"services\"][s] for s in self.services if service_name.lower() == s\n ]\n if service_list:\n service = service_list[0]\n return service.get(key, None)",
"def service(self) -> str:\n return pulumi.get(self, \"service\")",
"def get_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover",
"def find_service(iface, context, name):",
"def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")",
"def test_service_api_get(service_app):\n response = service_app.get('/')\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert json.loads(response.data) == {'description': 'service is up', 'status': 200}",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def _get_service_version(service):\n\n return int(service.split(':')[4])",
"def get_service_name(service, rem):\n flavor = rem.os.package_type\n try:\n return _SERVICE_MAP[service][flavor]\n except KeyError:\n return None",
"def YumGetServiceName(vm):\n raise NotImplementedError",
"def testServiceGet(self):\n\n text = \"This is a test sentence. And another sentence to split.\"\n results = self.client.post(\"workflow\", json={\"name\": \"get\", \"elements\": [text]}).json()\n\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0]), 1)",
"def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")"
] | [
"0.70154655",
"0.7003743",
"0.664822",
"0.65895396",
"0.64724195",
"0.6270259",
"0.6215211",
"0.61835694",
"0.6146643",
"0.6134932",
"0.61176986",
"0.606586",
"0.6028957",
"0.6024573",
"0.60152435",
"0.5995219",
"0.59795713",
"0.59795713",
"0.59795713",
"0.5965451",
"0.59580064",
"0.5921444",
"0.5921444",
"0.5921444",
"0.5919314",
"0.59148073",
"0.58677936",
"0.584601",
"0.58292544",
"0.58292544"
] | 0.9333787 | 0 |
Test case for get_services_html | def test_get_services_html(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gettesttools_html(self):\n pass",
"def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)",
"def test_get_service_string(self):\n pass",
"def test_error_html_using_get(self):\n pass",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'annotation_storage', 'token', 'tag', 'openseadragonjson']:\r\n self.assertIn(key, context)",
"def test_get_root_html(self):\n pass",
"def test_html_output(self):\n pass",
"def get_html(self):\r\n pass",
"def test_mocked_get_simpleHtml(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_get\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>value</th><td>testValue</td></tr></table>', response.content)",
"def testHTML(self):\n\n html = self.E.html()",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage', 'token']:\r\n self.assertIn(key, context)",
"def test_get_root_html2(self):\n pass",
"def test_get_from_html(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_method()\n\n expected = textwrap.dedent(\n '''\\\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter",
"def test_get_root_html1(self):\n pass",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'sourceUrl', 'typeSource', 'poster', 'annotation_storage']:\r\n self.assertIn(key, context)",
"def test_virtualservice_get(self):\n pass",
"def test_get_index_html(self):\n response = self.setup_get_html_test('/api/index')\n self.assertEqual(response.status_code, 200)",
"def test_get_publish_html(self):\n response = self.setup_get_html_test('/api/publish')\n self.assertEqual(response.status_code, 200)",
"def test_get_from_html(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"nested_folder\",\n \"another.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_nested_method()\n\n expected = textwrap.dedent(\n '''\\\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter",
"def service(request):\n\treturn render(request,'service.html',None)",
"def test_get_monitor_content_html(self):\n response = self.setup_get_html_test('/monitor')\n self.assertEqual(response.data, \"OK\")",
"def test_export_html(self):\r\n resp = self.client.get_html(self.url)\r\n self.assertEquals(resp.status_code, 200)\r\n self.assertContains(resp, \"Export My Course Content\")",
"def test_sample(self):\n response = self.tester.get('/sample-household/',\n content_type='html/text')\n self.assertEqual(response.status_code, 200)",
"def test_get_virtual_service(self):\n pass",
"def get_html(self):\r\n raise NotImplementedError(\r\n \"get_html() must be provided by specific modules - not present in {0}\"\r\n .format(self.__class__))",
"def test_get_checklists_html(self):\r\n response = self.client.get(self.checklists_url, HTTP_ACCEPT='text/html')\r\n self.assertContains(response, \"Getting Started With Studio\")\r\n # The HTML generated will define the handler URL (for use by the Backbone model).\r\n self.assertContains(response, self.checklists_url)",
"def test_get_monitor_html(self):\n response = self.setup_get_html_test('/monitor')\n self.assertEqual(response.status_code, 200)",
"def test_get(self):\n response = self.client.get(self.url)\n\n # Standard response\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\"td_biblio/entry_list.html\")",
"def retrieve_offers_html(search_string):\n url = generate_request_url(search_string)\n return requests.get(url).text",
"def test_get_html(self):\r\n _html = self.peer_grading.get_html()"
] | [
"0.7298055",
"0.6630749",
"0.6586069",
"0.6555722",
"0.6478907",
"0.6477619",
"0.64631605",
"0.64401156",
"0.6406485",
"0.6298801",
"0.6297101",
"0.6242497",
"0.62286186",
"0.62091583",
"0.62068075",
"0.6133613",
"0.6131008",
"0.6127366",
"0.6117756",
"0.6072103",
"0.6046077",
"0.6042307",
"0.60071045",
"0.60042775",
"0.5966871",
"0.59490883",
"0.59431386",
"0.5923094",
"0.5906685",
"0.5890863"
] | 0.9317913 | 0 |
Test case for get_short_version | def test_get_short_version(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_version(self):\n pass",
"def test__get_component_version_short(self):\n self._ucr({'repository/online/component/a/version': '%d.%d' % (MAJOR, MINOR)})\n ver = self.u._get_component_versions('a', None, None)\n self.assertEqual(set((U.UCS_Version((MAJOR, MINOR, 0)),)), ver)",
"def _get_version(self):",
"def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')",
"def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])",
"def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")",
"def get_version():\n return 1",
"def test_get_oapi_version(self):\n pass",
"def test_version(self):\n pass",
"def get_version():\n return '%d.%d.%d' % version_info",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())",
"def testGetVersion(self):\n helper = pylint.PylintHelper()\n\n helper._GetVersion()",
"def version():\n\n pass",
"def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))",
"def Version(self) -> _n_0_t_12:",
"def Version(self) -> _n_0_t_12:",
"def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def test_get_cons3rt_version(self):\n pass",
"def test_parse_version():\n version = parse_version(__version__)\n assert type(version) == Version",
"def test_main_version(\n app_tester: ApplicationTester, valiant_app_title: str, valiant_version: str\n) -> None:\n app_tester.execute(\"--version\")\n expected = f\"{valiant_app_title} version {valiant_version}\\n\"\n assert expected == app_tester.io.fetch_output()",
"def test_low_client(self):\n version, file = self.get('', '3000000001100',\n self.app, self.platform)\n assert version == self.version_1_0_2",
"def __getVersionArg(self, version):\n if version == \"WORKING\":\n return None\n else:\n return str(version)",
"def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def version_min():\n return VERSION_MIN",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)",
"def test_version() -> None:\n assertion.assert_(Version, nanoqm.__version__)"
] | [
"0.77419084",
"0.7510357",
"0.7218181",
"0.7142744",
"0.70697105",
"0.6987484",
"0.6889665",
"0.6876873",
"0.6810084",
"0.6762898",
"0.6664488",
"0.66282976",
"0.66041094",
"0.6601576",
"0.6573623",
"0.65610874",
"0.65610874",
"0.65563107",
"0.65350926",
"0.65345055",
"0.6517597",
"0.6516588",
"0.6513801",
"0.6493177",
"0.648999",
"0.6485577",
"0.64715225",
"0.6468968",
"0.6446321",
"0.6435998"
] | 0.9518871 | 0 |
Test case for get_software | def test_get_software(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_software_set(self):\n pass",
"def test_get_software_bundle(self):\n pass",
"def test_get_systems(self):\n pass",
"def test_get_system(self):\n pass",
"def test_get_software_bundles(self):\n pass",
"def get_software(software_name: str) -> str:\n fixed_name = \"-\".join(software_name.lower().split())\n output = _get_content(fixed_name, \"software\")\n\n return output",
"def check_software():\n query = {\n \"type\": \"op\",\n \"cmd\": (\n \"<request><system><software><check></check></software></system></request>\"\n ),\n }\n\n return __proxy__[\"panos.call\"](query)",
"def test_update_software_component_for_system_module(self):\n pass",
"def test_update_software_components_for_system_module(self):\n pass",
"def test_installments_get(self):\n pass",
"def get_software_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><software><info></info></software></system></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def query_supported_software(self):\n api_uri = self._uri_dict.get('querySupportedSoftware')\n data = {}\n r_data = self._post(api_uri, data)\n return r_data",
"def test_get_software_set_expanded(self):\n pass",
"def get_software_by_id(self, id_code):\r\n malware_return = self.fs.query([\r\n Filter('type', '=', 'malware'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n tool_return = self.fs.query([\r\n Filter('type', '=', 'tool'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n if malware_return:\r\n return malware_return\r\n elif tool_return:\r\n return tool_return",
"def get_software(self):\n\n logging.info('downloading OCP 4.3 software bits into {}'.format(self.software_dir))\n for url_key in self.ocp_urls.keys():\n url = self.ocp_urls[url_key]\n dest_name = url.split('/')[-1]\n dest_path = self.software_dir + '/' + dest_name\n dest_path_exist = check_path(dest_path, isfile=True)\n url_check = ''\n if dest_path_exist:\n logging.info('file {} already exists in {}'.format(dest_name, self.software_dir))\n self.inventory_dict['csah']['vars'][url_key] = dest_name\n else:\n url_check = validate_url(url)\n if url_check == '':\n logging.error('file {} in {} is not available'.format(dest_name, url_key))\n self.inventory_dict['csah']['vars'][url_key] = ''\n\n if url_check != '' and url_check.code == 200:\n logging.info('downloading {}'.format(dest_name))\n urlretrieve('{}'.format(url),'{}/{}'.format(self.software_dir, dest_name))\n self.inventory_dict['csah']['vars'][url_key] = dest_name",
"def deduce_software(self,\n job_type: Optional[str] = None):\n\n # OneDMin\n if job_type == 'onedmin':\n if 'onedmin' not in supported_ess:\n raise ValueError(f'Could not find the OneDMin software to compute Lennard-Jones parameters.\\n'\n f'levels_ess is:\\n{levels_ess}')\n self.software = 'onedmin'\n\n # QChem\n if job_type == 'orbitals':\n # currently we only have a script to print orbitals on QChem,\n # could/should be elaborated to additional ESS\n if 'qchem' not in supported_ess:\n raise ValueError(f'Could not find the QChem software to compute molecular orbitals.\\n'\n f'levels_ess is:\\n{levels_ess}')\n self.software = 'qchem'\n\n # Orca\n if 'dlpno' in self.method:\n if 'orca' not in supported_ess:\n raise ValueError(f'Could not find Orca to run a DLPNO job.\\nlevels_ess is:\\n{levels_ess}')\n self.software = 'orca'\n\n # Gaussian\n if self.method_type == 'composite' or job_type == 'composite' or job_type == 'irc' \\\n or any([sum(['iop' in value.lower() for value in subdict.values()]) for subdict in self.args.values()]):\n if 'gaussian' not in supported_ess:\n raise ValueError(f'Could not find Gaussian to run the {self.method}.\\n'\n f'levels_ess is:\\n{levels_ess}')\n self.software = 'gaussian'\n\n # TorchANI\n if 'torchani' in self.method:\n self.software = 'torchani'\n\n # xTB\n if 'xtb' in self.method or 'gfn' in self.method:\n self.software = 'xtb'\n\n # User phrases from settings (levels_ess)\n if self.software is None:\n for ess, phrase_list in levels_ess.items():\n for phrase in phrase_list:\n if self.software is None and \\\n (phrase in self.method or self.basis is not None and phrase in self.basis):\n self.software = ess.lower()\n\n if self.software is None:\n preferred_ess_order = ['gaussian', 'qchem', 'orca', 'molpro', 'terachem', 'cfour']\n\n if self.method_type in ['force_field', 'semiempirical']:\n preferred_ess_order = ['gaussian', 'qchem', 'orca', 'molpro', 'terachem']\n elif self.method_type in ['wavefunction']:\n preferred_ess_order = ['molpro', 'gaussian', 'orca', 'cfour', 'qchem']\n elif self.method_type in ['composite']:\n preferred_ess_order = ['gaussian']\n elif self.method_type in ['dft']:\n preferred_ess_order = ['gaussian', 'qchem', 'terachem', 'orca']\n\n self.determine_compatible_ess()\n relevant_software = get_ordered_intersection_of_two_lists(self.compatible_ess, supported_ess)\n self.software = get_ordered_intersection_of_two_lists(preferred_ess_order, relevant_software)[0] \\\n if relevant_software else None",
"def test_update_software_configuration_for_system_module(self):\n pass",
"def test_create_software_bundle_from_system_module(self):\n pass",
"def test_get_hyperflex_software_version_policy_list(self):\n pass",
"def software_api(self, install_params):\n try:\n self.sw = jnpr.junos.utils.sw.SW(self.dev)\n ok, msg_ret = self.sw.install(**install_params)\n if ok is not True:\n raise AnsibleError('Unable to install the software %s' % msg_ret)\n msg = 'Package %s successfully installed. Response from device is: %s' % (\n install_params.get('package') or\n install_params.get('pkg_set'),\n msg_ret)\n self.queue_message(\"log\", \"%s\" % msg)\n return msg\n except (self.pyez_exception.ConnectError,\n self.pyez_exception.RpcError) as ex:\n raise AnsibleError('Installation failed. Error: %s' % str(ex))",
"def is_software(self):\n return self._is_name_type(self.SOFTWARE)",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def add_software(\n client: Act, matrice: AttckMatrice, output_format: Text = \"json\"\n) -> List:\n\n notify: List = []\n\n # Enterprise matrice has malwares and tools, but preattack has none of them\n for software in getattr(matrice, \"malwares\", []) + getattr(matrice, \"tools\", []):\n if deprecated_or_revoked(software):\n # Object is revoked/deprecated, add to notification list but do not add to facts that should be added to the platform\n notify.append(software)\n continue\n\n tool_name = software.name\n\n # Tool category\n handle_fact(\n client.fact(\"category\", software.type).source(\"tool\", tool_name),\n output_format=output_format,\n )\n\n for alias in software.alias:\n alias_name = alias\n\n if tool_name != alias_name:\n # Tool category (alias)\n handle_fact(\n client.fact(\"category\", software.type).source(\"tool\", alias_name),\n output_format=output_format,\n )\n handle_fact(\n client.fact(\"alias\").bidirectional(\n \"tool\", tool_name, \"tool\", alias_name\n ),\n output_format=output_format,\n )\n\n for technique in software.techniques:\n handle_fact(\n client.fact(\"implements\")\n .source(\"tool\", software.name)\n .destination(\"technique\", technique.id),\n output_format=output_format,\n )\n\n return notify",
"def test_ucs_get_sys(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n self.assertIn(\"Fabric Interconnects\", api_data[\"json\"],\n \"Results did not contain 'Fabric Interconnects'\")\n self.assertIn(\"Servers\", api_data[\"json\"], \"Results did not contain 'Servers\")\n self.assertIn(\"FEX\", api_data[\"json\"], \"Results did not contain 'FEX\")\n self.assertIn(\"Chassis\", api_data[\"json\"], \"Results did not contain 'Chassis\")",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_get_node_hardware(self):\n pass",
"def test_get_hyperflex_software_version_policy_by_moid(self):\n pass",
"def test_create_hyperflex_software_version_policy(self):\n pass",
"def find_software(software='samtools'):\n out = subprocess.run(['which {0}'.format(software)]\n , shell=True)\n if out.returncode != 0:\n return False\n\n return True",
"def _system_requirement_tools(self, app: AppConfig):\n if app.target_vendor_base == DEBIAN:\n base_system_packages = [\"python3-dev\", \"build-essential\"]\n system_verify = [\"dpkg\", \"-s\"]\n system_installer = \"apt\"\n elif app.target_vendor_base == RHEL:\n base_system_packages = [\n \"python3-devel\",\n \"gcc\",\n \"make\",\n \"pkgconf-pkg-config\",\n ]\n system_verify = [\"rpm\", \"-q\"]\n system_installer = \"dnf\"\n else:\n base_system_packages = None\n system_verify = None\n system_installer = None\n\n return base_system_packages, system_verify, system_installer"
] | [
"0.85533744",
"0.77285266",
"0.718239",
"0.71624064",
"0.7089769",
"0.69518405",
"0.68123823",
"0.6744312",
"0.67077756",
"0.66728413",
"0.6634467",
"0.6565418",
"0.6533396",
"0.63395005",
"0.6293563",
"0.62728167",
"0.62635535",
"0.6215833",
"0.61770314",
"0.6089715",
"0.60354435",
"0.59695244",
"0.595804",
"0.59177905",
"0.5909814",
"0.5900911",
"0.59001577",
"0.5888575",
"0.5841388",
"0.58393586"
] | 0.9380521 | 0 |
Test case for get_software_asset_bundle_expanded | def test_get_software_asset_bundle_expanded(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_software_bundles(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_get_software_bundle(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_get_container_assets_expanded(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_create_software_bundle_from_system_module(self):\n pass",
"def test_delete_software_asset_bundle(self):\n pass",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_get_software_set_expanded(self):\n pass",
"def test_get_deployments_expanded(self):\n pass",
"def test_get_bundle(self):\n res = self.app.get('/bundle/DEFAULT/main')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats1['chunks']['main'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_update_software_asset_content(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_get_bundle(self):\n bundle = self.webpack.get_bundle('main')\n self.assertEqual(bundle, self.stats['chunks']['main'])",
"def test_update_software_asset(self):\n pass",
"def test_get_second_bundle(self):\n res = self.app.get('/bundle/other/libs')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats2['chunks']['libs'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_get_test_asset(self):\n pass",
"def test_get_systems_expanded(self):\n pass",
"def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"",
"def test_resource_collection_get_bundles(self):\n bundle = {\n 'resourceType': 'Bundle',\n 'entry': [\n {\n 'resource': {\n 'resourceType': 'ValueSet',\n 'id': 'example-extensional',\n 'url': 'http://value-in-a-bundle',\n 'status': 'draft',\n }\n }\n ],\n }\n\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n collection.put(bundle['entry'][0]['resource'], bundle)\n resource = collection.get('http://value-in-a-bundle')\n\n self.assertIsNotNone(resource)\n self.assertTrue(proto_utils.is_message_type(resource, self._valueset_cls))\n self.assertEqual(resource.id.value, 'example-extensional')\n self.assertEqual(resource.url.value, 'http://value-in-a-bundle')",
"def get_bundle(conf, asset_type, bundle_name):\n \n content_type = 'application/javascript'\n content = []\n \n if asset_type == 'css':\n content_type = 'text/css'\n \n for asset in conf[asset_type][bundle_name]:\n content.append(open(os.path.join(conf['srcDir'], asset_type, asset)).read())\n \n content = ''.join(content)\n \n return '200 OK', content_type, content",
"def get_product_bundles():\n downloaded_bundles = []\n\n for url in get_product_bundle_urls():\n if url['downloaded']:\n # The product is separated by a #\n product = url['url'].split('#')\n downloaded_bundles.append(product[1])\n\n repos = get_repositories()\n\n # Some repo names do not match product-bundle names due to underscores.\n # Normalize them both.\n repo_names = set([repo['name'].replace('-', '_') for repo in repos])\n\n def bundle_is_active(name):\n # Returns True if the product-bundle named `name` is present in a package\n # repository (assuming it is downloaded already); otherwise, removes the\n # product-bundle and returns False.\n if name.replace('-', '_') in repo_names:\n return True\n\n remove_product_bundle(name)\n return False\n\n return list(filter(bundle_is_active, downloaded_bundles))",
"def test_list_dependent_assets2(self):\n pass",
"def test_bundle_is_product_pack(self):\n template = self.product_apple_bundle\n product_pack_ids = template.product_pack_ids\n self.assertTrue(template.is_pack, 'Product template is a bundle pack')\n self.assertTrue(len(product_pack_ids) != 0, 'Product: a product bundle should have product pack')\n self.assertEqual(len(product_pack_ids), 3, 'Product: a product bundle should have product pack')",
"def test_flatten_inventory(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def verifyCustomBundlePath(self, catalogName, bundleName):\n try:\n bundlePath = globalVars.switchBundleRepository[bundleName]\n utility.execLog(\"Click edit on Bundle\")\n status, result = self.clickEditCustomBundle(catalogName, bundleName)\n if not status:\n utility.execLog(result)\n return self.browserObject, False, result\n filePath = bundlePath.split(\"\\\\\")\n filename = filePath[-1]\n xpath = self.RepositoriesObjects('current_file_name')\n time.sleep(1)\n existingFileName = self.handleEvent(EC.presence_of_element_located((By.XPATH, xpath)), action=\"GET_TEXT\")\n\n if filename in existingFileName:\n utility.execLog(\"Closing Bundle Form\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.RepositoriesObjects('btn_close_edit_bundle'))),\n action=\"CLICK\")\n time.sleep(1)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('btn_close_bundles'))),\n action=\"CLICK\")\n time.sleep(1)\n utility.execLog(\"Bundle Name %s displayed as existing file name\" % bundleName)\n return self.browserObject, True, \"Bundle Name %s displayed as existing file name\" % bundleName\n else:\n utility.execLog(\"Closing Bundle Form\")\n self.handleEvent(\n EC.element_to_be_clickable((By.XPATH, self.RepositoriesObjects('btn_close_edit_bundle'))),\n action=\"CLICK\")\n time.sleep(1)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('btn_close_bundles'))),\n action=\"CLICK\")\n time.sleep(1)\n utility.execLog(\"Bundle Name %s not displayed as existing file name fileName : %s Existing FileName:%s\" % (\n bundleName, filename, existingFileName))\n return self.browserObject, False, \"Bundle Name %s not displayed as existing file name fileName : %s Existing FileName:%s\" % (\n bundleName, filename, existingFileName)\n except Exception as e:\n return self.browserObject, False, \"Error :: %s\" % e",
"def test_list_system_assets(self):\n pass"
] | [
"0.7447893",
"0.7385767",
"0.725668",
"0.7151176",
"0.69947815",
"0.69888884",
"0.64153165",
"0.63940763",
"0.6304117",
"0.62138736",
"0.61021364",
"0.6032962",
"0.6028531",
"0.60236627",
"0.5825482",
"0.57914394",
"0.57085747",
"0.56888574",
"0.5656609",
"0.5573399",
"0.55728513",
"0.5557735",
"0.54897577",
"0.5465502",
"0.54587865",
"0.5430313",
"0.5394221",
"0.5385501",
"0.5379171",
"0.53649724"
] | 0.95281523 | 0 |
Test case for get_software_bundle | def test_get_software_bundle(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_software_bundles(self):\n pass",
"def test_create_software_bundle_from_system_module(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_get_software(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_get_bundle(self):\n res = self.app.get('/bundle/DEFAULT/main')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats1['chunks']['main'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_get_software_set(self):\n pass",
"def test_get_bundle(self):\n bundle = self.webpack.get_bundle('main')\n self.assertEqual(bundle, self.stats['chunks']['main'])",
"def test_get_second_bundle(self):\n res = self.app.get('/bundle/other/libs')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats2['chunks']['libs'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_get_with_bundle(self):\n user = self.make_user()\n school_year = SchoolYearFactory(school__admin=user)\n bundle = BundleFactory(school_year=school_year)\n\n with self.login(user):\n self.get_check_200(\"reports:bundle\", school_year.pk)\n\n assert self.get_context(\"bundle\") == bundle",
"def test_delete_software_asset_bundle(self):\n pass",
"def getbundlespec(ui, fh):\n\n def speccompression(alg):\n try:\n return util.compengines.forbundletype(alg).bundletype()[0]\n except KeyError:\n return None\n\n b = readbundle(ui, fh, None)\n if isinstance(b, changegroup.cg1unpacker):\n alg = b._type\n if alg == b'_truncatedBZ':\n alg = b'BZ'\n comp = speccompression(alg)\n if not comp:\n raise error.Abort(_(b'unknown compression algorithm: %s') % alg)\n return b'%s-v1' % comp\n elif isinstance(b, bundle2.unbundle20):\n if b'Compression' in b.params:\n comp = speccompression(b.params[b'Compression'])\n if not comp:\n raise error.Abort(\n _(b'unknown compression algorithm: %s') % comp\n )\n else:\n comp = b'none'\n\n version = None\n for part in b.iterparts():\n if part.type == b'changegroup':\n version = part.params[b'version']\n if version in (b'01', b'02'):\n version = b'v2'\n else:\n raise error.Abort(\n _(\n b'changegroup version %s does not have '\n b'a known bundlespec'\n )\n % version,\n hint=_(b'try upgrading your Mercurial client'),\n )\n elif part.type == b'stream2' and version is None:\n # A stream2 part requires to be part of a v2 bundle\n requirements = urlreq.unquote(part.params[b'requirements'])\n splitted = requirements.split()\n params = bundle2._formatrequirementsparams(splitted)\n return b'none-v2;stream=v2;%s' % params\n\n if not version:\n raise error.Abort(\n _(b'could not identify changegroup version in bundle')\n )\n\n return b'%s-%s' % (comp, version)\n elif isinstance(b, streamclone.streamcloneapplier):\n requirements = streamclone.readbundle1header(fh)[2]\n formatted = bundle2._formatrequirementsparams(requirements)\n return b'none-packed1;%s' % formatted\n else:\n raise error.Abort(_(b'unknown bundle type: %s') % b)",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_check_bundle_2(self):\n self.bndl.ticket = None\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 1)\n with self.subTest():\n self.assertEqual(count, 1)",
"def test_update_software_components_for_system_module(self):\n pass",
"def test_bundle_is_product_pack(self):\n template = self.product_apple_bundle\n product_pack_ids = template.product_pack_ids\n self.assertTrue(template.is_pack, 'Product template is a bundle pack')\n self.assertTrue(len(product_pack_ids) != 0, 'Product: a product bundle should have product pack')\n self.assertEqual(len(product_pack_ids), 3, 'Product: a product bundle should have product pack')",
"def test_update_software_component_for_system_module(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_check_bundle_1(self):\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 6)\n with self.subTest():\n self.assertEqual(count, 0)",
"def test_installments_get(self):\n pass",
"def test_get_system(self):\n pass",
"def test_check_bundle_4(self):\n self.tkt.data_add = set()\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 5)\n with self.subTest():\n self.assertEqual(count, 0)",
"def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)",
"def readBundleInformation(self, repositoryName):\n\n def get_bundle_data(buns):\n \"\"\"\n Finds 'Name', 'Version', 'Date and Time' data for all bundles\n :param buns: list of bundle elements\n :return: list of data from tables\n \"\"\"\n b_data = []\n for bundle in buns:\n tds = bundle.find_elements_by_xpath(\"./td\")\n td_text = []\n for index, td in enumerate(tds):\n if index == 3:\n break\n td_text.append(td.text)\n b_data.append(td_text)\n return b_data\n\n def zipped_data(b_data):\n \"\"\"\n Matches every data element with 'Name', 'Version', 'Date and Time' as key, and value\n :param b_data: list of bundles data\n :return: list of dictionaries\n \"\"\"\n col_names = [\"Name\", \"Version\", \"Date and Time\"]\n bundle_data = []\n for bundle in b_data:\n zipped = zip(col_names, bundle)\n bundle_data.append(dict(zipped))\n return bundle_data\n\n def get_pages():\n \"\"\"\n Finds how many pages exists\n :return: None if nothing found or number of pages as integer\n \"\"\"\n try:\n self.handleEvent(EC.text_to_be_present_in_element((By.ID, self.RepositoriesObjects('pager')), \"Page\"))\n footer = self.handleEvent(EC.presence_of_element_located((By.ID, self.RepositoriesObjects('pager'))),\n action=\"GET_TEXT\")\n # filters text to get last number\n pages_num = int(re.search('\\d+$', footer).group())\n utility.execLog(\"Total pages is: {}\".format(pages_num))\n return pages_num\n except NoSuchElementException as e:\n pages_num = None\n utility.execLog(\"Pagination element not found, error message: {}\".format(e))\n return pages_num\n\n try:\n utility.execLog(\"Selecting Firmware Repository '%s'\" % repositoryName)\n self.browserObject, status, result = self.selectRepo(repositoryName)\n if status is False:\n return self.browserObject, False, result\n utility.execLog(\"Clicking on View Bundles for Repository '%s'\" % repositoryName)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('view_bundles'))),\n action=\"CLICK\")\n # wait for page to load\n bundle_page_text = self.handleEvent(\n EC.visibility_of_element_located((By.XPATH, self.RepositoriesObjects('fw_bundle_page_title'))),\n action=\"GET_TEXT\")\n if repositoryName != bundle_page_text:\n return self.browserObject, False, \"Failed to verify Repository Name '%s' in View Bundles Page\" % repositoryName\n utility.execLog(\"Reading Bundle Information\")\n # finds bundles on first page\n total_bundles = []\n bundles_on_a_page = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects(\"bundles\"))))\n total_bundles += bundles_on_a_page\n # finds data on first page\n bundles_data = []\n bundles_data += get_bundle_data(bundles_on_a_page)\n # checks if there is more than 1 page\n pages = get_pages()\n if pages is not None:\n page = 1\n while page < pages:\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.RepositoriesObjects('next_button'))),\n action=\"CLICK\")\n bundles = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects(\"bundles\"))))\n total_bundles += bundles\n bundles_data += get_bundle_data(bundles)\n page += 1\n utility.execLog(\"Total bundles number is: {}\".format(len(total_bundles)))\n # matches every data element with 'Name', 'Version', 'Date and Time' as key, and value\n data = zipped_data(bundles_data)\n # closes bundles page\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('btn_close_bundles'))),\n action=\"CLICK\")\n return self.browserObject, True, data\n except Exception as e:\n 
return self.browserObject, False, \"Unable to read Bundles :: Error -> %s\" % str(e)",
"def test_bad_bundle(self):\n with self.assertRaises(KeyError):\n self.webpack.get_bundle('nope')",
"def test_get_deployment(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_bundle_purchase_method(self):\n template = self.product_apple_bundle\n self.assertEqual(template.purchase_method, 'purchase', 'Product: the Control Policy is On ordered quantities')",
"def get_software(software_name: str) -> str:\n fixed_name = \"-\".join(software_name.lower().split())\n output = _get_content(fixed_name, \"software\")\n\n return output"
] | [
"0.881064",
"0.82600576",
"0.7647087",
"0.737937",
"0.7223288",
"0.7032658",
"0.6664362",
"0.6556166",
"0.6320617",
"0.6305571",
"0.625084",
"0.6236446",
"0.61766046",
"0.6166661",
"0.601414",
"0.58963954",
"0.5877787",
"0.58749986",
"0.58441705",
"0.5813502",
"0.5752785",
"0.57524675",
"0.57301396",
"0.57248247",
"0.57230616",
"0.57134855",
"0.5708515",
"0.5708515",
"0.56924653",
"0.5680499"
] | 0.9529322 | 0 |
Test case for get_software_bundles | def test_get_software_bundles(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_software_bundle(self):\n pass",
"def test_create_software_bundle_from_system_module(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_get_software(self):\n pass",
"def test_get_bundle(self):\n res = self.app.get('/bundle/DEFAULT/main')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats1['chunks']['main'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_get_software_set(self):\n pass",
"def test_get_bundle(self):\n bundle = self.webpack.get_bundle('main')\n self.assertEqual(bundle, self.stats['chunks']['main'])",
"def list_bundles():\n response = houston.get(\"/zipline/bundles\")\n\n houston.raise_for_status_with_json(response)\n return response.json()",
"def get_product_bundles():\n downloaded_bundles = []\n\n for url in get_product_bundle_urls():\n if url['downloaded']:\n # The product is separated by a #\n product = url['url'].split('#')\n downloaded_bundles.append(product[1])\n\n repos = get_repositories()\n\n # Some repo names do not match product-bundle names due to underscores.\n # Normalize them both.\n repo_names = set([repo['name'].replace('-', '_') for repo in repos])\n\n def bundle_is_active(name):\n # Returns True if the product-bundle named `name` is present in a package\n # repository (assuming it is downloaded already); otherwise, removes the\n # product-bundle and returns False.\n if name.replace('-', '_') in repo_names:\n return True\n\n remove_product_bundle(name)\n return False\n\n return list(filter(bundle_is_active, downloaded_bundles))",
"def test_get_second_bundle(self):\n res = self.app.get('/bundle/other/libs')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats2['chunks']['libs'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_delete_software_asset_bundle(self):\n pass",
"def get_bundles(uuids=None, text_search=None):\n query_params = {}\n if uuids:\n query_params['uuid'] = ','.join(map(str, uuids))\n if text_search:\n query_params['text_search'] = text_search\n version_url = api_url('bundles') + '?' + urlencode(query_params)\n response = api_request('get', version_url)\n # build bundle from response, convert map object to list and return\n return [_bundle_from_response(item) for item in response]",
"def test_resource_collection_get_bundles(self):\n bundle = {\n 'resourceType': 'Bundle',\n 'entry': [\n {\n 'resource': {\n 'resourceType': 'ValueSet',\n 'id': 'example-extensional',\n 'url': 'http://value-in-a-bundle',\n 'status': 'draft',\n }\n }\n ],\n }\n\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n collection.put(bundle['entry'][0]['resource'], bundle)\n resource = collection.get('http://value-in-a-bundle')\n\n self.assertIsNotNone(resource)\n self.assertTrue(proto_utils.is_message_type(resource, self._valueset_cls))\n self.assertEqual(resource.id.value, 'example-extensional')\n self.assertEqual(resource.url.value, 'http://value-in-a-bundle')",
"def readBundleInformation(self, repositoryName):\n\n def get_bundle_data(buns):\n \"\"\"\n Finds 'Name', 'Version', 'Date and Time' data for all bundles\n :param buns: list of bundle elements\n :return: list of data from tables\n \"\"\"\n b_data = []\n for bundle in buns:\n tds = bundle.find_elements_by_xpath(\"./td\")\n td_text = []\n for index, td in enumerate(tds):\n if index == 3:\n break\n td_text.append(td.text)\n b_data.append(td_text)\n return b_data\n\n def zipped_data(b_data):\n \"\"\"\n Matches every data element with 'Name', 'Version', 'Date and Time' as key, and value\n :param b_data: list of bundles data\n :return: list of dictionaries\n \"\"\"\n col_names = [\"Name\", \"Version\", \"Date and Time\"]\n bundle_data = []\n for bundle in b_data:\n zipped = zip(col_names, bundle)\n bundle_data.append(dict(zipped))\n return bundle_data\n\n def get_pages():\n \"\"\"\n Finds how many pages exists\n :return: None if nothing found or number of pages as integer\n \"\"\"\n try:\n self.handleEvent(EC.text_to_be_present_in_element((By.ID, self.RepositoriesObjects('pager')), \"Page\"))\n footer = self.handleEvent(EC.presence_of_element_located((By.ID, self.RepositoriesObjects('pager'))),\n action=\"GET_TEXT\")\n # filters text to get last number\n pages_num = int(re.search('\\d+$', footer).group())\n utility.execLog(\"Total pages is: {}\".format(pages_num))\n return pages_num\n except NoSuchElementException as e:\n pages_num = None\n utility.execLog(\"Pagination element not found, error message: {}\".format(e))\n return pages_num\n\n try:\n utility.execLog(\"Selecting Firmware Repository '%s'\" % repositoryName)\n self.browserObject, status, result = self.selectRepo(repositoryName)\n if status is False:\n return self.browserObject, False, result\n utility.execLog(\"Clicking on View Bundles for Repository '%s'\" % repositoryName)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('view_bundles'))),\n action=\"CLICK\")\n # wait for page to load\n bundle_page_text = self.handleEvent(\n EC.visibility_of_element_located((By.XPATH, self.RepositoriesObjects('fw_bundle_page_title'))),\n action=\"GET_TEXT\")\n if repositoryName != bundle_page_text:\n return self.browserObject, False, \"Failed to verify Repository Name '%s' in View Bundles Page\" % repositoryName\n utility.execLog(\"Reading Bundle Information\")\n # finds bundles on first page\n total_bundles = []\n bundles_on_a_page = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects(\"bundles\"))))\n total_bundles += bundles_on_a_page\n # finds data on first page\n bundles_data = []\n bundles_data += get_bundle_data(bundles_on_a_page)\n # checks if there is more than 1 page\n pages = get_pages()\n if pages is not None:\n page = 1\n while page < pages:\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.RepositoriesObjects('next_button'))),\n action=\"CLICK\")\n bundles = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects(\"bundles\"))))\n total_bundles += bundles\n bundles_data += get_bundle_data(bundles)\n page += 1\n utility.execLog(\"Total bundles number is: {}\".format(len(total_bundles)))\n # matches every data element with 'Name', 'Version', 'Date and Time' as key, and value\n data = zipped_data(bundles_data)\n # closes bundles page\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('btn_close_bundles'))),\n action=\"CLICK\")\n return self.browserObject, True, data\n except Exception as e:\n 
return self.browserObject, False, \"Unable to read Bundles :: Error -> %s\" % str(e)",
"def test_check_bundle_4(self):\n self.tkt.data_add = set()\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 5)\n with self.subTest():\n self.assertEqual(count, 0)",
"def test_list_system_assets(self):\n pass",
"def test_check_bundle_6(self):\n self.tkt.data_retrieve = set()\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 5)\n with self.subTest():\n self.assertEqual(count, 0)",
"def test_check_bundle_2(self):\n self.bndl.ticket = None\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 1)\n with self.subTest():\n self.assertEqual(count, 1)",
"def test_check_bundle_1(self):\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 6)\n with self.subTest():\n self.assertEqual(count, 0)",
"def bundle_cmd(context, bundle_name, bundle_id, json, verbose, compact):\n store: Store = context.obj[\"store\"]\n bundles = store.bundles()\n\n if bundle_name:\n bundle = store.get_bundle_by_name(bundle_name=bundle_name)\n bundles = [bundle] if bundle else []\n\n if bundle_id:\n bundle = store.get_bundle_by_id(bundle_id=bundle_id)\n bundles = [bundle] if bundle else []\n\n if not bundles:\n LOG.info(\"Could not find any bundles\")\n return\n template = schema.BundleSchema()\n result = []\n for bundle in bundles:\n result.append(template.dump(bundle))\n\n if json:\n click.echo(jsonlib.dumps(result, indent=4, sort_keys=True))\n return\n console = Console()\n console.print(get_bundles_table(result))\n if verbose:\n for bundle in bundles:\n if len(bundle.versions) == 0:\n LOG.info(\"No versions found for bundle %s\", bundle.name)\n return\n version_obj = bundle.versions[0]\n context.invoke(\n version_cmd, version_id=version_obj.id, verbose=True, compact=compact\n )",
"def test_get_deployment_resources(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_installments_get(self):\n pass",
"def test_bad_bundle(self):\n with self.assertRaises(KeyError):\n self.webpack.get_bundle('nope')",
"def test_get_software_set_expanded(self):\n pass"
] | [
"0.89749163",
"0.77525824",
"0.75565785",
"0.7454889",
"0.697139",
"0.6818501",
"0.6655084",
"0.6422234",
"0.64158905",
"0.64141136",
"0.6386338",
"0.62803507",
"0.6258426",
"0.61854166",
"0.60389286",
"0.59048456",
"0.59008664",
"0.5899845",
"0.5880346",
"0.5874225",
"0.5829507",
"0.57873666",
"0.57659143",
"0.57470584",
"0.5743793",
"0.5743793",
"0.5730083",
"0.5729131",
"0.5704048",
"0.57001764"
] | 0.9516825 | 0 |
Test case for get_software_set | def test_get_software_set(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_software(self):\n pass",
"def test_get_software_set_expanded(self):\n pass",
"def test_get_software_bundle(self):\n pass",
"def test_get_software_bundles(self):\n pass",
"def test_installments_get(self):\n pass",
"def test_get_systems(self):\n pass",
"def test_get_system(self):\n pass",
"def test_update_software_components_for_system_module(self):\n pass",
"def test_update_software_component_for_system_module(self):\n pass",
"def query_supported_software(self):\n api_uri = self._uri_dict.get('querySupportedSoftware')\n data = {}\n r_data = self._post(api_uri, data)\n return r_data",
"def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])",
"def test_update_software_configuration_for_system_module(self):\n pass",
"def test_austriansettlements_get(self):\n pass",
"def get_software(software_name: str) -> str:\n fixed_name = \"-\".join(software_name.lower().split())\n output = _get_content(fixed_name, \"software\")\n\n return output",
"def test_get_hyperflex_software_version_policy_list(self):\n pass",
"def test_value_set_with_added_packages_retrieves_resource(self):\n r1 = self._valueset_cls()\n r1.url.value = 'r1'\n\n r2 = self._valueset_cls()\n r2.url.value = 'r2'\n\n package = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r1, r2]),\n )\n self.assertEqual(package.get_value_set('r1'), r1)\n self.assertEqual(package.get_value_set('r2'), r2)\n self.assertIsNone(package.get_value_set('mystery-url'))",
"def test_create_software_bundle_from_system_module(self):\n pass",
"def test_installments_id_get(self):\n pass",
"def check_software():\n query = {\n \"type\": \"op\",\n \"cmd\": (\n \"<request><system><software><check></check></software></system></request>\"\n ),\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get_software_by_id(self, id_code):\r\n malware_return = self.fs.query([\r\n Filter('type', '=', 'malware'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n tool_return = self.fs.query([\r\n Filter('type', '=', 'tool'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n if malware_return:\r\n return malware_return\r\n elif tool_return:\r\n return tool_return",
"def test_value_set_with_added_packages_retrieves_resource(self):\n r1 = self._valueset_cls()\n r1.url.value = 'r1'\n\n r2 = self._valueset_cls()\n r2.url.value = 'r2'\n\n package_1 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r1]),\n )\n package_2 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r2]),\n )\n\n manager = fhir_package.FhirPackageManager()\n manager.add_package(package_1)\n manager.add_package(package_2)\n\n self.assertEqual(manager.get_value_set('r1'), r1)\n self.assertEqual(manager.get_value_set('r2'), r2)\n self.assertIsNone(manager.get_value_set('mystery-url'))",
"def test_get_resource_with_added_packages_retrieves_resource(self):\n vs_1 = self._valueset_cls()\n vs_1.url.value = 'vs1'\n\n vs_2 = self._valueset_cls()\n vs_2.url.value = 'vs2'\n\n package_1 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([vs_1]),\n )\n package_2 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([vs_2]),\n )\n\n manager = fhir_package.FhirPackageManager()\n manager.add_package(package_1)\n manager.add_package(package_2)\n\n self.assertEqual(manager.get_resource('vs1'), vs_1)\n self.assertEqual(manager.get_resource('vs2'), vs_2)\n self.assertIsNone(manager.get_resource('mystery-url'))",
"def test_get_component_defaultpackage_UNKNOWN(self):\n self.assertEqual(set(), self.u.get_component_defaultpackage('a'))",
"def test_get_code_system_with_added_packages_retrieves_resource(self):\n r1 = self._code_system_cls()\n r1.url.value = 'r1'\n\n r2 = self._code_system_cls()\n r2.url.value = 'r2'\n\n package = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([r1, r2]),\n value_sets=mock_resource_collection_containing([]),\n )\n\n self.assertEqual(package.get_code_system('r1'), r1)\n self.assertEqual(package.get_code_system('r2'), r2)\n self.assertIsNone(package.get_code_system('mystery-url'))",
"def is_software(self):\n return self._is_name_type(self.SOFTWARE)",
"def test_create_hyperflex_software_version_policy(self):\n pass",
"def get_used_release_specs(package, installed_version=None):",
"def test_get_systems_expanded(self):\n pass",
"def get_supported_sets(self):\n return _SUPPORTED_SETS",
"def test_get_code_system_with_added_packages_retrieves_resource(self):\n r1 = self._code_system_cls()\n r1.url.value = 'r1'\n\n r2 = self._code_system_cls()\n r2.url.value = 'r2'\n\n package_1 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([r1]),\n value_sets=mock_resource_collection_containing([]),\n )\n package_2 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([r2]),\n value_sets=mock_resource_collection_containing([]),\n )\n\n manager = fhir_package.FhirPackageManager()\n manager.add_package(package_1)\n manager.add_package(package_2)\n\n self.assertEqual(manager.get_code_system('r1'), r1)\n self.assertEqual(manager.get_code_system('r2'), r2)\n self.assertIsNone(manager.get_code_system('mystery-url'))"
] | [
"0.8271188",
"0.8154749",
"0.7063571",
"0.67102444",
"0.651499",
"0.6467598",
"0.6143794",
"0.6120906",
"0.6106835",
"0.59304625",
"0.5912424",
"0.5906342",
"0.58011746",
"0.5767331",
"0.57666785",
"0.5702493",
"0.56737804",
"0.567184",
"0.56578803",
"0.56295466",
"0.558011",
"0.5567019",
"0.55371743",
"0.55165064",
"0.54679096",
"0.54656315",
"0.54416984",
"0.54172695",
"0.5416477",
"0.54111606"
] | 0.95569277 | 0 |
Test case for get_software_set_expanded | def test_get_software_set_expanded(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_software_set(self):\n pass",
"def test_get_systems_expanded(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_get_software(self):\n pass",
"def test_get_software_bundles(self):\n pass",
"def test_get_projects_expanded(self):\n pass",
"def test_get_software_bundle(self):\n pass",
"def test_get_deployments_expanded(self):\n pass",
"def test_austriansettlements_get(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_get_scenarios_expanded(self):\n pass",
"def test_installments_get(self):\n pass",
"def test_general_subset_dset():\n pass",
"def test_value_set_with_added_packages_retrieves_resource(self):\n r1 = self._valueset_cls()\n r1.url.value = 'r1'\n\n r2 = self._valueset_cls()\n r2.url.value = 'r2'\n\n package = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r1, r2]),\n )\n self.assertEqual(package.get_value_set('r1'), r1)\n self.assertEqual(package.get_value_set('r2'), r2)\n self.assertIsNone(package.get_value_set('mystery-url'))",
"def _expand_synset(self, synset: str, cq: str) -> List[str]:\n expanded_variants = set()\n if re.search(synset, cq) is None:\n # given synset does not occur in a CQ\n return [cq] # nothing to expand\n else:\n for synonym in self.synonymes[synset]:\n expanded_variants.add(re.sub(re.escape(synset), synonym, cq))\n return expanded_variants",
"def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])",
"def test_value_set_with_added_packages_retrieves_resource(self):\n r1 = self._valueset_cls()\n r1.url.value = 'r1'\n\n r2 = self._valueset_cls()\n r2.url.value = 'r2'\n\n package_1 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r1]),\n )\n package_2 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r2]),\n )\n\n manager = fhir_package.FhirPackageManager()\n manager.add_package(package_1)\n manager.add_package(package_2)\n\n self.assertEqual(manager.get_value_set('r1'), r1)\n self.assertEqual(manager.get_value_set('r2'), r2)\n self.assertIsNone(manager.get_value_set('mystery-url'))",
"def getSets():",
"def test_install_set_existing(self):\n expected = copy.deepcopy(test_xdata)\n expected.find(\"Text\").text = \"Changed content\"\n self._install([lxml.etree.Element(\"Set\", path=\"Test/Text/#text\",\n value=\"Changed content\")],\n expected)",
"def filter_working_set_hard(working_set, requirements):\n\n retval = pkg_resources.WorkingSet([])\n\n for req in requirements:\n dists = working_set.require(req)\n for dist in dists: retval.add(dist)\n\n return retval",
"def _get_set(cost):\n if any(i in cost for i in [\"_cap\", \"depreciation_rate\", \"purchase\", \"area\"]):\n return \"loc_techs_investment_cost\"\n elif any(i in cost for i in [\"om_\", \"export\"]):\n return \"loc_techs_om_cost\"\n else:\n return \"loc_techs\"",
"def test_general_subset_level():\n pass",
"def test_get_resource_with_added_packages_retrieves_resource(self):\n vs_1 = self._valueset_cls()\n vs_1.url.value = 'vs1'\n\n vs_2 = self._valueset_cls()\n vs_2.url.value = 'vs2'\n\n package_1 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([vs_1]),\n )\n package_2 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([vs_2]),\n )\n\n manager = fhir_package.FhirPackageManager()\n manager.add_package(package_1)\n manager.add_package(package_2)\n\n self.assertEqual(manager.get_resource('vs1'), vs_1)\n self.assertEqual(manager.get_resource('vs2'), vs_2)\n self.assertIsNone(manager.get_resource('mystery-url'))",
"def get_supported_sets(self):\n return _SUPPORTED_SETS",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)"
] | [
"0.7933726",
"0.71207213",
"0.65598756",
"0.6293329",
"0.59855705",
"0.58867335",
"0.58770794",
"0.58581364",
"0.5587519",
"0.5542234",
"0.55089754",
"0.54562783",
"0.54389405",
"0.5347332",
"0.5294412",
"0.52456945",
"0.5223074",
"0.52030855",
"0.5175098",
"0.5163759",
"0.51613295",
"0.5138043",
"0.5136096",
"0.51171446",
"0.5068454",
"0.5068454",
"0.5068454",
"0.5068454",
"0.5068454",
"0.5068454"
] | 0.9583301 | 0 |
Test case for get_system | def test_get_system(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_systems(self):\n pass",
"def test_System_creation(self):\n s1 = System()\n self.assertEqual(s1.get_library_name(), \"default\")",
"def test_is_system(self):\n\n url = '/%s/job-type-names/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n\n url = '/%s/job-type-names/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)",
"def test_get_software(self):\n pass",
"def test_is_system(self):\n\n url = '/%s/job-types/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 5)\n\n url = '/%s/job-types/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)",
"def system(self):\n return self['system']",
"def test_create_system_entire(self):\n pass",
"def is_system(self) -> bool:",
"def test_os_system(self):\n self.assertEqual(self.settings.OS_SYSTEM, platform.system())",
"def test_ucs_get_sys(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n self.assertIn(\"Fabric Interconnects\", api_data[\"json\"],\n \"Results did not contain 'Fabric Interconnects'\")\n self.assertIn(\"Servers\", api_data[\"json\"], \"Results did not contain 'Servers\")\n self.assertIn(\"FEX\", api_data[\"json\"], \"Results did not contain 'FEX\")\n self.assertIn(\"Chassis\", api_data[\"json\"], \"Results did not contain 'Chassis\")",
"def get_system_info(self):\r\n method = self.public_endpoints['system_info']['method']\r\n url = self.base_url + self.public_endpoints['system_info']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def systemRead():\n return",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def test_get_systems_expanded(self):\n pass",
"async def get_system(self) -> dict[str, Any]:\n cmd = await self.send_command(\"SYSTEM\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n system = {}\n for (key, value) in map(lambda k: k.split(\"=\"), keywords):\n system[key.lower()] = value\n if match := re.match(r\"^MOD([0-9]{1,2})_TYPE\", key, re.IGNORECASE):\n name_key = f\"mod{match.groups()[0]}_name\"\n system[name_key] = ModType(int(value)).name\n\n return system",
"def get_unit_system(system: SystemLike) -> unyt.UnitSystem:\n if isinstance(system, unyt.UnitSystem):\n return system\n\n try:\n return unyt.unit_systems.unit_system_registry[str(system)]\n except KeyError as exc:\n raise UnitSystemNotFoundError(system) from exc",
"def getSystemByName(self,systemName):\n\n logger.debug(\"Call to getSystemByName - systemName: {}\".format(systemName))\n try:\n\n response = self.httpHandler.sendHttpRequest(\n CIC_SYSTEM_ENDPOINT+\"?\"+\n urllib.urlencode({ \"name\": systemName }))\n\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 404:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n flag = _checkSystemNotFound(body)\n if flag == True:\n raise KeyError(\n \"System with name '{}' was not found in TMS because it does not exist, {}\".format(systemName, body),\n \"CIC_SYSTEM_NOT_FOUND_ERR\")\n else:\n raise IOError(\n \"System with name '{}' was not found in TMS because of network/communication error, {}\".format(systemName, body),\n \"CIC_SYSTEM_COMMUNICATION_NETWORK_ERR\")\n\n elif e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to look up the specified system {} in {} {}\".format(self.cicUser,systemName, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n raise\n else:\n responseString = response.read()\n return json.loads(responseString)",
"def subcmd_getsystem_main(args, parameter_info):\n \n from get_system_inventory import get_system_inventory\n result = get_system_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])",
"def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)",
"def is_system(self):\n\t\treturn self.__is_system",
"def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)",
"def get_unit_system(key: str) -> UnitSystem:\n if key == _CONF_UNIT_SYSTEM_US_CUSTOMARY:\n return US_CUSTOMARY_SYSTEM\n if key == _CONF_UNIT_SYSTEM_METRIC:\n return METRIC_SYSTEM\n raise ValueError(f\"`{key}` is not a valid unit system key\")",
"def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values",
"def is_system(self) -> undefined.UndefinedOr[bool]:",
"async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")",
"def test_subsystems(self):\n pass",
"def _get_host_details(self):\n # Assuming only one system present as part of collection,\n # as we are dealing with iLO's here.\n status, headers, system = self._rest_get('/rest/v1/Systems/1')\n if status < 300:\n stype = self._get_type(system)\n if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:\n msg = \"%s is not a valid system type \" % stype\n raise exception.IloError(msg)\n else:\n msg = self._get_extended_error(system)\n raise exception.IloError(msg)\n\n return system",
"def get_system():\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n\n raise EnvironmentError('Unsupported environment')",
"def system():\n return uname().system",
"def system():\n return uname().system"
] | [
"0.84478086",
"0.7677089",
"0.7202515",
"0.7133954",
"0.7107307",
"0.7023276",
"0.6986549",
"0.69782543",
"0.6938579",
"0.6897294",
"0.68698364",
"0.68685937",
"0.67827594",
"0.6750312",
"0.67279375",
"0.67076576",
"0.6603654",
"0.6600742",
"0.6589855",
"0.65866274",
"0.6537427",
"0.65315926",
"0.65261304",
"0.6487939",
"0.6451735",
"0.6445022",
"0.6437837",
"0.64249325",
"0.63963944",
"0.63963944"
] | 0.9410532 | 0 |
Test case for get_systems | def test_get_systems(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_system(self):\n pass",
"def test_get_systems_expanded(self):\n pass",
"def test_ucs_get_sys(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n self.assertIn(\"Fabric Interconnects\", api_data[\"json\"],\n \"Results did not contain 'Fabric Interconnects'\")\n self.assertIn(\"Servers\", api_data[\"json\"], \"Results did not contain 'Servers\")\n self.assertIn(\"FEX\", api_data[\"json\"], \"Results did not contain 'FEX\")\n self.assertIn(\"Chassis\", api_data[\"json\"], \"Results did not contain 'Chassis\")",
"def test_subsystems(self):\n pass",
"def getSystemsList(self):\r\n\r\n self._logger.debug(\"in API getSystemsList()...\")\r\n\r\n # format url parameters\r\n params = {\r\n \"api_key\": _API_APP_KEY,\r\n \"authentication_token\": self._authToken,\r\n \"user_id\": self._userID\r\n } \r\n\r\n # call the systems API\r\n response = self._call_api(_API_SYSTEMS, params=params)\r\n \r\n # if data was returned, return the systems list\r\n if response is not None and response.status_code == 200:\r\n\r\n return response.json()\r\n\r\n # otherwise return error (False)\r\n else:\r\n return False",
"def test_get_software(self):\n pass",
"def test_is_system(self):\n\n url = '/%s/job-type-names/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n\n url = '/%s/job-type-names/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)",
"def test_is_system(self):\n\n url = '/%s/job-types/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 5)\n\n url = '/%s/job-types/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)",
"def list_systems():\n return sorted(systems.keys())",
"def test_get_catalogue(self):\n s1 = System()\n self.assertEqual(len(s1.get_catalogue()), 0)",
"def test_get_software_set(self):\n pass",
"def test_System_creation(self):\n s1 = System()\n self.assertEqual(s1.get_library_name(), \"default\")",
"def test_create_system_entire(self):\n pass",
"def getSystemByName(self,systemName):\n\n logger.debug(\"Call to getSystemByName - systemName: {}\".format(systemName))\n try:\n\n response = self.httpHandler.sendHttpRequest(\n CIC_SYSTEM_ENDPOINT+\"?\"+\n urllib.urlencode({ \"name\": systemName }))\n\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 404:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n flag = _checkSystemNotFound(body)\n if flag == True:\n raise KeyError(\n \"System with name '{}' was not found in TMS because it does not exist, {}\".format(systemName, body),\n \"CIC_SYSTEM_NOT_FOUND_ERR\")\n else:\n raise IOError(\n \"System with name '{}' was not found in TMS because of network/communication error, {}\".format(systemName, body),\n \"CIC_SYSTEM_COMMUNICATION_NETWORK_ERR\")\n\n elif e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to look up the specified system {} in {} {}\".format(self.cicUser,systemName, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n raise\n else:\n responseString = response.read()\n return json.loads(responseString)",
"def unit_system(self):\n val = self._stub.List(self._message).unit_system\n return map_unit_system[val]",
"def subcmd_getsystem_main(args, parameter_info):\n \n from get_system_inventory import get_system_inventory\n result = get_system_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])",
"def test_update_software_components_for_system_module(self):\n pass",
"async def get_system(self) -> dict[str, Any]:\n cmd = await self.send_command(\"SYSTEM\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n system = {}\n for (key, value) in map(lambda k: k.split(\"=\"), keywords):\n system[key.lower()] = value\n if match := re.match(r\"^MOD([0-9]{1,2})_TYPE\", key, re.IGNORECASE):\n name_key = f\"mod{match.groups()[0]}_name\"\n system[name_key] = ModType(int(value)).name\n\n return system",
"def describe_operating_systems():\n pass",
"def getSystemByUid(self,uid):\n\n logger.debug(\"Call to getSystemByUid - uid: {}\".format(uid))\n try:\n response = self.httpHandler.sendHttpRequest(CIC_SYSTEM_ENDPOINT+\"?uuid=\"+uid)\n\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 404:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise KeyError(\n \"System with uid {} not found in TMS, {}\".format(uid, body),\n \"CIC_SYSTEM_UUID_NOT_FOUND_ERR\")\n\n elif e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to look up 'systems' in {} {}\".format(self.cicUser, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n raise\n else:\n responseString = response.read()\n return json.loads(responseString)",
"def test_update_storage_systems_info_pass(self):\n self._set_args({\"password\": \"password\", \"subnet_mask\": \"192.168.1.0/24\",\n \"systems\": [{\"ssid\": \"1\", \"serial\": \"1\"}, {\"addresses\": [\"192.168.1.36\"]}, {\"serial\": \"2\"}, {\"serial\": \"5\"}]})\n systems = NetAppESeriesProxySystems()\n systems.systems = [\n {\"ssid\": \"1\", \"serial\": \"1\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.5\", \"192.168.1.6\"], \"embedded_available\": True, \"accept_certificate\": True,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}]\n\n with mock.patch(self.REQUEST_FUNC, return_value=(200, [{\"id\": \"1\", \"passwordStatus\": \"valid\", \"metaTags\": []},\n {\"id\": \"5\", \"passwordStatus\": \"valid\", \"metaTags\": []}])):\n systems.update_storage_systems_info()\n self.assertEquals(systems.systems_to_remove, [\"5\"])\n self.assertEquals(systems.systems_to_add, [\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None,\n \"stored_password_valid\": None, \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False,\n \"accept_certificate\": False, \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}])",
"def get_known_systems(cls):\n return cls.coord_systems.keys()",
"def get_system_info(self):\r\n method = self.public_endpoints['system_info']['method']\r\n url = self.base_url + self.public_endpoints['system_info']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def test_search_systems_post(self):\n pass",
"def test_update_software_component_for_system_module(self):\n pass",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def _get_host_details(self):\n # Assuming only one system present as part of collection,\n # as we are dealing with iLO's here.\n status, headers, system = self._rest_get('/rest/v1/Systems/1')\n if status < 300:\n stype = self._get_type(system)\n if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:\n msg = \"%s is not a valid system type \" % stype\n raise exception.IloError(msg)\n else:\n msg = self._get_extended_error(system)\n raise exception.IloError(msg)\n\n return system",
"def update_storage_systems_info(self):\n try:\n rc, existing_systems = self.request(\"storage-systems\")\n\n # Mark systems for adding or removing\n for system in self.systems:\n for existing_system in existing_systems:\n if system[\"ssid\"] == existing_system[\"id\"]:\n system[\"current_info\"] = existing_system\n\n if system[\"current_info\"][\"passwordStatus\"] in [\"unknown\", \"securityLockout\"]:\n system[\"failed\"] = True\n self.module.warn(\"Skipping storage system [%s] because of current password status [%s]\"\n % (system[\"ssid\"], system[\"current_info\"][\"passwordStatus\"]))\n if system[\"current_info\"][\"metaTags\"]:\n system[\"current_info\"][\"metaTags\"] = sorted(system[\"current_info\"][\"metaTags\"], key=lambda x: x[\"key\"])\n break\n else:\n self.systems_to_add.append(system)\n\n # Mark systems for removing\n for existing_system in existing_systems:\n for system in self.systems:\n if existing_system[\"id\"] == system[\"ssid\"]:\n\n # Leave existing but undiscovered storage systems alone and throw a warning.\n if existing_system[\"id\"] in self.undiscovered_systems:\n self.undiscovered_systems.remove(existing_system[\"id\"])\n self.module.warn(\"Expected storage system exists on the proxy but was failed to be discovered. Array [%s].\" % existing_system[\"id\"])\n break\n else:\n self.systems_to_remove.append(existing_system[\"id\"])\n except Exception as error:\n self.module.fail_json(msg=\"Failed to retrieve storage systems. Error [%s].\" % to_native(error))",
"def test_accounting_system_resource_methods(self, mock_url):\n account_id = 1234\n resource_id = 2345\n\n single_response = {\"system\": {}}\n\n with patch.object(AccountingResource, \"_request\", return_value=single_response) as mock_request:\n self.freshBooksClient.systems.get(account_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.list(account_id)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.create(account_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.update(account_id, resource_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.delete(account_id, resource_id)",
"def get_unit_system(system: SystemLike) -> unyt.UnitSystem:\n if isinstance(system, unyt.UnitSystem):\n return system\n\n try:\n return unyt.unit_systems.unit_system_registry[str(system)]\n except KeyError as exc:\n raise UnitSystemNotFoundError(system) from exc"
] | [
"0.82716125",
"0.7814119",
"0.71705204",
"0.68655264",
"0.6863939",
"0.68590707",
"0.681096",
"0.67226976",
"0.65528154",
"0.65283304",
"0.6452966",
"0.6447106",
"0.63753295",
"0.6318197",
"0.62144506",
"0.6202485",
"0.6193861",
"0.6157457",
"0.61413693",
"0.6111493",
"0.60688317",
"0.60670507",
"0.60661066",
"0.604703",
"0.6036052",
"0.6016375",
"0.5990745",
"0.59491247",
"0.59378844",
"0.59219867"
] | 0.9355174 | 0 |
Test case for get_systems_expanded | def test_get_systems_expanded(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_systems(self):\n pass",
"def test_system_expansion():\n # We need 2 combinatorial systems\n template_script = get_template_script()\n template_system = template_script['systems']['implicit-system']\n del template_system['leap']\n template_script['systems'] = {'system1': template_system.copy(),\n 'system2': template_system.copy()}\n template_script['systems']['system1']['receptor'] = utils.CombinatorialLeaf(['Abl', 'T4Lysozyme'])\n template_script['systems']['system2']['ligand'] = utils.CombinatorialLeaf(['p-xylene', 'toluene'])\n template_script['experiments']['system'] = utils.CombinatorialLeaf(['system1', 'system2'])\n\n # Expected expanded script\n expected_script = yank_load(\"\"\"\n systems:\n system1_Abl: {receptor: Abl, ligand: p-xylene, solvent: GBSA-OBC2}\n system1_T4Lysozyme: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}\n system2_pxylene: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}\n system2_toluene: {receptor: T4Lysozyme, ligand: toluene, solvent: GBSA-OBC2}\n experiments:\n system: !Combinatorial ['system1_Abl', 'system1_T4Lysozyme', 'system2_pxylene', 'system2_toluene']\n protocol: absolute-binding\n \"\"\")\n expanded_script = template_script.copy()\n expanded_script['systems'] = expected_script['systems']\n expanded_script['experiments'] = expected_script['experiments']\n\n assert ExperimentBuilder(template_script)._expand_systems(template_script) == expanded_script",
"def test_get_software_set_expanded(self):\n pass",
"def test_get_system(self):\n pass",
"def test_subsystems(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_create_system_entire(self):\n pass",
"def test_expand_experiments():\n template_script = get_template_script()\n experiment_systems = utils.CombinatorialLeaf(['explicit-system', 'implicit-system', 'hydration-system'])\n template_script['experiments']['system'] = experiment_systems\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=1, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 2\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=2, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 1",
"def test_update_software_components_for_system_module(self):\n pass",
"def test_get_projects_expanded(self):\n pass",
"def test_get_scenarios_expanded(self):\n pass",
"def test_flatten_inventory(self):\n pass",
"def test_update_software_component_for_system_module(self):\n pass",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)",
"def run_whatis(self, expanded, unexpanded) :\n\t\treturn self.run_man(expanded, unexpanded)",
"def describe_operating_systems():\n pass",
"def test_get_software_set(self):\n pass",
"def solve_subsystems(self, MAP):\n self.accumalated_cost =0 \n for j in random.sample(self.fleet.keys(), len(self.fleet.keys())): #car are selected randomly\n car = self.fleet[j]\n car.set_subsystem(MAP)\n subsystem_S = car.subsystem\n #print(\"TEST: {}\".format(subsystem_S))\n seq, cost = utils_solver.solveSystem(subsystem_S, SOLUCIONES, N_STATES, N_STATIONS, equivalent_systems =True, start_solutions = False )\n car.solution = seq\n car.solution_cost = cost\n self.accumalated_cost += cost",
"def test_create_software_bundle_from_system_module(self):\n pass",
"def test_get_code_system_with_added_packages_retrieves_resource(self):\n r1 = self._code_system_cls()\n r1.url.value = 'r1'\n\n r2 = self._code_system_cls()\n r2.url.value = 'r2'\n\n package = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([r1, r2]),\n value_sets=mock_resource_collection_containing([]),\n )\n\n self.assertEqual(package.get_code_system('r1'), r1)\n self.assertEqual(package.get_code_system('r2'), r2)\n self.assertIsNone(package.get_code_system('mystery-url'))",
"def unit_system(self):\n val = self._stub.List(self._message).unit_system\n return map_unit_system[val]",
"def system_fleet_dimensioning(self):",
"def test_ucs_get_sys(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n self.assertIn(\"Fabric Interconnects\", api_data[\"json\"],\n \"Results did not contain 'Fabric Interconnects'\")\n self.assertIn(\"Servers\", api_data[\"json\"], \"Results did not contain 'Servers\")\n self.assertIn(\"FEX\", api_data[\"json\"], \"Results did not contain 'FEX\")\n self.assertIn(\"Chassis\", api_data[\"json\"], \"Results did not contain 'Chassis\")"
] | [
"0.7286601",
"0.7166528",
"0.69890165",
"0.6326804",
"0.6062684",
"0.57679826",
"0.5644336",
"0.561285",
"0.5527389",
"0.55100286",
"0.54939175",
"0.54487073",
"0.5385733",
"0.53748953",
"0.53748953",
"0.53748953",
"0.53748953",
"0.53748953",
"0.53748953",
"0.53748953",
"0.53748953",
"0.53214025",
"0.5302607",
"0.526843",
"0.52605325",
"0.5259044",
"0.52580434",
"0.5255717",
"0.5235656",
"0.5219163"
] | 0.94834405 | 0 |
Test case for get_team_owned_clouds | def test_get_team_owned_clouds(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_clouds(self):\n pass",
"def test_get_cloud(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_retrieve_team(self):\n pass",
"def cloud_information(self):\n url = \"%s/state/teams/%s/cloud\" % (self.url, self.identifier, )\n return perform_request(url)",
"def test_get_cloud_resources(self):\n pass",
"def test_gridironfootballplayers_get(self):\n pass",
"def get_people(team):",
"def test_cyclingleagues_get(self):\n pass",
"def test_aws_service_api_flavors_get(self):\n pass",
"def test_get_teams(self):\n pass",
"def test_get_teams(self):\n pass",
"def test_teams_get_teams_v2(self):\n pass",
"def get_cloud_detail(sky):\n debug(\"Getting cloud details\")\n clouds = cloud_map(sky)\n debug(\"There are {} clouds listed in the Metar\".format(len(clouds)))\n thickest = thickest_clouds(clouds)\n debug(\"Found thickest clouds: thick: {} -- base {}\".format(thickest[0], thickest[1]))\n return {\n \"thickness\": thickest[0],\n \"base\": thickest[1]\n }",
"def get_teams():",
"def test_get_cat_owner(self):\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(\n url, token=self.get_token(self.user_owner_cat)\n )\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 2)",
"def test_update_cloud(self):\n pass",
"def test_teams_get_team_v1(self):\n pass",
"def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)",
"def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)",
"def test_basketballteams_get(self):\n pass",
"def test_aws_service_api_flavor_get(self):\n pass",
"def test_get_player_upcoming_chests(self):\n pass",
"def test_get_owner(self):\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(self.user_owner))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )",
"def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players",
"def test_teams_get_teams_v1(self):\n pass",
"def get_clouds():\n clouds = [ x.get('cloud') for x in Schedconfig.objects.values('cloud').distinct() ]\n locale.setlocale(locale.LC_ALL, '')\n clouds = sorted(clouds, key=locale.strxfrm)\n return clouds",
"def test_poets_get(self):\n pass",
"def test_teams_get_users_teams_v2(self):\n pass",
"def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)"
] | [
"0.77337533",
"0.6778483",
"0.63360584",
"0.5955542",
"0.59453464",
"0.5891061",
"0.57760024",
"0.56439966",
"0.55424154",
"0.5531821",
"0.55001336",
"0.55001336",
"0.5468871",
"0.54570776",
"0.54522747",
"0.54391277",
"0.5392013",
"0.5381919",
"0.5376097",
"0.5376097",
"0.5344188",
"0.5301563",
"0.5293573",
"0.52612275",
"0.5224584",
"0.5191078",
"0.51616627",
"0.51586586",
"0.5152396",
"0.5126288"
] | 0.9452256 | 0 |
Test case for get_team_owned_or_managed_virtualization_realms | def test_get_team_owned_or_managed_virtualization_realms(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_virtualization_realms(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_get_project_virt_realms(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_get_team_owned_clouds(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_teams_get_teams_v2(self):\n pass",
"def test_retrieve_team(self):\n pass",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_teams_get_users_teams_v2(self):\n pass",
"def test_teams_get_team_v1(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_get_teams(self):\n pass",
"def test_get_teams(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_teams_get_teams_v1(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_get_virtual_accounts(self):\n pass",
"def test_assign_managing_team(self):\n pass",
"def test_teams_get_users_teams_v1(self):\n pass",
"def get_teams():",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_returns_projects_managed_by_me(self):\n # Arrange\n # test_user is the member of team that has PM role in test_project_1 so it must be returned\n test_organisation_1 = return_canned_organisation(\n 111, \"test_organisation_2\", \"T2\"\n )\n test_organisation_1.create()\n test_team = return_canned_team(\"test_team\", test_organisation_1.name)\n add_user_to_team(\n test_team, self.test_user, TeamMemberFunctions.MEMBER.value, True\n )\n assign_team_to_project(\n self.test_project_1, test_team, TeamRoles.PROJECT_MANAGER.value\n )\n\n # test_user is the manager of organisation that test_project_2 belongs to so it must be returned\n test_organisation_2 = create_canned_organisation()\n add_manager_to_organisation(test_organisation_2, self.test_user)\n self.test_project_2.organisation_id = test_organisation_2.id\n self.test_project_2.private = False\n self.test_project_2.save()\n\n # test_user doesn't have PM role on test_project_3 so it must not be returned\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.organisation_id = test_organisation_1.id\n self.test_project_3.save()\n\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"managedByMe\": \"true\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 2)\n self.assertNotIn(\n self.test_project_3.id,\n [i[\"projectId\"] for i in response.json[\"results\"]],\n )",
"def test_get_requests_for_team_by_owner(self):\n\n params = {'teamID': self.team.id}\n response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)",
"def test_get_one_for_other_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='[email protected]',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n status=403,\n )",
"def get_people(team):",
"def get_available_companies_and_people(team):",
"def test_register_virtualization_realm(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass"
] | [
"0.698553",
"0.68025017",
"0.6727827",
"0.6570777",
"0.6517338",
"0.650933",
"0.62920487",
"0.60085666",
"0.5972494",
"0.5942473",
"0.5904141",
"0.5887374",
"0.5860143",
"0.58404505",
"0.58404505",
"0.5825013",
"0.56843686",
"0.56609446",
"0.5626665",
"0.5597818",
"0.55626684",
"0.55324835",
"0.5512861",
"0.54884714",
"0.548247",
"0.5461975",
"0.5458603",
"0.5451861",
"0.54491466",
"0.5442572"
] | 0.9600649 | 0 |
Test case for get_teams | def test_get_teams(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_teams_get_teams_v2(self):\n pass",
"def get_teams():",
"def test_teams_get_teams_v1(self):\n pass",
"def test_teams_list(self):\n pass",
"def test_teams_get_team_v1(self):\n pass",
"def test_basketballteams_get(self):\n pass",
"def test_retrieve_team(self):\n pass",
"def test_teams_read(self):\n pass",
"def test_teams_get_users_teams_v2(self):\n pass",
"def test_get_teams(self):\n owner2 = AnotherUserFactory(email_confirmed=True)\n owner3 = AnotherUserFactory(username='team owner 3', email='[email protected]', email_confirmed=True,)\n TeamFactory(owner=owner2, name='second team')\n TeamFactory(owner=owner3, name='third team')\n\n usual_user = UserFactory(\n username='usualuser',\n email='[email protected]',\n email_confirmed=True,\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n response = self.client.get(reverse('api:teams-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 3)",
"def test_teams_get_users_teams_v1(self):\n pass",
"def test_workflows_id_team_get(self):\n pass",
"def test_get_list_teams(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)",
"def test_basketballteams_id_get(self):\n pass",
"def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)",
"def test_teams_create(self):\n pass",
"def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)",
"def test_get_team_history(self):\n pass",
"def test_data_source_soaps_id_team_get(self):\n pass",
"def get_people(team):",
"def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)",
"def getAllTeams(self):\n return []",
"def test_user_get_teams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/teams')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test_create_team(self):\n pass",
"def test_teams_get_workgroups_v2(self):\n pass",
"def test_get_open_requests_by_team(self):\n pass",
"def teams(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'teams')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def teams():\n print 'Getting Teams'\n\n substring = \"%\" + request.args.get('t') + \"%\"\n\n team_list = datastore.get_teams_typeahead(engine, substring, max_teams=10)\n\n print 'Teams:', team_list\n return jsonify(team_list)",
"def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self.assertTrue(survey_dict['uid'].startswith('Survey'))",
"def test_user_get_team_page():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/team/2')\n assert r.status_code == 200\n destroy_ctfd(app)"
] | [
"0.8955048",
"0.8872334",
"0.8777806",
"0.8660324",
"0.86075455",
"0.8591912",
"0.8526396",
"0.8174428",
"0.81494045",
"0.7914162",
"0.7860738",
"0.781623",
"0.7745858",
"0.7636601",
"0.7574792",
"0.7549753",
"0.7522559",
"0.7513139",
"0.7410063",
"0.73755103",
"0.7349635",
"0.7347204",
"0.7294981",
"0.7177747",
"0.70800847",
"0.7052586",
"0.70380455",
"0.70263195",
"0.69365466",
"0.6908934"
] | 0.9239268 | 0 |
Test case for get_template_subscription | def test_get_template_subscription(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_subscription_template(self):\n pass",
"def test_get_subscription_templates(self):\n pass",
"def test_create_subscription_template(self):\n pass",
"def test_update_template_subscription(self):\n pass",
"def test_list_template_subscriptions(self):\n pass",
"def test_update_subscription_template(self):\n pass",
"def test_get_subscription(self):\n pass",
"def test_delete_template_subscription(self):\n pass",
"def test_list_pending_template_subscriptions(self):\n pass",
"def test_delete_subscription_template(self):\n pass",
"def test_create_subscription(self):\n pass",
"def test_get_subscriptions(self):\n pass",
"def test_retrieve_template_registration(self):\n pass",
"def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_get_template_success(self):\n template_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_1\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_issue_subscriptions(self):\n pass",
"def test_update_subscription(self):\n pass",
"def tests_get_subscription(self):\n manager_root = ISubscriptionManager(self.root)\n manager_root.subscribability = SUBSCRIBABLE\n manager_root.subscribe('[email protected]')\n manager_folder = ISubscriptionManager(self.root.folder)\n manager_folder.subscribe('[email protected]')\n\n manager = ISubscriptionManager(self.root.folder.index)\n manager.subscribability = SUBSCRIBABLE\n manager.subscribe('[email protected]')\n\n self.assertEqual(\n manager.get_subscription('[email protected]'),\n None)\n subscription = manager.get_subscription('[email protected]')\n self.assertTrue(verifyObject(ISubscription, subscription))\n self.assertEqual(subscription.email, '[email protected]')\n self.assertEqual(subscription.content, self.root)\n self.assertEqual(len(manager.get_subscriptions()), 3)\n\n manager_root.subscribability = NOT_SUBSCRIBABLE\n\n self.assertEqual(\n manager.get_subscription('[email protected]'),\n None)\n subscription = manager.get_subscription('[email protected]')\n self.assertTrue(verifyObject(ISubscription, subscription))\n self.assertEqual(subscription.email, '[email protected]')\n self.assertEqual(subscription.content, self.root.folder.index)\n self.assertEqual(len(manager.get_subscriptions()), 1)",
"def test_process_subscriptions(self):\n pass",
"def test_get_tosca_template(self):\n pass",
"def test_issue_add_subscription(self):\n pass",
"def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})",
"def test_subscription(self):\n self.token_login()\n cassette_name = self.cassette_name(\"subscription\")\n with self.recorder.use_cassette(cassette_name):\n repository = self.gh.repository(\"sigmavirus24\", \"github3.py\")\n threads = list(repository.notifications(all=True))\n assert len(threads) > 0\n thread = threads[0]\n assert isinstance(thread, github3.notifications.Thread)\n assert isinstance(\n thread.subscription(),\n github3.notifications.ThreadSubscription,\n )",
"def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)",
"def test_create_template_subsciption(self):\n pass",
"def test_share_template_registration(self):\n pass",
"def test_delete_subscription(self):\n pass",
"def test_aws_service_api_validate_subscription_post(self):\n pass"
] | [
"0.91254216",
"0.8465079",
"0.83400655",
"0.7970487",
"0.78837746",
"0.780573",
"0.77934146",
"0.74571586",
"0.7394973",
"0.7294791",
"0.7174411",
"0.71060663",
"0.7010564",
"0.6695348",
"0.6669228",
"0.6440063",
"0.64380753",
"0.64185697",
"0.6368903",
"0.6362722",
"0.63525814",
"0.63335836",
"0.63146263",
"0.6312883",
"0.630718",
"0.6137684",
"0.6124251",
"0.612158",
"0.6073203",
"0.60676754"
] | 0.9414446 | 0 |
Test case for get_templates_in_virtualization_realm | def test_get_templates_in_virtualization_realm(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_team_template_folders_id_templates_get(self):\n pass",
"def test_invalidate_template_cache_in_virtualization_realm(self):\n pass",
"def test_retrieve_template_registration(self):\n pass",
"def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids",
"def test_get_virtualization_realms(self):\n pass",
"def test_get_activity_templates(self):\n pass",
"def test_get_device_templates(self):\n pass",
"def test_get_all_as_superuser_returns_all_templates(self):\n mock_request = create_mock_request(user=self.superuser)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 3)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))",
"def test_register_virtualization_realm(self):\n pass",
"def test_get_subscription_templates(self):\n pass",
"def test_get_all_as_user_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))",
"def test_get_all_as_staff_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.staff_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))",
"def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)",
"def test_list_template_registrations(self):\n pass",
"def test_team_template_folders_get(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)",
"def test_allocate_virtualization_realm(self):\n pass",
"def list_templates(self):\n raise NotImplementedError()",
"def test_template_name():\n for t in templates:\n assert len(t.name) > 0",
"def test_list_unregistered_templates(self):\n pass",
"def testGetTemplatesLength(self):\n self.assertEqual(len(self.service.templates), 12)",
"def _assertPageTemplatesUsed(self, response):\n self.assertGSoCTemplatesUsed(response)\n self.assertTemplateUsed(response, 'modules/gsoc/participants/base.html')\n self.assertTemplateUsed(\n response, 'modules/gsoc/participants/_mentors_list.html')\n self.assertTemplateUsed(response, 'soc/list/lists.html')\n self.assertTemplateUsed(response, 'soc/list/list.html')",
"def find_templates(self, name):\n script = (\n 'Get-SCVMTemplate -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVMTemplate(system=self, raw=tmpl_data) for tmpl_data in data]\n return [SCVMTemplate(system=self, raw=data)]",
"def test_list_template_for_all_namespaces(self):\n pass"
] | [
"0.8728161",
"0.73325515",
"0.7203499",
"0.67043316",
"0.6665658",
"0.6649772",
"0.6569931",
"0.6565045",
"0.6523989",
"0.6463076",
"0.6445338",
"0.6443119",
"0.6429947",
"0.6382415",
"0.6331302",
"0.6329164",
"0.6323613",
"0.62802017",
"0.62723553",
"0.6254154",
"0.6235713",
"0.6211968",
"0.61779845",
"0.6154516",
"0.6147203",
"0.6125125",
"0.6091973",
"0.6086867",
"0.6074563",
"0.60608435"
] | 0.950381 | 0 |
Test case for get_test_asset | def test_get_test_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_test_assets(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)",
"def test_get_container_assets(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_delete_asset(self):\n pass",
"def get_asset_path(test):\n return DEVICE_ASSETS_PATH + os.path.basename(test)",
"def test_get_test_assets_expanded(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_get_image_url(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n self.assertEquals(course_image_url(course), '/c4x/edX/999/asset/{0}'.format(course.course_image))",
"def startTest(asset):",
"def test_get_image_url(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n url = utils.course_image_url(course)\r\n self.assertEquals(url, '/c4x/edX/999/asset/{0}'.format(course.course_image))",
"def test_itar_restrict_test_asset(self):\n pass",
"def _get(self) -> json_api.generic.Metadata:\n api_endpoint = ApiEndpoints.assets.fields\n return api_endpoint.perform_request(http=self.auth.http, asset_type=self.parent.ASSET_TYPE)",
"def test_update_software_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_get_image_url(self):\r\n course = self.process_xml(xml.CourseFactory.build())\r\n self.assertEquals(course_image_url(course), '/static/xml_test_course/images/course_image.jpg')",
"def test_add_category_to_asset(self):\n pass",
"def test_get_collection_asset_urls(self, mock_get):\n # Arrange\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n\n # Act\n response = get_collection_item_asset_urls(self.item_id)\n\n # Assert\n self.assertListEqual(\n response,\n [\n \"http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg\"\n ],\n )",
"def test_aws_service_api_image_get(self):\n pass",
"def test_itar_restrict_asset(self):\n pass"
] | [
"0.82820946",
"0.8044094",
"0.7942955",
"0.7331919",
"0.72676235",
"0.7154872",
"0.7139311",
"0.71014214",
"0.7091819",
"0.69722605",
"0.6903332",
"0.68703055",
"0.6739013",
"0.6554874",
"0.6528944",
"0.65047604",
"0.64353347",
"0.6410545",
"0.63964784",
"0.6385942",
"0.6385543",
"0.63789904",
"0.633389",
"0.6298511",
"0.62806815",
"0.626482",
"0.6253854",
"0.61980534",
"0.6182914",
"0.61801934"
] | 0.9440002 | 0 |
Test case for get_test_assets | def test_get_test_assets(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_test_asset(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_list_dependent_assets(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_update_test_asset(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_list_dependent_assets2(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_list_dependent_assets1(self):\n pass",
"def test_list_dependent_assets3(self):\n pass",
"def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)",
"def get_asset_path(test):\n return DEVICE_ASSETS_PATH + os.path.basename(test)",
"def test_get_container_assets_expanded(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_itar_restrict_test_asset(self):\n pass",
"def assets():",
"def assets():\n pass",
"async def test_get_all_assets(client):\n params = [('access_token', 'access_token_example'),\n ('group_id', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/assets',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_get_section_assets_json(self):\n json_data = self.view.get_section_assets_json(story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data), len(self.story.sections.all()))\n for section in self.story.sections.all():\n self.assertIn(section.section_id, data)\n self.assertEqual(len(data[section.section_id]['objects']),\n len(section.assets.all()))\n asset_ids = [sectionasset['asset']['asset_id'] for\n sectionasset in \n data[section.section_id]['objects']]\n for asset in section.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_update_system_asset(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def get_test_files():\n repo_fs()\n return TEST_FILES",
"def test_get_api_resources(self):\n pass"
] | [
"0.86941195",
"0.7828993",
"0.77420974",
"0.7642411",
"0.7535365",
"0.7506334",
"0.7083185",
"0.7028469",
"0.69800526",
"0.68649346",
"0.6769858",
"0.6759919",
"0.67547977",
"0.66890097",
"0.6621367",
"0.6560056",
"0.6497423",
"0.64589155",
"0.644263",
"0.6389088",
"0.6362594",
"0.6348605",
"0.63158953",
"0.62643766",
"0.623432",
"0.62239677",
"0.6219055",
"0.6209857",
"0.6153589",
"0.61504817"
] | 0.9445863 | 0 |
Test case for get_test_assets_expanded | def test_get_test_assets_expanded(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_container_assets_expanded(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_list_dependent_assets(self):\n pass",
"def test_get_scenarios_expanded(self):\n pass",
"def test_get_projects_expanded(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_list_dependent_assets2(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_list_dependent_assets1(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_get_deployments_expanded(self):\n pass",
"def test_list_dependent_assets3(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_read_artifact(self):\n pass",
"def test_itar_restrict_test_asset(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_auto_add_assets_to_story(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())",
"def test_auto_add_assets_to_story(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())",
"def test_create_system_asset(self):\n pass",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_get_software_bundles(self):\n pass",
"def test_get_software_set_expanded(self):\n pass",
"def test_get_section_assets_json(self):\n json_data = self.view.get_section_assets_json(story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data), len(self.story.sections.all()))\n for section in self.story.sections.all():\n self.assertIn(section.section_id, data)\n self.assertEqual(len(data[section.section_id]['objects']),\n len(section.assets.all()))\n asset_ids = [sectionasset['asset']['asset_id'] for\n sectionasset in \n data[section.section_id]['objects']]\n for asset in section.assets.all():\n self.assertIn(asset.asset_id, asset_ids)"
] | [
"0.82792056",
"0.74338585",
"0.7426617",
"0.6749964",
"0.64775175",
"0.62863046",
"0.62856776",
"0.62648165",
"0.6234041",
"0.62172043",
"0.616142",
"0.60602117",
"0.59562725",
"0.59427273",
"0.59029245",
"0.58813554",
"0.5872886",
"0.57209206",
"0.57043236",
"0.5625963",
"0.5613919",
"0.5559223",
"0.5546255",
"0.5506102",
"0.5457975",
"0.5442613",
"0.5430495",
"0.54213494",
"0.539096",
"0.53811246"
] | 0.9534373 | 0 |
Test case for get_unregistered_networks | def test_get_unregistered_networks(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_networks(self):\n pass",
"def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)",
"def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)",
"def test_aws_service_api_networks_get(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_get_network(self):\n pass",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def test_register_network(self):\n pass",
"def getNets(self):\n\t\treturn NetLoader.listNetworks()",
"def show_networks():\n return get_networks()",
"def validate_networks(self, context, requested_networks):\n args = {'networks': requested_networks}\n return rpc.call(context, FLAGS.network_topic,\n {'method': 'validate_networks',\n 'args': args})",
"def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()",
"def test_delete_network(self):\n pass",
"def test_external_networks(self):\n network_list = self.neutron_operations.find_networks(router_external=True)\n self.assertNotEqual(len(network_list), 0, \"No external networks found\")",
"def test_get_default_network(self):\n pass",
"def fetch():\n\t\n\t_interfaces = [Interface(iface) for iface in netifaces.interfaces()]\n\t\n\tfor iface in _interfaces: \n\t\tif (iface.id in BLACK_ID) or (iface.mac in BLACK_MAC) or (len(iface.mac) < 5):\n\t\t\t_interfaces.remove(iface)\n\t\t\t\n\treturn _interfaces",
"def get_networks(self, just_names=False, as_dict=False,\n disconnected=False):\n # Calling GetClients() on returned networks is simpler than filtering\n # result of self.get_clients(), above.\n networks = (n for n in self.GetUser().GetNetworks() if\n disconnected or n.IsIRCConnected())\n if just_names:\n return tuple(n.GetName() for n in networks)\n elif as_dict:\n return {n.GetName(): n for n in networks}\n return tuple(networks)",
"def delete_networks(self):\n logging.debug(\"cleanup called\")\n # for network in self.networks.key():\n # self.networks[network].delete()\n for network in self.networks.values():\n logging.warn(\"Deleting network '%s'\" % network)\n print \"Deleting network '%s'\" % network\n # print self.networks[network]\n network.delete()\n self.networks = {}",
"def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def list_networks():\n return __sets.keys()",
"def test_nets(self):\n\n good_nets = self.good.nets[:]\n\n self.assertEqual(len(good_nets), 5)\n\n for net in self.actual.nets:\n for goodnet in good_nets:\n if set(net.points) == set(goodnet.points):\n good_nets.remove(goodnet)\n break\n else:\n raise Exception('bad net', net)\n\n self.assertEqual(good_nets, [])",
"def cleanup_networks(self):\n for network in self.networks:\n try:\n network.remove()\n network.client.api.close()\n network.client.close()\n self.log_message(\n f'{dateutils.get_current_time()} '\n f'destroying docker network {network}'\n )\n except Exception:\n self.log_message(\n f'{dateutils.get_current_time()} ERROR: Could not remove docker '\n f'network {network}'\n )\n self.networks.clear()",
"def fusion_api_get_server_profiles_available_networks(self, uri=None, param='', api=None, headers=None):\n param = '/available-networks%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)",
"def GetNetworks(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n networks = self._SendRequest(HTTP_GET, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return networks\n else:\n return [n[\"name\"] for n in networks]",
"def unassign_sdn_networks(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"unassign_sdn_networks\"), kwargs)",
"def networks(self) -> dict:\n return self.data[\"networks\"]",
"def test_list_unregistered_templates(self):\n pass",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None",
"def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret"
] | [
"0.70481443",
"0.6705914",
"0.6705914",
"0.64878374",
"0.625235",
"0.6166959",
"0.61608213",
"0.5913051",
"0.5904383",
"0.5857477",
"0.58545583",
"0.58114564",
"0.5768361",
"0.57424515",
"0.5725416",
"0.572304",
"0.57081574",
"0.5693959",
"0.56401277",
"0.556046",
"0.5550816",
"0.5504027",
"0.54779947",
"0.5437932",
"0.54247576",
"0.54122984",
"0.5398221",
"0.5394709",
"0.53823644",
"0.5369821"
] | 0.95693475 | 0 |
Test case for get_valid_networks_for_virtualization_realm | def test_get_valid_networks_for_virtualization_realm(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_networks(self):\n pass",
"def test_external_networks(self):\n network_list = self.neutron_operations.find_networks(router_external=True)\n self.assertNotEqual(len(network_list), 0, \"No external networks found\")",
"def test_aws_service_api_networks_get(self):\n pass",
"def validate_networks(self, context, requested_networks):\n args = {'networks': requested_networks}\n return rpc.call(context, FLAGS.network_topic,\n {'method': 'validate_networks',\n 'args': args})",
"def test_networking_project_network_list(self):\n pass",
"def test_get_unregistered_networks(self):\n pass",
"def test_get_network(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_list_host_subnet(self):\n pass",
"def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None",
"def test_nets(self):\n\n good_nets = self.good.nets[:]\n\n self.assertEqual(len(good_nets), 5)\n\n for net in self.actual.nets:\n for goodnet in good_nets:\n if set(net.points) == set(goodnet.points):\n good_nets.remove(goodnet)\n break\n else:\n raise Exception('bad net', net)\n\n self.assertEqual(good_nets, [])",
"def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")",
"def test_networking_project_network_get(self):\n pass",
"def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()",
"def test_verify_list_of_devices_in_my_network():",
"def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self._fail_network_list = True\n self.configuration.hgst_net = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self._fail_network_list = False",
"def test_list_cluster_network(self):\n pass",
"def test_02_verify_ipv6_network_redundant(self):\n\n self.createIpv6NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_networking_project_network_service_list(self):\n pass",
"def test_network_full(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=None,\n )",
"def test_get_virtualization_realm(self):\n pass",
"def test_networking_project_network_create(self):\n pass",
"def dvs_vcenter_networks(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n subnets = []\n networks = []\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n for net in self.net_data:\n logger.info('Create network {}'.format(net.keys()[0]))\n netw = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=netw['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n subnets.append(subnet)\n networks.append(netw)\n\n self.show_step(3)\n for net in networks:\n assert_true(os_conn.get_network(net['name'])['id'] == net['id'])\n\n self.show_step(4)\n logger.info('Delete network net_1')\n os_conn.neutron.delete_subnet(subnets[0]['id'])\n os_conn.neutron.delete_network(networks[0]['id'])\n\n self.show_step(5)\n assert_true(os_conn.get_network(networks[0]) is None)\n\n self.show_step(6)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))\n # subnet\n os_conn.create_subnet(\n subnet_name=self.net_data[0].keys()[0],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n logger.info('Networks net_1 and net_2 are present.')",
"def dvs_different_networks(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n _sg_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _sg_groups\n if sg['tenant_id'] == _srv_tenant and\n sg['name'] == 'default'][0]\n\n instances_group = []\n networks = []\n map_router_subnet = []\n step = 2\n self.show_step(step)\n for net in self.net_data:\n network = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=network['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(\n os_conn.get_network(network['name'])['id'] == network['id'])\n self.show_step(step + 1)\n router = os_conn.create_router(\n 'router_0{}'.format(self.net_data.index(net) + 1),\n tenant=tenant)\n\n self.show_step(step + 3)\n os_conn.add_router_interface(router_id=router[\"id\"],\n subnet_id=subnet[\"id\"])\n\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': network['id']}],\n security_groups=[security_group.name, default_sg['name']])\n if step == 3:\n step += 1\n self.show_step(step + 5)\n self.show_step(step + 6)\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': network['id']}],\n vm_count=1,\n security_groups=[default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n instances = [instance for instance in os_conn.get_servers()\n if network['name'] == instance.networks.keys()[0] and\n instance.id != access_point.id]\n\n private_ips = [\n os_conn.get_nova_instance_ip(i, net_name=network['name'])\n for i in instances]\n\n instances_group.append({'access_point': access_point,\n 'access_point_ip': access_point_ip,\n 'private_ips': private_ips})\n\n networks.append(network)\n map_router_subnet.append({'subnet': subnet['id'],\n 'router': router['id']})\n step = 3\n self.show_step(11)\n for group in instances_group:\n ip_pair = dict.fromkeys(group['private_ips'])\n for key in ip_pair:\n ip_pair[key] = [value for value in group['private_ips']\n if key != value]\n openstack.check_connection_through_host(\n group['access_point_ip'], ip_pair)\n\n self.show_step(12)\n ip_pair = dict.fromkeys(instances_group[0]['private_ips'])\n for key in ip_pair:\n ip_pair[key] = instances_group[1]['private_ips']\n openstack.check_connection_through_host(\n remote=instances_group[0]['access_point_ip'],\n ip_pair=ip_pair,\n result_of_command=1)\n\n self.show_step(13)\n\n access_point_fip = instances_group[1]['access_point_ip']\n _fips = os_conn.neutron.list_floatingips()['floatingips']\n fip_id = [fip['id'] for fip in _fips\n if fip['floating_ip_address'] == access_point_fip][0]\n\n os_conn.neutron.delete_floatingip(fip_id)\n\n os_conn.neutron.remove_interface_router(\n router=map_router_subnet[1]['router'],\n body={\"subnet_id\": map_router_subnet[1]['subnet']})\n\n os_conn.add_router_interface(router_id=map_router_subnet[0]['router'],\n 
subnet_id=map_router_subnet[1]['subnet'])\n self.show_step(14)\n openstack.check_connection_through_host(\n remote=instances_group[0]['access_point_ip'], ip_pair=ip_pair)",
"def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")",
"def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes",
"def list_virtual_networks(client, private_cloud, resource_pool, location):\n return client.list(location, private_cloud, resource_pool)"
] | [
"0.7239187",
"0.68767494",
"0.6630372",
"0.65684474",
"0.6546521",
"0.65141875",
"0.6466804",
"0.6217919",
"0.61868286",
"0.61580765",
"0.61419344",
"0.61353874",
"0.6133159",
"0.6086617",
"0.6047105",
"0.60371196",
"0.5971482",
"0.59617007",
"0.59037125",
"0.58990085",
"0.58962274",
"0.58525467",
"0.5851098",
"0.5839026",
"0.5837987",
"0.582233",
"0.58199686",
"0.57819986",
"0.5766111",
"0.5763497"
] | 0.95556384 | 0 |
Test case for get_version | def test_get_version(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_version(self):\n pass",
"def _get_version(self):",
"def test_get_short_version(self):\n pass",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def testGetVersion(self):\n helper = pylint.PylintHelper()\n\n helper._GetVersion()",
"def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')",
"def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])",
"def test_get_oapi_version(self):\n pass",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def test_getVersion(self):\n version = (\"twisted\", 2, 1, 0)\n project = self.makeProject(version)\n self.assertEqual(project.getVersion(), Version(*version))",
"def test_getVersion(self):\n version = Version('foo', 2, 1, 0)\n project = self.makeProject(version)\n self.assertEquals(project.getVersion(), version)",
"def get_version():\n return 1",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))",
"def get_version(self):\n pass",
"def test_parse_version():\n version = parse_version(__version__)\n assert type(version) == Version",
"def test_version():\n assert __version__ == \"0.1.0\"",
"def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)",
"def test_version_type(self):\n self.assertIsInstance(get_version(), str)",
"def test_version():\n assert __version__",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")",
"def test_get_cons3rt_version(self):\n pass",
"def test_versionInfo(self):\n self.assertEqual(\n nevow.__version_info__,\n (nevow.version.major, nevow.version.minor, nevow.version.micro))",
"def test_get_revision(self):\n pass",
"def test_version() -> None:\n assertion.assert_(Version, nanoqm.__version__)",
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def test_version(self):\n result = check_output([b\"flocker-changestate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))",
"def py_versiontest(c):\n pass",
"def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))",
"def test_get_iiq_version_ok(self, fake_get_distribution):\n fake_get_distribution.return_value = self.FakeDistVersion('3.3.4')\n\n v = versions.get_iiq_version()\n\n self.assertTrue(isinstance(v, versions.Version))"
] | [
"0.8308639",
"0.82531625",
"0.80604726",
"0.7995182",
"0.7986462",
"0.7970134",
"0.79274386",
"0.78546906",
"0.78112584",
"0.7802326",
"0.77743787",
"0.7685401",
"0.765229",
"0.75938",
"0.7579828",
"0.75624156",
"0.75437415",
"0.7539098",
"0.75359255",
"0.7534819",
"0.7527031",
"0.75240284",
"0.7433535",
"0.74164206",
"0.7411804",
"0.74092835",
"0.7357032",
"0.73567194",
"0.73329234",
"0.7313114"
] | 0.93555695 | 0 |
Test case for get_virtual_machine_count_metrics | def test_get_virtual_machine_count_metrics(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_virtual_machine_count_metrics1(self):\n pass",
"def test_vm_count():\n assert environments.vm_count() > 0, 'Total VM count should be over 1.'\n count = 0\n for l in list(environments.data):\n e = environments[l]\n count += e.vm_count\n msg = ('VM count mismatch. Environments says: ' +\n str(environments.vm_count()) +\n ', actual count: ' + str(count))\n assert count == environments.vm_count(), msg",
"def test_svm_vs_vm_count():\n assert environments.svms() >= environments.vm_count()",
"def test_total_procs():\n result = _run_metric('total_procs')\n assert result.exit_code == 0",
"async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )",
"def test_svm_count():\n assert environments.svms() > 0\n count = 0\n for l in list(environments.data):\n e = environments[l]\n count += e.svms\n msg = ('SVM count mismatch. Environments says: ' +\n str(environments.svms()) +\n ', actual count: ' + str(count))\n assert count == environments.svms(), msg",
"def test_svm_vs_vm_count():\n assert templates.svms() >= templates.vm_count()",
"def check_vm_count(system, warn=10, crit=15, **kwargs):\n logger = kwargs[\"logger\"]\n vm_count = len(system.list_vms())\n logger.info(\"Checking threshold status for instance count\")\n check_threshold(vm_count, warn, crit, logger)",
"def test_mem_available_percent():\n result = _run_metric('mem_available_percent')\n assert result.exit_code == 0",
"def test_svm_count():\n assert templates.svms() > 0",
"def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)",
"def test_vm_count():\n assert templates.vm_count() > 0",
"def retrieve_num_instances(service):\n instance_counts = service[\"instance-counts\"]\n return instance_counts[\"healthy-instances\"] + instance_counts[\"unhealthy-instances\"]",
"def test_calculate_count(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.add_counts()\n assert \"AADT\" in net.links_df.columns\n print(net.links_df[net.links_df.drive_access == 1].AADT.value_counts())\n ## todo write an assert that actually tests something",
"def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")",
"def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0",
"def test00(self):\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", clock() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")",
"def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))",
"def test_getTotalIndividualCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getTotalIndividualCount(), 15)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getTotalIndividualCount(), 976)\r\n self.assertEqual(self.est3.getTotalIndividualCount(), 237)",
"def test_load_avg_15():\n result = _run_metric('load_avg_15')\n assert result.exit_code == 0",
"def find_test_count(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n\n result = parsed['metrics']['testsCount']['_value']\n _logger.debug('Using subtest count: %s', result)\n\n return result",
"def compute_metrics(self):\n pass",
"def create_system_metrics(system):\n pass",
"def get_total_n_cpu(self) -> int:",
"def test_cpu_percentage():\n result = _run_metric('cpu_percent')\n assert result.exit_code == 0",
"def test_get_all_derived_metrics(self):\n pass",
"def get_count():\n _check_init()\n return _pypm.CountDevices()",
"def count():",
"def test_load_avg_5():\n result = _run_metric('load_avg_5')\n assert result.exit_code == 0",
"def test_cpu_processor_count_value(self):\n \n cpu_processor_count = get_cpu_information()[4] \n \n # Check to make sure the returned value is \"Intel(R) Core(TM) i7-4771 CPU @ 3.50GHz\"\n self.assertEqual(cpu_processor_count, 1)"
] | [
"0.9428486",
"0.7132315",
"0.69137144",
"0.6858517",
"0.6834674",
"0.67460644",
"0.6649076",
"0.64297545",
"0.64092255",
"0.6310988",
"0.6262392",
"0.62045723",
"0.6077991",
"0.60744435",
"0.6053116",
"0.6038438",
"0.60157406",
"0.599788",
"0.5970189",
"0.59456164",
"0.594385",
"0.5934387",
"0.5921304",
"0.59019065",
"0.5893075",
"0.5882274",
"0.5867882",
"0.5860513",
"0.5856267",
"0.5836098"
] | 0.952482 | 0 |
Test case for get_virtual_machine_count_metrics1 | def test_get_virtual_machine_count_metrics1(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_virtual_machine_count_metrics(self):\n pass",
"def test_vm_count():\n assert environments.vm_count() > 0, 'Total VM count should be over 1.'\n count = 0\n for l in list(environments.data):\n e = environments[l]\n count += e.vm_count\n msg = ('VM count mismatch. Environments says: ' +\n str(environments.vm_count()) +\n ', actual count: ' + str(count))\n assert count == environments.vm_count(), msg",
"def test_svm_vs_vm_count():\n assert environments.svms() >= environments.vm_count()",
"def test_total_procs():\n result = _run_metric('total_procs')\n assert result.exit_code == 0",
"def test_svm_count():\n assert environments.svms() > 0\n count = 0\n for l in list(environments.data):\n e = environments[l]\n count += e.svms\n msg = ('SVM count mismatch. Environments says: ' +\n str(environments.svms()) +\n ', actual count: ' + str(count))\n assert count == environments.svms(), msg",
"async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )",
"def test_svm_vs_vm_count():\n assert templates.svms() >= templates.vm_count()",
"def check_vm_count(system, warn=10, crit=15, **kwargs):\n logger = kwargs[\"logger\"]\n vm_count = len(system.list_vms())\n logger.info(\"Checking threshold status for instance count\")\n check_threshold(vm_count, warn, crit, logger)",
"def test_svm_count():\n assert templates.svms() > 0",
"def test_mem_available_percent():\n result = _run_metric('mem_available_percent')\n assert result.exit_code == 0",
"def test_vm_count():\n assert templates.vm_count() > 0",
"def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)",
"def retrieve_num_instances(service):\n instance_counts = service[\"instance-counts\"]\n return instance_counts[\"healthy-instances\"] + instance_counts[\"unhealthy-instances\"]",
"def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")",
"def test_calculate_count(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.add_counts()\n assert \"AADT\" in net.links_df.columns\n print(net.links_df[net.links_df.drive_access == 1].AADT.value_counts())\n ## todo write an assert that actually tests something",
"def test00(self):\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", clock() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")",
"def get_count():\n _check_init()\n return _pypm.CountDevices()",
"def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0",
"def count():",
"def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))",
"def test_getTotalIndividualCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getTotalIndividualCount(), 15)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getTotalIndividualCount(), 976)\r\n self.assertEqual(self.est3.getTotalIndividualCount(), 237)",
"def find_test_count(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n\n result = parsed['metrics']['testsCount']['_value']\n _logger.debug('Using subtest count: %s', result)\n\n return result",
"def get_total_n_cpu(self) -> int:",
"def test_cpu_processor_count_value(self):\n \n cpu_processor_count = get_cpu_information()[4] \n \n # Check to make sure the returned value is \"Intel(R) Core(TM) i7-4771 CPU @ 3.50GHz\"\n self.assertEqual(cpu_processor_count, 1)",
"def test_load_avg_15():\n result = _run_metric('load_avg_15')\n assert result.exit_code == 0",
"def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten",
"def test_getObservationCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getObservationCount(), 5)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getObservationCount(), 140)\r\n self.assertEqual(self.est3.getObservationCount(), 112)",
"def create_system_metrics(system):\n pass",
"def Count(self) -> int:",
"def Count(self) -> int:"
] | [
"0.9462208",
"0.7175775",
"0.69014585",
"0.6836147",
"0.68071187",
"0.6777666",
"0.66530794",
"0.6407028",
"0.63620377",
"0.62978643",
"0.6244483",
"0.6218319",
"0.61617315",
"0.60853904",
"0.6077662",
"0.6069866",
"0.60468376",
"0.6042649",
"0.6019392",
"0.6003801",
"0.59937567",
"0.5977803",
"0.59629714",
"0.5859253",
"0.5851777",
"0.58410895",
"0.5829707",
"0.5819805",
"0.58130825",
"0.58130825"
] | 0.94947237 | 0 |
Test case for get_virtualization_realm | def test_get_virtualization_realm(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_register_virtualization_realm(self):\n pass",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def test_get_virtualization_realms(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_set_project_default_virtualization_realm(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_update_virt_realm(self):\n pass",
"def test_enable_virt_realm_remote_access(self):\n pass",
"def test_remove_virt_realm(self):\n pass",
"def test_get_project_virt_realms(self):\n pass",
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_get_virtual_accounts(self):\n pass",
"def test_deallocate_virt_realm(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_update_virt_realm_remote_access_config(self):\n pass",
"def test_disable_virt_realm_remote_access(self):\n pass",
"def test_invalidate_template_cache_in_virtualization_realm(self):\n pass",
"def test_aws_service_api_vm_management_get(self):\n pass",
"def test_aws_service_api_vm_get(self):\n pass",
"def test_get_virtual_account_by_id(self):\n pass",
"def test_aws_service_api_vm_details_get(self):\n pass",
"def test_virtualservice_get(self):\n pass",
"def test_update_virtualization_realm_maximum_impact_level(self):\n pass",
"def test_get_virtual_service(self):\n pass"
] | [
"0.84720147",
"0.827407",
"0.7972289",
"0.7966721",
"0.7904994",
"0.7822082",
"0.7593173",
"0.74989456",
"0.74953544",
"0.7374292",
"0.7293444",
"0.69793445",
"0.6924349",
"0.67929065",
"0.6714621",
"0.65157896",
"0.62951374",
"0.6198845",
"0.6182621",
"0.60891366",
"0.6073488",
"0.6037288",
"0.5974495",
"0.58855736",
"0.5854623",
"0.5787645",
"0.56990355",
"0.56399745",
"0.562431",
"0.5573267"
] | 0.94904315 | 0 |
Test case for get_virtualization_realm_resources | def test_get_virtualization_realm_resources(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_virtualization_realm(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_get_virtualization_realms(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def test_register_virtualization_realm(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_get_project_virt_realms(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_get_cloud_resources(self):\n pass",
"def test_show_vcs_resources(mgmt_session):\n vcs_resource = rift.vcs.vcs.VcsResource(mgmt_session)\n vcs_resource_info = None\n\n # Get vcs resources\n vcs_resource_info = vcs_resource.get_vcs_resource()\n\n # Verify there are VM entries in the vcs resource info container\n vms = [vm for vm in vcs_resource_info.vm]\n if len(vms) == 0:\n raise AssertionError(\"No entries found in vcs resource info\")",
"def test_get_virtual_accounts(self):\n pass",
"def test_get_resource_membership_list(self):\n pass",
"def init_cloud_virtual_resources():\n test_cldvirt_resources = []\n\n # add info to list in memory, one by one, following signature values\n cldvirtres_ID = 1\n cldvirtres_name = \"nova-compute-1\"\n cldvirtres_info = \"nova VM in Arm pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 2\n cldvirtres_name = \"nova-compute-2\"\n cldvirtres_info = \"nova VM in LaaS\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [2,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 3\n cldvirtres_name = \"nova-compute-3\"\n cldvirtres_info = \"nova VM in x86 pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n\n # write list to binary file\n write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)\n\n return test_cldvirt_resources",
"def test_get_deployment_resources(self):\n pass",
"def test_get_translation_resources(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_update_virt_realm(self):\n pass",
"def test_enable_virt_realm_remote_access(self):\n pass",
"def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass",
"def test_get_virtual_account_clients(self):\n pass",
"def test_aws_service_api_vm_management_get(self):\n pass",
"def test_list_all_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass",
"def test_ipam_vrfs_list(self):\n pass",
"def test_objectresource_listobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n names = yield calendar.listObjectResources()\n self.assertEqual(set(names), set((\"1.ics\", \"2.ics\",)))\n yield self.commitTransaction(1)",
"def test_get_api_resources(self):\n pass"
] | [
"0.82264215",
"0.7756911",
"0.7746048",
"0.73255306",
"0.7196306",
"0.6961232",
"0.6932928",
"0.6821897",
"0.6704755",
"0.66542464",
"0.6613938",
"0.6571302",
"0.6533066",
"0.6385645",
"0.638342",
"0.6371223",
"0.6077056",
"0.6075573",
"0.6040317",
"0.6019357",
"0.5995074",
"0.5991204",
"0.59631455",
"0.5916462",
"0.5881969",
"0.5867999",
"0.5858281",
"0.58170885",
"0.5798507",
"0.5771519"
] | 0.9436492 | 0 |
Test case for get_virtualization_realms | def test_get_virtualization_realms(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_register_virtualization_realm(self):\n pass",
"def test_get_project_virt_realms(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_aws_service_api_vm_management_get(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_aws_service_api_vm_details_get(self):\n pass",
"def test_aws_service_api_vm_get(self):\n pass",
"def test_virtualservice_get(self):\n pass",
"def test_aws_service_api_vms_get(self):\n pass",
"def test_get_virtual_service(self):\n pass",
"def test_get_virtual_accounts(self):\n pass",
"def test_set_project_default_virtualization_realm(self):\n pass",
"def test_get_all_virtualservices(self,setup_suite):\n _, resp = get('virtualservice')\n vs_obj_list = resp['results']\n for vs_obj in vs_obj_list:\n logger.info(\" >>> VS Name: %s <<<\" % vs_obj['name'])",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_ipam_vrfs_list(self):\n pass",
"def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)",
"def test_get_virtual_account_clients(self):\n pass",
"def virtual_machines(self):\n return self._virtual_machines",
"def init_cloud_virtual_resources():\n test_cldvirt_resources = []\n\n # add info to list in memory, one by one, following signature values\n cldvirtres_ID = 1\n cldvirtres_name = \"nova-compute-1\"\n cldvirtres_info = \"nova VM in Arm pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 2\n cldvirtres_name = \"nova-compute-2\"\n cldvirtres_info = \"nova VM in LaaS\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [2,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 3\n cldvirtres_name = \"nova-compute-3\"\n cldvirtres_info = \"nova VM in x86 pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n\n # write list to binary file\n write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)\n\n return test_cldvirt_resources",
"def test_aws_service_api_vm_patch(self):\n pass",
"def test_svm_vs_vm_count():\n assert templates.svms() >= templates.vm_count()"
] | [
"0.87345",
"0.82369715",
"0.8018457",
"0.75693434",
"0.7503235",
"0.74925",
"0.74441326",
"0.72711354",
"0.7167637",
"0.7138762",
"0.70454913",
"0.6963919",
"0.6474516",
"0.6448546",
"0.63143057",
"0.63117677",
"0.6283197",
"0.61965907",
"0.6153293",
"0.61172134",
"0.6111598",
"0.6059752",
"0.60581297",
"0.6002509",
"0.59385496",
"0.5816631",
"0.57880074",
"0.5779764",
"0.5716718",
"0.57132757"
] | 0.95070046 | 0 |
Test case for gettesttools_html | def test_gettesttools_html(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_html_output(self):\n pass",
"def test_get_services_html(self):\n pass",
"def testHTML(self):\n\n html = self.E.html()",
"def test_get_root_html(self):\n pass",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'sourceUrl', 'typeSource', 'poster', 'annotation_storage']:\r\n self.assertIn(key, context)",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'annotation_storage', 'token', 'tag', 'openseadragonjson']:\r\n self.assertIn(key, context)",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage', 'token']:\r\n self.assertIn(key, context)",
"def test_export_html(self):\r\n resp = self.client.get_html(self.url)\r\n self.assertEquals(resp.status_code, 200)\r\n self.assertContains(resp, \"Export My Course Content\")",
"def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)",
"def test_get_root_html1(self):\n pass",
"def test_error_html_using_get(self):\n pass",
"def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)",
"def test_get_root_html2(self):\n pass",
"def test_get_html(self):\r\n _html = self.peer_grading.get_html()",
"def get_html(self):\r\n return u'This is supposed to be test html.'",
"def test_get_from_html(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_method()\n\n expected = textwrap.dedent(\n '''\\\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter",
"def testConvertHtmlWithScriptToPdf(self):\n self._testBase(\"data/test_with_script.html\")",
"def test_get_monitor_content_html(self):\n response = self.setup_get_html_test('/monitor')\n self.assertEqual(response.data, \"OK\")",
"def test_get_from_html(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"nested_folder\",\n \"another.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_nested_method()\n\n expected = textwrap.dedent(\n '''\\\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter",
"def test_mocked_get_simpleHtml(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_get\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>value</th><td>testValue</td></tr></table>', response.content)",
"def test_get_checklists_html(self):\r\n response = self.client.get(self.checklists_url, HTTP_ACCEPT='text/html')\r\n self.assertContains(response, \"Getting Started With Studio\")\r\n # The HTML generated will define the handler URL (for use by the Backbone model).\r\n self.assertContains(response, self.checklists_url)",
"def tests():\n\n\treturn render_template(\"testing.html\")",
"def test_get_monitor_html(self):\n response = self.setup_get_html_test('/monitor')\n self.assertEqual(response.status_code, 200)",
"def get_html(self):\r\n pass",
"def test_get_index_html(self):\n response = self.setup_get_html_test('/api/index')\n self.assertEqual(response.status_code, 200)",
"def test_tester01(self):\n result = self.init_test_app().get('/tester')\n self.assertEqual(result.data[0:33],\n b'<!DOCTYPE html>\\n<html lang=\"en\">\\n')",
"def test_dose_check_generation(self):\n response = self.client.get('/dose_check/V117108T/Insertion%201/Pre%20Ins%201/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'myapp/dose_check.html')\n # with open(r\"myapp\\\\tests\\\\test_dose_check_page.html\", 'r') as myfile:\n # test_html_data=myfile.read()\n # self.assertEqual(response.content.decode(\"utf-8\"),test_html_data)",
"def test_gethtml_multiple(self):\r\n mock_module = VerticalWithModulesFactory.create()\r\n out_html = mock_module.render('student_view').content\r\n self.assertTrue('Test numerical problem.' in out_html)\r\n self.assertTrue('Another test numerical problem.' in out_html)",
"def test_gethtml(self):\r\n mock_module = CHModuleFactory.create()\r\n\r\n def fake_get_display_items():\r\n \"\"\"\r\n A mock of get_display_items\r\n \"\"\"\r\n return [FakeChild()]\r\n mock_module.get_display_items = fake_get_display_items\r\n out_html = mock_module.render('student_view').content\r\n self.assertTrue('This is supposed to be test html.' in out_html)\r\n self.assertTrue('i4x://this/is/a/fake/id' in out_html)",
"def test_sample(self):\n response = self.tester.get('/sample-household/',\n content_type='html/text')\n self.assertEqual(response.status_code, 200)"
] | [
"0.793766",
"0.76976943",
"0.73097146",
"0.7293976",
"0.7218267",
"0.7141496",
"0.70999503",
"0.70624965",
"0.706234",
"0.70623034",
"0.7017327",
"0.69936585",
"0.6943644",
"0.69046235",
"0.6819236",
"0.6779285",
"0.6710938",
"0.66981655",
"0.669376",
"0.66642946",
"0.66429555",
"0.6605312",
"0.6531139",
"0.6516584",
"0.6495576",
"0.64678967",
"0.6460365",
"0.64555985",
"0.6441198",
"0.6426237"
] | 0.9521599 | 0 |
Test case for import_software_asset | def test_import_software_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_import_system_asset(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_update_software_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def test_update_software_asset_install_script(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_itar_restrict_software_asset(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_delete_software_asset_bundle(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_submit_asset_to_submission_service(self):\n pass",
"def test_update_software_asset_impact_level(self):\n pass",
"def test_get_software_bundle(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_itar_restrict_asset(self):\n pass",
"def test_get_software_bundles(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_itar_restrict_test_asset(self):\n pass",
"def test_get_software(self):\n pass",
"def test_read_artifact(self):\n pass",
"def test_delete_system_asset(self):\n pass"
] | [
"0.8180706",
"0.791843",
"0.77358276",
"0.758361",
"0.7497419",
"0.7470979",
"0.7464038",
"0.74122477",
"0.7361647",
"0.7264995",
"0.72075474",
"0.70608485",
"0.67757905",
"0.6715832",
"0.6647645",
"0.65379536",
"0.6521966",
"0.6420024",
"0.6415774",
"0.6392043",
"0.63913375",
"0.6364326",
"0.6362654",
"0.63471985",
"0.63464963",
"0.6310603",
"0.62032443",
"0.6109473",
"0.61082715",
"0.6092638"
] | 0.93554413 | 0 |
Test case for import_system_asset | def test_import_system_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_import_software_asset(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_update_software_asset(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_update_software_asset_install_script(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_itar_restrict_asset(self):\n pass",
"def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)",
"def test_get_container_assets(self):\n pass",
"def test_image_import(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n\r\n # Use conditional_and_poll, as it's got an image already\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['conditional_and_poll'],\r\n static_content_store=content_store\r\n )\r\n\r\n course = module_store.get_courses()[0]\r\n\r\n # Make sure the course image is set to the right place\r\n self.assertEqual(course.course_image, 'images_course_image.jpg')\r\n\r\n # Ensure that the imported course image is present -- this shouldn't raise an exception\r\n asset_key = course.id.make_asset_key('asset', course.course_image)\r\n content_store.find(asset_key)",
"def test_itar_restrict_test_asset(self):\n pass",
"def test_file_asset(self):\n proto = struct_pb2.Struct()\n \n # pylint: disable=no-member\n subproto = proto.get_or_create_struct(\"asset\")\n subproto[rpc._special_sig_key] = rpc._special_asset_sig\n subproto[\"path\"] = \"foo.txt\"\n deserialized = rpc.deserialize_resource_props(proto)\n self.assertIsInstance(deserialized[\"asset\"], FileAsset)\n self.assertEqual(\"foo.txt\", deserialized[\"asset\"].path)",
"def test_delete_asset(self):\n pass",
"def test_get_imports(self):\n pass",
"def test_submit_asset_to_submission_service(self):\n pass",
"def test_add_asset_share_feed(self):\n pass",
"def test_delete_software_asset_bundle(self):\n pass"
] | [
"0.84282696",
"0.83869624",
"0.8303573",
"0.82705146",
"0.75017416",
"0.73494947",
"0.73215",
"0.72187483",
"0.693334",
"0.68903947",
"0.6575745",
"0.65537906",
"0.6538311",
"0.65132874",
"0.6505295",
"0.647619",
"0.64284027",
"0.6389249",
"0.62174743",
"0.6191067",
"0.617686",
"0.61765987",
"0.61157864",
"0.6089434",
"0.6082026",
"0.60045534",
"0.5966303",
"0.59427077",
"0.5918542",
"0.5885827"
] | 0.9337757 | 0 |
Test case for import_test_asset | def test_import_test_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_import_system_asset(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_delete_asset(self):\n pass",
"def startTest(asset):",
"def test_submit_asset_to_submission_service(self):\n pass",
"def test_itar_restrict_test_asset(self):\n pass",
"def test_get_imports(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_add_asset_share_feed(self):\n pass",
"def test_image_import(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n\r\n # Use conditional_and_poll, as it's got an image already\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['conditional_and_poll'],\r\n static_content_store=content_store\r\n )\r\n\r\n course = module_store.get_courses()[0]\r\n\r\n # Make sure the course image is set to the right place\r\n self.assertEqual(course.course_image, 'images_course_image.jpg')\r\n\r\n # Ensure that the imported course image is present -- this shouldn't raise an exception\r\n asset_key = course.id.make_asset_key('asset', course.course_image)\r\n content_store.find(asset_key)",
"def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)",
"def test_upload_file(self):\n pass",
"def test_imports():\n assert False",
"def test_add_category_to_asset(self):\n pass",
"def test_itar_restrict_asset(self):\n pass",
"def test_load_file(self):\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))",
"def test_update_asset_state(self):\n pass",
"def test_file_asset(self):\n proto = struct_pb2.Struct()\n \n # pylint: disable=no-member\n subproto = proto.get_or_create_struct(\"asset\")\n subproto[rpc._special_sig_key] = rpc._special_asset_sig\n subproto[\"path\"] = \"foo.txt\"\n deserialized = rpc.deserialize_resource_props(proto)\n self.assertIsInstance(deserialized[\"asset\"], FileAsset)\n self.assertEqual(\"foo.txt\", deserialized[\"asset\"].path)",
"def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()",
"def test_update_software_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def test_update_software_asset_install_script(self):\n pass"
] | [
"0.8419804",
"0.8334843",
"0.82471544",
"0.7692862",
"0.7465949",
"0.72720367",
"0.7267126",
"0.7191902",
"0.7053571",
"0.69645387",
"0.6893089",
"0.68716335",
"0.6628451",
"0.6540459",
"0.6508295",
"0.6494333",
"0.6476792",
"0.6475675",
"0.64689904",
"0.64682513",
"0.64274764",
"0.6382945",
"0.63693607",
"0.6352307",
"0.6331532",
"0.6319763",
"0.63194853",
"0.6267707",
"0.62539864",
"0.62099105"
] | 0.94771224 | 0 |
Test case for invalidate_template_cache_in_virtualization_realm | def test_invalidate_template_cache_in_virtualization_realm(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_refresh_error_update_cache(self):\n self.host_updater.refresh_cache()\n mock_method_path = ('dbtobindzone.updaters.host_updater'\n '.HostUpdater.temp_cache_file')\n patch = mock.patch(mock_method_path, new_callable=mock.PropertyMock)\n with patch as mock_method:\n mock_method.return_value = '/TMP/DIR/NOT/EXISTS'\n result = self.host_updater.refresh_cache()\n self.assertFalse(result)",
"def test_refresh_error_create_cache(self):\n mock_method_path = ('dbtobindzone.updaters.host_updater'\n '.HostUpdater.cache_file')\n patch = mock.patch(mock_method_path, new_callable=mock.PropertyMock)\n with patch as mock_method:\n mock_method.return_value = '/TMP/DIR/NOT/EXISTS'\n result = self.host_updater.refresh_cache()\n self.assertFalse(result)",
"def test_clear_cache():\n yvs.main()\n case.assertFalse(\n os.path.exists(yvs.cache.LOCAL_CACHE_DIR_PATH),\n 'local cache directory exists')",
"def test_unregister_template(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def testUnsuccessfulIncrement(self):\n\n cache = self.stub._cache\n self.stub._cache = {}\n\n memcache.incr('somekey')\n\n self.stub._cache = cache",
"def test_cachefile_timestamp(self):\n data = EngineTest.testdata['test_cachefile']\n filenames = { 'layout': 'layout.pyhtml',\n 'page': 'account_create.pyhtml',\n 'form': 'account_form.pyhtml',\n }\n expected = data['expected']\n context = { 'params': { } }\n cache_filenames = ['account_create.pyhtml.cache', 'account_form.pyhtml.cache']\n try:\n for key, filename in filenames.items():\n write_file(filename, data[key])\n props = { 'prefix': 'account_', 'postfix':'.pyhtml', 'layout':'layout.pyhtml', 'cache':True }\n ## create cache files and check them\n time.sleep(1)\n curr_time = time.time()\n engine = tenjin.Engine(**props)\n output = engine.render(':create', context)\n for fname in filenames.values():\n self.assertExists(fname) # file created?\n self.assertTrue(engine.get_template(fname).timestamp < curr_time)\n self.assertEquals(os.path.getmtime(fname), engine.get_template(fname).timestamp)\n ## save current cached object\n cached = {}\n for fname in filenames.values():\n cached[fname] = engine.get_template(fname)\n ## confirm that get_template() returns the same object\n for fname in filenames.values():\n self.assertEquals(id(engine.get_template(fname)), id(cached[fname]))\n ## change timestamp of templates to be old\n for fname in filenames.values():\n atime = mtime = os.path.getmtime(fname) - 10\n os.utime(fname, (atime, mtime))\n ## check whether new caches are created\n for fname in filenames.values():\n t = engine.get_template(fname)\n self.assertNotEqual(id(t), id(cached[fname]))\n self.assertEquals(os.path.getmtime(fname), t.timestamp)\n finally:\n _remove_files(filenames.values())",
"def test_vm_count():\n assert templates.vm_count() > 0",
"def test_cache_create(self):\n self.assertTrue(self.host_updater.refresh_cache())\n self.assertTrue(os.path.exists(self.host_updater.cache_file))",
"def test_delete_from_cache_removes_correctly():\n MEM_CACHE.clear()\n my_accessor = RallyAccessor('uname', 'pword', 'base_url')\n MEM_CACHE['cache_key']['cache_lookup'] = 'some_test_data'\n\n my_accessor.delete_from_cache('cache_key', 'cache_lookup')\n\n assert_equal(MEM_CACHE, {'cache_key': {}})",
"def test_local_cache():",
"def invalidate_cache(self):\n #self.objects.objects = []\n return True",
"def test_clear_cache_silent_fail():\n shutil.rmtree(yvs.cache.LOCAL_CACHE_DIR_PATH)\n yvs.main()\n case.assertFalse(\n os.path.exists(yvs.cache.LOCAL_CACHE_DIR_PATH),\n 'local cache directory exists')",
"def test_update_virt_realm(self):\n pass",
"def test_cache_without_data_change(self):\n self.assertTrue(self.host_updater.refresh_cache())\n\n self.assertFalse(self.host_updater.refresh_cache())",
"def test_timed_reset(self):\n time = 0.005\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n assert cache[1] == 1\n sleep(time / 2)\n assert 1 in cache\n assert cache[1] == 1\n cache[1] = 1\n sleep(time / 2)\n assert 1 in cache\n assert cache[1] == 1\n sleep(time / 2)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]",
"def test_clear_cache(self):\n api_helpers.clear_cache()",
"def test_update_template_registration(self):\n pass",
"def test_deallocate_virt_realm(self):\n pass",
"def test_render_not_cached(self, mock_from_string, mock_sha1):\n template = SnippetTemplateFactory(code='asdf')\n mock_cache = {}\n\n with patch('snippets.base.models.template_cache', mock_cache):\n result = template.render({})\n\n jinja_template = mock_from_string.return_value\n cache_key = mock_sha1.return_value.hexdigest.return_value\n eq_(mock_cache, {cache_key: jinja_template})\n\n mock_sha1.assert_called_with('asdf')\n mock_from_string.assert_called_with('asdf')\n jinja_template.render.assert_called_with({'snippet_id': 0})\n eq_(result, jinja_template.render.return_value)",
"def test_register_virtualization_realm(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_get_datasource_cache_miss(in_memory_runtime_context) -> None:\n context = in_memory_runtime_context\n\n name = \"my_fake_datasource_name\"\n\n # Initial GET will miss the cache, necessitating store retrieval\n with mock.patch(\n \"great_expectations.core.datasource_dict.DatasourceDict.__getitem__\"\n ) as mock_get:\n context.get_datasource(name)\n\n assert mock_get.called\n\n # Subsequent GET will retrieve from the cache\n with mock.patch(\n \"great_expectations.data_context.store.DatasourceStore.get\"\n ) as mock_get:\n context.get_datasource(name)\n\n assert not mock_get.called",
"def test_allocate_virtualization_realm(self):\n pass",
"def test_unshare_template_registration(self):\n pass",
"def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})",
"def test_sess_cache_no_internal(self):\n assert 0x300 == SESS_CACHE_NO_INTERNAL",
"def test_evicts_invalid_refresh_token():\n\n tenant_id = \"tenant-id\"\n client_id = \"client-id\"\n invalid_token = \"invalid-refresh-token\"\n\n cache = TokenCache()\n cache.add({\"response\": build_aad_response(uid=\"id1\", utid=\"tid1\", access_token=\"*\", refresh_token=invalid_token)})\n cache.add({\"response\": build_aad_response(uid=\"id2\", utid=\"tid2\", access_token=\"*\", refresh_token=\"...\")})\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 2\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 1\n\n def send(request, **_):\n assert request.data[\"refresh_token\"] == invalid_token\n return mock_response(json_payload={\"error\": \"invalid_grant\"}, status_code=400)\n\n transport = Mock(send=Mock(wraps=send))\n\n client = AadClient(tenant_id, client_id, transport=transport, cache=cache)\n with pytest.raises(ClientAuthenticationError):\n client.obtain_token_by_refresh_token(scopes=(\"scope\",), refresh_token=invalid_token)\n\n assert transport.send.call_count == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 0"
] | [
"0.6384349",
"0.63655883",
"0.5953301",
"0.5895541",
"0.58603024",
"0.58588606",
"0.58140755",
"0.58063567",
"0.5806046",
"0.58050084",
"0.57698876",
"0.57569075",
"0.5731549",
"0.57063264",
"0.5693664",
"0.56881267",
"0.5679725",
"0.5649483",
"0.56375843",
"0.5634418",
"0.5616755",
"0.56127256",
"0.5605901",
"0.5597761",
"0.5597537",
"0.5596019",
"0.5569957",
"0.5565311",
"0.5560078",
"0.5555085"
] | 0.94289184 | 0 |
Test case for itar_restrict_asset | def test_itar_restrict_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_itar_restrict_test_asset(self):\n pass",
"def test_itar_restrict_software_asset(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_delete_asset(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_update_software_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_unlocked_asset(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_unlocked)\r\n self.assertEqual(resp.status_code, 200) # pylint: disable=E1103\r",
"def corporate_action_restricted_assets(self) -> Tuple[str, ...]:\n return self.__corporate_action_restricted_assets",
"def test_change_asset_type_assignment_rule(self):\n pass",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_submit_asset_to_submission_service(self):\n pass",
"def _can_for_asset(self, func_name, asset_id):\n return self._can_for_object(func_name, asset_id, 'get_repository_ids_for_asset')",
"def test_withdraw_interactive_invalid_asset(client):\n response = client.get(f\"{WEBAPP_PATH}?transaction_id=2&asset_code=ETH\", follow=True)\n assert response.status_code == 400\n assert \"asset_code\" in response.content.decode()",
"def test_add_asset_type_assignment_rule(self):\n pass",
"def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()",
"def test_remove_category_from_asset(self):\n pass",
"def test_redeploy_container_asset(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass"
] | [
"0.91117144",
"0.8265128",
"0.64824677",
"0.6261831",
"0.6223426",
"0.621064",
"0.61959505",
"0.61202574",
"0.6117847",
"0.60020924",
"0.59734684",
"0.5955457",
"0.59489053",
"0.58895147",
"0.58890885",
"0.5869154",
"0.5827109",
"0.57565814",
"0.57228553",
"0.5651957",
"0.563143",
"0.55742824",
"0.5552215",
"0.5508741",
"0.55067265",
"0.54684913",
"0.54280335",
"0.5408731",
"0.54078794",
"0.5405171"
] | 0.9234051 | 0 |
Test case for itar_restrict_software_asset | def test_itar_restrict_software_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_itar_restrict_asset(self):\n pass",
"def test_itar_restrict_test_asset(self):\n pass",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_update_software_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_update_software_asset_impact_level(self):\n pass",
"def test_update_software_asset_install_script(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_delete_software_asset_bundle(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)",
"def test_get_software(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def corporate_action_restricted_assets(self) -> Tuple[str, ...]:\n return self.__corporate_action_restricted_assets",
"def test_withdraw_interactive_invalid_asset(client):\n response = client.get(f\"{WEBAPP_PATH}?transaction_id=2&asset_code=ETH\", follow=True)\n assert response.status_code == 400\n assert \"asset_code\" in response.content.decode()",
"def test_unlocked_asset(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_unlocked)\r\n self.assertEqual(resp.status_code, 200) # pylint: disable=E1103\r",
"def test_update_asset(self):\n pass",
"def test_forbidden_blocked_conanmanifest():\n server = TestServer()\n servers = {\"default\": server}\n client = TestClient(servers=servers, inputs=[\"admin\", \"password\"])\n client.save({\"conanfile.py\": GenConanfile()})\n client.run(\"create . --name=lib --version=1.0\")\n client.run(\"upload lib/1.0* -c -r default\")\n\n class DownloadForbidden(TestRequester):\n def get(self, url, **kwargs):\n if \"conanmanifest.txt\" in url:\n r = Response()\n r._content = \"Forbidden because of security!!!\"\n r.status_code = 403\n return r\n else:\n return super(DownloadForbidden, self).get(url, **kwargs)\n\n client = TestClient(servers=servers, inputs=[\"admin\", \"password\"],\n requester_class=DownloadForbidden)\n client.run(\"download lib/1.0 -r=default\", assert_error=True)\n assert \"Forbidden because of security!!!\" in client.out\n\n client.run(\"list *\")\n assert \"lib/1.0\" not in client.out\n\n client.run(\"install --requires=lib/1.0\", assert_error=True)\n assert \"Forbidden because of security!!!\" in client.out\n\n client.run(\"list *\")\n assert \"lib/1.0\" not in client.out",
"def test_get_software_set(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_get_software_bundles(self):\n pass",
"def test_update_test_asset(self):\n pass"
] | [
"0.8564181",
"0.8398488",
"0.7312294",
"0.721838",
"0.71797836",
"0.6980574",
"0.66412014",
"0.64739144",
"0.64240015",
"0.63762283",
"0.6343993",
"0.6152096",
"0.61467737",
"0.6104712",
"0.5911036",
"0.5910296",
"0.5883513",
"0.5831698",
"0.5788306",
"0.57407355",
"0.57155466",
"0.57025087",
"0.5641265",
"0.55866086",
"0.5566308",
"0.5528354",
"0.5525492",
"0.55068475",
"0.54870087",
"0.5484471"
] | 0.9329446 | 0 |
Test case for itar_restrict_test_asset | def test_itar_restrict_test_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_itar_restrict_asset(self):\n pass",
"def test_itar_restrict_software_asset(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_import_software_asset(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_retrieve_system_asset(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_delete_asset(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_update_software_asset(self):\n pass",
"def test_update_software_asset_content(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_unlocked_asset(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_unlocked)\r\n self.assertEqual(resp.status_code, 200) # pylint: disable=E1103\r",
"def test_submit_asset_to_submission_service(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_add_category_to_asset(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_change_asset_type_assignment_rule(self):\n pass",
"def test_update_test_asset_impact_level(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass",
"def test_list_dependent_assets(self):\n pass",
"def test_remove_category_from_asset(self):\n pass",
"def test_redeploy_container_asset(self):\n pass"
] | [
"0.92832786",
"0.8300537",
"0.73058987",
"0.69763637",
"0.6910737",
"0.6730777",
"0.6730673",
"0.6719473",
"0.67102104",
"0.6697047",
"0.6603677",
"0.65775687",
"0.64729184",
"0.6422961",
"0.63372934",
"0.630211",
"0.6252118",
"0.61820394",
"0.6061455",
"0.60574055",
"0.59907013",
"0.5905219",
"0.58019656",
"0.57816166",
"0.5775374",
"0.57744133",
"0.577224",
"0.57536995",
"0.57468957",
"0.57368654"
] | 0.94150704 | 0 |
Test case for launch_composition | def test_launch_composition(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_composition(self):",
"def test_get_composition(self):\n pass",
"def test_publish_scenario_to_composition(self):\n pass",
"def test_update_composition(self):\n pass",
"def test_list_compositions(self):\n pass",
"def launch(self):",
"def test_list_composition_status(self):\n pass",
"def test_delete_composition(self):\n pass",
"def test_launch_deployment(self):\n pass",
"def startTestRun(self):",
"def _run_launch(cls, params):\n assert cls.compose_file is not None, \"compose_file file must be set by subclass\"\n assert cls.testname is not None, \"testname file must be set by subclass\"\n\n test_path = dirname(realpath(sys.argv[0]))\n vmnet_path = dirname(vmnet.__file__) if hasattr(vmnet, '__file__') else vmnet.__path__._path[0]\n local_path = abspath(os.getenv('LOCAL_PATH', '.'))\n compose_path = '{}/compose_files/{}'.format(test_path, cls.compose_file)\n docker_dir_path = '{}/docker_dir'.format(test_path)\n launch_path = '{}/launch.py'.format(vmnet_path)\n if not hasattr(cls, 'docker_dir'): cls.docker_dir = docker_dir_path\n cls.launch_path = launch_path\n\n exc_str = 'python {} --compose_file {} --docker_dir {} --local_path {} {}'.format(\n launch_path,\n cls.compose_file if exists(cls.compose_file) else compose_path,\n cls.docker_dir if exists(cls.docker_dir) else docker_dir_path,\n cls.local_path if hasattr(cls, 'local_path') else local_path,\n params\n )\n os.system(exc_str)",
"def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)",
"def test_composing_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import compose\n tmpdir = tempfile.mkdtemp()\n try:\n # First make sure the simple pipeline can be compiled.\n simple_package_path = os.path.join(tmpdir, 'simple.tar.gz')\n compiler.Compiler().compile(compose.save_most_frequent_word, simple_package_path)\n\n # Then make sure the composed pipeline can be compiled and also compare with golden.\n compose_package_path = os.path.join(tmpdir, 'compose.tar.gz')\n compiler.Compiler().compile(compose.download_save_most_frequent_word, compose_package_path)\n with open(os.path.join(test_data_dir, 'compose.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(compose_package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)",
"def __init__(self, test_group):\n super().__init__(test_group=test_group, name='restart_test')\n\n self.add_step(\n SetupMesh(test_case=self, initial_condition='zero'))\n\n name = 'full_run'\n step = RunModel(test_case=self, name=name, subdir=name, ntasks=4,\n openmp_threads=1)\n # modify the namelist options and streams file\n step.add_namelist_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'namelist.full', out_name='namelist.landice')\n step.add_streams_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'streams.full', out_name='streams.landice')\n self.add_step(step)\n\n input_dir = name\n name = 'visualize_{}'.format(name)\n step = Visualize(test_case=self, name=name, subdir=name,\n input_dir=input_dir)\n self.add_step(step, run_by_default=False)\n\n name = 'restart_run'\n step = RunModel(test_case=self, name=name, subdir=name, ntasks=4,\n openmp_threads=1,\n suffixes=['landice', 'landice.rst'])\n\n # modify the namelist options and streams file\n step.add_namelist_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'namelist.restart', out_name='namelist.landice')\n step.add_streams_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'streams.restart', out_name='streams.landice')\n\n step.add_namelist_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'namelist.restart.rst', out_name='namelist.landice.rst')\n step.add_streams_file(\n 'compass.landice.tests.hydro_radial.restart_test',\n 'streams.restart.rst', out_name='streams.landice.rst')\n self.add_step(step)\n\n input_dir = name\n name = 'visualize_{}'.format(name)\n step = Visualize(test_case=self, name=name, subdir=name,\n input_dir=input_dir)\n self.add_step(step, run_by_default=False)",
"def test_spawn(lab):\n print('Test spawn')\n lab.spawn_missing()\n test_print(lab)\n print('Test completed')",
"def __main() :\n launchTests()",
"def runtest(self):",
"def test_create_run(self):\n pass",
"def test_run_started(self):",
"def test_composition_adds_to_100_percent(self):",
"def startTest(asset):",
"def test_window_loaded(self):",
"def ConstructStage(self):\n raise NotImplementedError(self, \"ConstructStage: Implement in your test\")",
"def test_all_components(self):\n model_name = 'BCZModel'\n pose_components = [\n ('xyz', 3, True, 100.),\n ('quaternion', 4, False, 10.),\n ('axis_angle', 3, True, 10.),\n ('arm_joints', 7, True, 1.),\n ('target_close', 1, False, 1.),\n ]\n gin.bind_parameter(\n 'BCZModel.action_components', pose_components)\n gin.parse_config('BCZPreprocessor.mock_subtask = True')\n gin.parse_config(\n 'resnet_film_network.film_generator_fn = @linear_film_generator')\n self._fixture.random_train(model, model_name)",
"def runTests(self):\n \n pass",
"def test_launch_minimal(self, capsys):\n UI.launch(**self.args)\n captured = capsys.readouterr().out\n assert \"Results written to OP_buildH.out\" in captured",
"def test_creator_complete(self):\n config = ShellConfig(script='''echo \"test:{{ env.foo }}-{{ model.foo }}\"''',\n title='test', model={'foo': 'model foo'}, env={'foo': 'env foo'})\n container = Container.creator({}, config)\n output = [line for line in container.process() if line.startswith(\"test:\")]\n assert_that(len(output), equal_to(1))\n assert_that(output[0], equal_to('test:env foo-model foo'))",
"def test_create_part(self):\n pass",
"def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass",
"def test_strategy(self):\n self.first_play_test(C)"
] | [
"0.77599436",
"0.7552297",
"0.7450174",
"0.7356642",
"0.6642157",
"0.6351946",
"0.63417995",
"0.6196432",
"0.61686575",
"0.60280544",
"0.5926443",
"0.5898173",
"0.58623946",
"0.5841978",
"0.5821273",
"0.57989544",
"0.575538",
"0.57242006",
"0.5717975",
"0.57054245",
"0.5660862",
"0.562768",
"0.56209654",
"0.5607611",
"0.5537699",
"0.5529691",
"0.54881614",
"0.5475761",
"0.54567826",
"0.543833"
] | 0.92985 | 0 |
Test case for launch_deployment | def test_launch_deployment(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_execute_deployment(self):\n pass",
"def test_create_deployment(self):\n pass",
"def test_get_deployment_run(self):\n pass",
"def test_publish_deployment_run(self):\n pass",
"def test_release_deployment_run(self):\n pass",
"def test_create_deployment_entire(self):\n pass",
"def test_relaunch_deployment_run(self):\n pass",
"def test_get_deployment_runs1(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_get_deployment_runs(self):\n pass",
"def test_retest_deployment_run(self):\n pass",
"def test_update_deployment(self):\n pass",
"def test_clone_deployment(self):\n pass",
"def test_delete_deployment_run(self):\n pass",
"def start_deployment(self):\n return",
"def deploy():",
"def test_deployment(self):\n config = {'example.com': {\n 'applications': {\n 'site-hybridcluster': {\n 'image': 'unknown',\n },\n 'mysql-hybridcluster': {\n 'image': 'unknown',\n }\n },\n 'version': 1\n }}\n expected = Deployment(nodes=frozenset([\n Node(hostname='example.com', applications=frozenset([\n Application(\n name='mysql-hybridcluster',\n image=DockerImage.from_string('unknown'),\n ports=frozenset(),\n ),\n Application(\n name='site-hybridcluster',\n image=DockerImage.from_string('unknown'),\n )]))]))\n self.assertEqual(expected,\n current_from_configuration(config))",
"def test_get_deployment_resources(self):\n pass",
"def test_get_deployment_resource(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def deploy(parameters):\n\n print(\"In deploy module\")",
"def test_delete_deployment(self):\n pass",
"def test_get_deployments(self):\n pass",
"def test_get_deployments(self):\n pass",
"def test_config_deploy_app(fail_deploy):\n signal = SignalActor.remote()\n\n @ray.remote\n def task():\n ray.get(signal.wait.remote())\n if fail_deploy:\n raise Exception(\"fail!\")\n\n object_ref = task.remote()\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.create_application_state(\"test_app\", object_ref)\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n signal.send.remote()\n time.sleep(2)\n if fail_deploy:\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n else:\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0)\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING",
"def test_download_deployment_run_test_report(self):\n pass",
"def test_create_namespaced_deployment_config(self):\n pass",
"def test_update_deployment_state(self):\n pass",
"def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()"
] | [
"0.8692073",
"0.8307779",
"0.82309496",
"0.8105909",
"0.80215615",
"0.80006105",
"0.7853609",
"0.78021574",
"0.77395755",
"0.77395755",
"0.75952774",
"0.7590269",
"0.7452514",
"0.7416822",
"0.7090711",
"0.7090074",
"0.6952404",
"0.69345313",
"0.6907092",
"0.6863611",
"0.67938274",
"0.6719619",
"0.6716019",
"0.66374916",
"0.66374916",
"0.6627709",
"0.66072583",
"0.65763617",
"0.6559486",
"0.654477"
] | 0.9352368 | 0 |
Test case for list_composition_status | def test_list_composition_status(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_compositions(self):\n pass",
"def test_get_composition(self):\n pass",
"def test_component_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def status(ABC) -> bool:",
"def check_status(self):",
"def test_update_composition(self):\n pass",
"def test_wait_for_dispatched_statuses_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(status_connector_name='fooconn')\n d = worker_helper.wait_for_dispatched_statuses(1)\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])",
"def test_wait_for_dispatched_statuses(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_statuses(1, 'fooconn')\n self.assertNoResult(d)\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])",
"def test_get_status(self):\n pass",
"def test_get_status(self):\n pass",
"def test_statuses_exist(self):\n\n assert hasattr(Partner, 'AVAILABLE')\n assert hasattr(Partner, 'NOT_AVAILABLE')\n assert hasattr(Partner, 'WAITLIST')\n\n assert hasattr(Partner, 'STATUS_CHOICES')\n\n assert len(Partner.STATUS_CHOICES) == 3\n\n database_statuses = [x[0] for x in Partner.STATUS_CHOICES]\n\n assert Partner.AVAILABLE in database_statuses\n assert Partner.NOT_AVAILABLE in database_statuses\n assert Partner.WAITLIST in database_statuses",
"def status(self):",
"def test_get_dispatched_statuses(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n dispatched = worker_helper.get_dispatched_statuses('fooconn')\n self.assertEqual(dispatched, [])\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg)\n dispatched = worker_helper.get_dispatched_statuses('fooconn')\n self.assertEqual(dispatched, [msg])",
"def test_launch_composition(self):\n pass",
"def violation_status(lista = list_violations):\n status = status_card()\n if status != True:\n lista.append(status)\n return lista",
"def getStatus():",
"def test_get_dispatched_statuses_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(status_connector_name='fooconn')\n dispatched = worker_helper.get_dispatched_statuses()\n self.assertEqual(dispatched, [])\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg)\n dispatched = worker_helper.get_dispatched_statuses()\n self.assertEqual(dispatched, [msg])",
"def test_status_code(self):\n assert self.list_response.status_code == 200",
"def test_composition(self):",
"def test_list_activity_occurrences(self):\n pass",
"def test_wait_for_dispatched_statuses_no_amount(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_statuses(\n connector_name='fooconn')\n self.assertEqual(self.successResultOf(d), [])\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n self._add_to_dispatched(worker_helper.broker, 'fooconn.status', msg)\n d = worker_helper.wait_for_dispatched_statuses(\n connector_name='fooconn')\n self.assertNoResult(d)\n yield worker_helper.kick_delivery()\n self.assertEqual(self.successResultOf(d), [msg])",
"def status(self):\n pass",
"def status(self):\n pass",
"def test_message_list():",
"def clone_list_status(self, clone_id=None):\n return self.request( \"clone-list-status\", {\n 'clone_id': [ clone_id, 'clone-id', [ CloneIdInfo, 'None' ], False ],\n }, {\n 'status': [ OpsInfo, True ],\n } )",
"def test_get_current_component_status_OK(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n 'repository/online/component/b': 'yes',\n 'repository/online/component/c': 'yes',\n 'repository/online/component/d': 'yes',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n print >> tmp, 'deb http://host:port/prefix/0.0/maintained/component/ c/arch/'\n print >> tmp, 'deb http://host:port/prefix/0.0/unmaintained/component/ d/arch/'\n tmp.flush()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_AVAILABLE, self.u.get_current_component_status('c'))\n self.assertEqual(UU.COMPONENT_AVAILABLE, self.u.get_current_component_status('d'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()",
"def _getCurrentComponentStatus(self):\n resOverall = self.sysAdminClient.getOverallStatus()\n if not resOverall['OK']:\n return resOverall\n currentStatus = {'Down': set(), 'Run': set(), 'All': set()}\n informationDict = resOverall['Value']\n for systemsDict in informationDict.values():\n for system, instancesDict in systemsDict.items():\n for instanceName, instanceInfoDict in instancesDict.items():\n identifier = '%s__%s' % (system, instanceName)\n runitStatus = instanceInfoDict.get('RunitStatus')\n if runitStatus in ('Run', 'Down'):\n currentStatus[runitStatus].add(identifier)\n\n currentStatus['All'] = currentStatus['Run'] | currentStatus['Down']\n return S_OK(currentStatus)",
"def test_verification_status_visible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_on('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_on('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_on('audit', 'You\\'re auditing this course')",
"def get_status(self):\n return [not colors.same_color(color, colors.to_rgba(\"none\"))\n for color in self._checks.get_facecolors()]",
"def verify_status_filter(self, status_list):\n is_verified = True\n self.click_element(self.multiselect_status_dropdown_locator)\n for item in status_list:\n dropdown_item_locator = (By.XPATH, \"//li[text()='%s']\" % item)\n if self.is_element_visible(dropdown_item_locator) is False:\n is_verified = False\n break\n self.script_executor(\"var elements = document.getElementsByClassName('k-list-container k-popup k-group k-reset multiselect'); for (var i = 0, len = elements.length; i < len; i++) { elements[i].style.display = 'none';}\")\n self.script_executor(\"var elements = document.getElementsByClassName('k-list k-reset'); for (var i = 0, len = elements.length; i < len; i++) { elements[i].setAttribute('aria-hidden', 'true');}\")\n return is_verified"
] | [
"0.7290643",
"0.6458045",
"0.6364622",
"0.6113339",
"0.605503",
"0.59509087",
"0.59468585",
"0.5934961",
"0.58839995",
"0.58839995",
"0.58361083",
"0.58053935",
"0.580344",
"0.5788129",
"0.57794875",
"0.57626873",
"0.56936795",
"0.5619055",
"0.5570947",
"0.5527876",
"0.54957736",
"0.54880995",
"0.54880995",
"0.54625356",
"0.5442983",
"0.54379606",
"0.5428703",
"0.54269975",
"0.5414447",
"0.5374746"
] | 0.9269822 | 0 |
Test case for list_compositions | def test_list_compositions(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_composition_status(self):\n pass",
"def test_composition(self):",
"def test_get_composition(self):\n pass",
"def test_list_group(self):\n pass",
"def test_list(self):\n pass",
"def test_list(self):\n pass",
"def test_update_composition(self):\n pass",
"def test_launch_composition(self):\n pass",
"def test_list_identity(self):\n pass",
"def test_component_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_list_facet_dictionaries(self):\n pass",
"def test_candidates_list(self):\n pass",
"def test_list_field():",
"def test_get_list(self):\n pass",
"def test_publish_scenario_to_composition(self):\n pass",
"def test_composition_adds_to_100_percent(self):",
"def test_delete_composition(self):\n pass",
"def test_list_namespaced_build(self):\n pass",
"def test_list_occurrences(self):\n pass",
"def test_get_collections(self):\n pass",
"def test_pre_order_list(self):\n _expected_list = [23, 5, 13, 57, 103]\n\n _output_list = []\n\n # Call pre_order_list to test\n pre_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _pre_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _pre_order_output",
"def test_cards_get_list(self):\n pass",
"def test_lists(self):\n # test the identity autogenerate comparisson against an empty list\n for i in range(10):\n rand = srt(random.sample(xrange(10000), 100))\n self.assertEqual(srt(autogenerate(rand, [])), rand)\n # test some known valued lists against eachother\n self.assertEqual(autogenerate([1,2,3,4], [2,3,4]), [1])\n # going the other way doesn't get us anything\n self.assertEqual(autogenerate([2,3,4], [1,2,3,4]), [])\n # test interpolated values\n self.assertEqual(autogenerate([1,2,3,4,5,6,7], [2,5,1,3]), [4,6,7])\n # establish what duplicates mean\n self.assertEqual(autogenerate([1,1,2,3,4,4,5], [1,2,3,4,5]), [])\n # test autogenerate comparissons on autogenerateosite types\n self.assertEqual(srt(autogenerate(\n [{'a': 1, 'b': 2}, {'c': 3}, ['foo', 'bar']],\n [{'c': 3}])),\n srt([{'a': 1, 'b': 2}, ['foo', 'bar']]),\n )\n self.assertEqual(srt(autogenerate(\n [{'a': 1, 'b': 2}, {'c': 3}, ['foo', 'bar']],\n [{'a': 1, 'b': 3}])),\n srt([{'a': 1, 'b': 2}, {'c': 3}, ['foo', 'bar']]),\n )",
"def test_plugin_with_list(project):\n project.add_mock_file(\"templates\", \"test.tmpl\",\n \"\"\"{% for item in items | std.key_sort(\"name\") %} {{ item.name }}\n{% endfor %}\"\"\")\n\n project.compile(\"\"\"\nimport std\nimport unittest\n\nentity Item:\n string name\nend\n\nimplement Item using std::none\n\nentity Collection:\n string content\nend\n\nimplementation makeContent for Collection:\n self.content = std::template(\"unittest/test.tmpl\")\nend\n\nimplement Collection using makeContent\n\nCollection.items [0:] -- Item.collection [0:]\n\nc1 = Collection()\n\nt1 = Item(name=\"t1\", collection=c1)\nt2 = Item(name=\"t2\", collection=c1)\nt3 = Item(name=\"t3\", collection=c1)\n \"\"\")",
"def test_listpattern(self):\n x = t.List(t.Exactly(\"x\"))\n self.assertEqual(writePython(x),\n dd(\"\"\"\n def _G_listpattern_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_listpattern_3, lastError = self.listpattern(_G_listpattern_1)\n self.considerError(lastError, None)\n _G_listpattern_3\n \"\"\"))",
"def testCreateFromIterable(self):\n self.assertEqual([\"c\",\"h\",\"e\",\"k\",\"a\"],list(\"cheka\"))",
"def test_container(self):\r\n\r\n fc = FlowgramCollection({'a': '1.0 0.0 0.0 1.0 1.0 1.2 1.2 0.8',\r\n 'b': '1.2 1.0 0.0 0.8 1.2 2.4 1.0 0.0'})\r\n\r\n f_container = FlowgramContainerFile(header)\r\n\r\n for f in fc:\r\n f_container.add(f)\r\n\r\n for f_obs, f_exp in zip(f_container, fc):\r\n self.assertEqual(str(f_obs), str(f_exp))\r\n\r\n # adding after iter started raises errror\r\n self.assertRaises(ValueError, f_container.add, f_obs)",
"def test_example(self):\n\n solution = Solution()\n\n nums = [1, 2, 3]\n\n expected_output = [\n (3,),\n (1,),\n (2,),\n (1, 2, 3),\n (1, 3),\n (2, 3),\n (1, 2),\n ()\n ]\n actual_output = solution.subsets(nums)\n\n for ss in expected_output:\n self.assertIn(ss, actual_output)",
"def test_first_level_composition(self):\n oe = expression.OperationalExpression\n v1, v2 = map(expression.Variable, [\"v1\", \"v2\"])\n data = [\n [(v1 + v2), oe('+', v1, v2)],\n [(v1 - v2), oe('-', v1, v2)],\n [(v1 / v2), oe('/', v1, v2)],\n [(v1 * v2), oe('*', v1, v2)],\n ]\n yield from self.generate_from_data(data)",
"def test_in_order_list(self):\n _expected_list = [5, 13, 23, 57, 103]\n _output_list = []\n \n # Call in_order_list to test\n in_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _sorted_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _sorted_output"
] | [
"0.7716325",
"0.7411501",
"0.7335917",
"0.66139853",
"0.6537345",
"0.6537345",
"0.6486396",
"0.6309443",
"0.6296264",
"0.6071394",
"0.60705644",
"0.60574293",
"0.5916543",
"0.5909159",
"0.58179885",
"0.5804485",
"0.57657564",
"0.5758317",
"0.5753746",
"0.56702685",
"0.5668996",
"0.565883",
"0.5649882",
"0.5636027",
"0.56116223",
"0.559075",
"0.55542904",
"0.55449826",
"0.5518928",
"0.55004984"
] | 0.93421084 | 0 |
Test case for list_dependent_assets | def test_list_dependent_assets(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_dependent_assets2(self):\n pass",
"def test_list_dependent_assets1(self):\n pass",
"def test_list_dependent_assets3(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_get_dependency_list(client, dependency):\n headers = {\"Accept\": \"application/json\"}\n response = client.open(\"/dependency\", method=\"GET\", headers=headers)\n assert response.status_code == 200\n assert len(response.json[\"dependencies\"]) == 1\n assert (\n response.json[\"dependencies\"][0][\"component_version_id\"] == dependency.component_version_id\n )\n assert (\n response.json[\"dependencies\"][0][\"dependency_version_id\"]\n == dependency.dependency_version_id\n )",
"def test_get_container_assets_expanded(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_08_transaction_assets_of_portfolio(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n t = Transaction.get_transaction_assets(p)\n self.assertTrue(isinstance(t, list),\n msg=\"Transaction is NOT returning a list of unique transaction assets\")\n print(\"Transaction get transaction assets is returning the following list: {}\".format(\n t,\n ))",
"def dependencies(self) -> List[Bundle]:\n return []",
"def test_retrieve_system_asset(self):\n pass",
"def test_aqua_function_for_multiple_ddos(aquarius_instance):\n assert aquarius_instance.list_assets()\n assert aquarius_instance.list_assets_ddo()",
"def test_collect_dashboard_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"dashboard\", \"Dummy_dashboard\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_dashboard\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Dashboards\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"async def test_dependencies(self):\n response = await self.collect(get_request_text=self.xml)\n expected_entities = [\n dict(\n key=\"12345\",\n url=\"https://owasp_dependency_check#l1_12345\",\n file_name=self.file_name,\n file_path=self.file_path,\n )\n ]\n self.assert_measurement(response, value=\"1\", entities=expected_entities)",
"def test_collect_scripts_depends_on_integration_with_items(\n self, dependency_integration_command, expected_result, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [dependency_integration_command],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result[0]\n assert found_items == expected_result[1]",
"def test_collect_scripts_depends_on_script_with_items(\n self, dependency_script, expected_pack, expected_items, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"docker_image\": \"demisto/python3:3.8.3.8715\",\n \"depends_on\": [dependency_script],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_pack\n assert found_items == expected_items",
"def test_delete_asset(self):\n pass",
"def test_collect_scripts_depends_on_with_two_inputs(self, module_repo):\n expected_result = {(\"Active_Directory_Query\", True), (\"Feedsslabusech\", True)}\n\n test_input = [\n {\n \"DummyScript1\": {\n \"name\": \"DummyScript1\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"sslbl-get-indicators\"],\n \"pack\": \"dummy_pack\",\n }\n },\n {\n \"DummyScript2\": {\n \"name\": \"DummyScript2\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"ad-get-user\"],\n \"pack\": \"dummy_pack\",\n }\n },\n ]\n\n found_result = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def test_index_dependencies(self):\n bar = create_library('bar')\n barf = create_library('barf')\n addon = create_addon('foo')\n addon.latest.dependency_add(bar.latest)\n addon.latest.dependency_add(barf.latest)\n es = self.es\n es.refresh()\n\n for lib in (bar, barf):\n r = es.search(query=FieldQuery(FieldParameter('dependencies',\n lib.id)))\n eq_(r['hits']['total'], 1)\n eq_(r['hits']['hits'][0]['_source']['name'], addon.name)\n return (addon, bar, barf)",
"def test_get_section_assets_json(self):\n json_data = self.view.get_section_assets_json(story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data), len(self.story.sections.all()))\n for section in self.story.sections.all():\n self.assertIn(section.section_id, data)\n self.assertEqual(len(data[section.section_id]['objects']),\n len(section.assets.all()))\n asset_ids = [sectionasset['asset']['asset_id'] for\n sectionasset in \n data[section.section_id]['objects']]\n for asset in section.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def resolve_asset_dependency(self):\n\n for node in self.asset.findall(\"./*[@file]\"):\n file = node.get(\"file\")\n abs_path = os.path.abspath(self.folder)\n abs_path = os.path.join(abs_path, file)\n node.set(\"file\", abs_path)",
"def holderDepend( self, holder ):\n for shader in self.shaders:\n # TODO: cache links...\n shader.holderDepend( holder )\n holder.depend( self, 'shaders' )\n return holder",
"def test_collect_report_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"report\", \"Dummy_report\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_report\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Reports\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_collect_indicator_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"Carbon_Black_Enterprise_Response\", False), (\"CommonScripts\", False)},\n {},\n )\n\n test_input = [\n {\n \"Dummy Indicator Type\": {\n \"name\": \"Dummy Indicator Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"integrations\": [\n \"abuse.ch SSL Blacklist Feed\",\n \"AbuseIPDB\",\n \"ActiveMQ\",\n ],\n \"scripts\": [\"AssignAnalystToIncident\", \"CBAlerts\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_indicators_types_dependencies(\n pack_indicators_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_import_system_asset(self):\n pass",
"def test_delete_system_asset(self):\n pass"
] | [
"0.9168786",
"0.90606445",
"0.89862716",
"0.70258105",
"0.6616876",
"0.6541793",
"0.6290414",
"0.61448324",
"0.6023611",
"0.59293574",
"0.5926046",
"0.5869146",
"0.57619244",
"0.5715066",
"0.5700429",
"0.5620767",
"0.55918443",
"0.55791515",
"0.5575974",
"0.5573987",
"0.5561031",
"0.5532845",
"0.5520835",
"0.5486458",
"0.5473283",
"0.54695904",
"0.5458278",
"0.54542005",
"0.54416966",
"0.54398316"
] | 0.9413303 | 0 |
Test case for list_dependent_assets1 | def test_list_dependent_assets1(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_dependent_assets2(self):\n pass",
"def test_list_dependent_assets(self):\n pass",
"def test_list_dependent_assets3(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_get_container_assets_expanded(self):\n pass",
"def test_get_dependency_list(client, dependency):\n headers = {\"Accept\": \"application/json\"}\n response = client.open(\"/dependency\", method=\"GET\", headers=headers)\n assert response.status_code == 200\n assert len(response.json[\"dependencies\"]) == 1\n assert (\n response.json[\"dependencies\"][0][\"component_version_id\"] == dependency.component_version_id\n )\n assert (\n response.json[\"dependencies\"][0][\"dependency_version_id\"]\n == dependency.dependency_version_id\n )",
"def test_collect_scripts_depends_on_with_two_inputs(self, module_repo):\n expected_result = {(\"Active_Directory_Query\", True), (\"Feedsslabusech\", True)}\n\n test_input = [\n {\n \"DummyScript1\": {\n \"name\": \"DummyScript1\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"sslbl-get-indicators\"],\n \"pack\": \"dummy_pack\",\n }\n },\n {\n \"DummyScript2\": {\n \"name\": \"DummyScript2\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"ad-get-user\"],\n \"pack\": \"dummy_pack\",\n }\n },\n ]\n\n found_result = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def test_aqua_function_for_multiple_ddos(aquarius_instance):\n assert aquarius_instance.list_assets()\n assert aquarius_instance.list_assets_ddo()",
"def test_08_transaction_assets_of_portfolio(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n t = Transaction.get_transaction_assets(p)\n self.assertTrue(isinstance(t, list),\n msg=\"Transaction is NOT returning a list of unique transaction assets\")\n print(\"Transaction get transaction assets is returning the following list: {}\".format(\n t,\n ))",
"def dependencies(self) -> List[Bundle]:\n return []",
"async def test_dependencies(self):\n response = await self.collect(get_request_text=self.xml)\n expected_entities = [\n dict(\n key=\"12345\",\n url=\"https://owasp_dependency_check#l1_12345\",\n file_name=self.file_name,\n file_path=self.file_path,\n )\n ]\n self.assert_measurement(response, value=\"1\", entities=expected_entities)",
"def test_collect_scripts_depends_on_script_with_items(\n self, dependency_script, expected_pack, expected_items, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"docker_image\": \"demisto/python3:3.8.3.8715\",\n \"depends_on\": [dependency_script],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_pack\n assert found_items == expected_items",
"def test_collect_indicator_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"Carbon_Black_Enterprise_Response\", False), (\"CommonScripts\", False)},\n {},\n )\n\n test_input = [\n {\n \"Dummy Indicator Type\": {\n \"name\": \"Dummy Indicator Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"integrations\": [\n \"abuse.ch SSL Blacklist Feed\",\n \"AbuseIPDB\",\n \"ActiveMQ\",\n ],\n \"scripts\": [\"AssignAnalystToIncident\", \"CBAlerts\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_indicators_types_dependencies(\n pack_indicators_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_collect_incident_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"AutoFocus\", True), (\"Volatility\", True)},\n {\n (\"incidenttype\", \"Dummy Incident Type\"): {\n \"AutoFocus\": [\n (\"playbook\", \"Autofocus Query Samples, Sessions and Tags\")\n ],\n \"Volatility\": [(\"script\", \"AnalyzeMemImage\")],\n }\n },\n )\n\n test_input = [\n {\n \"Dummy Incident Type\": {\n \"name\": \"Dummy Incident Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"playbooks\": \"Autofocus Query Samples, Sessions and Tags\",\n \"scripts\": \"AnalyzeMemImage\",\n }\n }\n ]\n\n found_result = PackDependencies._collect_incidents_types_dependencies(\n pack_incidents_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def holderDepend( self, holder ):\n for shader in self.shaders:\n # TODO: cache links...\n shader.holderDepend( holder )\n holder.depend( self, 'shaders' )\n return holder",
"def test_index_dependencies(self):\n bar = create_library('bar')\n barf = create_library('barf')\n addon = create_addon('foo')\n addon.latest.dependency_add(bar.latest)\n addon.latest.dependency_add(barf.latest)\n es = self.es\n es.refresh()\n\n for lib in (bar, barf):\n r = es.search(query=FieldQuery(FieldParameter('dependencies',\n lib.id)))\n eq_(r['hits']['total'], 1)\n eq_(r['hits']['hits'][0]['_source']['name'], addon.name)\n return (addon, bar, barf)",
"def test_collect_scripts_depends_on_integration_with_items(\n self, dependency_integration_command, expected_result, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [dependency_integration_command],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result[0]\n assert found_items == expected_result[1]",
"def test_collect_dashboard_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"dashboard\", \"Dummy_dashboard\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_dashboard\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Dashboards\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_get_test_asset(self):\n pass",
"def test_collect_scripts_depends_on_two_integrations(self, module_repo):\n expected_result = {(\"Active_Directory_Query\", True), (\"Feedsslabusech\", True)}\n\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [\"sslbl-get-indicators\", \"ad-get-user\"],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def test_retrieve_system_asset(self):\n pass",
"def test_collect_report_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"report\", \"Dummy_report\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_report\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Reports\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_ls():\n\n with pipeline.fixture(assets=[\"Asset1\"],\n subsets=[\"animRig\"],\n versions=1) as root:\n asset = next(pipeline.ls())\n\n reference = {\n \"schema\": \"pyblish-mindbender:asset-1.0\",\n \"name\": \"Asset1\",\n \"subsets\": [\n {\n \"schema\": \"pyblish-mindbender:subset-1.0\",\n \"name\": \"animRig\",\n \"versions\": [\n {\n \"schema\": \"pyblish-mindbender:version-1.0\",\n \"version\": 1,\n \"path\": os.path.join(\n root,\n \"Asset1\",\n \"publish\",\n \"animRig\",\n \"v001\"\n ),\n \"source\": os.path.join(\n \"{project}\",\n \"maya\",\n \"scenes\",\n \"scene.ma\"\n ),\n \"representations\": [\n {\n \"schema\": (\"pyblish-mindbender:\"\n \"representation-1.0\"),\n \"format\": \".ma\",\n \"path\": os.path.join(\n \"{dirname}\",\n \"Asset1{format}\"\n ),\n }\n ],\n \"time\": \"\",\n \"author\": \"mottosso\",\n },\n ]\n }\n ]\n }\n\n # Printed on error\n print(\"# Comparing result:\")\n print(json.dumps(asset, indent=4, sort_keys=True))\n print(\"# With reference:\")\n print(json.dumps(reference, indent=4, sort_keys=True))\n\n assert_equals(asset, reference)",
"def test_ls_returns_sorted_versions():\n with pipeline.fixture(assets=[\"Asset1\"], subsets=[\"animRig\"], versions=1):\n for asset in pipeline.ls():\n previous_version = 0\n for subset in asset[\"subsets\"]:\n for version in subset[\"versions\"]:\n version = version[\"version\"]\n assert version > previous_version\n previous_version = version",
"def test_get_software_asset_bundle_expanded(self):\n pass"
] | [
"0.92809683",
"0.92086357",
"0.9144166",
"0.69437504",
"0.6310067",
"0.6257704",
"0.6061398",
"0.60211796",
"0.58933574",
"0.5853871",
"0.5852153",
"0.58324456",
"0.572449",
"0.5680465",
"0.5669209",
"0.56603944",
"0.56360835",
"0.562374",
"0.5613121",
"0.5576192",
"0.5570292",
"0.55598414",
"0.55556893",
"0.55404735",
"0.55388623",
"0.5530641",
"0.5504881",
"0.5485109",
"0.5434276",
"0.5425044"
] | 0.93650377 | 0 |
Test case for list_dependent_assets2 | def test_list_dependent_assets2(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_dependent_assets1(self):\n pass",
"def test_list_dependent_assets(self):\n pass",
"def test_list_dependent_assets3(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_get_container_assets_expanded(self):\n pass",
"def test_get_dependency_list(client, dependency):\n headers = {\"Accept\": \"application/json\"}\n response = client.open(\"/dependency\", method=\"GET\", headers=headers)\n assert response.status_code == 200\n assert len(response.json[\"dependencies\"]) == 1\n assert (\n response.json[\"dependencies\"][0][\"component_version_id\"] == dependency.component_version_id\n )\n assert (\n response.json[\"dependencies\"][0][\"dependency_version_id\"]\n == dependency.dependency_version_id\n )",
"def test_retrieve_system_asset(self):\n pass",
"def test_collect_scripts_depends_on_with_two_inputs(self, module_repo):\n expected_result = {(\"Active_Directory_Query\", True), (\"Feedsslabusech\", True)}\n\n test_input = [\n {\n \"DummyScript1\": {\n \"name\": \"DummyScript1\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"sslbl-get-indicators\"],\n \"pack\": \"dummy_pack\",\n }\n },\n {\n \"DummyScript2\": {\n \"name\": \"DummyScript2\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"ad-get-user\"],\n \"pack\": \"dummy_pack\",\n }\n },\n ]\n\n found_result = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def dependencies(self) -> List[Bundle]:\n return []",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_get_test_asset(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def test_aqua_function_for_multiple_ddos(aquarius_instance):\n assert aquarius_instance.list_assets()\n assert aquarius_instance.list_assets_ddo()",
"def test_collect_scripts_depends_on_two_integrations(self, module_repo):\n expected_result = {(\"Active_Directory_Query\", True), (\"Feedsslabusech\", True)}\n\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [\"sslbl-get-indicators\", \"ad-get-user\"],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def test_08_transaction_assets_of_portfolio(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n t = Transaction.get_transaction_assets(p)\n self.assertTrue(isinstance(t, list),\n msg=\"Transaction is NOT returning a list of unique transaction assets\")\n print(\"Transaction get transaction assets is returning the following list: {}\".format(\n t,\n ))",
"def test_collect_dashboard_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"dashboard\", \"Dummy_dashboard\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_dashboard\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Dashboards\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"async def test_dependencies(self):\n response = await self.collect(get_request_text=self.xml)\n expected_entities = [\n dict(\n key=\"12345\",\n url=\"https://owasp_dependency_check#l1_12345\",\n file_name=self.file_name,\n file_path=self.file_path,\n )\n ]\n self.assert_measurement(response, value=\"1\", entities=expected_entities)",
"def test_index_dependencies(self):\n bar = create_library('bar')\n barf = create_library('barf')\n addon = create_addon('foo')\n addon.latest.dependency_add(bar.latest)\n addon.latest.dependency_add(barf.latest)\n es = self.es\n es.refresh()\n\n for lib in (bar, barf):\n r = es.search(query=FieldQuery(FieldParameter('dependencies',\n lib.id)))\n eq_(r['hits']['total'], 1)\n eq_(r['hits']['hits'][0]['_source']['name'], addon.name)\n return (addon, bar, barf)",
"def test_collect_scripts_depends_on_integration_with_items(\n self, dependency_integration_command, expected_result, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [dependency_integration_command],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result[0]\n assert found_items == expected_result[1]",
"def test_get_second_bundle(self):\n res = self.app.get('/bundle/other/libs')\n bundle = json.loads(res.body.decode('utf-8'))\n expected = self.stats2['chunks']['libs'][0]\n self.assertEqual(len(bundle), 1)\n self.assertEqual(bundle[0]['name'], expected['name'])\n self.assertEqual(bundle[0]['path'], expected['path'])\n self.assertTrue('url' in bundle[0])",
"def test_collect_indicator_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"Carbon_Black_Enterprise_Response\", False), (\"CommonScripts\", False)},\n {},\n )\n\n test_input = [\n {\n \"Dummy Indicator Type\": {\n \"name\": \"Dummy Indicator Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"integrations\": [\n \"abuse.ch SSL Blacklist Feed\",\n \"AbuseIPDB\",\n \"ActiveMQ\",\n ],\n \"scripts\": [\"AssignAnalystToIncident\", \"CBAlerts\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_indicators_types_dependencies(\n pack_indicators_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_collect_incident_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"AutoFocus\", True), (\"Volatility\", True)},\n {\n (\"incidenttype\", \"Dummy Incident Type\"): {\n \"AutoFocus\": [\n (\"playbook\", \"Autofocus Query Samples, Sessions and Tags\")\n ],\n \"Volatility\": [(\"script\", \"AnalyzeMemImage\")],\n }\n },\n )\n\n test_input = [\n {\n \"Dummy Incident Type\": {\n \"name\": \"Dummy Incident Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"playbooks\": \"Autofocus Query Samples, Sessions and Tags\",\n \"scripts\": \"AnalyzeMemImage\",\n }\n }\n ]\n\n found_result = PackDependencies._collect_incidents_types_dependencies(\n pack_incidents_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_collect_scripts_depends_on_script_with_items(\n self, dependency_script, expected_pack, expected_items, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"docker_image\": \"demisto/python3:3.8.3.8715\",\n \"depends_on\": [dependency_script],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_pack\n assert found_items == expected_items",
"def holderDepend( self, holder ):\n for shader in self.shaders:\n # TODO: cache links...\n shader.holderDepend( holder )\n holder.depend( self, 'shaders' )\n return holder",
"def test_import_system_asset(self):\n pass",
"def test_collect_report_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"CommonScripts\", True)},\n {\n (\"report\", \"Dummy_report\"): {\n \"CommonScripts\": [(\"script\", \"AssignAnalystToIncident\")]\n }\n },\n )\n\n test_input = [\n {\n \"Dummy_report\": {\n \"name\": \"Dummy Widget\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"scripts\": [\"AssignAnalystToIncident\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_widget_dependencies(\n pack_widgets=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n header=\"Reports\",\n get_dependent_items=True,\n )\n\n assert found_result == expected_result"
] | [
"0.91306466",
"0.910373",
"0.9006155",
"0.699086",
"0.6289074",
"0.62856466",
"0.6103313",
"0.60395",
"0.5887599",
"0.5751277",
"0.5713746",
"0.57121825",
"0.57046294",
"0.5666672",
"0.5635678",
"0.5617942",
"0.55732477",
"0.5525664",
"0.55182874",
"0.54923886",
"0.5491221",
"0.5478014",
"0.5472557",
"0.54663354",
"0.5442779",
"0.5424928",
"0.5417396",
"0.54054815",
"0.53986335",
"0.5385572"
] | 0.93180984 | 0 |
Test case for list_dependent_assets3 | def test_list_dependent_assets3(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_dependent_assets(self):\n pass",
"def test_list_dependent_assets2(self):\n pass",
"def test_list_dependent_assets1(self):\n pass",
"def test_list_system_assets(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_get_assets_json(self):\n self.assertEqual(self.story.assets.all().count(), 5)\n json_data = self.view.get_assets_json(\n story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data['objects']), len(self.story.assets.all()))\n asset_ids = [asset['asset_id'] for asset\n in data['objects']]\n for asset in self.story.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_08_transaction_assets_of_portfolio(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n t = Transaction.get_transaction_assets(p)\n self.assertTrue(isinstance(t, list),\n msg=\"Transaction is NOT returning a list of unique transaction assets\")\n print(\"Transaction get transaction assets is returning the following list: {}\".format(\n t,\n ))",
"def test_get_test_assets_expanded(self):\n pass",
"def test_get_dependency_list(client, dependency):\n headers = {\"Accept\": \"application/json\"}\n response = client.open(\"/dependency\", method=\"GET\", headers=headers)\n assert response.status_code == 200\n assert len(response.json[\"dependencies\"]) == 1\n assert (\n response.json[\"dependencies\"][0][\"component_version_id\"] == dependency.component_version_id\n )\n assert (\n response.json[\"dependencies\"][0][\"dependency_version_id\"]\n == dependency.dependency_version_id\n )",
"def test_get_container_assets_expanded(self):\n pass",
"def test_aqua_function_for_multiple_ddos(aquarius_instance):\n assert aquarius_instance.list_assets()\n assert aquarius_instance.list_assets_ddo()",
"def test_get_test_asset(self):\n pass",
"def test_collect_scripts_depends_on_script_with_items(\n self, dependency_script, expected_pack, expected_items, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"docker_image\": \"demisto/python3:3.8.3.8715\",\n \"depends_on\": [dependency_script],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_pack\n assert found_items == expected_items",
"def test_retrieve_system_asset(self):\n pass",
"def holderDepend( self, holder ):\n for shader in self.shaders:\n # TODO: cache links...\n shader.holderDepend( holder )\n holder.depend( self, 'shaders' )\n return holder",
"def test_collect_scripts_depends_on_with_two_inputs(self, module_repo):\n expected_result = {(\"Active_Directory_Query\", True), (\"Feedsslabusech\", True)}\n\n test_input = [\n {\n \"DummyScript1\": {\n \"name\": \"DummyScript1\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"sslbl-get-indicators\"],\n \"pack\": \"dummy_pack\",\n }\n },\n {\n \"DummyScript2\": {\n \"name\": \"DummyScript2\",\n \"file_path\": \"dummy_path1\",\n \"depends_on\": [\"ad-get-user\"],\n \"pack\": \"dummy_pack\",\n }\n },\n ]\n\n found_result = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)",
"def test_get_section_assets_json(self):\n json_data = self.view.get_section_assets_json(story=self.story)\n data = json.loads(json_data)\n self.assertEqual(len(data), len(self.story.sections.all()))\n for section in self.story.sections.all():\n self.assertIn(section.section_id, data)\n self.assertEqual(len(data[section.section_id]['objects']),\n len(section.assets.all()))\n asset_ids = [sectionasset['asset']['asset_id'] for\n sectionasset in \n data[section.section_id]['objects']]\n for asset in section.assets.all():\n self.assertIn(asset.asset_id, asset_ids)",
"def test_collect_scripts_depends_on_integration_with_items(\n self, dependency_integration_command, expected_result, module_repo\n ):\n test_input = [\n {\n \"DummyScript\": {\n \"name\": \"DummyScript\",\n \"file_path\": \"dummy_path\",\n \"depends_on\": [dependency_integration_command],\n \"pack\": \"dummy_pack\",\n }\n }\n ]\n\n found_result, found_items = PackDependencies._collect_scripts_dependencies(\n pack_scripts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result[0]\n assert found_items == expected_result[1]",
"def dependencies(self) -> List[Bundle]:\n return []",
"def test_index_dependencies(self):\n bar = create_library('bar')\n barf = create_library('barf')\n addon = create_addon('foo')\n addon.latest.dependency_add(bar.latest)\n addon.latest.dependency_add(barf.latest)\n es = self.es\n es.refresh()\n\n for lib in (bar, barf):\n r = es.search(query=FieldQuery(FieldParameter('dependencies',\n lib.id)))\n eq_(r['hits']['total'], 1)\n eq_(r['hits']['hits'][0]['_source']['name'], addon.name)\n return (addon, bar, barf)",
"def test_delete_asset(self):\n pass",
"def test_collect_incident_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"AutoFocus\", True), (\"Volatility\", True)},\n {\n (\"incidenttype\", \"Dummy Incident Type\"): {\n \"AutoFocus\": [\n (\"playbook\", \"Autofocus Query Samples, Sessions and Tags\")\n ],\n \"Volatility\": [(\"script\", \"AnalyzeMemImage\")],\n }\n },\n )\n\n test_input = [\n {\n \"Dummy Incident Type\": {\n \"name\": \"Dummy Incident Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"playbooks\": \"Autofocus Query Samples, Sessions and Tags\",\n \"scripts\": \"AnalyzeMemImage\",\n }\n }\n ]\n\n found_result = PackDependencies._collect_incidents_types_dependencies(\n pack_incidents_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_ls_returns_sorted_versions():\n with pipeline.fixture(assets=[\"Asset1\"], subsets=[\"animRig\"], versions=1):\n for asset in pipeline.ls():\n previous_version = 0\n for subset in asset[\"subsets\"]:\n for version in subset[\"versions\"]:\n version = version[\"version\"]\n assert version > previous_version\n previous_version = version",
"def resolve_asset_dependency(self):\n\n for node in self.asset.findall(\"./*[@file]\"):\n file = node.get(\"file\")\n abs_path = os.path.abspath(self.folder)\n abs_path = os.path.join(abs_path, file)\n node.set(\"file\", abs_path)",
"async def test_dependencies(self):\n response = await self.collect(get_request_text=self.xml)\n expected_entities = [\n dict(\n key=\"12345\",\n url=\"https://owasp_dependency_check#l1_12345\",\n file_name=self.file_name,\n file_path=self.file_path,\n )\n ]\n self.assert_measurement(response, value=\"1\", entities=expected_entities)",
"def test_collect_indicator_type_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"Carbon_Black_Enterprise_Response\", False), (\"CommonScripts\", False)},\n {},\n )\n\n test_input = [\n {\n \"Dummy Indicator Type\": {\n \"name\": \"Dummy Indicator Type\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"integrations\": [\n \"abuse.ch SSL Blacklist Feed\",\n \"AbuseIPDB\",\n \"ActiveMQ\",\n ],\n \"scripts\": [\"AssignAnalystToIncident\", \"CBAlerts\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_indicators_types_dependencies(\n pack_indicators_types=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result",
"def test_delete_system_asset(self):\n pass",
"def test_import_system_asset(self):\n pass"
] | [
"0.8951088",
"0.8932187",
"0.8925703",
"0.6693508",
"0.6299812",
"0.6152574",
"0.5939092",
"0.588485",
"0.5834068",
"0.5737396",
"0.55890346",
"0.5582852",
"0.5579139",
"0.5559609",
"0.5554732",
"0.54983944",
"0.5422076",
"0.5402317",
"0.53830427",
"0.5363248",
"0.5344633",
"0.53376585",
"0.533289",
"0.5325506",
"0.5315794",
"0.5293912",
"0.5287086",
"0.5282211",
"0.52696073",
"0.52541643"
] | 0.93428546 | 0 |
Test case for list_members | def test_list_members(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_members(self):\n pass",
"def get_members():",
"def getMembers():",
"def getMembers():",
"def getMembers():",
"def getMembers():",
"def test_get_members():\n client = Client\n members_res = get_members(client, {})\n assert members_res['Contents'] == [{'AccountId': 1, 'DetectorId': 1, 'MasterId': 1}]",
"def test_list_members_sort(self):\r\n resources = \"members\"\r\n cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def test_get_resource_group_member_list(self):\n pass",
"def test_members(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"user.add\", [self._user1])\n m = \"{},{}\".format(self._user, self._user1)\n ret = self.run_function(\"group.members\", [self._group, m])\n self.assertTrue(ret)\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertIn(self._user, str(group_info[\"members\"]))\n self.assertIn(self._user1, str(group_info[\"members\"]))",
"def test_get_members(session): # pylint:disable=unused-argument\n user = factory_user_model()\n org = OrgService.create_org(TestOrgInfo.org1, user.id)\n\n response = org.get_members()\n assert response\n assert len(response['members']) == 1\n assert response['members'][0]['membershipTypeCode'] == 'OWNER'",
"def test_get_member(self):\n test_resource = 'test_get_member'\n self.app.post(f'/v1/resource/{self.test_resource}/id/{test_resource}', headers=admin_headers)\n\n # make groups\n members = []\n for group in [f'rt_group{i}' for i in range(5)]:\n resp = self.app.post(\n f'/v1/group',\n data=json.dumps({'group_id': group}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n members.append({'member': group,\n 'member_type': 'group',\n 'access_level': 'read'})\n\n # make users\n for user in [f'rt_user{i}' for i in range(6)]:\n resp = self.app.post(\n f'/v1/user',\n data=json.dumps({'user_id': user}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n members.append({'member': user,\n 'member_type': 'user',\n 'access_level': 'read'})\n for m in members:\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{test_resource}/members',\n data=json.dumps([m]),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self._test_paging(f'/v1/resource/{self.test_resource}/id/{test_resource}/members', admin_headers, 10, 'members')",
"def getListOfMembers(self, *args):\n return _libsbml.Group_getListOfMembers(self, *args)",
"def test_list_members_with_mod_rights(self):\n url = '/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])",
"def test_get_members(mocker):\n client = MockedBoto3Client()\n get_members_mock = mocker.patch.object(MockedBoto3Client, 'get_members', side_effect=[GET_MEMBERS_RESPONSE])\n command_results = get_members(client, {})\n assert command_results.outputs == [{'AccountId': 1, 'DetectorId': 1, 'MasterId': 1}]\n assert get_members_mock.is_called_once()",
"def list_members(self):\n detector_id = self.list_detector()\n if detector_id:\n try:\n response = self.client.list_members(\n DetectorId=detector_id,\n OnlyAssociated='FALSE'\n )\n return response['Members']\n except ClientError as e:\n print(e.response['Error']['Code'])\n return False",
"def get(self, *args):\n return _libsbml.ListOfMembers_get(self, *args)",
"def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")",
"def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")",
"def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")",
"def listMembers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_group_member_access(self, group):\n assert len(group.members) == 2",
"def find_members(self, details):\n\n results = []\n\n for member in self.member:\n if details.match(member):\n results.append(member)\n\n return results",
"def get_members(self):\n return self._members",
"def ListMembers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def testMembership(self):\n self.assertEqual(\n {self.ref: [self.mr]},\n cdl_convert.MediaRef.members\n )",
"def members(self, items):\n pass",
"def getAllMembers(self):\n if not self.is_compatible(): return []\n return self.tool.listMembers()",
"def list_members(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('members', self.members_path, retrieve_all,\r\n **_params)",
"def test_list_members_with_owner_rights(self):\n url = '/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])"
] | [
"0.8765095",
"0.8256648",
"0.7839314",
"0.7839314",
"0.7839314",
"0.7839314",
"0.78150743",
"0.7450355",
"0.7230728",
"0.71831447",
"0.715223",
"0.7142639",
"0.7129268",
"0.7055926",
"0.7011348",
"0.69431925",
"0.6915091",
"0.6868672",
"0.6868672",
"0.6868672",
"0.6845843",
"0.676344",
"0.6759894",
"0.67536837",
"0.6745064",
"0.67091024",
"0.667064",
"0.66586727",
"0.6564549",
"0.65588033"
] | 0.9231387 | 0 |
Test case for list_options | def test_list_options(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list(self):\n parser = parse_args(['-g', '10', '-s', 'bubble', '-l'])\n self.assertTrue(parser.list)\n self.assertEqual(True, parser.list)\n\n parser = parse_args(['-g', '10', '-s', 'bubble'])\n self.assertEqual(False, parser.list)",
"def get_options(self):\r\n return self._option_values",
"def test_options(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '']\n main(None)\n self.assertEqual(len(wf._items), 2)\n self.assertEqual(wf._items[0].title, OPTIONS[0]['title'])\n self.assertEqual(wf._items[1].title, OPTIONS[1]['title'])\n wf._items = []",
"def get_options(self):\n return []",
"async def get_options(self):",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"def test_options_listed_in_build_options(self) -> None:\n testdir = os.path.join(self.unit_test_dir, '112 list build options')\n\n out = self.init(testdir)\n for line in out.splitlines():\n if line.startswith('Message: Build options:'):\n self.assertNotIn('-Dauto_features=auto', line)\n self.assertNotIn('-Doptional=auto', line)\n\n self.wipe()\n self.mac_ci_delay()\n\n out = self.init(testdir, extra_args=['-Dauto_features=disabled', '-Doptional=enabled'])\n for line in out.splitlines():\n if line.startswith('Message: Build options:'):\n self.assertIn('-Dauto_features=disabled', line)\n self.assertIn('-Doptional=enabled', line)\n\n self.setconf('-Doptional=disabled')\n out = self.build()\n for line in out.splitlines():\n if line.startswith('Message: Build options:'):\n self.assertIn('-Dauto_features=disabled', line)\n self.assertNotIn('-Doptional=enabled', line)\n self.assertIn('-Doptional=disabled', line)",
"def getOptionsNames(self) -> List[unicode]:\n ...",
"def create_options(self):\n return []",
"def initialize_options(self):",
"def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')",
"def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')",
"def list(self):\n return self._options",
"def test_arg_parser_list(self):\n args = self.parser.parse_args(['list'])\n self.assertEqual(args.command, 'list')",
"def getOptions(self, propertyListName: unicode) -> ghidra.framework.options.Options:\n ...",
"def OPTIONS_LOOP():\n pass",
"def list_opts():\n return _make_opt_list([OPTS], 'tvdb')",
"def initialize_options(self):\n pass",
"def parse_options(options, return_list=True):\n\n cmd_options = []\n\n for key, value in options.items():\n\n if value is not None:\n txt = f\"--{key} {value}\"\n else:\n txt = f\"--{key}\"\n\n cmd_options.append(txt)\n\n if return_list:\n return cmd_options\n\n cmd_options = \" \".join(cmd_options)\n\n return cmd_options",
"def test_array_option_empty_equivalents(self):\n def get_opt():\n opts = self.introspect('--buildoptions')\n for x in opts:\n if x.get('name') == 'list':\n return x\n raise Exception(opts)\n\n expected = {\n 'name': 'list',\n 'description': 'list',\n 'section': 'user',\n 'type': 'array',\n 'value': [],\n 'choices': ['foo', 'bar', 'oink', 'boink'],\n 'machine': 'any',\n }\n tdir = os.path.join(self.unit_test_dir, '19 array option')\n self.init(tdir, extra_args='-Dlist=')\n original = get_opt()\n self.assertDictEqual(original, expected)",
"def test_basic(self):\n options = counts_to_options(self.counts, 'fruit', 'Fruit')\n eq_(options['name'], 'fruit')\n eq_(options['display'], 'Fruit')\n\n eq_(options['options'][0], {\n 'name': 'bananas',\n 'display': 'bananas',\n 'value': 'bananas',\n 'count': 10,\n 'checked': False,\n })\n eq_(options['options'][1], {\n 'name': 'oranges',\n 'display': 'oranges',\n 'value': 'oranges',\n 'count': 6,\n 'checked': False,\n })\n eq_(options['options'][2], {\n 'name': 'apples',\n 'display': 'apples',\n 'value': 'apples',\n 'count': 5,\n 'checked': False,\n })",
"def get_options(self):\n\t\treturn self.options",
"def test_choose_from_list_6(self, ask_mock):\n ask_mock.side_effect = [False, False, False]\n field = basic.choose_from_list(self.options)\n self.assertIsNone(field)",
"def options(self, *args, **kwargs):\n self.request(\"options\", *args, **kwargs)",
"def list_opts():\n return [(None, copy.deepcopy(service_opts))]",
"def _options(self):\n return",
"def test_options_method(self):\n self.getPage('/', method='OPTIONS')\n self.assertStatus('204 No Content')\n self.assertHeader('Content-Type', 'application/json')",
"def test_choose_from_list_4(self, ask_mock):\n ask_mock.side_effect = [None]\n field = basic.choose_from_list(self.options)\n self.assertIsNone(field)",
"def test_post_options_admin(self):\n url = reverse('post-list')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.options(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('Post List', response.content)",
"def parse_options(self, options):\n pass"
] | [
"0.6750716",
"0.6740243",
"0.66668826",
"0.6650192",
"0.65552926",
"0.65385455",
"0.64712864",
"0.6464425",
"0.64616156",
"0.644398",
"0.6358037",
"0.6358037",
"0.6357471",
"0.6346873",
"0.63383377",
"0.63130623",
"0.62963915",
"0.6245664",
"0.6245303",
"0.61882854",
"0.61592543",
"0.61435455",
"0.61310697",
"0.61187345",
"0.6117189",
"0.6116769",
"0.61143315",
"0.6107027",
"0.6095322",
"0.60946506"
] | 0.9013061 | 0 |
Test case for list_pending_template_subscriptions | def test_list_pending_template_subscriptions(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_template_subscriptions(self):\n pass",
"def test_get_subscription_templates(self):\n pass",
"def test_get_template_subscription(self):\n pass",
"def test_get_subscription_template(self):\n pass",
"def test_get_subscriptions(self):\n pass",
"def test_update_template_subscription(self):\n pass",
"def test_create_subscription_template(self):\n pass",
"def test_process_subscriptions(self):\n pass",
"def test_issue_subscriptions(self):\n pass",
"def test_list_template_registrations(self):\n pass",
"def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_update_subscription_template(self):\n pass",
"def test_list_unregistered_templates(self):\n pass",
"def test_delete_template_subscription(self):\n pass",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_successful_subscriptions_list(self) -> None:\n result = self.api_get(self.test_user, \"/api/v1/users/me/subscriptions\")\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))",
"def test_successful_subscriptions_list_subscribers(self) -> None:\n result = self.api_get(\n self.test_user,\n \"/api/v1/users/me/subscriptions\",\n {\"include_subscribers\": \"true\"},\n )\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))",
"def test_get_pending_users(self):\n pass",
"def test_delete_subscription_template(self):\n pass",
"def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_get_subscription(self):\n pass",
"def fake_subscription_list(subscription_list):\n return fake_generic_listing(subscription_list, 'subscription')",
"def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass",
"def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'",
"def test_successful_subscriptions_exists_subbed(self) -> None:\n self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(self.streams[0], True, True)",
"def subscriber_pending(args):\n\n\tclass ActiveMqSubscriberPendingContext(np.ScalarContext):\n\t\tdef evaluate(self, metric, resource):\n\t\t\tif metric.value < 0:\n\t\t\t\treturn self.result_cls(np.Critical, metric=metric)\n\t\t\treturn super(ActiveMqSubscriberPendingContext, self).evaluate(metric, resource)\n\t\tdef describe(self, metric):\n\t\t\tif metric.value < 0:\n\t\t\t\treturn 'ERROR: ' + metric.name\n\t\t\treturn super(ActiveMqSubscriberPendingContext, self).describe(metric)\n\n\tclass ActiveMqSubscriberPending(np.Resource):\n\t\tdef probe(self):\n\t\t\ttry:\n\t\t\t\tresp = loadJson(query_url(args))\n\t\t\t\tsubs = (resp['value']['TopicSubscribers'] +\n\t\t\t\t resp['value']['InactiveDurableTopicSubscribers'])\n\t\t\t\tfor sub in subs:\n\t\t\t\t\tqJ = loadJson(make_url(args, sub['objectName']))['value']\n\t\t\t\t\tif not qJ['SubscriptionName'] == args.subscription:\n\t\t\t\t\t\tcontinue # skip subscriber\n\t\t\t\t\tif not qJ['ClientId'] == args.clientId:\n\t\t\t\t\t\t# When this if is entered, we have found the correct\n\t\t\t\t\t\t# subscription, but the clientId doesn't match\n\t\t\t\t\t\treturn np.Metric('ClientId error: Expected: %s. Got: %s'\n\t\t\t\t\t\t % (args.clientId, qJ['ClientId']),\n\t\t\t\t\t\t -1, context='subscriber_pending')\n\t\t\t\t\treturn np.Metric('Pending Messages for %s' % qJ['SubscriptionName'],\n\t\t\t\t\t qJ['PendingQueueSize'], min=0,\n\t\t\t\t\t context='subscriber_pending')\n\t\t\texcept IOError as e:\n\t\t\t\treturn np.Metric('Fetching network FAILED: ' + str(e), -1, context='subscriber_pending')\n\t\t\texcept ValueError as e:\n\t\t\t\treturn np.Metric('Decoding Json FAILED: ' + str(e), -1, context='subscriber_pending')\n\t\t\texcept KeyError as e:\n\t\t\t\treturn np.Metric('Getting Subscriber FAILED: ' + str(e), -1, context='subscriber_pending')\n\n\tnp.Check(\n\t\tActiveMqSubscriberPending(),\n\t\tActiveMqSubscriberPendingContext('subscriber_pending', args.warn, args.crit),\n\t).main(timeout=get_timeout())",
"def test_successful_subscriptions_exists_not_subbed(self) -> None:\n all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.test_realm)]\n streams_not_subbed = list(set(all_stream_names) - set(self.streams))\n self.assertNotEqual(len(streams_not_subbed), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(streams_not_subbed[0], True, False)",
"def test_admin_sms_template_view_list(self):\n response = self.client.get('/admin/sms_module/smstemplate/')\n self.failUnlessEqual(response.status_code, 200)",
"def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass"
] | [
"0.8475974",
"0.7563876",
"0.7555855",
"0.7079644",
"0.7052482",
"0.6835922",
"0.6707649",
"0.66719127",
"0.66385555",
"0.65055555",
"0.6479729",
"0.647205",
"0.64366",
"0.63448125",
"0.61716145",
"0.6155952",
"0.61558974",
"0.60644513",
"0.6040236",
"0.5936966",
"0.5936786",
"0.58464956",
"0.5813512",
"0.58131176",
"0.57899827",
"0.57814974",
"0.57522714",
"0.5749768",
"0.5715968",
"0.57092166"
] | 0.93702567 | 0 |
Test case for list_projects | def test_list_projects(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_project(self):\n pass",
"def test_get_projects(self):\n pass",
"def test_list_project_request(self):\n pass",
"def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)",
"def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)",
"def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)",
"def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name",
"def test_project_list(self):\n rv = self.app.get(\"/\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"Assignment1.0\", rv.data)\n self.assertIn(\"Assignment2.0\", rv.data)",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_get_projects_expanded(self):\n pass",
"def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def test_get_project(self):\n pass",
"def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())",
"def test_list_projects():\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=self._inventory\n )\n\n return_code = subprocess.call(\n [\n sys.executable,\n \"-u\",\n \"-m\",\n \"avalon.inventory\",\n \"--ls\"\n ],\n cwd=tempfile.mkdtemp(dir=self._tempdir)\n )\n\n assert 0 == return_code",
"def _ExpectListProjects(self, projects):\n self.mock_projects_client.projects.List.Expect(\n self.projects_messages.CloudresourcemanagerProjectsListRequest(\n filter='lifecycleState:ACTIVE'),\n self.projects_messages.ListProjectsResponse(\n projects=[\n self.projects_messages.Project(\n projectId=p, name='name') for p in projects]))",
"def list_projects(arn=None, nextToken=None):\n pass",
"def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)",
"def test_get_projects(client, session, models, tokens):\n response = client.get(\n \"/projects\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200\n assert len(response.json) > 0",
"def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def test_project_list_tags(self):\n # Add test project with tags.\n tags = ['tag1', 'tag2', 'tag3']\n add_project(title='1', description='1', tags=tags)\n\n # Check that project list contains each tag.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n for tag in tags:\n self.assertContains(response, tag)",
"def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)",
"def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)",
"def test_project_list_pagination(self):\n # Add enough projects so that pagination is required.\n # project_list should show 5 projects per page, so 15\n # projects will be split up over 3 pages.\n for i in range(15):\n add_project(title='{0}'.format(i), description='{0}'.format(i))\n\n url = reverse('portfolio:project_list')\n\n # Check buttons on first page.\n response = self.client.get(url)\n self.assertNotContains(response, 'Previous')\n self.assertContains(response, 'Next')\n\n # Check buttons on second page.\n response = self.client.get('{url}?page=2'.format(url=url))\n self.assertContains(response, 'Previous')\n self.assertContains(response, 'Next')\n\n # Check buttons on third page.\n response = self.client.get('{url}?page=3'.format(url=url))\n self.assertContains(response, 'Previous')\n self.assertNotContains(response, 'Next')",
"def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)",
"def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])",
"def test_read_project(self):\n pass",
"def test_read_project(self):\n pass",
"def test_projects_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_project_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project',\n json=expected_response,\n status=200\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/project')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/project'\n assert \"MY-PROJECT-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response",
"def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects"
] | [
"0.9042425",
"0.8778745",
"0.87161314",
"0.8480545",
"0.8213874",
"0.79792327",
"0.7973112",
"0.79351175",
"0.7776914",
"0.7674936",
"0.7659318",
"0.7621019",
"0.7614639",
"0.7605095",
"0.75506294",
"0.750285",
"0.73882",
"0.73737574",
"0.72951984",
"0.72737426",
"0.72640103",
"0.72562426",
"0.7246814",
"0.7234167",
"0.7178262",
"0.7173774",
"0.7161938",
"0.7161938",
"0.7129362",
"0.712571"
] | 0.92746294 | 1 |
Test case for list_properties | def test_list_properties(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_properties_get(self):\n pass",
"def get_properties():",
"def test_properties_count_get(self):\n pass",
"def test_properties_stats_get(self):\n pass",
"def getPropertiesAll():",
"def getProperties():",
"def test_get_objects_with_properties(self):\n expected_result = self.spec.get(\"test_get_objects_with_properties\")\n expected_type = expected_result.get(\"_type\")\n expected_datastore_list = []\n\n for each_datastore in expected_result.get(\"datastore_infos\"):\n datastore_name = each_datastore[\"name\"]\n expected_datastore_list.append(datastore_name)\n datastore_list = []\n \n object_content = self.session.invoke_api(vim_util, \n 'get_objects', \n self.vim, \n 'Datastore', \n 100, \n ['name'])\n for one_object in object_content.objects:\n self.assertEqual(one_object.obj._type, expected_type)\n if hasattr(one_object, 'propSet'):\n dynamic_properties = one_object.propSet\n prop_dict = {}\n for prop in dynamic_properties:\n if prop.name == \"name\":\n datastore_list.append(prop.val)\n \n for each_ds_name in datastore_list:\n self.assertTrue(each_ds_name in datastore_list)",
"def test_properties_distribution_get(self):\n pass",
"def test_properties_evolution_get(self):\n pass",
"def get_properties(self):\n return self.properties",
"def get_property_list(self,filtr):\n\n\n return self.dp.get_property_list(filtr)",
"def test_read_props(self):\n basic_test_runner(self, 'read_props')",
"def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)",
"def properties(self):\n raise NotImplementedError",
"def test_sm_list_full_properties(\n self, full_properties_kwargs, prop_names):\n\n # Add two faked storage groups\n faked_storage_group1 = self.add_storage_group1()\n faked_storage_group2 = self.add_storage_group2()\n\n exp_faked_storage_groups = [faked_storage_group1, faked_storage_group2]\n storage_group_mgr = self.console.storage_groups\n\n # Execute the code to be tested\n storage_groups = storage_group_mgr.list(**full_properties_kwargs)\n\n assert_resources(storage_groups, exp_faked_storage_groups, prop_names)",
"def test_get_property_matches(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n self.assertEqual(results.get_property_matches('GenProp0236'), None)\n self.assertEqual(len(results.get_property_matches('GenProp0232')), 9)\n self.assertEqual(len(results.get_property_matches('GenProp0232', top=True)), 2)\n self.assertEqual(len(results.get_property_matches('GenProp0232', sample='C_luteolum_DSM_273')), 4)",
"def test_venue_list_properties(self, actual):\n print(actual(self.venue_list_page))\n self.assertTrue(actual(self.venue_list_page))",
"def get_properties_for_a_collection_of_objects(vim, type,\r\n obj_list, properties):\r\n client_factory = vim.client.factory\r\n if len(obj_list) == 0:\r\n return []\r\n prop_spec = get_prop_spec(client_factory, type, properties)\r\n lst_obj_specs = []\r\n for obj in obj_list:\r\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\r\n prop_filter_spec = get_prop_filter_spec(client_factory,\r\n lst_obj_specs, [prop_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[prop_filter_spec])",
"def get_properties_code(self, obj):\n return []",
"def list_property(property, all_articles, sort, sort_by):\n contents = {}\n for title in all_articles:\n try:\n if sort_by:\n key = all_articles[title][sort_by] \n value = all_articles[title][property]\n contents[key] = value\n else:\n key = all_articles[title][property]\n value = None\n contents[key] = value\n except KeyError:\n if sort_by:\n print(f\"'{property}' or '{sort_by}' isn't a valid item to list.\")\n else:\n print(f\"'{property}' isn't a valid item to list.\")\n valid_properties = \", \".join(all_articles[title].keys())\n print(f\"Choices are: {valid_properties}\")\n return\n \n all_keys = contents.keys()\n\n # Sort in-place if applicable.\n if sort or sort_by:\n all_keys = sorted(all_keys)\n \n # Print the output.\n for item_key in all_keys:\n if sort_by:\n print(f\"{item_key}: {contents[item_key]}\")\n else:\n print(item_key)",
"def properties(self):",
"def properties(self):",
"def properties(self):",
"def testReadPropertyList(self):\n md1 = self.butler.get(\"raw_md\", visit=1, filter=\"g\")\n md2 = self.butler.get(\"rawMetadataDirect\", visit=1, filter=\"g\")\n self.assertEqual(md1, md2)",
"def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for obj in obj_list:\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\n prop_filter_spec = get_prop_filter_spec(client_factory,\n lst_obj_specs, [prop_spec])\n return retrieve_properties_ex(vim,\n vim.service_content.propertyCollector,\n [prop_filter_spec])",
"def readProperties(self):\r\n print('not yet implemented')",
"def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )",
"def test_get_object_properties(self):\n test_spec = self.spec.get(\"test_get_object_properties\")\n host_moref = vim_util.get_moref(test_spec.get(\"host_id\"), 'HostSystem')\n objects = self.session.invoke_api( vim_util, \n 'get_object_properties', \n self.vim, \n host_moref, \n [\"summary.hardware.numCpuCores\", \"summary.hardware.numCpuThreads\"]) \n self.assertIsNotNone(objects)\n expected_numCpuCores = test_spec.get(\"numCpuCores\")\n expected_numCpuThreads = test_spec.get(\"numCpuThreads\")\n numCpuCores = 0\n numCpuThreads = 0\n if hasattr(objects[0], 'propSet'):\n dynamic_properties = objects[0].propSet\n for prop in dynamic_properties:\n if prop.name == \"summary.hardware.numCpuCores\":\n numCpuCores = prop.val\n else:\n numCpuThreads = prop.val\n self.assertEqual(expected_numCpuCores, numCpuCores)\n self.assertEqual(expected_numCpuThreads, numCpuThreads)",
"def properties(self, filters={}):\n return self.__get_list_client(Property)(filters=filters)",
"def test_dev_props(name, properties):\n assert properties['x']\n assert properties['y']"
] | [
"0.81236464",
"0.77246433",
"0.7622766",
"0.74299735",
"0.7338716",
"0.7227491",
"0.7197647",
"0.71324134",
"0.69659305",
"0.6891058",
"0.6746788",
"0.6724933",
"0.6704888",
"0.66274834",
"0.6608135",
"0.65980446",
"0.65625614",
"0.6562381",
"0.6554081",
"0.65503466",
"0.65327126",
"0.65327126",
"0.65327126",
"0.65309393",
"0.6527306",
"0.6506041",
"0.64908886",
"0.6486134",
"0.6483831",
"0.648048"
] | 0.9160139 | 0 |
Test case for list_schedules | def test_list_schedules(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_schedules(session, logger):\n for sched in session.query(Schedule).all():\n logger.info(\"- {}\".format(sched))",
"def _create_schedules(self):\n\n ''''''",
"def list_schedules(self):\n with self.get(\"/v3/schedule/list\") as res:\n code, body = res.status, res.read()\n if code != 200:\n self.raise_error(\"List schedules failed\", res, body)\n js = self.checked_json(body, [\"schedules\"])\n\n return [schedule_to_tuple(m) for m in js[\"schedules\"]]",
"def test_cron_workflow_service_list_cron_workflows(self):\n pass",
"def list_schedules(connection, fields=None, error_msg=None):\n\n response = connection.get(\n url=f'{connection.base_url}/api/schedules', params={'fields': fields}\n )\n if response.ok:\n # Fix for incorrect 'eventId' (expecting 'id')\n event_based_in_list = False\n response_json = response.json()\n for schedule in response_json['schedules']:\n if 'event' in schedule:\n schedule['event']['id'] = schedule['event'].pop('eventId')\n event_based_in_list = True\n if event_based_in_list:\n response.encoding, response._content = 'utf-8', json.dumps(\n response_json\n ).encode('utf-8')\n\n return response",
"def getSchedules(self) :\n return self.schedules",
"def test_cron_workflow_service_list_cron_workflows2(self):\n pass",
"def get_schedules(self):\n return self.__schedules",
"def list(\n self,\n *,\n list_view_type: ScheduleListViewType = ScheduleListViewType.ENABLED_ONLY, # pylint: disable=unused-argument\n **kwargs,\n ) -> Iterable[Schedule]:\n\n def safe_from_rest_object(objs):\n result = []\n for obj in objs:\n try:\n result.append(Schedule._from_rest_object(obj))\n except Exception as e: # pylint: disable=broad-except\n print(f\"Translate {obj.name} to Schedule failed with: {e}\")\n return result\n\n return self.service_client.list(\n resource_group_name=self._operation_scope.resource_group_name,\n workspace_name=self._workspace_name,\n list_view_type=list_view_type,\n cls=safe_from_rest_object,\n **self._kwargs,\n **kwargs,\n )",
"def get_schedules(self) -> List[SwitcherV2Schedule]:\n return self._schedule_list",
"def test_get_schedule(self):\n response = self.client.open('/v1/schedule/{id}'.format(id=56),\n method='GET',\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_retrieve_instances_schedule_state(self):\n pass",
"def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")",
"def test_add_recurring_schedule(self):\n pass",
"def list_schedules(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"schedules\", \"id\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )",
"def mock_api_stage_success_get_schedules() -> List[bytes]:\n return create_standard_packets_list(DUMMY_GET_SCHEDULES_RESPONSE)",
"def found_schedules(self) -> bool:\n return self._schedule_list != []",
"def test_get_next_n_schedule(self):\n expected_list = [datetime.datetime(2021, 8, 7, 8, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 46, tzinfo=datetime.timezone.utc)]\n\n from_dt = datetime.datetime(2021, 8, 7, 8, 30, 57, tzinfo=datetime.timezone.utc)\n result = AWSCron.get_next_n_schedule(10, from_dt, '0/23 * * * ? *')\n self.assertEqual(str(expected_list), str(result))",
"def test_get_prev_n_schedule_1(self):\n expected_list = [datetime.datetime(2021, 8, 7, 11, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 8, 46, tzinfo=datetime.timezone.utc)]\n\n from_dt = datetime.datetime(2021, 8, 7, 11, 50, 57, tzinfo=datetime.timezone.utc)\n result = AWSCron.get_prev_n_schedule(10, from_dt, '0/23 * * * ? *')\n self.assertEqual(str(expected_list), str(result))",
"def get_schedules():\n path = config.get('schedule', 'paths', './schedule.json')\n with open(path) as schedule_file:\n return json.load(schedule_file)",
"def test_remove_recurring_schedule(self):\n pass",
"def get_schedule():\n startdate = '02/28/2020'\n enddate = '04/01/2020'\n return statsapi.schedule(start_date=startdate, end_date=enddate, team=134)",
"def get_schedules():\n return json.dumps(calendar.get_schedules())",
"def test_get_monitoring_schedules_vendor_v3(self):\n pass",
"def test_get_prev_n_schedule_2(self):\n expected_list = [datetime.datetime(2021, 8, 16, 8, 45, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 40, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 35, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 30, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 25, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 20, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 15, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 10, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 5, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 16, 8, 0, tzinfo=datetime.timezone.utc)]\n\n from_dt = datetime.datetime(2021, 8, 16, 8, 50, 57, tzinfo=datetime.timezone.utc)\n result = AWSCron.get_prev_n_schedule(10, from_dt, '0/5 8-17 ? * MON-FRI *')\n self.assertEqual(str(expected_list), str(result))",
"def test_get_all_schedule_bw_dates(self):\n expected_list = [datetime.datetime(2021, 8, 7, 8, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 9, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 23, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 10, 46, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 0, tzinfo=datetime.timezone.utc),\n datetime.datetime(2021, 8, 7, 11, 23, tzinfo=datetime.timezone.utc)]\n # datetime.datetime(year, month, day, hour, minute, second, tzinfo)\n from_dt = datetime.datetime(2021, 8, 7, 8, 30, 57, tzinfo=datetime.timezone.utc)\n to_date = datetime.datetime(2021, 8, 7, 11, 30, 57, tzinfo=datetime.timezone.utc)\n result = AWSCron.get_all_schedule_bw_dates(from_dt, to_date, '0/23 * * * ? *')\n self.assertEqual(str(expected_list), str(result))",
"def test_list_scheduled_payments(self):\n pass",
"def spm_schedules(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"spm_schedules\"), kwargs)",
"def test_list_runs(self):\n pass",
"def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )"
] | [
"0.7540668",
"0.7513023",
"0.7177156",
"0.7127198",
"0.7126025",
"0.7102762",
"0.7086109",
"0.69454896",
"0.69397473",
"0.6920113",
"0.6895051",
"0.68655485",
"0.68504727",
"0.6840357",
"0.682261",
"0.6779484",
"0.6776491",
"0.6756502",
"0.6745011",
"0.6710746",
"0.66829425",
"0.6669734",
"0.66595316",
"0.6650047",
"0.6628366",
"0.6569699",
"0.6557311",
"0.654884",
"0.6547032",
"0.6539218"
] | 0.93828064 | 0 |
Test case for list_submission_serivces_for_project | def test_list_submission_serivces_for_project(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_submission(self):\n # creating a submission\n sub_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n # getting it from the service\n get_response = get_submissions(self, self.token)\n response_data = json.loads(get_response.data.decode())\n self.assertTrue(response_data['data'][0]['text_count']==2)\n self.assertTrue(isinstance(response_data['data'][0]['texts'], list))",
"def test_get_submissions():\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each submission\n for x in threads:\n print(x.d_)",
"def test_add_submission_service_to_project(self):\n pass",
"def GetSubmissions(self, parameters):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/falconx-sandbox/GetSubmissions\n FULL_URL = self.base_url+'/falconx/entities/submissions/v1'\n HEADERS = self.headers\n PARAMS = parameters\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, params=PARAMS, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n\n return returned",
"def test_list_project_request(self):\n pass",
"def test_tests():\n submission = SubmissionBuilder(\"t\", \"b\", [\"anything\"]).build()\n assert submission.get(\"results\") == [\"anything\"], submission",
"def test_project_list(self):\n rv = self.app.get(\"/\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"Assignment1.0\", rv.data)\n self.assertIn(\"Assignment2.0\", rv.data)",
"def load_submissions(assignment: Assignment, submissions: List[Dict]) -> List[Submission]:\n logger.info(\"Creating %s submissions via Canvas API\", len(submissions))\n\n result: List[Submission] = []\n for submission in submissions:\n result.append(\n assignment.submit(submission)\n )\n\n logger.info(\"Successfully created %s submissions\", len(submissions))\n\n return result",
"def search_submissions():\n r = req('GET', SUB_API + 'search/submissions', params=handle_filters())\n submissions = []\n for submission in demisto.get(r.json(), 'data.items'):\n sample = sample_to_readable(demisto.get(submission, 'item'))\n sample['ID'] = demisto.get(submission, 'item.sample')\n sample['ThreatScore'] = demisto.get(submission, 'item.analysis.threat_score')\n submissions.append(sample)\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': submissions},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Submission Search', submissions,\n ['ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1',\n 'SHA256', 'SubmittedAt', 'ThreatScore']),\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })",
"def get_records_for_submitter_ids(self, sids, node):\n uuids = []\n pids = []\n count = 0\n for sid in sids:\n count += 1\n args = 'submitter_id:\"{}\"'.format(sid)\n res = self.paginate_query(node=node, args=args, props=[\"id\", \"submitter_id\",\"project_id\"])\n recs = res[\"data\"][node]\n if len(recs) == 1:\n uuids.append(recs[0][\"id\"])\n pids.append(recs[0][\"project_id\"])\n elif len(recs) == 0:\n print(\"No data returned for {}:\\n\\t{}\".format(sid, res))\n print(\"\\t{}/{}\".format(count, len(sids)))\n print(\n \"Finished retrieving {} uuids for {} submitter_ids\".format(\n len(uuids), len(sids)\n )\n )\n df = pd.DataFrame({'project_id':pids,'uuid':uuids,'submitter_id':sids})\n\n dfs = []\n for i in range(len(df)):\n sid = df.iloc[i]['submitter_id']\n pid = df.iloc[i]['project_id']\n uuid = df.iloc[i]['uuid']\n prog,proj = pid.split(\"-\",1)\n print(\"({}/{}): {}\".format(i+1,len(df),uuid))\n mydir = \"project_uuids/{}_tsvs\".format(pid) # create the directory to store TSVs\n if not os.path.exists(mydir):\n os.makedirs(mydir)\n filename = \"{}/{}_{}.tsv\".format(mydir,pid,uuid)\n if os.path.isfile(filename):\n print(\"File previously downloaded.\")\n else:\n self.sub.export_record(prog, proj, uuid, \"tsv\", filename)\n df1 = pd.read_csv(filename, sep=\"\\t\", header=0)\n dfs.append(df1)\n all_data = pd.concat(dfs, ignore_index=True)\n master = \"master_uuids_{}.tsv\".format(node)\n all_data.to_csv(\"{}\".format(master), sep='\\t',index=False)\n print(\"Master node TSV with {} total recs written to {}.\".format(len(all_data),master))\n return all_data",
"def buildsetSubmitted(buildset):",
"def submit(self):\n dispatcher.utter_template('utter_submit',tracker) \n\n return []",
"def test_remove_submission_service_from_project(self):\n pass",
"def test_create_submission(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Test making a submission for an unknown project\n self.assertRaises(RuntimeError, self.nodes[1].overlay.create_submission, 'a', 3, 'test')\n\n # Node 2 now makes a submission\n project = self.nodes[1].overlay.persistence.get_projects()[0]\n yield self.nodes[1].overlay.create_submission(project['public_key'].decode('hex'), project['id'], 'test')\n yield self.deliver_messages()\n\n # Node 1 should have received this submission and added it to the database\n submissions = self.nodes[0].overlay.persistence.get_submissions_for_project(project['public_key'].decode('hex'), project['id'])\n self.assertTrue(submissions)",
"def test_list_scheduled_payments_bulk(self):\n pass",
"def QuerySubmissions(self, parameters):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/falconx-sandbox/QuerySubmissions\n FULL_URL = self.base_url+'/falconx/queries/submissions/v1'\n HEADERS = self.headers\n PARAMS = parameters\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, params=PARAMS, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n\n return returned",
"async def condor_submit(*resource_jdls: JDL, executor: Executor) -> Iterable[str]:\n # verbose submit gives an ordered listing of class ads, such as\n # ** Proc 15556.0:\n # Args = \"150\"\n # ClusterId = 15556\n # ...\n # ProcId = 0\n # QDate = 1641289701\n # ...\n #\n # ** Proc 15556.1:\n # ...\n command = f\"condor_submit -verbose -maxjobs {len(resource_jdls)}\"\n response = await executor.run_command(\n command,\n stdin_input=_submit_description(resource_jdls),\n )\n return (\n SUBMIT_ID_PATTERN.search(line).group(1)\n for line in response.stdout.splitlines()\n if line.startswith(\"** Proc\")\n )",
"def test_list_project(self):\n pass",
"def get_submissions(submissions):\n results = []\n for entry in submissions:\n results.append({\n 'timestamp' : entry['timestamp'],\n 'code' : clean_code(entry['raw_text']),\n 'style_score' : entry['style_score'],\n 'cluster' : entry['cluster'],\n 'correct' : get_correct(entry),\n 'hints' : get_hints(entry)\n })\n sorted(results, key=lambda x : x['timestamp'])\n for entry in results:\n entry['timestamp'] = convert_timestamp(entry['timestamp'])\n return results",
"def get_submission(self, sub_id) -> Optional[List[TestsSubmissions]]:\n try:\n submission = self.session.query(TestsSubmissions).get(sub_id)\n return submission\n except Exception as excpt:\n self.session.rollback()\n print(f'Couldn\\'t get tests: {excpt}')\n return None",
"def test_get_comments_from_submission():\n # gets a test submission\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n submission_id = threads[0].d_['id']\n\n # prints link to thread\n thread_full_link = threads[0].d_['full_link']\n print(thread_full_link)\n\n # prints submission title\n thread_title = threads[0].d_['title']\n print(thread_title)\n\n submission = get_comments_from_submission(submission_id)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)",
"def canvas_api_submissions(state, course_id, assignment_id):\n\n api = state.canvas_api()\n for submission in api.list_submissions(course_id, assignment_id):\n click.echo(str(submission))",
"def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')",
"def delegate_submission(request):\n sub_id = request.POST.get('submission_id', str())\n\n if not sub_id:\n sub_id = request.GET.get('submission_id', str())\n\n # submission record id not received\n if not sub_id:\n context = dict(status='error', message='Submission ID not found! Please try resubmitting with a valid ID.')\n out = jsonpickle.encode(context, unpicklable=False)\n return HttpResponse(out, content_type='application/json')\n\n # get submission record and dependencies\n doc = Submission().get_collection_handle().aggregate(\n [\n {\"$addFields\": {\n \"destination_repo_converted\": {\n \"$convert\": {\n \"input\": \"$destination_repo\",\n \"to\": \"objectId\",\n \"onError\": 0\n }\n },\n }\n },\n {\n \"$lookup\":\n {\n \"from\": \"RepositoryCollection\",\n \"localField\": \"destination_repo_converted\",\n \"foreignField\": \"_id\",\n \"as\": \"repository_docs\"\n }\n },\n {\n \"$project\": {\n \"repository_docs.type\": 1,\n \"bundle\": 1\n }\n },\n {\n \"$match\": {\n \"_id\": ObjectId(str(sub_id)),\n }\n },\n {\"$sort\": {\"date_modified\": 1}}\n ])\n\n records = cursor_to_list(doc)\n\n # get submission record\n try:\n sub = records[0]\n except (IndexError, AttributeError) as error:\n context = dict(status='error', message='Submission record not found. Please try resubmitting.')\n out = jsonpickle.encode(context, unpicklable=False)\n return HttpResponse(out, content_type='application/json')\n\n # get repository type\n try:\n repo = sub['repository_docs'][0]['type']\n except (IndexError, AttributeError) as error:\n # destination repository record not found\n context = dict(status='error', message='Repository information not found. Please contact an administrator.')\n out = jsonpickle.encode(context, unpicklable=False)\n return HttpResponse(out, content_type='application/json')\n\n # Submit to Figshare\n if repo == 'figshare':\n\n # check figshare credentials\n if figshareSubmission.FigshareSubmit(sub_id).isValidCredentials(user_id=request.user.id):\n\n figshareSubmission.FigshareSubmit(sub_id).submit(\n sub_id=sub_id,\n dataFile_ids=sub['bundle'],\n\n )\n return HttpResponse(jsonpickle.dumps({'status': 1}))\n\n else:\n # forward to control view\n return HttpResponse(jsonpickle.dumps({'status': 1, 'url': reverse('copo:authenticate_figshare')}))\n\n # Submit to ENA Sequence reads\n elif repo in ['ena', 'ena-asm']:\n result = schedule_submission(submission_id=sub_id, submission_repo=repo)\n return HttpResponse(jsonpickle.encode(result, unpicklable=False), content_type='application/json')\n\n # Submit to Dataverse\n elif repo == 'dataverse':\n result = dataverseSubmission.DataverseSubmit(submission_id=sub_id).submit()\n return HttpResponse(jsonpickle.encode(result, unpicklable=False), content_type='application/json')\n\n # Submit to CKAN\n elif repo == 'ckan':\n result = ckanSubmission.CkanSubmit(submission_id=sub_id).submit()\n return HttpResponse(jsonpickle.encode(result, unpicklable=False), content_type='application/json')\n\n # Submit to dspace\n elif repo == 'dspace':\n result = dspaceSubmission.DspaceSubmit(submission_id=sub_id).submit()\n return HttpResponse(jsonpickle.encode(result, unpicklable=False), content_type='application/json')\n\n else:\n result = dict(status=False, message=\"Selected submission type not supported.\")\n return HttpResponse(jsonpickle.encode(result, unpicklable=False), content_type='application/json')",
"def test_list_runs(self):\n pass",
"def test_update_submission_service(self):\n pass",
"def get_number_of_submissions():\n\n start = time.time()\n print(\"counting submissions in\", TEST_SUBREDDIT, 'between', TEST_START_DATE, 'and', TEST_END_DATE)\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n end = time.time()\n print('time elapsed: ', end - start)\n print('total submissions:', len(threads))\n print(TEST_MAX)",
"def get_submissions(self, test_id) -> Optional[List[TestsSubmissions]]:\n try:\n submissions = self.session.query(TestsSubmissions).filter(TestsSubmissions.test_id == test_id).all()\n return submissions\n except Exception as excpt:\n self.session.rollback()\n print(f'Couldn\\'t get tests: {excpt}')\n return None",
"def test_list_scheduled_payments(self):\n pass",
"def submit_data(self):\n\n database = Database()\n project_data = []\n\n project_entries = [\"\",\n \"\",\n \"\",\n self.proj_date.get(),\n self.proj_descrpt.get(),\n self.proj_estdatest.get(),\n self.proj_estdateend.get(),\n self.proj_estbudget.get(),\n self.proj_actdatest.get(),\n self.proj_actdateend.get(),\n self.proj_actcost.get()]\n\n index = 0\n num_filled = 0\n for item in project_entries:\n if item == \"\":\n project_entries[index] = None\n else:\n num_filled += 1\n index += 1\n\n cus_name = self.customer_name.get()\n\n if num_filled == 0 and cus_name == \"\":\n ErrorMessageWindow(\"You have to fill in at least one argument!\")\n else:\n # If a customer name is provided.\n if cus_name != \"\":\n customer_data = database.query_customer(cus_name=cus_name)\n if customer_data:\n project_entries[1] = customer_data[0][0]\n project_data = self.multi_project(database.query_project(\n project_query_options=project_entries))\n else:\n ErrorMessageWindow(\"No customer with this name found.\")\n else:\n project_data = self.multi_project(database.query_project(\n project_query_options=project_entries))\n\n if project_data:\n schedule_data = database.query_project_tasks(\n project_data=project_data)\n customer_data = database.query_customer(project_data[0][1])\n\n region_data = database.query_region(\n region_id=customer_data[0][1])\n\n # Project schedule window definition.\n ps_window = tkinter.Tk()\n ps_window.wm_title(\"Project Schedule Display\")\n tkinter.Label(\n ps_window, text=\"Project Information:\"\n ).grid()\n\n # Display project information.\n tkinter.Label(\n ps_window,\n text=\"Project ID: {}\".format(project_data[0][0]),\n ).grid(\n pady=5, column=0, row=1\n )\n tkinter.Label(\n ps_window,\n text=\"Description: {}\".format(project_data[0][4]),\n ).grid(\n pady=5, column=1, row=1\n )\n tkinter.Label(\n ps_window,\n text=\"Company: {}\".format(customer_data[0][2]),\n ).grid(\n pady=5, column=0, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Contract Date: {}\".format(project_data[0][3]),\n ).grid(\n pady=5, column=1, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Region: {}\".format(region_data[0][1]),\n ).grid(\n pady=5, column=2, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Start Date: {}\".format(project_data[0][5]),\n ).grid(\n pady=5, column=0, row=3\n )\n tkinter.Label(\n ps_window,\n text=\"End Date: {}\".format(project_data[0][6]),\n ).grid(\n pady=5, column=1, row=3\n )\n tkinter.Label(\n ps_window,\n text=\"Budget: ${}\".format(project_data[0][7]),\n ).grid(\n pady=5, column=2, row=3\n )\n\n # Schedule table definition.\n p_s_view = tkinter.ttk.Treeview(ps_window)\n p_s_view.grid(pady=10, column=1, row=5)\n\n p_s_view[\"show\"] = \"headings\"\n p_s_view[\"columns\"] = (\n \"Start Date\", \"End Date\", \"Task Description\",\n \"Skill(s) Required\", \"Quantity Required\"\n )\n\n # Table column headings.\n for heading in p_s_view[\"columns\"]:\n p_s_view.heading(heading, text=heading)\n p_s_view.column(heading, width=250)\n\n # Load data into table.\n for item in schedule_data:\n p_s_view.insert('', 'end', values=item)\n else:\n ErrorMessageWindow(\"No project found with given info.\")"
] | [
"0.707717",
"0.68935823",
"0.62899876",
"0.60892254",
"0.60685056",
"0.6009752",
"0.5995614",
"0.59602994",
"0.59098274",
"0.5906248",
"0.58676773",
"0.58306396",
"0.582981",
"0.58278203",
"0.57896775",
"0.5784175",
"0.5716008",
"0.56766653",
"0.5671425",
"0.56594247",
"0.56565684",
"0.5642838",
"0.5640762",
"0.5602898",
"0.55994815",
"0.55956227",
"0.5594027",
"0.55931854",
"0.55819845",
"0.5571598"
] | 0.952252 | 0 |
Test case for list_system_assets | def test_list_system_assets(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_retrieve_system_asset(self):\n pass",
"def test_list_supported_assets(self):\n pass",
"def test_get_test_assets(self):\n pass",
"def test_import_system_asset(self):\n pass",
"def test_create_system_asset(self):\n pass",
"def test_list_dependent_assets(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_list_dependent_assets2(self):\n pass",
"def test_list_dependent_assets1(self):\n pass",
"def test_list_dependent_assets3(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_get_software_bundles(self):\n pass",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def test_get_test_assets_expanded(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_get_existing_archives(self):\n support = saltsupport.SaltSupportModule()\n out = support.archives()\n assert len(out) == 3\n for name in [\n \"/mnt/storage/one-support-000-000.bz2\",\n \"/mnt/storage/two-support-111-111.bz2\",\n \"/mnt/storage/000-support-000-000.bz2\",\n ]:\n assert name in out",
"def list_assets(request):\n user_assets = Asset.objects.filter(user=request.user, deleted=False).all()\n\n json_assets = ASSET_LIST_RESOURCE.to_json(dict(\n user_id=request.user.id,\n next_page_token=uuid.uuid4(),\n assets=user_assets\n ))\n request_format = request.GET.get('format', '')\n if request_format.lower() == 'json':\n return partial_json_response(request, json_assets)\n else:\n render_data = {'resource': json.dumps(json_assets)}\n render_data.update(csrf(request))\n return render('index.html', render_data)",
"def test_get_test_asset(self):\n pass",
"def test_get_container_assets_expanded(self):\n pass",
"def assets():",
"def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": r.content.decode('utf-8')}",
"def test_import_software_asset(self):\n pass",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]",
"def assets():\n pass",
"def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)",
"def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))",
"def listmyassets_filter_zeros(self):\n\n # activate assets\n self.nodes[0].generate(500)\n self.sync_all()\n\n assert_equal(0, len(self.nodes[0].listmyassets()))\n assert_equal(0, len(self.nodes[1].listmyassets()))\n\n self.nodes[0].issue(\"FOO\", 1000)\n self.nodes[0].generate(10)\n self.sync_all()\n\n result = self.nodes[0].listmyassets()\n assert_equal(2, len(result))\n assert_contains_pair(\"FOO\", 1000, result)\n assert_contains_pair(\"FOO!\", 1, result)\n\n address_to = self.nodes[1].getnewaddress()\n self.nodes[0].transfer(\"FOO\", 1000, address_to)\n self.nodes[0].generate(10)\n self.sync_all()\n\n result = self.nodes[0].listmyassets()\n assert_equal(1, len(result))\n assert_contains_pair(\"FOO!\", 1, result)\n\n result = self.nodes[1].listmyassets()\n assert_equal(1, len(result))\n assert_contains_pair(\"FOO\", 1000, result)",
"def test_get_systems(self):\n pass",
"def test_update_software_asset_bundle(self):\n pass"
] | [
"0.7584738",
"0.7546175",
"0.69923866",
"0.69573283",
"0.67813885",
"0.67451614",
"0.6666616",
"0.65938014",
"0.6425119",
"0.63921833",
"0.6334366",
"0.63051814",
"0.6295946",
"0.61617863",
"0.60506225",
"0.5946125",
"0.59092623",
"0.59016424",
"0.58683807",
"0.5856259",
"0.58432734",
"0.58144903",
"0.58059406",
"0.5750744",
"0.5748117",
"0.5726889",
"0.5692884",
"0.5644578",
"0.5605202",
"0.55865383"
] | 0.93133473 | 0 |
Test case for list_template_registrations | def test_list_template_registrations(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_retrieve_template_registration(self):\n pass",
"def test_list_unregistered_templates(self):\n pass",
"def test_update_template_registration(self):\n pass",
"def test_register_template(self):\n pass",
"def test_list_template_subscriptions(self):\n pass",
"def test_list_pending_template_subscriptions(self):\n pass",
"def test_share_template_registration(self):\n pass",
"def test_get_subscription_templates(self):\n pass",
"def test_get_template_subscription(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_get_device_templates(self):\n pass",
"def test_case_4(self):\n\n self.assertIsInstance(Task.list_template_types(), list)\n self.assertGreater(len(Task.list_template_types()), 1)",
"def test_unregister_template(self):\n pass",
"def list_templates(self):\n raise NotImplementedError()",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_list_template_for_all_namespaces(self):\n pass",
"def test_unshare_template_registration(self):\n pass",
"def test_get_subscription_template(self):\n pass",
"def test_admin_sms_template_view_list(self):\n response = self.client.get('/admin/sms_module/smstemplate/')\n self.failUnlessEqual(response.status_code, 200)",
"def test_template_name():\n for t in templates:\n assert len(t.name) > 0",
"def test_create_template_subsciption(self):\n pass",
"def templatelist(cls):\n return cls._templatelist",
"def test_get_all_as_superuser_returns_all_templates(self):\n mock_request = create_mock_request(user=self.superuser)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 3)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))",
"def test_list_namespaced_template(self):\n pass",
"def test_create_subscription_template(self):\n pass",
"def test_template_json():\n for l in list(templates.data):\n t = templates[l]\n assert len(json.dumps(t.json())) > 0",
"def test_register_page_is_rendered(self):\n url = \"/regiter/\"\n response = self.client.get('/register/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")",
"def test_update_template_subscription(self):\n pass",
"def test_get_activity_templates(self):\n pass"
] | [
"0.8089516",
"0.7384518",
"0.7283718",
"0.70872927",
"0.70326877",
"0.6846537",
"0.678066",
"0.66708505",
"0.6598199",
"0.6456517",
"0.64323646",
"0.63570863",
"0.62226224",
"0.62030244",
"0.6199485",
"0.6144212",
"0.6125929",
"0.6091586",
"0.6055469",
"0.60391337",
"0.6027273",
"0.60045826",
"0.60017085",
"0.5978107",
"0.59708494",
"0.59412605",
"0.5901226",
"0.5887644",
"0.5843916",
"0.58388186"
] | 0.94790584 | 0 |
Test case for list_template_subscriptions | def test_list_template_subscriptions(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_pending_template_subscriptions(self):\n pass",
"def test_get_template_subscription(self):\n pass",
"def test_get_subscription_templates(self):\n pass",
"def test_get_subscription_template(self):\n pass",
"def test_create_subscription_template(self):\n pass",
"def test_update_template_subscription(self):\n pass",
"def test_get_subscriptions(self):\n pass",
"def test_update_subscription_template(self):\n pass",
"def test_delete_template_subscription(self):\n pass",
"def test_list_template_registrations(self):\n pass",
"def test_delete_subscription_template(self):\n pass",
"def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_process_subscriptions(self):\n pass",
"def test_successful_subscriptions_list_subscribers(self) -> None:\n result = self.api_get(\n self.test_user,\n \"/api/v1/users/me/subscriptions\",\n {\"include_subscribers\": \"true\"},\n )\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_issue_subscriptions(self):\n pass",
"def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_successful_subscriptions_list(self) -> None:\n result = self.api_get(self.test_user, \"/api/v1/users/me/subscriptions\")\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))",
"def test_list_unregistered_templates(self):\n pass",
"def test_admin_sms_template_view_list(self):\n response = self.client.get('/admin/sms_module/smstemplate/')\n self.failUnlessEqual(response.status_code, 200)",
"def subscriptions(id='None'):\n\trows = mongo_data({}, [\"publisher_id\",\"dt_hour\", \"new_subs\"],\"subscribers\")\n\t#returns [{_id:...,field1:...,field2:...}]\n\n\n\tCOLS = [\"publisher_id\", \"dt_hour\", \"new subs\"]\n\tROWS = [[y[\"publisher_id\"],y[\"dt_hour\"],y[\"new_subs\"]] for y in rows]\n\n\tTITLE = 'SUBSCRIPTIONS'\n\n\treturn render_template(\"simple_tester_report.html\", cols=COLS, rows=ROWS, report_title=TITLE);",
"def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_get_subscription(self):\n pass",
"def fake_subscription_list(subscription_list):\n return fake_generic_listing(subscription_list, 'subscription')",
"def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'",
"def test_list_namespaced_template(self):\n pass",
"def test_list_template_for_all_namespaces(self):\n pass",
"def test_get_subscriptions_auth(self):\n url = reverse('xds_api:interest-list-subscriptions')\n _, token = AuthToken.objects.create(self.user_1)\n # subscribe user 1 to interest list 3\n self.list_3.subscribers.add(self.user_1)\n self.list_3.save()\n response = self.client \\\n .get(url, HTTP_AUTHORIZATION='Token {}'.format(token))\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(self.list_3.subscribers.all()),\n len(responseDict))",
"def test_create_subscription(self):\n pass",
"def test_retrieve_template_registration(self):\n pass"
] | [
"0.8628919",
"0.82787085",
"0.8199221",
"0.7790471",
"0.7419106",
"0.7410631",
"0.7367596",
"0.7059741",
"0.70037997",
"0.69278824",
"0.67292595",
"0.6713537",
"0.6691017",
"0.65685594",
"0.654975",
"0.6541098",
"0.64744055",
"0.64571506",
"0.6438153",
"0.6392939",
"0.63533276",
"0.6340384",
"0.633553",
"0.629318",
"0.62383986",
"0.6227965",
"0.6224101",
"0.61431533",
"0.61272395",
"0.60893106"
] | 0.91405636 | 0 |
Test case for list_unregistered_templates | def test_list_unregistered_templates(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unregister_template(self):\n pass",
"def test_list_template_registrations(self):\n pass",
"def test_unshare_template_registration(self):\n pass",
"def test_list_template_for_all_namespaces(self):\n pass",
"def test_list_pending_template_subscriptions(self):\n pass",
"def test_retrieve_template_registration(self):\n pass",
"def test_list_template_subscriptions(self):\n pass",
"def test_get_device_templates(self):\n pass",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_unregister_all_from_list(self, unregister):\n unregister_list = [Mock(), Mock()]\n\n event_bus.unregister_all(from_list=unregister_list)\n\n self.assertEqual(unregister.call_count, len(unregister_list))\n for args, _ in unregister.call_args_list:\n subscription = args[0]\n self.assertTrue(subscription in unregister_list)",
"def list_templates(self):\n raise NotImplementedError()",
"def test_list_namespaced_template(self):\n pass",
"def test_get_subscription_templates(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_register_template(self):\n pass",
"def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)",
"def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)",
"def test_update_template_registration(self):\n pass",
"def test_template_name():\n for t in templates:\n assert len(t.name) > 0",
"def test_share_template_registration(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def get_available_templates_list():\n page = import_page.ImportPage()\n page.open()\n return page.open_download_template_modal().available_templates_list",
"def ListTemplates(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_hook_unregister(self):\n self.assertEqual(list(self.registry), [])\n item = self.DummyItem(123)\n self.hook_cls(self.extension, item)\n\n self.extension.shutdown()\n self.assertEqual(list(self.registry), [])",
"def list_templates(context):\n templates = get_oneoffixx_templates()\n template_group = context.REQUEST.form.get('form.widgets.template_group')\n terms = []\n\n for template in templates:\n terms.append(SimpleVocabulary.createTerm(\n template, template.template_id, template.title))\n\n # We filter templates when template_group has been selected\n if template_group is not None:\n favorites = get_oneoffixx_favorites()\n # Favorites are a special case\n if favorites and template_group[0] == favorites.get('id'):\n terms = [\n SimpleVocabulary.createTerm(\n OneOffixxTemplate(\n template, favorites.get('localizedName', '')),\n template.get('id'),\n template.get('localizedName'),\n )\n for template in favorites.get('templates')\n ]\n elif template_group[0] != '--NOVALUE--':\n terms = [term for term in terms if term.value.group == template_group[0]]\n\n return MutableObjectVocabulary(terms)",
"def templatelist(cls):\n return cls._templatelist",
"def test_unregister_all_from_event(self, unregister):\n mock_event = Mock()\n mock_event_2 = Mock()\n bus = event_bus._event_bus\n unregister_list = [Mock(), Mock()]\n bus._subscriptions[type(mock_event_2)] = [Mock(), Mock(), Mock()]\n bus._subscriptions[type(mock_event)] = unregister_list\n for sub_type in bus._subscriptions.keys():\n for subscription in bus._subscriptions[sub_type]:\n subscription.event_type = sub_type\n bus._registration_id_map[id(subscription)] = subscription\n\n event_bus.unregister_all(from_event=type(mock_event))\n\n self.assertEqual(unregister.call_count, len(unregister_list))\n for args, _ in unregister.call_args_list:\n subscription = args[0]\n self.assertTrue(subscription in unregister_list)",
"def template_list(self):\n return self.ezx.get_template_list()",
"def test_get_all_as_superuser_returns_all_templates(self):\n mock_request = create_mock_request(user=self.superuser)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 3)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))",
"def get_template_names(self):\n if self.template_name is None:\n raise ImproperlyConfigured(\n \"TemplateResponseMixin requires either a definition of \"\n \"'template_name' or an implementation of 'get_template_names()'\")\n else:\n return [self.template_name]"
] | [
"0.75481784",
"0.73308456",
"0.69799423",
"0.63553554",
"0.6333266",
"0.62794816",
"0.6273035",
"0.6190113",
"0.6148708",
"0.6106866",
"0.60849756",
"0.6065083",
"0.60300964",
"0.6014723",
"0.5821083",
"0.57576245",
"0.57576245",
"0.5705154",
"0.5694319",
"0.5686872",
"0.5639922",
"0.5615339",
"0.5613794",
"0.5595491",
"0.55281407",
"0.55103904",
"0.55051386",
"0.55025953",
"0.5490836",
"0.54866695"
] | 0.945489 | 0 |
Test case for list_virt_realms_in_cloud | def test_list_virt_realms_in_cloud(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_virtualization_realms(self):\n pass",
"def test_get_project_virt_realms(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def init_cloud_virtual_resources():\n test_cldvirt_resources = []\n\n # add info to list in memory, one by one, following signature values\n cldvirtres_ID = 1\n cldvirtres_name = \"nova-compute-1\"\n cldvirtres_info = \"nova VM in Arm pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 2\n cldvirtres_name = \"nova-compute-2\"\n cldvirtres_info = \"nova VM in LaaS\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [2,3]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n cldvirtres_ID = 3\n cldvirtres_name = \"nova-compute-3\"\n cldvirtres_info = \"nova VM in x86 pod\"\n cldvirtres_IPAddress = \"50.60.70.80\"\n cldvirtres_URL = \"http://50.60.70.80:8080\"\n cldvirtres_related_phys_rsrcIDs = [1]\n\n test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,\n cldvirtres_info,\n cldvirtres_IPAddress,\n cldvirtres_URL,\n cldvirtres_related_phys_rsrcIDs))\n\n\n # write list to binary file\n write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)\n\n return test_cldvirt_resources",
"def test_get_cloud_resources(self):\n pass",
"def test_ipam_vrfs_list(self):\n pass",
"def test_get_all_virtualservices(self,setup_suite):\n _, resp = get('virtualservice')\n vs_obj_list = resp['results']\n for vs_obj in vs_obj_list:\n logger.info(\" >>> VS Name: %s <<<\" % vs_obj['name'])",
"def step_list(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console list \"\n \"--resource-group {resourceGroup} --virtual-machine-name {virtualMachineName}\",\n checks=checks,\n )",
"def list_machines(request):\n auth_context = auth_context_from_request(request)\n cloud_id = request.matchdict['cloud']\n # SEC get filtered resources based on auth_context\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner,\n id=cloud_id, deleted=None)\n except Cloud.DoesNotExist:\n raise NotFoundError('Cloud does not exist')\n\n machines = methods.filter_list_machines(auth_context, cloud_id)\n\n if cloud.machine_count != len(machines):\n try:\n tasks.update_machine_count.delay(\n auth_context.owner.id, cloud_id, len(machines))\n except Exception as e:\n log.error('Cannot update machine count for user %s: %r' %\n (auth_context.owner.id, e))\n\n return machines",
"def list_realms(self, realm=None):\n uri = self._uri_realm_creator(realm=realm, uri='realms?_queryFilter=true')\n data = self._get(uri=uri, headers=self.headers)\n if data.status_code == 200:\n return data.json()\n else:\n return False",
"def test_list_hosts(self):\n hosts = os_hosts._list_hosts(self.req)\n self.assertEqual(hosts, LIST_RESPONSE)\n\n cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')\n expected = [host for host in LIST_RESPONSE\n if host['service'] == 'cinder-volume']\n self.assertEqual(cinder_hosts, expected)",
"def test_get_cloud(self):\n pass",
"def test_get_virtual_account_clients(self):\n pass",
"def list(self, args):\n try:\n cloud = self._context.getCloudService()\n vdcs = cloud.listVirtualDatacenters()\n pprint_vdcs(vdcs)\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()",
"def test_enable_virt_realm_remote_access(self):\n pass",
"def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)",
"def test_show_vcs_resources(mgmt_session):\n vcs_resource = rift.vcs.vcs.VcsResource(mgmt_session)\n vcs_resource_info = None\n\n # Get vcs resources\n vcs_resource_info = vcs_resource.get_vcs_resource()\n\n # Verify there are VM entries in the vcs resource info container\n vms = [vm for vm in vcs_resource_info.vm]\n if len(vms) == 0:\n raise AssertionError(\"No entries found in vcs resource info\")",
"def list_objects(remote):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_ListObjects()\n remote.runCommand(cmd1)\n objects = mmapi.vectori()\n cmd1.GetSceneCommandResult_ListObjects(key1, objects)\n return vectori_to_list(objects)",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_get_virtual_accounts(self):\n pass",
"def test_register_virtualization_realm(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def list_distributed_cameras(ns_host=None, metadata=None):\n with get_running_nameserver() as name_server:\n camera_uris = name_server.yplookup(meta_all=metadata)\n camera_uris = {k: v[0] for k, v in camera_uris.items()}\n logger.debug(f\"Found {len(camera_uris)} cameras on name server.\")\n return camera_uris",
"def list_distributed_cameras(ns_host=None, metadata=None):\n with get_running_nameserver() as name_server:\n camera_uris = name_server.yplookup(meta_all=metadata)\n camera_uris = {k: v[0] for k, v in camera_uris.items()}\n logger.debug(f\"Found {len(camera_uris)} cameras on name server.\")\n return camera_uris",
"def list_vms(connection: str = None) -> list:\n with libvirt.open(connection) as conn:\n return conn.listAllDomains()",
"def test_aws_service_api_vms_get(self):\n pass"
] | [
"0.7485598",
"0.7401586",
"0.6892124",
"0.65846014",
"0.65820247",
"0.649466",
"0.63773483",
"0.6369368",
"0.6039481",
"0.5988828",
"0.5881167",
"0.5849201",
"0.5798573",
"0.5791442",
"0.57859766",
"0.57586366",
"0.5661851",
"0.5630986",
"0.5615009",
"0.5587836",
"0.55593014",
"0.553802",
"0.55370843",
"0.5515579",
"0.55127144",
"0.55103475",
"0.54895544",
"0.54895544",
"0.547787",
"0.5476147"
] | 0.9607347 | 0 |
Test case for list_virtualization_realm_templates | def test_list_virtualization_realm_templates(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_register_virtualization_realm(self):\n pass",
"def test_get_virtualization_realms(self):\n pass",
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_list_template_registrations(self):\n pass",
"def test_invalidate_template_cache_in_virtualization_realm(self):\n pass",
"def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_list_template_for_all_namespaces(self):\n pass",
"def test_list_namespaced_template(self):\n pass",
"def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_list_unregistered_templates(self):\n pass",
"def test_admin_sms_template_view_list(self):\n response = self.client.get('/admin/sms_module/smstemplate/')\n self.failUnlessEqual(response.status_code, 200)",
"def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids",
"def test_get_virtual_accounts(self):\n pass",
"def test_get_device_templates(self):\n pass",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def test_remove_virt_realm(self):\n pass",
"def list_templates(self):\n raise NotImplementedError()",
"def test_update_virt_realm(self):\n pass",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_ipam_vrfs_list(self):\n pass",
"def test_vault_get_all_vault_sections(self):\n pass",
"def test_get_project_virt_realms(self):\n pass"
] | [
"0.8621017",
"0.7722742",
"0.74632126",
"0.69660634",
"0.67916626",
"0.67195785",
"0.6663076",
"0.64468944",
"0.6398868",
"0.62883246",
"0.628213",
"0.6210438",
"0.61632943",
"0.61375594",
"0.61226946",
"0.6101096",
"0.60812944",
"0.60292757",
"0.59332967",
"0.591286",
"0.5905461",
"0.5875233",
"0.58594",
"0.5845774",
"0.5802666",
"0.5798125",
"0.5788302",
"0.57788795",
"0.5750947",
"0.57404673"
] | 0.94661987 | 0 |
Test case for perform_host_action | def test_perform_host_action(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def host_action(self, host, action):\n url = '/os-hosts/{0}/{1}'.format(host, action)\n return self._get(url, response_key='host')",
"def test_execute_host_maintenance(self):\n self.addCleanup(self.rollback_compute_nodes_status)\n instances = self._create_one_instance_per_host_with_statistic()\n hostname = instances[0].get('OS-EXT-SRV-ATTR:hypervisor_hostname')\n audit_parameters = {\"maintenance_node\": hostname}\n\n _, goal = self.client.show_goal(self.GOAL)\n _, strategy = self.client.show_strategy(\"host_maintenance\")\n _, audit_template = self.create_audit_template(\n goal['uuid'], strategy=strategy['uuid'])\n\n self.assertTrue(test_utils.call_until_true(\n func=functools.partial(\n self.has_action_plans_finished),\n duration=600,\n sleep_for=2\n ))\n\n _, audit = self.create_audit(\n audit_template['uuid'], parameters=audit_parameters)\n\n try:\n self.assertTrue(test_utils.call_until_true(\n func=functools.partial(\n self.has_audit_finished, audit['uuid']),\n duration=600,\n sleep_for=2\n ))\n except ValueError:\n self.fail(\"The audit has failed!\")\n\n _, finished_audit = self.client.show_audit(audit['uuid'])\n if finished_audit.get('state') in ('FAILED', 'CANCELLED'):\n self.fail(\"The audit ended in unexpected state: %s!\" %\n finished_audit.get('state'))\n\n _, action_plans = self.client.list_action_plans(\n audit_uuid=audit['uuid'])\n action_plan = action_plans['action_plans'][0]\n\n _, action_plan = self.client.show_action_plan(action_plan['uuid'])\n _, action_list = self.client.list_actions(\n action_plan_uuid=action_plan[\"uuid\"])\n\n if action_plan['state'] in ('SUPERSEDED', 'SUCCEEDED'):\n # This means the action plan is superseded so we cannot trigger it,\n # or it is empty.\n return\n for action in action_list['actions']:\n self.assertEqual('PENDING', action.get('state'))\n\n # Execute the action by changing its state to PENDING\n _, updated_ap = self.client.start_action_plan(action_plan['uuid'])\n\n self.assertTrue(test_utils.call_until_true(\n func=functools.partial(\n self.has_action_plan_finished, action_plan['uuid']),\n duration=600,\n sleep_for=2\n ))\n _, finished_ap = self.client.show_action_plan(action_plan['uuid'])\n _, action_list = self.client.list_actions(\n action_plan_uuid=finished_ap[\"uuid\"])\n self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))\n self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))\n\n for action in action_list['actions']:\n self.assertEqual('SUCCEEDED', action.get('state'))",
"async def execute_host(self):\n return True",
"def test_get_host_access(self):\n pass",
"def run_on_host(self, *args, **kwargs) -> Any:\n raise NotImplementedError",
"def action(self, action):\n\n if action not in self.VALID_ACTIONS:\n raise ValueError('Invalid action: {0}'.format(action))\n\n data = self.request_params\n data.update({\n 'action': action,\n 'status': 'true'\n })\n\n response = requests.post(\n self.vendor.endpoint + self._ENDPOINT, data=data)\n if response.status_code != requests.codes.ok or \\\n '<status>success</status>' not in response.text:\n raise RuntimeError(\n 'Unable to {0} host: {1}'.format(action, response.text))",
"def execute_action(self, agent, action):\n abstract",
"def device_action(host, details, action):\n if details:\n pprint(cs.get_device_details(host))\n if action:\n pprint(cs.device_action(host, action))",
"def test_execute_deployment(self):\n pass",
"def host_power_action(self, host, action):\n return action",
"def host_power_action(self, host, action):\n return action",
"def perform_action(self, action_data):\n pass",
"def execute_action(self, agent, action):\n raise NotImplementedError",
"def execute_action(self, agent, action):\n raise NotImplementedError",
"async def perform_action(self) -> None:",
"def test_get_host(self):\n pass",
"def _do_action(self):\n pass",
"def _do_action(self):\n pass",
"def test_swact_fails_when_host_query_fails(self):\n\n # mock the get_host query is empty and raises an exception\n self.sysinv_client.get_host.side_effect = \\\n Exception(\"Unable to find host controller-0\")\n\n # invoke the strategy state operation on the orch thread\n self.worker.perform_state_action(self.strategy_step)\n\n # verify the swact command was never attempted\n self.sysinv_client.swact_host.assert_not_called()\n\n # verify that the state moves to the next state\n self.assert_step_updated(self.strategy_step.subcloud_id,\n consts.STRATEGY_STATE_FAILED)",
"def test_process_host_commands(self):\n\n command = [\"df\", \"-h\"]\n output = run(verification.process_host_commands(command))\n self.assertTrue(\"```\\nThat command is not available.```\" not in output)\n\n command = [\"ls\", \"-la\"]\n output = run(verification.process_host_commands(command))\n self.assertEqual(\"```\\nThat command is not available.```\", output)",
"def post(self, request, *args, **kwargs): # NOQA\n\n stack = self.get_object()\n\n if stack.status not in models.Stack.SAFE_STATES:\n raise BadRequest('You may not perform an action while the '\n 'stack is in its current state.')\n\n driver_hosts_map = stack.get_driver_hosts_map()\n total_host_count = len(stack.get_hosts().exclude(instance_id=''))\n action = request.DATA.get('action', None)\n args = request.DATA.get('args', [])\n\n if not action:\n raise BadRequest('action is a required parameter.')\n\n # check the individual provider for available actions\n for driver, hosts in driver_hosts_map.iteritems():\n available_actions = driver.get_available_actions()\n if action not in available_actions:\n raise BadRequest('At least one of the hosts in this stack '\n 'does not support the requested action.')\n\n # All actions other than launch require hosts to be available\n if action != BaseCloudProvider.ACTION_LAUNCH and total_host_count == 0:\n raise BadRequest('The submitted action requires the stack to have '\n 'available hosts. Perhaps you meant to run the '\n 'launch action instead.')\n\n # Hosts may be spread accross different providers, so we need to\n # handle them differently based on the provider and its implementation\n driver_hosts_map = stack.get_driver_hosts_map()\n for driver, hosts in driver_hosts_map.iteritems():\n\n # check the action against current states (e.g., starting can't\n # happen unless the hosts are in the stopped state.)\n # XXX: Assuming that host metadata is accurate here\n for host in hosts:\n if action == driver.ACTION_START and \\\n host.state != driver.STATE_STOPPED:\n raise BadRequest('Start action requires all hosts to be '\n 'in the stopped state first. At least '\n 'one host is reporting an invalid state: '\n '{0}'.format(host.state))\n if action == driver.ACTION_STOP and \\\n host.state != driver.STATE_RUNNING:\n raise BadRequest('Stop action requires all hosts to be in '\n 'the running state first. At least one '\n 'host is reporting an invalid state: '\n '{0}'.format(host.state))\n if action == driver.ACTION_TERMINATE and \\\n host.state not in (driver.STATE_RUNNING,\n driver.STATE_STOPPED):\n raise BadRequest('Terminate action requires all hosts to '\n 'be in the either the running or stopped '\n 'state first. At least one host is '\n 'reporting an invalid state: {0}'\n .format(host.state))\n if (\n action == driver.ACTION_PROVISION or\n action == driver.ACTION_ORCHESTRATE or\n action == driver.ACTION_CUSTOM\n ) and host.state not in (driver.STATE_RUNNING,):\n raise BadRequest(\n 'Provisioning actions require all hosts to be in the '\n 'running state first. 
At least one host is reporting '\n 'an invalid state: {0}'.format(host.state))\n\n # Kick off the celery task for the given action\n stack.set_status(models.Stack.EXECUTING_ACTION,\n models.Stack.PENDING,\n 'Stack is executing action \\'{0}\\''.format(action))\n\n if action == BaseCloudProvider.ACTION_CUSTOM:\n\n task_list = []\n\n action_ids = []\n\n for command in args:\n action = models.StackAction(stack=stack)\n action.host_target = command['host_target']\n action.command = command['command']\n action.type = BaseCloudProvider.ACTION_CUSTOM\n action.start = datetime.now()\n action.save()\n\n action_ids.append(action.id)\n\n task_list.append(tasks.custom_action.si(\n action.id,\n command['host_target'],\n command['command']\n ))\n\n task_chain = reduce(or_, task_list)\n\n task_chain()\n\n ret = {\n \"results_urls\": []\n }\n\n for action_id in action_ids:\n ret['results_urls'].append(reverse(\n 'stackaction-detail',\n kwargs={\n 'pk': action_id,\n },\n request=request\n ))\n\n return Response(ret)\n\n # Keep track of the tasks we need to run for this execution\n task_list = []\n\n # FIXME: not generic\n if action in (BaseCloudProvider.ACTION_STOP,\n BaseCloudProvider.ACTION_TERMINATE):\n # Unregister DNS when executing the above actions\n task_list.append(tasks.unregister_dns.si(stack.id))\n\n # Launch is slightly different than other actions\n if action == BaseCloudProvider.ACTION_LAUNCH:\n task_list.append(tasks.launch_hosts.si(stack.id))\n task_list.append(tasks.update_metadata.si(stack.id))\n task_list.append(tasks.cure_zombies.si(stack.id))\n\n # Terminate should leverage salt-cloud or salt gets confused about\n # the state of things\n elif action == BaseCloudProvider.ACTION_TERMINATE:\n task_list.append(\n tasks.destroy_hosts.si(stack.id,\n delete_hosts=False,\n delete_security_groups=False)\n )\n\n elif action in (BaseCloudProvider.ACTION_PROVISION,\n BaseCloudProvider.ACTION_ORCHESTRATE,):\n # action that gets handled later\n pass\n\n # Execute other actions that may be available on the driver\n else:\n task_list.append(tasks.execute_action.si(stack.id, action, *args))\n\n # Update the metadata after the action has been executed\n if action != BaseCloudProvider.ACTION_TERMINATE:\n task_list.append(tasks.update_metadata.si(stack.id))\n\n # Launching requires us to tag the newly available infrastructure\n if action in (BaseCloudProvider.ACTION_LAUNCH,):\n tasks.tag_infrastructure.si(stack.id)\n\n # Starting and launching requires DNS updates\n if action in (BaseCloudProvider.ACTION_START,\n BaseCloudProvider.ACTION_LAUNCH):\n task_list.append(tasks.register_dns.si(stack.id))\n\n # starting, launching, or reprovisioning requires us to execute the\n # provisioning tasks\n if action in (BaseCloudProvider.ACTION_START,\n BaseCloudProvider.ACTION_LAUNCH,\n BaseCloudProvider.ACTION_PROVISION,\n BaseCloudProvider.ACTION_ORCHESTRATE):\n task_list.append(tasks.ping.si(stack.id))\n task_list.append(tasks.sync_all.si(stack.id))\n\n if action in (BaseCloudProvider.ACTION_START,\n BaseCloudProvider.ACTION_LAUNCH,\n BaseCloudProvider.ACTION_PROVISION):\n task_list.append(tasks.highstate.si(stack.id))\n task_list.append(tasks.global_orchestrate.si(stack.id))\n task_list.append(tasks.orchestrate.si(stack.id))\n\n if action == BaseCloudProvider.ACTION_ORCHESTRATE:\n task_list.append(tasks.orchestrate.si(stack.id, 2))\n\n task_list.append(tasks.finish_stack.si(stack.id))\n\n # chain together our tasks using the bitwise or operator\n task_chain = reduce(or_, task_list)\n\n # execute the chain\n 
task_chain()\n\n # Update all host states\n stack.get_hosts().update(state='actioning')\n\n serializer = self.get_serializer(stack)\n return Response(serializer.data)",
"def handle_host(self, host):\n LOG.info('FakeHandler: handle host %s' % host)",
"def perform_action(self, action_id: int) -> None:\r\n ...",
"def _run_test(self, host, path_info='/', cookie_dict=None, action=None,\n set_email=None, set_admin=None, continue_url=None,\n method='GET'):\n environ = {}\n wsgiref.util.setup_testing_defaults(environ)\n # The SERVER_NAME should never be used by the login module -- always defer\n # to the HTTP Host (so the user is not redirected to a different domain).\n environ['SERVER_NAME'] = 'do_not_use'\n environ['SERVER_PORT'] = '666'\n environ['SERVER_PROTOCOL'] = 'HTTP/1.1'\n environ['HTTP_HOST'] = host\n environ['PATH_INFO'] = path_info\n environ['REQUEST_METHOD'] = method\n if cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join(m.OutputString() for m in cookie.values())\n environ['HTTP_COOKIE'] = cookie_value\n query_dict = {}\n if action:\n query_dict['action'] = action\n if set_email:\n query_dict['email'] = set_email\n if set_admin:\n query_dict['admin'] = set_admin\n if continue_url:\n query_dict['continue'] = continue_url\n if query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\n\n response_dict = {}\n\n def start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = dict((k.lower(), v)\n for (k, v) in headers)\n\n login.application(environ, start_response)\n\n return (response_dict['status'],\n response_dict['headers'].get('location'),\n response_dict['headers'].get('set-cookie'),\n response_dict['headers'].get('content-type'))",
"def test_profile_action():\n CalmTask.Exec.ssh(name=\"Task5\", script='echo \"Hello\"', target=ref(AhvVmService))",
"def test(cls, hostname):\n pass",
"def test_download_host(self):\n pass",
"def test_perform_action(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Create a dummy event and get it back.\n event_id = boilerplate.createEvent(context)\n event = repo.LookupActivityEvent()(event_id)\n\n # Check we're allowed.\n state_changer = request.state_changer\n with transaction.manager:\n bm.Session.add(context)\n self.assertTrue(state_changer.can_perform(context, a.START))\n context_id = context.id\n\n # Perform the action.\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n context = model.Model.query.get(context_id)\n with transaction.manager:\n bm.Session.add(event)\n bm.Session.add(context)\n _ = state_changer.perform(context, a.START, event)\n status = context.work_status.value\n\n # The context is now in the configured state.\n self.assertEqual(status, s.STARTED)",
"def test_post_accepts_known_host(self, publish_mock: mock.Mock) -> None:\n\n def side_effect(*args: str, **_: str) -> Any:\n if args[0] == \"registry:first:value\":\n return [\"00:00:00:00:00\"]\n if args[0] == \"app_url\":\n return [\"/\"]\n if args[0] == \"jinja:render\":\n return [\"\"]\n return mock.DEFAULT\n\n publish_mock.side_effect = side_effect\n\n response = self.request(\"/\", method=\"POST\", host=\"host1\")\n\n self.assertEqual(response.code, 303)",
"def test_user_actions_post(self):\n pass"
] | [
"0.7159185",
"0.6891614",
"0.67042726",
"0.66250044",
"0.6416423",
"0.63644123",
"0.6353317",
"0.6332438",
"0.6328068",
"0.6314844",
"0.6314844",
"0.6277904",
"0.62587076",
"0.62587076",
"0.6244839",
"0.6232687",
"0.616714",
"0.616714",
"0.6150289",
"0.6057667",
"0.6020432",
"0.59756637",
"0.5969495",
"0.59137034",
"0.5904419",
"0.5884962",
"0.5872753",
"0.58610684",
"0.5848693",
"0.58256185"
] | 0.9437831 | 0 |
Test case for publish_deployment_run | def test_publish_deployment_run(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_execute_deployment(self):\n pass",
"def test_release_deployment_run(self):\n pass",
"def test_get_deployment_run(self):\n pass",
"def test_get_deployment_runs(self):\n pass",
"def test_get_deployment_runs1(self):\n pass",
"def test_create_deployment(self):\n pass",
"def test_launch_deployment(self):\n pass",
"def test_retest_deployment_run(self):\n pass",
"def test_create_deployment_entire(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_update_deployment(self):\n pass",
"def test_delete_deployment_run(self):\n pass",
"def test_get_deployment_run_reports(self):\n pass",
"def test_relaunch_deployment_run(self):\n pass",
"def test_download_deployment_run_test_report(self):\n pass",
"def deploy():",
"def test_get_deployments(self):\n pass",
"def test_get_deployments(self):\n pass",
"def test_get_deployment_resources(self):\n pass",
"def testPublish(self):\n\n analysisDir = self.reqarea\n\n # Missing required -d option\n expRes = CommandResult(1, 'ERROR: Task option is required')\n pub = publish(self.logger, self.maplistopt + [\"-u\", 'http:/somewhere.com/'])\n res = pub()\n self.assertEquals(expRes, res)\n\n # Missing required -u option\n expRes = CommandResult(1, 'ERROR: DBS URL option is required')\n pub = publish(self.logger, self.maplistopt + [\"-d\", analysisDir])\n res = pub()\n self.assertEquals(expRes, res)\n\n # Correct command\n expRes = CommandResult(0, '')\n pub = publish(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-u\", 'http:/somewhere.com/'])\n res = pub()\n self.assertEquals(expRes, res)\n\n return",
"def _run_ci_publish():\n _run_install(False)\n _run_tests(False)\n _run_publish(True)",
"def deploy(parameters):\n\n print(\"In deploy module\")",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_delete_deployment(self):\n pass",
"def test_get_deployment_resource(self):\n pass",
"def test_clone_deployment(self):\n pass",
"def test_archive_run(self):\n pass",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())"
] | [
"0.8526962",
"0.8483117",
"0.83470637",
"0.79084",
"0.7902565",
"0.78378445",
"0.77951825",
"0.7778999",
"0.76014686",
"0.7455409",
"0.7455409",
"0.74202234",
"0.7324894",
"0.7178446",
"0.7171523",
"0.6879278",
"0.68773216",
"0.6827293",
"0.6827293",
"0.68041193",
"0.6782757",
"0.675873",
"0.6717203",
"0.66793436",
"0.66698223",
"0.663074",
"0.65452653",
"0.6480026",
"0.6414756",
"0.6414756"
] | 0.94700634 | 0 |
Test case for publish_scenario_to_composition | def test_publish_scenario_to_composition(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_launch_composition(self):\n pass",
"def test_composition(self):",
"def test_update_composition(self):\n pass",
"def test_get_composition(self):\n pass",
"def test_publish_deployment_run(self):\n pass",
"def test_stream_publish(self):\n pass",
"def test_create_scenario(self):\n pass",
"def test_create_scenario1(self):\n pass",
"def test_composition_adds_to_100_percent(self):",
"def test_list_compositions(self):\n pass",
"def test_list_composition_status(self):\n pass",
"def ConstructStage(self):\n raise NotImplementedError(self, \"ConstructStage: Implement in your test\")",
"def test_setup_succeeds(self):\n assert self.add_statestream(base_topic='pub')",
"def test_get_scenarios(self):\n pass",
"def test_execute_deployment(self):\n pass",
"def test_deploy_workflow_definition(self):\n pass",
"def test_publish(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.publish(TOOLNAME)",
"def test_delete_composition(self):\n pass",
"def test_get_scenario(self):\n pass",
"def test_workflows_post(self):\n pass",
"def test_launch_deployment(self):\n pass",
"def test_workflows_change_stream_post(self):\n pass",
"def test_create_deployment(self):\n pass",
"def test_exchange_publish(self):\n self.exchange_publisher._connect()\n with self.assertLogs(level='INFO') as cm:\n result = self.exchange_publisher.publish_message(test_data['valid'])\n self.assertEqual(True, result)\n\n self.assertIn('Published message to exchange', cm.output[8])",
"def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)",
"def test_create_deployment_entire(self):\n pass",
"def test_create_part(self):\n pass",
"def test_create_activity(self):\n pass",
"def test_durable_exchange_publish(self):\n self.durable_exchange_publisher._connect()\n with self.assertLogs(level='INFO') as cm:\n result = self.durable_exchange_publisher.publish_message(test_data['valid'])\n self.assertEqual(True, result)\n\n self.assertIn('Published message to exchange', cm.output[8])",
"def test_process_subscriptions(self):\n pass"
] | [
"0.7309759",
"0.71597904",
"0.661634",
"0.6569027",
"0.65529925",
"0.64002776",
"0.6281115",
"0.6125292",
"0.59854794",
"0.59726113",
"0.59433573",
"0.5917493",
"0.5877703",
"0.58758044",
"0.58738774",
"0.5857569",
"0.5840035",
"0.5765939",
"0.57415116",
"0.5739839",
"0.5725684",
"0.57229614",
"0.5711771",
"0.56962943",
"0.56938106",
"0.5668656",
"0.56661785",
"0.5633835",
"0.5601714",
"0.56010884"
] | 0.93706095 | 0 |
Test case for quick_build | def test_quick_build(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_quick_build1(self):\n pass",
"def test_build_model(arguments):\n ...",
"def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()",
"def info_build_test(self):\n\n self._export(\"H0\", \"0.1\")\n\n self._export(\"H1a\", \"0.1\", deps=[(\"H0/0.1@lu/st\", \"private\")])\n self._export(\"H1b\", \"0.1\", deps=[\"H0/0.1@lu/st\"])\n self._export(\"H1c\", \"0.1\", deps=[(\"H0/0.1@lu/st\", \"private\")])\n\n self._export(\"H2a\", \"0.1\", deps=[\"H1a/0.1@lu/st\"])\n self._export(\"H2c\", \"0.1\", deps=[\"H1c/0.1@lu/st\"])\n\n self._export(\"H3\", \"0.1\", deps=[\"H2a/0.1@lu/st\",\n \"H2c/0.1@lu/st\"])\n\n # If we install H3 we need to build all except H1b\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H1c/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # If we install H0 we need to build nothing (current project)\n self.clients[\"H0\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H0\"], \"\")\n\n # If we install H0 we need to build H0\n self.clients[\"H1a\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H1a\"], \"H0/0.1@lu/st\")\n\n # If we build and upload H1a and H1c, no more H0 (private) is required\n self.clients[\"H3\"].run(\"install H1a/0.1@lu/st --build \")\n self.clients[\"H3\"].run(\"install H1c/0.1@lu/st --build \")\n self.clients[\"H3\"].run(\"upload H1a/0.1@lu/st --all\")\n self.clients[\"H3\"].run(\"upload H1c/0.1@lu/st --all\")\n\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # But if we force to build all, all nodes have to be built\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H1c/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # Now upgrade the recipe H1a and upload it (but not the package)\n # so the package become outdated\n conanfile_path = os.path.join(self.clients[\"H1a\"].current_folder, CONANFILE)\n conanfile = load(conanfile_path)\n conanfile += \"\\n# MODIFIED\"\n save(conanfile_path, conanfile)\n self.clients[\"H1a\"].run(\"export lu/st\")\n self.clients[\"H1a\"].run(\"upload H1a/0.1@lu/st\") # NOW IS OUTDATED!\n\n # Without build outdated the built packages are the same\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # But with build outdated we have to build the private H0 (but only once) and H1a\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build outdated\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")",
"def test_build(self):\n self.app.build()",
"def test_main(self):\n self.createFakeSphinxProject()\n self.builder.main([self.sphinxDir.parent().path])\n self.verifyBuilt()",
"def test_build(self):\n version = \"1.2.3\"\n input1, output1 = self.getArbitraryLoreInputAndOutput(version)\n input2, output2 = self.getArbitraryLoreInputAndOutput(version)\n\n self.howtoDir.child(\"one.xhtml\").setContent(input1)\n self.howtoDir.child(\"two.xhtml\").setContent(input2)\n\n self.builder.build(version, self.howtoDir, self.howtoDir,\n self.templateFile)\n out1 = self.howtoDir.child('one.html')\n out2 = self.howtoDir.child('two.html')\n self.assertXMLEqual(out1.getContent(), output1)\n self.assertXMLEqual(out2.getContent(), output2)",
"def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"",
"def test_build(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n\n projectName = \"Foobar\"\n packageName = \"quux\"\n projectURL = \"scheme:project\"\n sourceURL = \"scheme:source\"\n docstring = \"text in docstring\"\n privateDocstring = \"should also appear in output\"\n\n inputPath = FilePath(self.mktemp()).child(packageName)\n inputPath.makedirs()\n inputPath.child(\"__init__.py\").setContent(\n \"def foo():\\n\"\n \" '%s'\\n\"\n \"def _bar():\\n\"\n \" '%s'\" % (docstring, privateDocstring))\n\n outputPath = FilePath(self.mktemp())\n outputPath.makedirs()\n\n builder = APIBuilder()\n builder.build(projectName, projectURL, sourceURL, inputPath, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n self.assertTrue(\n indexPath.exists(),\n \"API index %r did not exist.\" % (outputPath.path,))\n self.assertIn(\n '<a href=\"%s\">%s</a>' % (projectURL, projectName),\n indexPath.getContent(),\n \"Project name/location not in file contents.\")\n\n quuxPath = outputPath.child(\"quux.html\")\n self.assertTrue(\n quuxPath.exists(),\n \"Package documentation file %r did not exist.\" % (quuxPath.path,))\n self.assertIn(\n docstring, quuxPath.getContent(),\n \"Docstring not in package documentation file.\")\n self.assertIn(\n '<a href=\"%s/%s\">View Source</a>' % (sourceURL, packageName),\n quuxPath.getContent())\n self.assertIn(\n '<a href=\"%s/%s/__init__.py#L1\" class=\"functionSourceLink\">' % (\n sourceURL, packageName),\n quuxPath.getContent())\n self.assertIn(privateDocstring, quuxPath.getContent())\n\n # There should also be a page for the foo function in quux.\n self.assertTrue(quuxPath.sibling('quux.foo.html').exists())\n\n self.assertEqual(stdout.getvalue(), '')",
"def test_build(self):\n stdout = BytesIO()\n self.patch(sys, \"stdout\", stdout)\n\n projectName = \"Foobar\"\n packageName = \"quux\"\n projectURL = \"scheme:project\"\n sourceURL = \"scheme:source\"\n docstring = \"text in docstring\"\n privateDocstring = \"should also appear in output\"\n\n inputPath = FilePath(self.mktemp()).child(packageName)\n inputPath.makedirs()\n inputPath.child(\"__init__.py\").setContent(\n \"def foo():\\n\"\n \" '{}'\\n\"\n \"def _bar():\\n\"\n \" '{}'\".format(docstring, privateDocstring).encode()\n )\n\n outputPath = FilePath(self.mktemp())\n\n builder = APIBuilder()\n builder.build(projectName, projectURL, sourceURL, inputPath, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n\n self.assertTrue(\n indexPath.exists(), \"API index {!r} did not exist.\".format(outputPath.path)\n )\n self.assertIn(\n '<a href=\"{}\">{}</a>'.format(projectURL, projectName),\n indexPath.getContent().decode(),\n \"Project name/location not in file contents.\",\n )\n\n quuxPath = outputPath.child(\"quux.html\")\n self.assertTrue(\n quuxPath.exists(),\n \"Package documentation file {!r} did not exist.\".format(quuxPath.path),\n )\n self.assertIn(\n docstring,\n quuxPath.getContent().decode(),\n \"Docstring not in package documentation file.\",\n )\n self.assertIn(\n '<a href=\"{}/{}/__init__.py\">(source)</a>'.format(sourceURL, packageName),\n quuxPath.getContent().decode(),\n )\n self.assertIn(\n '<a class=\"functionSourceLink\" href=\"%s/%s/__init__.py#L1\">'\n % (sourceURL, packageName),\n quuxPath.getContent().decode(),\n )\n self.assertIn(privateDocstring, quuxPath.getContent().decode())\n\n self.assertEqual(stdout.getvalue(), b\"\")",
"def build():",
"def build(parameters):\n\n\n print(\"In Build module\")",
"def build(_):",
"def test_valid_basic_build():\n config = load_json_fixture(\"basic-build-config.json\")\n\n vd.SCHEMA_BUILD_CONFIG(config)",
"def test_build_using_custom_builder(cli, build_resources):\n books, _ = build_resources\n config = books.joinpath(\"config_custombuilder\")\n result = cli.invoke(\n commands.build,\n [\n config.as_posix(),\n \"--builder=custom\",\n \"--custom-builder=mycustombuilder\",\n \"-n\",\n \"-W\",\n \"--keep-going\",\n ],\n )\n assert result.exit_code == 0, result.output\n html = config.joinpath(\"_build\", \"mycustombuilder\", \"index.html\").read_text(\n encoding=\"utf8\"\n )\n assert '<p class=\"title logo__title\">TEST PROJECT NAME</p>' in html\n assert '<link rel=\"stylesheet\" type=\"text/css\" href=\"_static/mycss.css\" />' in html\n assert '<script src=\"_static/js/myjs.js\"></script>' in html",
"def check():\n cmake('tests')\n docker('./{build}/tests', build=BUILD)",
"def test_testutils():\n build()\n sh(\"%s psutil\\\\tests\\\\test_testutils.py\" % PYTHON)",
"def test_create_namespaced_build(self):\n pass",
"def pre_build(self):",
"def getBuild():",
"def test_install_build_single(build_all):\n build_all.run(\"install --requires=foobar/1.0@user/testing --build=foo/*\")\n build_all.assert_listed_binary({\"bar/1.0@user/testing\": (bar_id, \"Cache\"),\n \"foo/1.0@user/testing\": (foo_id, \"Build\"),\n \"foobar/1.0@user/testing\": (foobar_id, \"Cache\"),\n })\n assert \"foo/1.0@user/testing: Forced build from source\" in build_all.out\n assert \"bar/1.0@user/testing: Forced build from source\" not in build_all.out\n assert \"foobar/1.0@user/testing: Forced build from source\" not in build_all.out\n assert \"No package matching\" not in build_all.out",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_full(build=DEVELOPMENT, **kwargs):\n optimize(build)\n test('slk', build[:build.rfind('.w3x')], **kwargs)",
"def test_generate_all_testing(self):\n pass",
"def tests():",
"def test_basic_execution(self):",
"def test_failToBuild(self):\n # note no fake sphinx project is created\n self.assertRaises(CalledProcessError, self.builder.build, self.sphinxDir)",
"def run_quick(session):\n run_tests(session)\n run_doctests(session)"
] | [
"0.9178004",
"0.7234953",
"0.7011216",
"0.6978222",
"0.6942303",
"0.6799926",
"0.6792072",
"0.6701608",
"0.6653364",
"0.658661",
"0.6564973",
"0.6511957",
"0.6503971",
"0.64864445",
"0.64777625",
"0.6362981",
"0.63470364",
"0.6344338",
"0.63411856",
"0.6294657",
"0.6292472",
"0.6284615",
"0.6284615",
"0.6284615",
"0.6282891",
"0.62693834",
"0.625931",
"0.62530667",
"0.62051964",
"0.6180592"
] | 0.9337133 | 0 |
Test case for quick_build1 | def test_quick_build1(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_quick_build(self):\n pass",
"def info_build_test(self):\n\n self._export(\"H0\", \"0.1\")\n\n self._export(\"H1a\", \"0.1\", deps=[(\"H0/0.1@lu/st\", \"private\")])\n self._export(\"H1b\", \"0.1\", deps=[\"H0/0.1@lu/st\"])\n self._export(\"H1c\", \"0.1\", deps=[(\"H0/0.1@lu/st\", \"private\")])\n\n self._export(\"H2a\", \"0.1\", deps=[\"H1a/0.1@lu/st\"])\n self._export(\"H2c\", \"0.1\", deps=[\"H1c/0.1@lu/st\"])\n\n self._export(\"H3\", \"0.1\", deps=[\"H2a/0.1@lu/st\",\n \"H2c/0.1@lu/st\"])\n\n # If we install H3 we need to build all except H1b\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H1c/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # If we install H0 we need to build nothing (current project)\n self.clients[\"H0\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H0\"], \"\")\n\n # If we install H0 we need to build H0\n self.clients[\"H1a\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H1a\"], \"H0/0.1@lu/st\")\n\n # If we build and upload H1a and H1c, no more H0 (private) is required\n self.clients[\"H3\"].run(\"install H1a/0.1@lu/st --build \")\n self.clients[\"H3\"].run(\"install H1c/0.1@lu/st --build \")\n self.clients[\"H3\"].run(\"upload H1a/0.1@lu/st --all\")\n self.clients[\"H3\"].run(\"upload H1c/0.1@lu/st --all\")\n\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # But if we force to build all, all nodes have to be built\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H1c/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # Now upgrade the recipe H1a and upload it (but not the package)\n # so the package become outdated\n conanfile_path = os.path.join(self.clients[\"H1a\"].current_folder, CONANFILE)\n conanfile = load(conanfile_path)\n conanfile += \"\\n# MODIFIED\"\n save(conanfile_path, conanfile)\n self.clients[\"H1a\"].run(\"export lu/st\")\n self.clients[\"H1a\"].run(\"upload H1a/0.1@lu/st\") # NOW IS OUTDATED!\n\n # Without build outdated the built packages are the same\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # But with build outdated we have to build the private H0 (but only once) and H1a\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build outdated\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")",
"def test_build_model(arguments):\n ...",
"def test_build(self):\n version = \"1.2.3\"\n input1, output1 = self.getArbitraryLoreInputAndOutput(version)\n input2, output2 = self.getArbitraryLoreInputAndOutput(version)\n\n self.howtoDir.child(\"one.xhtml\").setContent(input1)\n self.howtoDir.child(\"two.xhtml\").setContent(input2)\n\n self.builder.build(version, self.howtoDir, self.howtoDir,\n self.templateFile)\n out1 = self.howtoDir.child('one.html')\n out2 = self.howtoDir.child('two.html')\n self.assertXMLEqual(out1.getContent(), output1)\n self.assertXMLEqual(out2.getContent(), output2)",
"def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()",
"def test_main(self):\n self.createFakeSphinxProject()\n self.builder.main([self.sphinxDir.parent().path])\n self.verifyBuilt()",
"def test_build(self):\n self.app.build()",
"def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"",
"def build(parameters):\n\n\n print(\"In Build module\")",
"def test_T01():",
"def build():",
"def build(_):",
"def test_build(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n\n projectName = \"Foobar\"\n packageName = \"quux\"\n projectURL = \"scheme:project\"\n sourceURL = \"scheme:source\"\n docstring = \"text in docstring\"\n privateDocstring = \"should also appear in output\"\n\n inputPath = FilePath(self.mktemp()).child(packageName)\n inputPath.makedirs()\n inputPath.child(\"__init__.py\").setContent(\n \"def foo():\\n\"\n \" '%s'\\n\"\n \"def _bar():\\n\"\n \" '%s'\" % (docstring, privateDocstring))\n\n outputPath = FilePath(self.mktemp())\n outputPath.makedirs()\n\n builder = APIBuilder()\n builder.build(projectName, projectURL, sourceURL, inputPath, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n self.assertTrue(\n indexPath.exists(),\n \"API index %r did not exist.\" % (outputPath.path,))\n self.assertIn(\n '<a href=\"%s\">%s</a>' % (projectURL, projectName),\n indexPath.getContent(),\n \"Project name/location not in file contents.\")\n\n quuxPath = outputPath.child(\"quux.html\")\n self.assertTrue(\n quuxPath.exists(),\n \"Package documentation file %r did not exist.\" % (quuxPath.path,))\n self.assertIn(\n docstring, quuxPath.getContent(),\n \"Docstring not in package documentation file.\")\n self.assertIn(\n '<a href=\"%s/%s\">View Source</a>' % (sourceURL, packageName),\n quuxPath.getContent())\n self.assertIn(\n '<a href=\"%s/%s/__init__.py#L1\" class=\"functionSourceLink\">' % (\n sourceURL, packageName),\n quuxPath.getContent())\n self.assertIn(privateDocstring, quuxPath.getContent())\n\n # There should also be a page for the foo function in quux.\n self.assertTrue(quuxPath.sibling('quux.foo.html').exists())\n\n self.assertEqual(stdout.getvalue(), '')",
"def test_valid_basic_build():\n config = load_json_fixture(\"basic-build-config.json\")\n\n vd.SCHEMA_BUILD_CONFIG(config)",
"def pre_build(self):",
"def test_generate_all_testing(self):\n pass",
"def test_1():",
"def test_T1():",
"def test_T1():",
"def getBuild():",
"def test_build(self):\n stdout = BytesIO()\n self.patch(sys, \"stdout\", stdout)\n\n projectName = \"Foobar\"\n packageName = \"quux\"\n projectURL = \"scheme:project\"\n sourceURL = \"scheme:source\"\n docstring = \"text in docstring\"\n privateDocstring = \"should also appear in output\"\n\n inputPath = FilePath(self.mktemp()).child(packageName)\n inputPath.makedirs()\n inputPath.child(\"__init__.py\").setContent(\n \"def foo():\\n\"\n \" '{}'\\n\"\n \"def _bar():\\n\"\n \" '{}'\".format(docstring, privateDocstring).encode()\n )\n\n outputPath = FilePath(self.mktemp())\n\n builder = APIBuilder()\n builder.build(projectName, projectURL, sourceURL, inputPath, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n\n self.assertTrue(\n indexPath.exists(), \"API index {!r} did not exist.\".format(outputPath.path)\n )\n self.assertIn(\n '<a href=\"{}\">{}</a>'.format(projectURL, projectName),\n indexPath.getContent().decode(),\n \"Project name/location not in file contents.\",\n )\n\n quuxPath = outputPath.child(\"quux.html\")\n self.assertTrue(\n quuxPath.exists(),\n \"Package documentation file {!r} did not exist.\".format(quuxPath.path),\n )\n self.assertIn(\n docstring,\n quuxPath.getContent().decode(),\n \"Docstring not in package documentation file.\",\n )\n self.assertIn(\n '<a href=\"{}/{}/__init__.py\">(source)</a>'.format(sourceURL, packageName),\n quuxPath.getContent().decode(),\n )\n self.assertIn(\n '<a class=\"functionSourceLink\" href=\"%s/%s/__init__.py#L1\">'\n % (sourceURL, packageName),\n quuxPath.getContent().decode(),\n )\n self.assertIn(privateDocstring, quuxPath.getContent().decode())\n\n self.assertEqual(stdout.getvalue(), b\"\")",
"def tests():",
"def test_4_4_1_1(self):\n pass",
"def test_uparforvarg(self):",
"def unitary_test():",
"def runtest(self):",
"def check():\n cmake('tests')\n docker('./{build}/tests', build=BUILD)",
"def test_basic_execution(self):",
"def TestOneStep(self):\n pass",
"def test_version(self):\n pass"
] | [
"0.90343785",
"0.70075536",
"0.6934669",
"0.6801372",
"0.673822",
"0.6652555",
"0.6648126",
"0.659174",
"0.656793",
"0.64955795",
"0.6491635",
"0.6450607",
"0.63706475",
"0.6363086",
"0.6357556",
"0.62959576",
"0.6294545",
"0.6275839",
"0.6275839",
"0.6272288",
"0.6272061",
"0.6264427",
"0.62576294",
"0.6235494",
"0.6230786",
"0.6223359",
"0.62165016",
"0.621431",
"0.6211904",
"0.6199103"
] | 0.95006156 | 0 |
Test case for redeploy_container_asset | def test_redeploy_container_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_redeploy(self):\n pass",
"def test_retest_deployment_run(self):\n pass",
"def test_relaunch_deployment_run(self):\n pass",
"def test_update_deployment(self):\n pass",
"def test_redeploy_edges(self):\n pass",
"def test_update_container(self):\n pass",
"def test_delete_deployment_run(self):\n pass",
"def test_release_deployment_run(self):\n pass",
"def test_delete_deployment(self):\n pass",
"def test_execute_deployment(self):\n pass",
"def test_destroy_container(self):\n pass",
"def test_update_system_asset(self):\n pass",
"def test_publish_deployment_run(self):\n pass",
"def test_get_container_assets(self):\n pass",
"def test_clone_deployment(self):\n pass",
"def test_update_asset(self):\n pass",
"def test_update_test_asset(self):\n pass",
"def prepare_image_for_deploy(runtime: \"mlrun.runtimes.BaseRuntime\"):\n pass",
"def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()",
"def deploy():",
"def test_update_software_asset_bundle(self):\n pass",
"def test_create_namespaced_deployment_config_rollback(self):\n pass",
"def test_create_deployment(self):\n pass",
"def test_update_software_asset(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_update_deployment_state(self):\n pass",
"def test_remove_deployment(self):\n del_deployment, mod_del_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='del_dep')\n\n undel_deployment, mod_undel_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='undel_dep')\n\n blu_id = BLUEPRINT_ID + '-del-1'\n self.client.blueprints.upload(mod_del_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n blu_id = BLUEPRINT_ID + '-undel-1'\n self.client.blueprints.upload(mod_undel_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(undel_deployment.id, blu_id)\n\n mod_del_dep_bp2 = self._get_blueprint_path(\n os.path.join('remove_deployment', 'modification2'),\n 'remove_deployment_modification2.yaml')\n blu_id = BLUEPRINT_ID + '-del-2'\n self.client.blueprints.upload(mod_del_dep_bp2, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n\n self.assertEqual(len(deployment_update_list.items), 2)\n\n # Delete deployment and assert deployment updates were removed\n uninstall = self.client.executions.start(\n del_deployment.id, 'uninstall')\n self.wait_for_execution_to_end(uninstall)\n\n self.client.deployments.delete(del_deployment.id)\n wait_for_deployment_deletion_to_complete(\n del_deployment.id, self.client\n )\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list.items), 0)\n\n # Assert no other deployment updates were deleted\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=undel_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list), 1)",
"def test_create_deployment_entire(self):\n pass",
"def test_delete_asset(self):\n pass",
"def test_create_namespaced_deployment_config_rollback_rollback(self):\n pass"
] | [
"0.7583045",
"0.7410216",
"0.70913064",
"0.66361076",
"0.66169864",
"0.66155636",
"0.6515826",
"0.64134705",
"0.63725084",
"0.625122",
"0.619479",
"0.61929744",
"0.6148904",
"0.6102583",
"0.6077907",
"0.60367733",
"0.6008068",
"0.599872",
"0.59836715",
"0.5963843",
"0.59585464",
"0.59553343",
"0.5915755",
"0.59132916",
"0.5911145",
"0.5904843",
"0.58981055",
"0.5863886",
"0.5819069",
"0.5815345"
] | 0.9522976 | 0 |
Test case for redeploy_edges | def test_redeploy_edges(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_redeploy(self):\n pass",
"def test_retest_deployment_run(self):\n pass",
"def test_redeploy_container_asset(self):\n pass",
"def test_relaunch_deployment_run(self):\n pass",
"def test_failover_during_rebalance(self):\n def get_reb_out_nodes():\n nodes = list()\n nodes_with_services = dict()\n cluster_nodes = self.rest.get_nodes()\n for node in cluster_nodes:\n node.services.sort()\n d_key = '_'.join(node.services)\n if d_key not in nodes_with_services:\n nodes_with_services[d_key] = list()\n nodes_with_services[d_key].append(node)\n\n for services in out_nodes:\n services = services.split(\"_\")\n services.sort()\n services = \"_\".join(services)\n rand_node = choice(nodes_with_services[services])\n nodes_with_services[services].remove(rand_node)\n nodes.append(rand_node)\n return nodes\n\n self.nodes_in = self.input.param(\"nodes_in\", 0)\n pre_fo_data_load = self.input.param(\"pre_fo_data_load\", False)\n if pre_fo_data_load:\n self.__perform_doc_ops(durability=self.durability_level,\n validate_num_items=True)\n\n add_nodes = list()\n remove_nodes = list()\n # Format - kv:kv_index -> 2 nodes with services [kv, kv:index]\n out_nodes = self.input.param(\"out_nodes\", \"kv\").split(\":\")\n # Can take any of (in/out/swap)\n rebalance_type = self.input.param(\"rebalance_type\", \"in\")\n services_to_fo = self.failover_order[0].split(\":\")\n self.nodes_to_fail = self.get_nodes_to_fail(services_to_fo,\n dynamic_fo_method=True)\n loader_task = None\n reader_task = None\n\n if rebalance_type == \"in\":\n add_nodes = self.cluster.servers[\n self.nodes_init:self.nodes_init+self.nodes_in]\n self.cluster.kv_nodes.extend(add_nodes)\n elif rebalance_type == \"out\":\n remove_nodes = get_reb_out_nodes()\n elif rebalance_type == \"swap\":\n remove_nodes = get_reb_out_nodes()\n add_nodes = self.cluster.servers[\n self.nodes_init:self.nodes_init+self.nodes_in]\n self.cluster.kv_nodes.extend(add_nodes)\n\n expected_fo_nodes = self.num_nodes_to_be_failover\n self.__update_server_obj()\n\n # Start doc_ops in background\n if self.load_during_fo:\n doc_gen = doc_generator(\"fo_docs\", 0, 200000)\n loader_task = self.task.async_continuous_doc_ops(\n self.cluster, self.cluster.buckets[0], doc_gen,\n DocLoading.Bucket.DocOps.UPDATE, exp=5, process_concurrency=1)\n reader_task = self.task.async_continuous_doc_ops(\n self.cluster, self.cluster.buckets[0], doc_gen,\n DocLoading.Bucket.DocOps.READ, process_concurrency=1)\n\n self.__update_unaffected_node()\n self.__display_failure_node_status(\"Nodes to be failed\")\n\n # Create Auto-failover task but won't start it\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=expected_fo_nodes,\n task_type=\"induce_failure\")\n\n # Start rebalance operation\n self.log.info(\"Starting rebalance operation\")\n rebalance_task = self.task.async_rebalance(\n self.cluster, to_add=add_nodes, to_remove=remove_nodes)\n\n self.sleep(max(10, 4*self.nodes_in),\n \"Wait for rebalance to start before failover\")\n self.task_manager.add_new_task(failover_task)\n\n try:\n self.log.info(\"Wait for failover task to complete\")\n self.task_manager.get_task_result(failover_task)\n\n failure_msg = \"Auto-failover task failed\"\n if expected_fo_nodes == 0:\n # Task is expected to fail since no failover is triggered\n self.assertFalse(failover_task.result, failure_msg)\n else:\n self.assertTrue(failover_task.result, failure_msg)\n\n # Validate auto_failover_settings after failover\n self.validate_failover_settings(True, self.timeout,\n expected_fo_nodes, self.max_count)\n\n # Stop background doc_ops\n if self.load_during_fo:\n for task in [loader_task, 
reader_task]:\n task.end_task()\n self.task_manager.get_task_result(task)\n\n # Perform collection crud + doc_ops before rebalance operation\n self.__perform_doc_ops(durability=\"NONE\", validate_num_items=False)\n\n finally:\n # Disable auto-fo after the expected time limit\n retry = 5\n for i in range(retry):\n try:\n status = self.rest.update_autofailover_settings(\n enabled=False, timeout=self.timeout, maxCount=self.max_count,\n preserve_durability_during_auto_fo=self.preserve_durability_during_auto_fo)\n self.assertTrue(status)\n break\n except Exception as e:\n if i >= retry - 1:\n raise e\n else:\n self.sleep(1, \"waiting 1 sec before afo setting \"\n \"update retry\")\n\n # Recover all nodes from induced failures\n recovery_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=expected_fo_nodes,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(recovery_task)\n self.task_manager.get_task_result(recovery_task)\n self.task_manager.stop_task(rebalance_task)\n\n # Enable back prev auto_fo settings\n self.sleep(5, \"Wait before enabling back auto-fo\")\n self.rest.update_autofailover_settings(\n enabled=True, timeout=self.timeout, maxCount=self.max_count,\n preserve_durability_during_auto_fo=self.preserve_durability_during_auto_fo,)\n\n # Rebalance the cluster to remove failed nodes\n result = self.cluster_util.rebalance(self.cluster)\n self.assertTrue(result, \"Rebalance failed\")\n\n # Validate auto_failover_settings after rebalance operation\n self.validate_failover_settings(True, self.timeout, 0,\n self.max_count)\n\n # Perform collection crud + doc_ops after rebalance operation\n self.__perform_doc_ops()",
"def test_update_deployment(self):\n pass",
"def abort(self):\n for node in self.dep_graph.nodes_iter():\n role = self.roles[node]\n role.new_rep = role.cur_rep\n role.new_hosts = list(role.cur_hosts)\n for edge in self.dep_graph.edges_iter():\n edge_data = self.dep_graph.get_edge_data(*edge)\n edge_data['new_weight'] = edge_data['cur_weight']",
"def test_del_some_edges(graph_with_edges):\n graph_with_edges.del_edges('A', 'B')\n assert graph_with_edges['A'] == {'C': 9}",
"def test_routing_redistribution_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_redistribution.delete,\n {'rule': {}}\n )",
"def test_MB_51219(self):\n len_of_nodes_to_afo = len(self.failover_order[0].split(\":\"))\n nodes_to_fo = dict()\n nodes_in_cluster = self.rest.get_nodes()\n for node in nodes_in_cluster:\n if len_of_nodes_to_afo <= 0:\n break\n if str(self.cluster.master.ip) == str(node.ip):\n continue\n nodes_to_fo[node] = self.failover_method\n len_of_nodes_to_afo -= 1\n self.cluster_util.update_cluster_nodes_service_list(self.cluster)\n self.nodes_to_fail = nodes_to_fo\n self.__update_server_obj()\n try:\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=self.fo_events,\n task_type=\"induce_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n dictionary = dict(list(self.nodes_to_fail.items())[:1])\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=dictionary,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n timeout = int(time()) + 15\n task_id_changed = False\n self.prev_rebalance_status_id = None\n while not task_id_changed and int(time()) < timeout:\n server_task = self.rest.ns_server_tasks(\n task_type=\"rebalance\", task_sub_type=\"failover\")\n if server_task and server_task[\"statusId\"] != \\\n self.prev_rebalance_status_id:\n task_id_changed = True\n self.prev_rebalance_status_id = server_task[\"statusId\"]\n self.log.debug(\"New failover status id: %s\"\n % server_task[\"statusId\"])\n self.assertTrue(task_id_changed,\n \"Fail-over did not happen as expected\")\n self.bucket_util._wait_warmup_completed(self.cluster.buckets[0],\n servers=[\n self.cluster.master],\n wait_time=30)\n finally:\n # reverting failure from all the nodes\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n result = self.cluster_util.rebalance(self.cluster)\n self.assertTrue(result, \"Final re-balance failed\")",
"def test_delete_deployment_run(self):\n pass",
"def test_update_deployment_state(self):\n pass",
"def test_swap(self, dim):\r\n graph = nx.complete_graph(dim)\r\n graph.remove_edge(0, dim - 1)\r\n s = list(range(dim - 1))\r\n assert set(clique.swap(s, graph)) == set(range(1, dim))",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()",
"def test_create_namespaced_deployment_config_rollback(self):\n pass",
"def test_delete_deployment(self):\n pass",
"def on_prune(self, function_graph, node, reason):",
"def testEvaluatingRandomDAG(self):\n jobStore = self._getTestJobStorePath()\n for test in range(5):\n # Temporary file\n tempDir = self._createTempDir(purpose='tempDir')\n # Make a random DAG for the set of child edges\n nodeNumber = random.choice(range(2, 8))\n childEdges = self.makeRandomDAG(nodeNumber)\n # Get an adjacency list representation and check is acyclic\n adjacencyList = self.getAdjacencyList(nodeNumber, childEdges)\n self.assertTrue(self.isAcyclic(adjacencyList))\n # Add in follow on edges - these are returned as a list, and as a set of augmented\n # edges in the adjacency list\n followOnEdges = self.addRandomFollowOnEdges(adjacencyList)\n self.assertTrue(self.isAcyclic(adjacencyList))\n # Make the job graph\n rootJob = self.makeJobGraph(nodeNumber, childEdges, followOnEdges, tempDir)\n # Run the job graph\n options = Job.Runner.getDefaultOptions(\"%s.%i\" % (jobStore, test))\n options.logLevel = \"DEBUG\"\n options.retryCount = 1\n options.badWorker = 0.25\n options.badWorkerFailInterval = 0.01\n # Because we're going to be killing the services all the time for\n # restarts, make sure they are paying attention.\n options.servicePollingInterval = 1\n\n # Now actually run the workflow\n try:\n with Toil(options) as toil:\n toil.start(rootJob)\n numberOfFailedJobs = 0\n except FailedJobsException as e:\n numberOfFailedJobs = e.numberOfFailedJobs\n\n # Restart until successful or failed\n totalTrys = 1\n options.restart = True\n while numberOfFailedJobs != 0:\n try:\n with Toil(options) as toil:\n toil.restart()\n numberOfFailedJobs = 0\n except FailedJobsException as e:\n numberOfFailedJobs = e.numberOfFailedJobs\n if totalTrys > 32: #p(fail after this many restarts) ~= 0.5**32\n self.fail() #Exceeded a reasonable number of restarts\n totalTrys += 1\n\n # For each job check it created a valid output file and add the ordering\n # relationships contained within the output file to the ordering relationship,\n # so we can check they are compatible with the relationships defined by the job DAG.\n ordering = None\n for i in range(nodeNumber):\n with open(os.path.join(tempDir, str(i))) as fH:\n ordering = list(map(int, fH.readline().split()))\n self.assertEqual(int(ordering[-1]), i)\n for j in ordering[:-1]:\n adjacencyList[int(j)].add(i)\n # Check the ordering retains an acyclic graph\n if not self.isAcyclic(adjacencyList):\n print(\"ORDERING\", ordering)\n print(\"CHILD EDGES\", childEdges)\n print(\"FOLLOW ON EDGES\", followOnEdges)\n print(\"ADJACENCY LIST\", adjacencyList)\n self.assertTrue(self.isAcyclic(adjacencyList))",
"def test_create_namespaced_deployment_config_rollback_rollback(self):\n pass",
"def schedule_apply_edges(graph, u, v, eid, apply_func, inplace, outframe=...): # -> None:\n ...",
"def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()",
"def test_add_and_override_resource(self):\n deployment, modified_bp_path = self._deploy_and_get_modified_bp_path(\n 'add_and_override_resource')\n\n node_mapping = {\n 'stagnant': 'site1',\n 'added_relationship': 'site2',\n 'new': 'site3'\n }\n\n base_nodes, base_node_instances = \\\n self._map_node_and_node_instances(deployment.id, node_mapping)\n\n # check all operation have been executed\n self._assertDictContainsSubset(\n {'source_ops_counter': '3'},\n base_node_instances['added_relationship'][0]\n ['runtime_properties']\n )\n\n self.client.blueprints.upload(modified_bp_path, BLUEPRINT_ID)\n wait_for_blueprint_upload(BLUEPRINT_ID, self.client)\n self._do_update(deployment.id, BLUEPRINT_ID)\n\n # Get all related and affected nodes and node instances\n modified_nodes, modified_node_instances = \\\n self._map_node_and_node_instances(deployment.id, node_mapping)\n\n # get the nodes and node instances\n added_relationship_node_instance = \\\n modified_node_instances['added_relationship'][0]\n new_node_instance = modified_node_instances['new'][0]\n\n # check all operation have been executed.\n # source_ops_counter was increased for each operation between site2 and\n # site1, and another site2.source_ops_counter should have\n # decreased once because of the resource override\n self._assertDictContainsSubset(\n {'source_ops_counter': '2'},\n added_relationship_node_instance['runtime_properties']\n )\n\n self._assertDictContainsSubset(\n {'source_ops_counter': '3'},\n new_node_instance['runtime_properties']\n )",
"def test_create_deployment_config_rollback_for_all_namespaces(self):\n pass",
"def test_remove_deployment(self):\n del_deployment, mod_del_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='del_dep')\n\n undel_deployment, mod_undel_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='undel_dep')\n\n blu_id = BLUEPRINT_ID + '-del-1'\n self.client.blueprints.upload(mod_del_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n blu_id = BLUEPRINT_ID + '-undel-1'\n self.client.blueprints.upload(mod_undel_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(undel_deployment.id, blu_id)\n\n mod_del_dep_bp2 = self._get_blueprint_path(\n os.path.join('remove_deployment', 'modification2'),\n 'remove_deployment_modification2.yaml')\n blu_id = BLUEPRINT_ID + '-del-2'\n self.client.blueprints.upload(mod_del_dep_bp2, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n\n self.assertEqual(len(deployment_update_list.items), 2)\n\n # Delete deployment and assert deployment updates were removed\n uninstall = self.client.executions.start(\n del_deployment.id, 'uninstall')\n self.wait_for_execution_to_end(uninstall)\n\n self.client.deployments.delete(del_deployment.id)\n wait_for_deployment_deletion_to_complete(\n del_deployment.id, self.client\n )\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list.items), 0)\n\n # Assert no other deployment updates were deleted\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=undel_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list), 1)",
"def test_replace_cluster_network(self):\n pass",
"def test_failover_and_rebalance_out(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n out_nodes = list()\n nodes_in_cluster = self.servers[:self.nodes_init]\n for graceful in [True, False]:\n failover_nodes = random.sample(nodes_in_cluster[1:], 1)\n _ = self.cluster.async_failover(nodes_in_cluster, failover_nodes,\n graceful=graceful)\n self.wait_for_failover_or_assert(1)\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], failover_nodes)\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n for node in failover_nodes:\n out_nodes.append(node)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init] if node not in out_nodes]\n self.auth(servers=nodes_in_cluster)",
"def test_graph_deletes_nodes(graph_with_edges):\n graph_with_edges.del_nodes('B')\n listy = ['A', 'C', 'D', 'E', 'F']\n for node in listy:\n assert node in graph_with_edges.nodes()\n assert 'B' not in graph_with_edges.nodes()",
"def test_graph_cant_delete_an_unpresent_node(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.del_nodes(3.14)",
"def test_clone_deployment(self):\n pass",
"def add_resets_edges(graph, start):\n for node in graph.nodes:\n neighbors = list(graph[node])\n if neighbors == [node]:\n graph.add_edge(node, start, label=\"RESET / \")"
] | [
"0.73295414",
"0.6898475",
"0.65333545",
"0.64987963",
"0.61344343",
"0.5811225",
"0.5765322",
"0.576377",
"0.5688659",
"0.56628996",
"0.5650137",
"0.56399715",
"0.56339985",
"0.563365",
"0.556404",
"0.5521764",
"0.55171126",
"0.5505464",
"0.5483661",
"0.5483389",
"0.54431635",
"0.54118073",
"0.5404892",
"0.5402389",
"0.53892034",
"0.53769416",
"0.5371518",
"0.5351717",
"0.5337315",
"0.532077"
] | 0.9500575 | 0 |
Test case for register_cloud | def test_register_cloud(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_cloud(self):\n pass",
"def test_update_cloud(self):\n pass",
"def test_get_clouds(self):\n pass",
"def test_delete_cloud(self):\n pass",
"def test_register(self):\n self._configure_testshib_provider()\n self._test_register()",
"def test_get_cloud_resources(self):\n pass",
"def test_register_network(self):\n pass",
"def init_cloud_api(self, args=None):\n pass",
"def test_cloud_api():\n mock = provider.MockProvider()\n\n mock.setup_cloud('empty config....')\n\n assert mock.get_ext_ip_addr('some-node')",
"def test_register_existing(self):\n\n data = {'username': User.objects.all()[0].username, 'password': \"123test\", 'email': '[email protected]',\n 'device': self.device, 'newsletter': 'true', 'research': 'true'}\n\n response = self.requestRegistration(data)\n self.assertTrue(response.status_code == status.HTTP_400_BAD_REQUEST)\n self.assertTrue(not 'client_id' in response.data)\n self.assertTrue('User with this Username already exists.' in response.data['username'])",
"def test_register(self):\n rc = self.register(\n app.config['TEST_USER'],\n app.config['TEST_PW'])\n assert b'Login to Code TA' in rc.data\n\n rc = self.register(\n app.config['TEST_USER'],\n app.config['TEST_PW'])\n assert b'Sorry, that username is already taken.' in rc.data\n\n rc = self.register('', 'derp')\n assert b'Field must be between 1 and 100 characters long.' in rc.data\n\n rc = self.register('derp', '')\n assert b'This field is required.' in rc.data\n\n rc = self.register('derp', 'pass', 'not same pass')\n assert b'Passwords must match.' in rc.data\n\n rc = self.register('derp', 'pass', 'pass', email='broken', email2='broken')\n assert b'You must enter a valid email address.' in rc.data\n\n rc = self.register('derp', 'pass', 'pass', email='[email protected]')\n assert b'Email addresses must match.' in rc.data",
"def test_register(self):\n # Register good data\n data = mock_data['register']\n data = json.dumps(data)\n response = self.client.post(\n 'api/v2/auth/signup', content_type=\"application/json\", data=data)\n data = json.loads(response.data)\n self.assertEqual(data['message'], 'User registered')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('user' in data)",
"def setUp(self):\n self.manager, self.proxy = tests.utils.setup_xmlrpc()\n self.proxy.provider.register(\n PROVIDER_ID, USERNAME, PASSWORD, URL, TENANT, PROVIDER_TYPE,\n DEFAULT_IMAGE, DEFAULT_FLAVOR\n )\n status = self.proxy.server.create(\n PROVIDER_ID, IMAGE, FLAVOR\n )\n self.check_xmlrpc_command_result(status)\n status = self.proxy.server.list(PROVIDER_ID)\n info = self.check_xmlrpc_simple(status, {})\n self.machine_uuid = info['uuid']",
"def _register(self,user,project):\n url = reverse(\"comicsite.views._register\", \n kwargs={\"site_short_name\":project.short_name})\n factory = RequestFactory()\n request = factory.get(url)\n request.user = user\n self.apply_standard_middleware(request)\n \n response = _register(request,project.short_name)\n \n \n self.assertEqual(response.status_code,\n 200,\n \"After registering as user %s at '%s', page did not\"\n \" load properly\" % (user.username,url))\n \n self.assertTrue(project.is_participant(user),\n \"After registering as user %s at '%s', user does not \"\n \" appear to be registered.\" % (user.username,url))",
"def test_registration(self):\n\n print(\" --------------------------- Test 1 - Registration ----------------------------\")\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n response = register_user(user_id, password, currency)\n data = response.json()['message']\n self.assertEqual(response.json()['code'], 201)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(response.headers['Content-Type'] == 'application/json')\n print(json.dumps(data, indent=4))",
"def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)",
"def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)",
"def test_register(self):\n users = User.objects.filter(username='test')\n self.assertTrue(len(users) == 0)\n\n username = \"test3\"\n data = {'username': username, 'password': \"123test\", 'email': '[email protected]',\n 'newsletter': 'false', 'research': 'true', 'device': self.device}\n\n response = self.requestRegistration(data)\n\n self.assertTrue('client_id' in response.data)\n self.assertTrue(not 'password' in response.data)\n\n users = User.objects.filter(username=username)\n self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n self.assertTrue(profile.research)\n self.assertFalse(profile.newsletter)\n\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)\n self.assertTrue(phone.cordova == self.device['cordova'])",
"def test_registration(self):\n response = self.client.post(\n '/api/v1/auth/register',\n data=json.dumps(dict(\n username='joe',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully registered.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)",
"def test_registry(self):\n validate_registry()",
"def register_vim(cls,\n cloud_owner: str,\n cloud_region_id: str,\n default_tenant: str = None) -> None:\n cls.send_message(\n \"POST\",\n \"Register VIM instance to ONAP\",\n f\"{cls.base_url}/{cloud_owner}/{cloud_region_id}/registry\",\n data={\"defaultTenant\": default_tenant} if default_tenant else None\n )",
"def test_transportzone_create(self):\n self.assertTrue(True)",
"def test_cloud_service(self):\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n cloud = CloudCredentials.objects.cloud_service()\n self.assertEqual(cloud.access_token, ACCESS_TOKEN)\n cur.delete()",
"def test_register(self):\n app = self.create_app()\n c = app.test_client()\n\n # test response of register page\n c.get('/auth/register')\n self.assert_template_used(\"auth/register.html\")\n\n # test registering user\n rv = register(c, app.config['USERNAME'], app.config['PASSWORD'])\n self.assert_status(rv, 200)\n\n # test registering user with the same name\n register(c, app.config['USERNAME'], app.config['PASSWORD'])\n self.assert_message_flashed(f\"User {app.config['USERNAME']} is already registered.\")",
"def test_register(self):\n\t\tresponse = self.client.get('/register')\n\t\tself.assertContains(response, 'Register', 3, 200)",
"def setUp(self):\n self.manager, self.proxy = tests.utils.setup_xmlrpc()\n self.proxy.provider.register(\n PROVIDER_ID, USERNAME, PASSWORD, URL, TENANT, PROVIDER_TYPE\n )",
"def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()",
"def test_create_deployment(self):\n pass",
"def cloud():\n log.debug(\"Initializing Cloud API fixture\")\n\n api_gw = os.getenv('PELION_CLOUD_API_GW', 'https://api.us-east-1.mbedcloud.com')\n api_key = os.environ['PELION_CLOUD_API_KEY']\n cloud_api = PelionCloud(api_gw, api_key)\n\n payload = {'name': 'pelion_e2e_dynamic_api_key'}\n r = cloud_api.account.create_api_key(payload, expected_status_code=201)\n resp = r.json()\n cloud_api.rest_api.set_default_api_key(resp['key'])\n\n yield cloud_api\n\n log.debug('Cleaning out the Cloud API fixture')\n headers = {'Authorization': 'Bearer {}'.format(api_key)}\n cloud_api.account.delete_api_key(resp['id'], headers=headers, expected_status_code=204)",
"def test_create_anonymous_classical_register(self):\n cr = ClassicalRegister(size=3)\n self.assertIsInstance(cr, ClassicalRegister)"
] | [
"0.7157487",
"0.6737486",
"0.653346",
"0.63271844",
"0.6182842",
"0.6169539",
"0.6133024",
"0.60901374",
"0.60055655",
"0.596246",
"0.5962196",
"0.59534353",
"0.59406364",
"0.59078103",
"0.5855592",
"0.583869",
"0.583869",
"0.583538",
"0.5834689",
"0.57787716",
"0.5765356",
"0.57470506",
"0.57448334",
"0.5727166",
"0.5694414",
"0.568282",
"0.5679241",
"0.567218",
"0.56591815",
"0.56557417"
] | 0.93856466 | 0 |
Test case for register_network | def test_register_network(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_network(self):\n pass",
"def test_create_network():\n _network = Network()",
"def onRegisterNetworkNode(self):\n pass",
"def test_get_network(self):\n pass",
"def register_network(key, module):\n register(key, module, network_dict)",
"def test_networking_project_network_create(self):\n pass",
"def test_get_networks(self):\n pass",
"def test_api_use_royal_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/royal-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_api_use_virtual_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/virtual-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_api_use_web_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/web-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)",
"def test_networking_project_network_tag_create(self):\n pass",
"def setup_net(self):\n pass",
"def test_create_cluster_network(self):\n pass",
"def test_get_default_network(self):\n pass",
"def test_register(self):\n username = \"testuser\"\n hostname = \"testhost\"\n servername = \"testserver\"\n self.protocol.realname = \"testname\"\n self.protocol.password = None\n self.protocol.register(username, hostname, servername)\n expected = [\n \"NICK {}\".format(username),\n \"USER %s %s %s :%s\"\n % (username, hostname, servername, self.protocol.realname),\n \"\",\n ]\n self.assertEqualBufferValue(self.transport.value().split(b\"\\r\\n\"), expected)",
"def test_create_network(self):\n network = vertigo.create_network(\"test\")\n self.assert_equals(\"test\", network.address)\n network.address = \"foo\"\n self.assert_equals(\"foo\", network.address)\n network.enable_acking()\n self.assert_true(network.acking_enabled())\n network.disable_acking()\n self.assert_false(network.acking_enabled())\n network.num_ackers = 10\n self.assert_equals(10, network.num_ackers)\n network.ack_expire = 50000\n self.assert_equals(50000, network.ack_expire)\n component = network.from_verticle('test_feeder_verticle', main='test_feeder_verticle.py')\n self.assert_equals('test_feeder_verticle', component.name)\n self.assert_equals('test_feeder_verticle.py', component.main)\n component.workers = 4\n self.assert_equals(4, component.workers)\n component2 = component.to_verticle('test_worker_verticle')\n component2.main = 'test_worker_verticle.py'\n self.assert_equals('test_worker_verticle.py', component2.main)\n self.complete()",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def test_initialize_network(self, network_mock, create_mock, libvirt_mock):\n network = mock.Mock()\n network.name.return_value = 'baz'\n network_mock.return_value = network\n resources = lxc.LXCResources('foo', {'domain': 'bar', 'network': 'baz'})\n network_mock.assert_called_with(resources.hypervisor, 'foo', 'baz')\n create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name='baz')",
"def initialise_network(self):\n raise NotImplementedError",
"def test_register_cloud(self):\n pass",
"def test_create_router_no_external_network_and_add_network_port(self):\n # Create Router\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_ports_\" + suffix\n router_id = self.__create_router_test_helper__(router_name)\n\n # Create Network with only one subnet\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 253\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)",
"def test_networking_project_network_get(self):\n pass",
"def test_delete_network(self):\n pass",
"def test_get_unregistered_networks(self):\n pass",
"def _build_network(self):\n pass",
"def test_create_net_namespace(self):\n pass",
"def test_networking_project_network_tag_put(self):\n pass",
"def test_01(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_01\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"1-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network).doc(\"1-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"1-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"1-2-1\") \\\n .success()\n\n # network 2 sees router to networks 1 and 3\n tnet.sniffer2.start_state.doc(\"1-3-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1, 3],\n ).doc(\"1-3-1\") \\\n .success()\n\n # network 3 sees router to networks 1 and 2\n tnet.sniffer3.start_state.doc(\"1-4-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1, 2],\n ).doc(\"1-4-1\") \\\n .success()\n\n # run the group\n tnet.run()",
"def test_create_router_no_external_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_\" + suffix\n self.__create_router_test_helper__(router_name)"
] | [
"0.812138",
"0.75402945",
"0.7406809",
"0.7280738",
"0.7205411",
"0.70631146",
"0.6723832",
"0.66827446",
"0.66750836",
"0.6654871",
"0.6654254",
"0.6628287",
"0.6352071",
"0.6333451",
"0.63133043",
"0.62750834",
"0.62175715",
"0.620439",
"0.61911345",
"0.61797327",
"0.616214",
"0.61459416",
"0.61155266",
"0.61141074",
"0.60974604",
"0.6094114",
"0.6063906",
"0.6056362",
"0.6042115",
"0.60364646"
] | 0.93271506 | 0 |
Test case for register_template | def test_register_template(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_retrieve_template_registration(self):\n pass",
"def test_share_template_registration(self):\n pass",
"def test_update_template_registration(self):\n pass",
"def test_create_template_subsciption(self):\n pass",
"def test_unregister_template(self):\n pass",
"def test_list_template_registrations(self):\n pass",
"def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')",
"def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'inicio.html')",
"def test_create_namespaced_template(self):\n pass",
"def test_create_namespaced_processed_template(self):\n pass",
"def test_register_page_is_rendered(self):\n url = \"/regiter/\"\n response = self.client.get('/register/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")",
"def test_template_feedback(self):\r\n pass",
"def test_template(self):\n self.assertTemplateUsed(self.response, 'formularios.html')",
"def test_get_template_subscription(self):\n pass",
"def test_create_template_for_all_namespaces(self):\n pass",
"def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'cadastro.html')",
"def test_unshare_template_registration(self):\n pass",
"def test_create_subscription_template(self):\n pass",
"def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)",
"def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_tcp(self):\n pass",
"def register_template(self, name, template):\n key = name, len(template.args)\n existing = self.templates.get(key)\n if existing:\n raise mio.MIOException('The template \"%s/%d\" is already registered' % (name, len(template.args)))\n self.templates[key] = template",
"def test_templates(self):\n path = str(Template())\n self.assertTrue(os.path.exists(path))",
"def test_template_home(self):\n self.assertTemplateUsed(self.response, 'index.html')",
"def test_search_template(self):\n self.assertTemplateUsed(self.response, 'rango/search.html', f\"{FAILURE_HEADER}Your search() view does not use the expected template.{FAILURE_FOOTER}\")",
"def test_render_all_templates():\n assert templates.xhook__initialize({})\n assert templates.xhook__handlers({})\n assert templates.xhook__enable()\n assert templates.xhook__release()\n assert templates.asserts__call_count({})\n assert templates.asserts__calls({})",
"def test_update_template_subscription(self):\n pass",
"def test_replace_namespaced_template(self):\n pass",
"def test_create_device_template(self):\n pass",
"def test_otoroshi_controllers_adminapi_templates_controller_template_spec(self):\n pass",
"def test_create_activity_template(self):\n pass"
] | [
"0.8175605",
"0.7836516",
"0.7830867",
"0.75545096",
"0.73835033",
"0.73322237",
"0.72050023",
"0.6964111",
"0.6930974",
"0.69097716",
"0.6897082",
"0.68895656",
"0.68613935",
"0.6761555",
"0.6739039",
"0.66649055",
"0.6626135",
"0.6503044",
"0.6496168",
"0.64731616",
"0.6431013",
"0.64223015",
"0.6421094",
"0.6414328",
"0.6354712",
"0.6348884",
"0.6327726",
"0.62563956",
"0.62511426",
"0.620737"
] | 0.9210528 | 0 |
Test case for register_virtualization_realm | def test_register_virtualization_realm(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_allocate_virtualization_realm(self):\n pass",
"def test_get_virtualization_realm(self):\n pass",
"def test_set_virtualization_realm_active(self):\n pass",
"def test_update_virtualization_realm(self):\n pass",
"def test_get_virtualization_realm_resources(self):\n pass",
"def test_get_virtualization_realms(self):\n pass",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_list_virtualization_realm_templates(self):\n pass",
"def test_update_virt_realm(self):\n pass",
"def test_set_project_default_virtualization_realm(self):\n pass",
"def test_enable_virt_realm_remote_access(self):\n pass",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def test_remove_virt_realm(self):\n pass",
"def test_create_virtual_account(self):\n pass",
"def test_deallocate_virt_realm(self):\n pass",
"def test_invalidate_template_cache_in_virtualization_realm(self):\n pass",
"def test_update_virt_realm_remote_access_config(self):\n pass",
"def test_get_virtual_accounts(self):\n pass",
"def test_create_virtual_account_transfer(self):\n pass",
"def test_powerup(self):\n self.assertIdentical(self.realm, IRealm(self.store))",
"def test_get_team_owned_or_managed_virtualization_realms(self):\n pass",
"def test_create_virtual_account_client(self):\n pass",
"def test_disable_virt_realm_remote_access(self):\n pass",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def test_add_virtual_service(self):\n pass",
"def test_list_virt_realms_in_cloud(self):\n pass",
"def test_get_project_virt_realms(self):\n pass",
"def pre_virtual_machine_interface_create(self, resource_dict):\n pass",
"def pre_virtual_machine_create(self, resource_dict):\n pass"
] | [
"0.85439557",
"0.84896624",
"0.8129741",
"0.8071332",
"0.75970346",
"0.73914474",
"0.73835886",
"0.7202073",
"0.7140036",
"0.71066266",
"0.69417346",
"0.69129175",
"0.6883006",
"0.67515963",
"0.64610565",
"0.62607133",
"0.6211565",
"0.60823244",
"0.5968309",
"0.59140307",
"0.59134096",
"0.5895212",
"0.58668184",
"0.5841264",
"0.5812084",
"0.5777489",
"0.5777273",
"0.572611",
"0.57100344",
"0.565545"
] | 0.94979864 | 0 |
Test case for relaunch_deployment_run | def test_relaunch_deployment_run(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_retest_deployment_run(self):\n pass",
"def test_redeploy(self):\n pass",
"def test_launch_deployment(self):\n pass",
"def test_delete_deployment_run(self):\n pass",
"def test_get_deployment_run(self):\n pass",
"def test_release_deployment_run(self):\n pass",
"def test_get_deployment_runs1(self):\n pass",
"def test_execute_deployment(self):\n pass",
"def test_update_deployment(self):\n pass",
"def test_workflows_restart(self):\n pass",
"def test_get_deployment_runs(self):\n pass",
"def test_publish_deployment_run(self):\n pass",
"def test_redeploy_container_asset(self):\n pass",
"def test_redeploy_same_app():\n\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", [{\"name\": \"d1\"}, {\"name\": \"d2\"}])\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n # Deploy the same app with different deployments\n unused_deployments = app_state_manager.deploy_application(\n \"test_app\", [{\"name\": \"d2\"}, {\"name\": \"d3\"}]\n )\n assert unused_deployments == [\"d1\"]\n\n app_state_manager.deployment_state_manager.add_deployment_status(\n DeploymentStatusInfo(\"d3\", DeploymentStatus.UPDATING)\n )\n assert app_state_manager._application_states[\"test_app\"].deployments_to_delete == {\n \"d1\"\n }\n\n # After updating, the deployment should be deleted successfully, and\n # deployments_to_delete should be empty\n app_state_manager.deployment_state_manager.delete_deployment(\"d1\")\n app_state_manager.update()\n assert (\n app_state_manager._application_states[\"test_app\"].deployments_to_delete == set()\n )",
"def test_update_deployment_state(self):\n pass",
"def test_config_deploy_app(fail_deploy):\n signal = SignalActor.remote()\n\n @ray.remote\n def task():\n ray.get(signal.wait.remote())\n if fail_deploy:\n raise Exception(\"fail!\")\n\n object_ref = task.remote()\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.create_application_state(\"test_app\", object_ref)\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n signal.send.remote()\n time.sleep(2)\n if fail_deploy:\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n else:\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0)\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING",
"def test_set_deployment_run_lock(self):\n pass",
"def test_set_power_schedule_for_deployment_run(self):\n pass",
"def test_delete_deployment(self):\n pass",
"def test_restart_with_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/restart_test/3')\n test = Test.query.filter(Test.id == 3).first()\n self.assertEqual(test.finished, False)",
"def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")",
"def test_graceful_restart_services(\r\n simcore_stack_deployed_services: list[Service],\r\n docker_compose_service_key: str,\r\n exit_code: int,\r\n):\r\n assert simcore_stack_deployed_services\r\n\r\n assert any(\r\n s.name.endswith(docker_compose_service_key)\r\n for s in simcore_stack_deployed_services\r\n )\r\n\r\n # Service names:'pytest-simcore_static-webserver', 'pytest-simcore_webserver'\r\n service: Service = next(\r\n s\r\n for s in simcore_stack_deployed_services\r\n if s.name.endswith(f\"_{docker_compose_service_key}\")\r\n )\r\n\r\n # NOTE: This is how it looks status. Do not delete\r\n # \"Status\": {\r\n # \"Timestamp\": \"2019-11-18T19:33:30.448132327Z\",\r\n # \"State\": \"shutdown\",\r\n # \"Message\": \"shutdown\",\r\n # \"ContainerStatus\": {\r\n # \"ContainerID\": \"f2921c983ad934b4daa0c514543bbfd1a9ea89189bd1ad98b67d63b9f98f05be\",\r\n # \"PID\": 0,\r\n # \"ExitCode\": 143\r\n # },\r\n # \"PortStatus\": {}\r\n # },\r\n # \"DesiredState\": \"shutdown\",\r\n assert all(task[\"Status\"][\"State\"] == \"running\" for task in service.tasks())\r\n\r\n assert service.force_update()\r\n\r\n time.sleep(MAX_TIME_TO_RESTART_SERVICE)\r\n\r\n shutdown_tasks = service.tasks(filters={\"desired-state\": \"shutdown\"})\r\n assert len(shutdown_tasks) == 1\r\n\r\n task = shutdown_tasks[0]\r\n assert task[\"Status\"][\"ContainerStatus\"][\"ExitCode\"] == exit_code, (\r\n f\"{docker_compose_service_key} expected exit_code=={exit_code}; \"\r\n f\"got task_status={pformat(task['Status'])}\"\r\n )\r\n\r\n # TODO: check ps ax has TWO processes\r\n ## name = core_service_name.name.replace(\"simcore_\", \"\")\r\n ## cmd = f\"docker exec -it $(docker ps | grep {name} | awk '{{print $1}}') /bin/sh -c 'ps ax'\"\r\n # $ docker exec -it $(docker ps | grep storage | awk '{print $1}') /bin/sh -c 'ps ax'\r\n # PID USER TIME COMMAND\r\n # 1 root 0:00 /sbin/docker-init -- /bin/sh services/storage/docker/entry\r\n # 6 scu 0:02 {simcore-service} /usr/local/bin/python /usr/local/bin/sim\r\n # 54 root 0:00 ps ax\r",
"def first_deployment_mode():\n env.initial_deploy = True",
"def restart_interrupted_tasks(app, organization=None, team=None):\n pass",
"def test_update_app_deploy_failed():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", {})\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_unhealthy(0)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n # rerun update, application status should not make difference\n app_state_manager.update()\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED",
"def test_create_deployment(self):\n pass",
"def redeploy(*args, **kwargs):\n try:\n _redeploy(*args, **kwargs)\n except InvalidHTTPInvocationError:\n uri = os.getenv(\"SERVER_NAME\") + os.getenv(\"SCRIPT_NAME\")\n print(\"Status: 405 Method Not Allowed\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Status: 405 Method Not Allowed\")\n print(\n \"Invalid invocation. \"\n \"You must make a POST request with the secret.\\n\"\n \"\\n\"\n \" curl -XPOST -dsecret=XXXXXX \" + uri\n )\n except RedeployError as err:\n print(\"Status: 400 Bad Request\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Status: 400 Bad Request\")\n print(\"Could not redeploy:\", type(err).__name__)\n except subprocess.CalledProcessError as err:\n print(\"Status: 500 Server Error\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Status: 500 Server Error\")\n else:\n # All went okay :)\n print(\"Status: 200 OK\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Redeployment script run.\")",
"def test_remove_deployment(self):\n del_deployment, mod_del_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='del_dep')\n\n undel_deployment, mod_undel_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='undel_dep')\n\n blu_id = BLUEPRINT_ID + '-del-1'\n self.client.blueprints.upload(mod_del_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n blu_id = BLUEPRINT_ID + '-undel-1'\n self.client.blueprints.upload(mod_undel_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(undel_deployment.id, blu_id)\n\n mod_del_dep_bp2 = self._get_blueprint_path(\n os.path.join('remove_deployment', 'modification2'),\n 'remove_deployment_modification2.yaml')\n blu_id = BLUEPRINT_ID + '-del-2'\n self.client.blueprints.upload(mod_del_dep_bp2, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n\n self.assertEqual(len(deployment_update_list.items), 2)\n\n # Delete deployment and assert deployment updates were removed\n uninstall = self.client.executions.start(\n del_deployment.id, 'uninstall')\n self.wait_for_execution_to_end(uninstall)\n\n self.client.deployments.delete(del_deployment.id)\n wait_for_deployment_deletion_to_complete(\n del_deployment.id, self.client\n )\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list.items), 0)\n\n # Assert no other deployment updates were deleted\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=undel_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list), 1)",
"def test_redeploy_edges(self):\n pass",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')"
] | [
"0.84857965",
"0.7715781",
"0.7549263",
"0.72129637",
"0.7136321",
"0.70621735",
"0.7024416",
"0.70191705",
"0.6944505",
"0.69251263",
"0.6900094",
"0.6757972",
"0.6753752",
"0.6692603",
"0.65913856",
"0.65108556",
"0.643769",
"0.6378879",
"0.6287494",
"0.6216494",
"0.61866236",
"0.61633515",
"0.6145795",
"0.612663",
"0.61034477",
"0.6101395",
"0.6069627",
"0.6068478",
"0.605327",
"0.6041975"
] | 0.9446837 | 0 |
Test case for release_deployment_run | def test_release_deployment_run(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_publish_deployment_run(self):\n pass",
"def test_get_deployment_run(self):\n pass",
"def test_execute_deployment(self):\n pass",
"def test_retest_deployment_run(self):\n pass",
"def test_launch_deployment(self):\n pass",
"def test_get_deployment_runs1(self):\n pass",
"def test_get_deployment_runs(self):\n pass",
"def test_delete_deployment_run(self):\n pass",
"def test_create_deployment(self):\n pass",
"def test_update_deployment(self):\n pass",
"def test_relaunch_deployment_run(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_get_deployment(self):\n pass",
"def test_create_deployment_entire(self):\n pass",
"def test_get_deployment_run_reports(self):\n pass",
"def test_delete_deployment(self):\n pass",
"def test_download_deployment_run_test_report(self):\n pass",
"def _doReleaseBuild(self, farbconfig):\n print \"Building all releases ...\"\n try:\n rbr = runner.ReleaseBuildRunner(farbconfig)\n rbr.run()\n print \"Release build completed.\"\n except runner.ReleaseBuildRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)",
"def test_get_deployments(self):\n pass",
"def test_get_deployments(self):\n pass",
"def test_release(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Release branch with no newsfragments, all good.\")",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def test_update_deployment_state(self):\n pass",
"def test_get_deployment_resource(self):\n pass",
"def test_redeploy(self):\n pass",
"def deploy():",
"def test_get_deployment_resources(self):\n pass",
"def test_release_simple(client_mock):\n store = Store()\n store.release('testname', 123, ['somechannel'])\n\n expected_body = [{'revision': 123, 'channel': 'somechannel'}]\n assert client_mock.mock_calls == [\n call.post('/v1/charm/testname/releases', expected_body),\n ]",
"def test_redeploy_container_asset(self):\n pass",
"def deploy(parameters):\n\n print(\"In deploy module\")"
] | [
"0.84098387",
"0.82107514",
"0.81345266",
"0.7813035",
"0.77208346",
"0.765626",
"0.7605639",
"0.7540046",
"0.7485967",
"0.74764496",
"0.73526156",
"0.7260109",
"0.7260109",
"0.7181759",
"0.6839999",
"0.6838638",
"0.6826302",
"0.6701041",
"0.6663548",
"0.6663548",
"0.6631534",
"0.65908986",
"0.655472",
"0.65179604",
"0.64856833",
"0.6484098",
"0.64836353",
"0.64630723",
"0.64388925",
"0.6436478"
] | 0.9428278 | 0 |
Test case for remove_category_from_asset | def test_remove_category_from_asset(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_category_to_asset(self):\n pass",
"def test_remove_asset(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())",
"def test_remove_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())",
"def test_delete_category(self):\n pass",
"def test_delete_asset(self):\n pass",
"def test_delete_system_asset(self):\n pass",
"def test_delete_category(self):\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')",
"def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')",
"def test_delete_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.del_category()\n self.assertIn(b'successfully deleted category', rv.data)",
"def test_delete_software_asset_bundle(self):\n pass",
"def test_delete_a_category(self):\n self.test_add_category_success()\n response = self.client.delete('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('category deleted permanently',\n response.data.decode())",
"def remove_category(self, category):\n raise NotImplementedError()",
"def test_delete_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n rv = self.del_recipe()\n self.assertIn(b'deleted successfully', rv.data)",
"def test_delete_category(self):\n rv = self.client().post(\n '/categories/',\n data={'category_name': 'Sauces'})\n self.assertEqual(rv.status_code, 201)\n res = self.client().delete('/categories/1')\n #self.assertEqual(res.status_code, 200)\n # Test to see if it exists, should return a 404\n result = self.client().get('/categories/1')\n #self.assertEqual(result.status_code, 404)",
"def test_category_delete(category):\n category.delete()\n\n category = Category.query.filter_by(id=category.id).first()\n\n assert category is None",
"def test_delete_asset_type(self):\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n statuses = AssetStatus.objects.filter(asset=get_asset)\n for status in statuses:\n status.delete()\n get_asset.delete()\n self.assertEqual(self.all_assets.count(), 0)",
"def test_delete_collection_image(self):\n pass",
"def test_update_category(self):\n pass",
"def test_delete(self, init_db, category):\n category.delete()\n assert Category.get(category.id) == None",
"def test_remove(self):\n pass",
"def delete_category(self, category: str) -> None:\n for letter in self.data:\n if category in self.data[letter]:\n self.data[letter].pop(category)\n print(f'Categoria: {category} apagada do dicionário.')\n self.save()\n self.beautify_json()",
"def test_category_delete(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.delete('/api/v2/categories/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Deleted!')\n self.assertEqual(res.status_code, 200)",
"def test_category_mixed(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')",
"def test_create_category(self):\n pass",
"def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")",
"def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])",
"def remove_fact(self, category, line_number):\n raise NotImplementedError()",
"def test_delete_single_recipe_category(self):\n with self.client:\n response = self.register_user(\n \"Patrick\", \"Walukagga\", \n \"[email protected]\", \"telnetcmd123\"\n )\n # registered user login\n rep_login = self.login_user(\"[email protected]\", \"telnetcmd123\")\n # valid token\n headers=dict(\n Authorization='Bearer ' + json.loads(\n rep_login.data.decode()\n )['auth_token']\n )\n cresponse = self.create_category(\"Breakfast\", \n \"How to make breakfast\", \n headers)\n \n response = self.create_category(\"Lunchfast\", \n \"How to make lunchfast\", \n headers)\n response = self.client.delete('/recipe_category/1', \n headers=headers)\n self.assertEqual(response.status_code, 200)\n self.assertIn('Recipe category deleted', \n str(response.data))\n # delete recipe category not in database\n response = self.client.delete('/recipe_category/3', \n headers=headers, )\n self.assertEqual(response.status_code, 404)\n self.assertIn('No category found', \n str(response.data))",
"def test_delete_skills_category_when_skills_category_is_invalid(\n self, mock_skills_category_repo_get\n ):\n # Arrange\n with self.app.app_context():\n mock_skills_category_repo_get.return_value = None\n skills_category_controler = SkillCategoryController(self.request_context)\n\n # Act\n result = skills_category_controler.delete_skills_category(1)\n\n # Assert\n assert result.status_code == 404\n assert (\n result.get_json()[\"msg\"] == \"Invalid or incorrect \"\n \"skills_category_id provided\"\n )",
"def test_CategoriesDelete(self):\n trans1 = DebitsCredits.objects.create(account=self.account,\n currency=self.euro,\n name=\"Shopping\",\n amount=1,\n category=self.cat1)\n self.cat1.delete()\n self.assertEqual(self.cat1.active, False)\n\n trans1.delete()\n self.assertEqual(self.cat1.transactions.all().count(), 0)\n\n self.cat1.delete()\n self.assertEqual(Category.objects.all().count(), 1)"
] | [
"0.7689884",
"0.74581015",
"0.73706794",
"0.7341034",
"0.73355156",
"0.69181174",
"0.68672204",
"0.65927577",
"0.653511",
"0.65268093",
"0.6309645",
"0.6302582",
"0.62733746",
"0.6266972",
"0.6256184",
"0.62476575",
"0.6126672",
"0.61234635",
"0.6052439",
"0.60357016",
"0.6004709",
"0.59781474",
"0.5963345",
"0.5930052",
"0.5916521",
"0.58939296",
"0.58798903",
"0.58688605",
"0.5863309",
"0.5861909"
] | 0.95206994 | 0 |
Test case for remove_project | def test_remove_project(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_remove_project_member(self):\n pass",
"def test_remove_trusted_project(self):\n pass",
"def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)",
"def test_remove_trusted_project1(self):\n pass",
"def test_remove_submission_service_from_project(self):\n pass",
"def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_remove_trusted_project2(self):\n pass",
"def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def delete_project(arn=None):\n pass",
"def test_replace_project(self):\n pass",
"def test_remove_trusted_project3(self):\n pass",
"def test_remove_trusted_project4(self):\n pass",
"def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects",
"def test_config_remove(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config remove project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('My Project', self.env.config.get('project', 'name'))",
"def test_remove_trusted_project6(self):\n pass",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def test_remove_role_from_project_member(self):\n pass",
"def pre_project_delete(self, resource_id):\n pass",
"def test_remove_trusted_project7(self):\n pass",
"def test_update_project(self):\n pass",
"def test_update_project(self):\n pass",
"def tearDown(self):\n Project.objects.all().delete()",
"def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))",
"def test_remove_trusted_project5(self):\n pass",
"def test_remove(self):\n pass",
"def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)",
"def test_add_project(self):\n pass"
] | [
"0.8767963",
"0.8767963",
"0.8237118",
"0.7877129",
"0.7595696",
"0.754786",
"0.751687",
"0.74362373",
"0.7412617",
"0.73787194",
"0.73299974",
"0.7294391",
"0.7245092",
"0.7237376",
"0.7234451",
"0.71789837",
"0.7103605",
"0.7058092",
"0.70438504",
"0.70320225",
"0.695753",
"0.69403905",
"0.6921306",
"0.6921306",
"0.691875",
"0.6916978",
"0.6900587",
"0.6897912",
"0.6883846",
"0.68651503"
] | 0.953181 | 0 |
Test case for remove_project_member | def test_remove_project_member(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_role_from_project_member(self):\n pass",
"def test_remove_project(self):\n pass",
"def test_delete_team_member(self):\n pass",
"def test_add_project_member(self):\n pass",
"def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_remove_trusted_project(self):\n pass",
"def test_remove_member_from_group(client):\n group = client.remove_members_from_group(TEAM_ID, GROUP_ID, 35555)\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert 35555 not in group.members",
"def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)",
"def test_remove_trusted_project1(self):\n pass",
"def test_remove_submission_service_from_project(self):\n pass",
"def test_remove_trusted_project2(self):\n pass",
"def test_teams_remove_user_from_team_v2(self):\n pass",
"def test_handle_remove(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user,\n test_user, other_user]\n self.db.query.return_value = [team]\n team_attach = [team.get_attachment()]\n with self.app.app_context():\n self.testcommand.handle(\"team add brs ID\", user)\n resp, code = self.testcommand.handle(\"team remove brs ID\", user)\n expect = {'attachments': team_attach,\n 'text': 'Removed ' 'User from brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n self.gh.remove_team_member.assert_called_once_with(\"myuser\",\n \"githubid\")",
"def test_remove_user(self):\n pass",
"def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def test_teams_remove_user_from_team_v1(self):\n pass",
"def test_remove_trusted_project4(self):\n pass",
"def test_remove_trusted_project3(self):\n pass",
"def test_remove_trusted_project6(self):\n pass",
"def test_remove_members_from_group(client):\n group = client.remove_members_from_group(TEAM_ID, GROUP_ID, [52911, 35555])\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert 52911 not in group.members\n assert 35555 not in group.members",
"async def on_member_remove(member):\r\n pass",
"def test_remove(self):\n pass",
"def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects",
"def test_remove_team_manager_from_team(self):\n pass",
"def test_remove_trusted_project7(self):\n pass",
"def test_unassign_managing_team(self):\n pass",
"def test_remove_trusted_project5(self):\n pass",
"def test_add_role_to_project_member(self):\n pass"
] | [
"0.85118395",
"0.8030549",
"0.7708815",
"0.7382339",
"0.713292",
"0.7131107",
"0.7131107",
"0.70848227",
"0.70440656",
"0.6995782",
"0.6969919",
"0.69667006",
"0.6948475",
"0.6910283",
"0.6872892",
"0.6840699",
"0.68179274",
"0.6802598",
"0.6795497",
"0.6773166",
"0.674492",
"0.6744637",
"0.6709652",
"0.6637181",
"0.6628085",
"0.66197973",
"0.6613969",
"0.6525986",
"0.6513797",
"0.6481585"
] | 0.9583643 | 0 |
Test case for remove_recurring_schedule | def test_remove_recurring_schedule(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_recurring_schedule(self):\n pass",
"def test_delete_schedule(self):\n response = self.client.open('/v1/schedule/{id}'.format(id=56),\n method='DELETE',\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_delete_monitoring_schedule_vendor_v3(self):\n pass",
"def delete_schedule(sender, instance, **kwargs):\n try:\n instance.schedule_on.delete()\n except (AssertionError, AttributeError) as e:\n print('No on schedule')\n try:\n instance.schedule_off.delete()\n except (AssertionError, AttributeError) as e:\n print('No off schedule')\n try:\n instance.schedule_on.crontab.delete()\n except (AssertionError, AttributeError) as e:\n print('No Crontab on')\n try:\n instance.schedule_off.crontab.delete()\n except (AssertionError, AttributeError) as e:\n print('No Crontab off')",
"def schedule_remove(retval=None):\n _scheduler_remove(getcurrent())\n r = schedule(retval)\n return r",
"def test_remove(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 0, sched)\n inst_map.remove(\"tmp\", 0)\n self.assertFalse(inst_map.has(\"tmp\", 0))\n with self.assertRaises(PulseError):\n inst_map.remove(\"not_there\", (0,))\n self.assertFalse(\"tmp\" in inst_map.qubit_instructions(0))",
"def stopSchedule(self):\n DPxStopDinSched()",
"def test_list_schedules(self):\n pass",
"def test_details_modal__delete_all_scheduled_rides(\n self, recurring_ride_factory: Factory, service_with_recurring_rides: fixture,\n ) -> None:\n ride: dict = recurring_ride_factory.create(service=service_with_recurring_rides)\n self.API.cancel_booked_ride(ride)\n\n self.rides.visit()\n\n self.rides.sidebar.select_tab('Active')\n row: SubscriptionRow = self.rides.ride_subscription_table.surface_subscription_row(\n ride['ride_subscription_id'],\n )\n row.open_kebab_menu()\n row.kebab_menu.details_button.click()\n\n row.details_modal.delete_all_rides()\n\n assert row.details_modal.summary == '6 total trips: 1 canceled; 5 deleted'",
"def unschedule(self):\n response = self._post(self.uri_for(\"unschedule\"), json.dumps({}))",
"def test_cron_workflow_service_terminate_cron_workflow(self):\n pass",
"def delete_scheduled_events():\n curr_date = date.today()\n\n scheduled_events_all = ScheduledEvent.objects.all()\n\n for event in scheduled_events_all:\n if (curr_date - event.event_date).days > 0:\n event.delete()",
"async def test_delete(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Set schedule to be interval based\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'deletetest'\n interval_schedule.process_name = \"sleep1\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(5)\n\n # Delete a scheduled task\n await scheduler.delete_schedule(interval_schedule.schedule_id)\n\n # Assert that process was deleted\n try:\n await scheduler.delete_schedule(interval_schedule.schedule_id)\n assert False\n except ScheduleNotFoundError:\n pass\n\n await self.stop_scheduler(scheduler)",
"def test_calendar_query_partial_recurring(self):\n raise SkipTest(\"test unimplemented\")",
"def remove_scheduled_spirit(self, schedule_id):\n\n raise NotImplementedError",
"def test_delete_monitoring_schedule_manufacturer_v3(self):\n pass",
"def test_calendar_query_expanded_recurring(self):\n raise SkipTest(\"test unimplemented\")",
"def test_cron_workflow_service_update_cron_workflow(self):\n pass",
"def test_remove_gate(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 0, sched)\n inst_map.remove(\"tmp\", 0)\n self.assertFalse(inst_map.has(\"tmp\", 0))\n with self.assertRaises(PulseError):\n inst_map.remove(\"not_there\", (0,))\n self.assertFalse(\"tmp\" in inst_map.qubit_instructions(0))",
"def delete_schedule(connection, id, fields=None, error_msg=None):\n return connection.delete(\n url=f'{connection.base_url}/api/schedules/{id}', params={'fields': fields}\n )",
"def mock_recurring_another_day_schedule() \\\n -> Generator[SwitcherV2Schedule, Any, None]:\n schedule_patch = patch(\n 'aioswitcher.schedules.SwitcherV2Schedule',\n recurring=True,\n start_time=create_random_time(),\n days=[WEEKDAY_TUP[get_weekday_for_day_delta(3)]])\n\n schedule = schedule_patch.start()\n yield schedule\n schedule_patch.stop()",
"def test_autoscaling_schedules_unset(self) -> None:\n if self.prod_env:\n schedules = self.autoscaling.describe_scheduled_actions(AutoScalingGroupName='saints-xctf-server-prod-asg')\n self.assertTrue(len(schedules.get('ScheduledUpdateGroupActions')) == 0)\n else:\n self.assertTrue(all([\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-morning',\n recurrence='30 11 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-morning',\n recurrence='30 13 * * 1-5', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-afternoon',\n recurrence='30 22 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-night',\n recurrence='30 3 * * 2-6', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekend', recurrence='30 11 * * 0,6',\n max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekend', recurrence='30 3 * * 0,1',\n max_size=0, min_size=0, desired_size=0)\n ]))",
"async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)",
"def test_details_modal__delete_ride(\n self, recurring_ride_factory: Factory, service_with_recurring_rides: fixture,\n ) -> None:\n ride: dict = recurring_ride_factory.create(service=service_with_recurring_rides)\n\n self.rides.visit()\n\n self.rides.sidebar.select_tab('Active')\n row: SubscriptionRow = self.rides.ride_subscription_table.surface_subscription_row(\n ride['ride_subscription_id'],\n )\n row.open_kebab_menu()\n row.kebab_menu.details_button.click()\n\n scheduled_ride: ScheduledRideRow = row.details_modal.scheduled_rides_list.scheduled_rides[1]\n scheduled_ride.delete_ride()\n\n assert scheduled_ride.status == 'DELETED'",
"def test_deleteEvent(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n client = APIClient()\n resp = client.delete('/api/events/christmas-party',\n { \"search\": {\"title\": \"christmas party\"}}, format='json')\n self.assertEqual(resp.status_code, 204)",
"def delete_job_schedule(self):\n job_schedule_delete = netapp_utils.zapi\\\n .NaElement.create_node_with_children(\n 'job-schedule-cron-destroy',\n **{'job-schedule-name': self.name})\n try:\n self.server.invoke_successfully(job_schedule_delete,\n enable_tunneling=True)\n except netapp_utils.zapi.NaApiError as error:\n self.module.fail_json(msg='Error deleting job schedule %s: %s'\n % (self.name, to_native(error)),\n exception=traceback.format_exc())",
"def delete_scheduled_run(scheduled_run_id):\n url = f\"{SCHEDULER_URI}/api/{scheduled_run_id}\"\n resp = requests.delete(url=url)\n is_success = resp.status_code == HTTPStatus.OK\n if not is_success:\n logging.warning(\"Failed to delete scheduled run: %s\", resp.text)\n return is_success",
"def delete_schedule(module, array):\n changed = True\n if not module.check_mode:\n try:\n current_state = array.get_pgroup(module.params['name'], schedule=True)\n if module.params['schedule'] == \"replication\":\n if current_state['replicate_enabled']:\n array.set_pgroup(module.params['name'], replicate_enabled=False)\n array.set_pgroup(module.params['name'], target_days=0, target_per_day=0,\n target_all_for=1)\n array.set_pgroup(module.params['name'], replicate_frequency=14400,\n replicate_blackout=None)\n else:\n changed = False\n else:\n if current_state['snap_enabled']:\n array.set_pgroup(module.params['name'], snap_enabled=False)\n array.set_pgroup(module.params['name'], days=0, per_day=0, all_for=1)\n array.set_pgroup(module.params['name'], snap_frequency=300)\n else:\n changed = False\n except Exception:\n module.fail_json(msg='Deleting pgroup {0} {1} schedule failed.'.format(module.params['name'],\n module.params['schedule']))\n module.exit_json(changed=changed)",
"def test_issue_delete_stop_watch(self):\n pass",
"def mock_recurring_tommorow_schedule() \\\n -> Generator[SwitcherV2Schedule, Any, None]:\n schedule_patch = patch(\n 'aioswitcher.schedules.SwitcherV2Schedule',\n recurring=True,\n start_time=create_random_time(),\n days=[WEEKDAY_TUP[get_weekday_for_day_delta()]])\n\n schedule = schedule_patch.start()\n yield schedule\n schedule_patch.stop()"
] | [
"0.7956822",
"0.67888874",
"0.6714517",
"0.65860903",
"0.65719694",
"0.653646",
"0.6340578",
"0.6334153",
"0.6301509",
"0.62353414",
"0.62305284",
"0.6219421",
"0.6205763",
"0.6189735",
"0.61695516",
"0.6119493",
"0.60855323",
"0.60843736",
"0.60748357",
"0.60578877",
"0.6016543",
"0.59807026",
"0.5940192",
"0.5901977",
"0.58846235",
"0.58819395",
"0.5880422",
"0.5871987",
"0.5860846",
"0.58342886"
] | 0.9500176 | 0 |
Test case for remove_role_from_project_member | def test_remove_role_from_project_member(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_project_member(self):\n pass",
"def test_add_role_to_project_member(self):\n pass",
"def test_delete_role(self):\n pass",
"def remove_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Revoke role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True",
"def test_delete_namespaced_role(self):\n pass",
"def test_ipam_roles_delete(self):\n pass",
"def test_delete_team_member(self):\n pass",
"def test_delete_cluster_role(self):\n pass",
"def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def test_remove_project(self):\n pass",
"def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)",
"async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"def test_teams_remove_user_from_team_v2(self):\n pass",
"def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))",
"async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"def test_replace_roles(self):\n pass",
"def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))",
"def test_delete_namespaced_role_binding(self):\n pass",
"def test_remove_user(self):\n pass",
"def test_delete_role(self):\n self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 204, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 2)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=self.project, user=self.assign_user\n ).count(),\n 0,\n )",
"def test_add_role(self):\n pass",
"async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass",
"def remove_role(self, principal, role):\n return permissions.utils.remove_local_role(self, principal, role)",
"def test_delete_namespaced_role_binding_restriction(self):\n pass",
"async def remove_from(self, target: discord.Member) -> None:\n role = await self.get_role(target.guild)\n if role:\n await target.remove_roles(role)\n\n if not role.members:\n await role.delete()",
"def clean_role():",
"def remove_role(self, role):\n if role.name in [r.name for r in self.roles]:\n remaining_if_any_roles = [r.to_python() for r in self.roles if not r.name == role.name]\n if remaining_if_any_roles:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$set': {'roles': remaining_if_any_roles}})\n else:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$unset': {'roles': 1}})",
"def test_teams_remove_user_from_team_v1(self):\n pass",
"def remove_permission_from_bucket(bucket_name, role_type, member_type):\n\n # initialize client & get bucket\n _, bucket, _ = create_client(bucket_name)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n \n # get member type\n member_value = get_member_bucket_level(member_type)\n\n # get role type\n role_value = get_role_bucket_level(role_type)\n\n for binding in policy.bindings:\n # print(binding)\n if binding[\"role\"] == role_value and binding.get(\"condition\") is None:\n # revoke role from member\n binding[\"members\"].discard(member_value)\n\n bucket.set_iam_policy(policy)\n\n print(\"removed {} with role {} from {}\".format(member_value, role_value, bucket_name))",
"def test_add_role_simple(self):\n pass"
] | [
"0.8425967",
"0.780259",
"0.7703239",
"0.76485175",
"0.7332525",
"0.7034757",
"0.6904417",
"0.69026774",
"0.68761796",
"0.6741146",
"0.67336035",
"0.66723615",
"0.66658014",
"0.6644678",
"0.6640216",
"0.6632175",
"0.6621736",
"0.66168046",
"0.6616609",
"0.6601481",
"0.6590669",
"0.65870714",
"0.6574725",
"0.6565463",
"0.6552718",
"0.6520033",
"0.6502764",
"0.6476769",
"0.6460445",
"0.64522094"
] | 0.96308094 | 0 |
Test case for remove_submission_service_from_project | def test_remove_submission_service_from_project(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_submission_service_to_project(self):\n pass",
"def test_remove_project(self):\n pass",
"def test_update_submission_service(self):\n pass",
"def test_remove_project_member(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_remove_trusted_project(self):\n pass",
"def test_delete_deployment_run(self):\n pass",
"def test_remove_trusted_project7(self):\n pass",
"def test_delete_services_with_tasks(self):\n\n with self.assertRaises(UserError):\n self.services_pigs.unlink()\n\n # click on the archive button\n self.services_pigs.write({'active': False})\n\n with self.assertRaises(UserError):\n self.services_pigs.unlink()",
"def test_delete_deployment(self):\n pass",
"def test_remove_trusted_project1(self):\n pass",
"def test_remove_trusted_project6(self):\n pass",
"def test_ticket_type_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type remove task')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_remove_trusted_project4(self):\n pass",
"def test_remove_workflow_definition(self):\n pass",
"def test_remove_trusted_project2(self):\n pass",
"def test_ipam_services_delete(self):\n pass",
"def test_remove_trusted_project3(self):\n pass",
"def test_remove(self):\n pass",
"def test_issue_delete_subscription(self):\n pass",
"def test_list_submission_serivces_for_project(self):\n pass",
"def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass",
"def test_remove_trusted_project5(self):\n pass",
"def test_delete_goal(self):\n pass",
"def test_delete_team(self):\n pass",
"def test_removeProcess(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertEqual(len(self.pm.processes), 1)\r\n self.pm.removeProcess(\"foo\")\r\n self.assertEqual(len(self.pm.processes), 0)",
"def pre_project_delete(self, resource_id):\n pass",
"def test_website_companies_remove_additions(self):\n pass",
"def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects"
] | [
"0.7842967",
"0.7226375",
"0.700907",
"0.67904794",
"0.6711891",
"0.6711891",
"0.66065073",
"0.6568535",
"0.6364486",
"0.6360798",
"0.63558185",
"0.63431853",
"0.63110226",
"0.6294088",
"0.62873584",
"0.6136282",
"0.6130068",
"0.6092745",
"0.6082106",
"0.6066053",
"0.60383075",
"0.6030936",
"0.60194683",
"0.6009871",
"0.59696203",
"0.59475493",
"0.59352183",
"0.59345824",
"0.58983696",
"0.5886999"
] | 0.9661895 | 0 |
Test case for remove_team_manager_from_team | def test_remove_team_manager_from_team(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unassign_managing_team(self):\n pass",
"def test_add_team_manager_to_team(self):\n pass",
"def test_delete_team(self):\n pass",
"def test_teams_remove_user_from_team_v2(self):\n pass",
"def test_delete_team_member(self):\n pass",
"def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def test_teams_remove_user_from_team_v1(self):\n pass",
"def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])",
"def test_handle_remove(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user,\n test_user, other_user]\n self.db.query.return_value = [team]\n team_attach = [team.get_attachment()]\n with self.app.app_context():\n self.testcommand.handle(\"team add brs ID\", user)\n resp, code = self.testcommand.handle(\"team remove brs ID\", user)\n expect = {'attachments': team_attach,\n 'text': 'Removed ' 'User from brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n self.gh.remove_team_member.assert_called_once_with(\"myuser\",\n \"githubid\")",
"def test_teams_delete_team_v1(self):\n pass",
"def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def test_teams_remove_customer_from_workgroup_v1(self):\n pass",
"def test_removeperson(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t.remove_person(p2)\n t.store()\n\n t2 = model.Team(id=id)\n self.assertEqual(t2.persons, [p1.id, p3.id])\n\n with self.assertRaises(ValueError): # cannot be removed again\n t2.remove_person(p2)",
"def test_handle_delete(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"12345\"\n test_user = User(\"userid\")\n test_user.github_id = \"1234\"\n team.add_team_lead(\"1234\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (f\"Team brs deleted\", 200))\n self.db.delete.assert_called_once_with(Team, \"12345\")\n self.gh.org_delete_team.assert_called_once_with(int(\"12345\"))",
"def test_handle_unassign_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n with self.app.app_context():\r\n resp, code = \\\r\n self.testcommand.handle(\"project unassign 1\",\r\n user)\r\n assert (resp, code) == (\"Project successfully unassigned!\", 200)",
"def delete_workteam(WorkteamName=None):\n pass",
"def test_captain_removes_teammate_success(self):\n team = Team.create(name='foo', program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n captain = User.create(name='captain', email='[email protected]',\n user_type='user', owned_teams=[team.uid])\n team.captain_id = captain.uid\n user.put()\n captain.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(captain),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])",
"def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)",
"def test_remove_project_member(self):\n pass",
"def test_update_team(self):\n pass",
"def test_handle_delete_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))",
"def test_assign_managing_team(self):\n pass",
"def test_handle_remove_github_error(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.side_effect = GithubAPIException(\"error\")\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User removed unsuccessfully with the \"\n \"following error: error\", 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()",
"def remove_member(self, team_id, user_id):\n # Ensure that the team exists. Raises error if team does not exist.\n # If the user is the team owner the constraint that the owner has to be\n # a team member is violated.\n sql = 'SELECT owner_id FROM team WHERE id = ?'\n team = self.con.execute(sql, (team_id,)).fetchone()\n if team is None:\n raise err.UnknownTeamError(team_id)\n elif team['owner_id'] == user_id:\n raise err.ConstraintViolationError('cannot remove team owner')\n sql = 'DELETE FROM team_member WHERE team_id = ? AND user_id = ?'\n self.con.execute(sql, (team_id, user_id))\n self.con.commit()",
"def test_milestone_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('milestone remove milestone3')\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()",
"def test_remove_role_from_project_member(self):\n pass",
"def remove_self_from_participant_team(request, participant_team_pk):\n try:\n participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)\n except ParticipantTeam.DoesNotExist:\n response_data = {\"error\": \"ParticipantTeam does not exist!\"}\n return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)\n\n try:\n participant = Participant.objects.get(\n user=request.user, team=participant_team\n )\n except Participant.DoesNotExist:\n response_data = {\"error\": \"Sorry, you do not belong to this team!\"}\n return Response(response_data, status=status.HTTP_401_UNAUTHORIZED)\n\n if get_list_of_challenges_for_participant_team(\n [participant_team]\n ).exists():\n response_data = {\n \"error\": \"Sorry, you cannot delete this team since it has taken part in challenge(s)!\"\n }\n return Response(response_data, status=status.HTTP_403_FORBIDDEN)\n else:\n participant.delete()\n participants = Participant.objects.filter(team=participant_team)\n if participants.count() == 0:\n participant_team.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def team_delete(token_user, team_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n if team.team_type.name == 'single':\n abort(403, 'unable to delete team of type \"single\"')\n\n # check for permissions to delete the team\n if not (token_user.has_permission('team.delete.elevated') or\n (token_user.has_permission('team.delete') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to delete team')\n\n # deschedule reservations for the team then delete the team\n Reservation.query.filter_by(team_id=team.id).delete()\n get_db().delete(team)\n get_db().commit()\n\n return '', 204",
"def check_existing_teams(user, teams_from_lms):\n teams = user.teams.all()\n for team in teams:\n if team not in teams_from_lms:\n user.teams.remove(team)"
] | [
"0.7736126",
"0.76965594",
"0.76597404",
"0.7627749",
"0.75544083",
"0.7447805",
"0.74260515",
"0.7294839",
"0.712906",
"0.70324224",
"0.7022559",
"0.6872612",
"0.68682903",
"0.6837995",
"0.67318153",
"0.6667099",
"0.66502196",
"0.6639373",
"0.66099364",
"0.6507255",
"0.64261764",
"0.64110285",
"0.63497144",
"0.62894696",
"0.6275418",
"0.6271815",
"0.6266907",
"0.62279654",
"0.62108177",
"0.6196322"
] | 0.95891887 | 0 |
Test case for remove_trusted_project | def test_remove_trusted_project(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_trusted_project1(self):\n pass",
"def test_remove_trusted_project2(self):\n pass",
"def test_remove_trusted_project4(self):\n pass",
"def test_remove_trusted_project3(self):\n pass",
"def test_remove_trusted_project7(self):\n pass",
"def test_remove_trusted_project6(self):\n pass",
"def test_remove_trusted_project5(self):\n pass",
"def test_remove_project(self):\n pass",
"def test_remove_project_member(self):\n pass",
"def test_add_trusted_project(self):\n pass",
"def test_add_trusted_project4(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_add_trusted_project3(self):\n pass",
"def test_add_trusted_project6(self):\n pass",
"def test_add_trusted_project2(self):\n pass",
"def test_add_trusted_project1(self):\n pass",
"def test_add_trusted_project7(self):\n pass",
"def test_remove_submission_service_from_project(self):\n pass",
"def test_add_trusted_project5(self):\n pass",
"def test_remove_role_from_project_member(self):\n pass",
"def test_replace_project(self):\n pass",
"def test_config_remove(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config remove project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('My Project', self.env.config.get('project', 'name'))",
"def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()",
"def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects",
"def remove_fake_project_dir(request):\n def fin_remove_fake_project_dir():\n if os.path.isdir('fake-project'):\n utils.rmtree('fake-project')\n request.addfinalizer(fin_remove_fake_project_dir)",
"def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)",
"def clean(project, prepare_result):\n status = prepare.unprepare(project, prepare_result)\n\n errors = list(status.errors)\n\n if status:\n project.frontend.info(status.status_description)\n else:\n errors.extend([status.status_description])\n project.frontend.error(status.status_description)\n\n # we also nuke any \"debris\" from non-current choices, like old\n # environments or services\n def cleanup_dir(dirname):\n if os.path.isdir(dirname):\n project.frontend.info(\"Removing %s.\" % dirname)\n try:\n shutil.rmtree(dirname)\n except Exception as e:\n errors.append(\"Error removing %s: %s.\" % (dirname, str(e)))\n\n cleanup_dir(os.path.join(project.directory_path, \"services\"))\n\n # Clean up the environments only if they are inside the project\n our_root = project.directory_path\n for base in os.environ.get('ANACONDA_PROJECT_ENVS_PATH', '').split(os.pathsep):\n base = os.path.expanduser(base) if base else 'envs'\n apath = os.path.abspath(os.path.join(our_root, base))\n if apath == our_root:\n errors.append('Not removing the project directory itself.')\n elif apath.startswith(our_root + os.sep):\n cleanup_dir(apath)\n else:\n errors.append('Not removing external environment directory: %s' % base)\n\n if status and len(errors) == 0:\n return SimpleStatus(success=True, description=\"Cleaned.\", errors=errors)\n else:\n return SimpleStatus(success=False, description=\"Failed to clean everything up.\", errors=errors)",
"def test_delete_deployment_run(self):\n pass",
"def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)"
] | [
"0.92440224",
"0.91666335",
"0.9137631",
"0.912832",
"0.8961073",
"0.89420015",
"0.89193857",
"0.8280003",
"0.7697746",
"0.7512066",
"0.742216",
"0.7391039",
"0.7391039",
"0.73804307",
"0.7331833",
"0.73246884",
"0.7281088",
"0.72597283",
"0.72342914",
"0.7225931",
"0.6689102",
"0.66185385",
"0.6475445",
"0.6408722",
"0.6289708",
"0.6286686",
"0.6151559",
"0.6147611",
"0.6141992",
"0.613249"
] | 0.95486027 | 0 |
Test case for remove_trusted_project1 | def test_remove_trusted_project1(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_trusted_project2(self):\n pass",
"def test_remove_trusted_project3(self):\n pass",
"def test_remove_trusted_project4(self):\n pass",
"def test_remove_trusted_project(self):\n pass",
"def test_remove_trusted_project6(self):\n pass",
"def test_remove_trusted_project5(self):\n pass",
"def test_remove_trusted_project7(self):\n pass",
"def test_remove_project(self):\n pass",
"def test_add_trusted_project1(self):\n pass",
"def test_add_trusted_project4(self):\n pass",
"def test_add_trusted_project3(self):\n pass",
"def test_add_trusted_project6(self):\n pass",
"def test_add_trusted_project2(self):\n pass",
"def test_add_trusted_project5(self):\n pass",
"def test_add_trusted_project7(self):\n pass",
"def test_remove_project_member(self):\n pass",
"def test_add_trusted_project(self):\n pass",
"def test_remove_submission_service_from_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_remove_role_from_project_member(self):\n pass",
"def test_replace_project(self):\n pass",
"def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()",
"def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)",
"def test_config_remove(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config remove project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('My Project', self.env.config.get('project', 'name'))",
"def test_remove(self):\n pass",
"def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects",
"def test_delete_deployment_run(self):\n pass",
"def test_remove_user(self):\n pass",
"def clean(project, prepare_result):\n status = prepare.unprepare(project, prepare_result)\n\n errors = list(status.errors)\n\n if status:\n project.frontend.info(status.status_description)\n else:\n errors.extend([status.status_description])\n project.frontend.error(status.status_description)\n\n # we also nuke any \"debris\" from non-current choices, like old\n # environments or services\n def cleanup_dir(dirname):\n if os.path.isdir(dirname):\n project.frontend.info(\"Removing %s.\" % dirname)\n try:\n shutil.rmtree(dirname)\n except Exception as e:\n errors.append(\"Error removing %s: %s.\" % (dirname, str(e)))\n\n cleanup_dir(os.path.join(project.directory_path, \"services\"))\n\n # Clean up the environments only if they are inside the project\n our_root = project.directory_path\n for base in os.environ.get('ANACONDA_PROJECT_ENVS_PATH', '').split(os.pathsep):\n base = os.path.expanduser(base) if base else 'envs'\n apath = os.path.abspath(os.path.join(our_root, base))\n if apath == our_root:\n errors.append('Not removing the project directory itself.')\n elif apath.startswith(our_root + os.sep):\n cleanup_dir(apath)\n else:\n errors.append('Not removing external environment directory: %s' % base)\n\n if status and len(errors) == 0:\n return SimpleStatus(success=True, description=\"Cleaned.\", errors=errors)\n else:\n return SimpleStatus(success=False, description=\"Failed to clean everything up.\", errors=errors)"
] | [
"0.9360389",
"0.9320278",
"0.9286423",
"0.92278147",
"0.91655755",
"0.9156899",
"0.911215",
"0.7719205",
"0.7613016",
"0.7563217",
"0.7561766",
"0.75393796",
"0.75236434",
"0.74675465",
"0.73888713",
"0.7334428",
"0.7222747",
"0.69200206",
"0.68326104",
"0.68326104",
"0.6362742",
"0.62179446",
"0.61837775",
"0.6174206",
"0.6084889",
"0.6070509",
"0.60422534",
"0.60358465",
"0.5998751",
"0.59644634"
] | 0.95638967 | 0 |
Test case for remove_trusted_project2 | def test_remove_trusted_project2(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_trusted_project1(self):\n pass",
"def test_remove_trusted_project3(self):\n pass",
"def test_remove_trusted_project4(self):\n pass",
"def test_remove_trusted_project(self):\n pass",
"def test_remove_trusted_project6(self):\n pass",
"def test_remove_trusted_project5(self):\n pass",
"def test_remove_trusted_project7(self):\n pass",
"def test_add_trusted_project2(self):\n pass",
"def test_remove_project(self):\n pass",
"def test_add_trusted_project3(self):\n pass",
"def test_add_trusted_project1(self):\n pass",
"def test_add_trusted_project4(self):\n pass",
"def test_add_trusted_project6(self):\n pass",
"def test_remove_project_member(self):\n pass",
"def test_add_trusted_project5(self):\n pass",
"def test_add_trusted_project7(self):\n pass",
"def test_add_trusted_project(self):\n pass",
"def test_remove_submission_service_from_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def test_remove_role_from_project_member(self):\n pass",
"def test_replace_project(self):\n pass",
"def test_remove(self):\n pass",
"def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)",
"def test_teams_remove_user_from_team_v2(self):\n pass",
"def test_remove_user(self):\n pass",
"def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects",
"def test_config_remove(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config remove project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('My Project', self.env.config.get('project', 'name'))",
"def test_provider_system_hook_file_remove(change_dir, fix_file_perms):\n o = tackle(context_file='remove.yaml', no_input=True)\n assert o['if_file']\n assert not o['not_file']\n assert o['if_files']\n assert not o['not_files']",
"def test_unassign_managing_team(self):\n pass"
] | [
"0.94368666",
"0.92541194",
"0.9170183",
"0.9087056",
"0.9065251",
"0.9031328",
"0.8988181",
"0.7653349",
"0.75945485",
"0.74361837",
"0.7426623",
"0.7392154",
"0.7378052",
"0.73119617",
"0.7280662",
"0.72011167",
"0.7040344",
"0.6709958",
"0.66355455",
"0.66355455",
"0.64193225",
"0.6210254",
"0.61222047",
"0.6082845",
"0.6061785",
"0.60132205",
"0.59831333",
"0.59571093",
"0.5883296",
"0.5870961"
] | 0.95377606 | 0 |