Columns: text (string, lengths 15 to 7.82k), ids (sequence, lengths 1 to 7)
def METHOD_NAME(monkeypatch, kwargs, base, url_root): monkeypatch.setattr(Response, "autocorrect_location_header", True) @Request.application def app(request): # for header assert request.remote_addr == "192.168.0.1" # proto, host, port, prefix headers assert request.url_root == url_root urls = url_map.bind_to_environ(request.environ) parrot_url = urls.build("parrot") # build includes prefix assert urls.build("parrot") == "/".join((request.script_root, "parrot")) # match doesn't include prefix assert urls.match("/parrot")[0] == "parrot" # With autocorrect_location_header enabled, location header will # start with url_root return redirect(parrot_url) url_map = Map([Rule("/parrot", endpoint="parrot")]) app = ProxyFix(app, **kwargs) base.setdefault("REMOTE_ADDR", "192.168.0.1") environ = create_environ(environ_overrides=base) # host is always added, remove it if the test doesn't set it if "HTTP_HOST" not in base: del environ["HTTP_HOST"] response = Client(app).open(Request(environ)) assert response.location == f"{url_root}parrot"
[ 9, 127, 1112 ]
async def METHOD_NAME(self, project_id, region, subnetwork_id): try: gce_client = self._get_client() return await run_concurrently( lambda: gce_client.subnetworks().get(project=project_id, region=region, subnetwork=subnetwork_id).execute() ) except Exception as e: if 'was not found' in str(e): print_warning(f'Failed to retrieve subnetwork: {e}') else: print_exception(f'Failed to retrieve subnetwork: {e}') return None
[ 19, 10959 ]
def METHOD_NAME(tmpdir): logger = CSVLogger(tmpdir, flush_logs_every_n_steps=2) metrics = {"float": 0.3, "int": 1, "FloatTensor": torch.tensor(0.1), "IntTensor": torch.tensor(1)} logger.save = MagicMock() logger.log_metrics(metrics, step=0) logger.save.assert_not_called() logger.log_metrics(metrics, step=1) logger.save.assert_called_once()
[ 9, 1579, 293, 1910 ]
def METHOD_NAME(self, session_factory): event.listen(session_factory, "do_orm_execute", self._do_orm_execute)
[ 4243, 69, 240 ]
async def METHOD_NAME(pipeline_response): deserialized = self._deserialize("OperationListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(game_name: str, setting: Optional[str] = None) -> pyspiel.Game: """Creates an OpenSpiel game with the specified setting. Args: game_name: Name of a registered game, e.g. mfg_crowd_modelling_2d. setting: Name of the pre-defined setting. If None, game_name will be used instead. The setting should be present in the GAME_SETTINGS map above. Returns: a Game. """ setting = setting or game_name params = GAME_SETTINGS.get(setting) if params is None: raise ValueError(f"{setting} setting does not exist for {game_name}.") logging.info("Creating %s game with parameters: %r", game_name, params) # Dynamic routing game requires setting the network and demand explicitly. if game_name == "python_mfg_dynamic_routing": # Create a copy since we modify it below removing the network key. params = params.copy() network = params.pop("network") network, od_demand = DYNAMIC_ROUTING_NETWORK[network] return dynamic_routing.MeanFieldRoutingGame( params, network=network, od_demand=od_demand) return pyspiel.load_game(game_name, params)
[ 129, 2674, 41, 1333 ]
def METHOD_NAME(self, sr, sr_uuid): self.sr = sr self.sr_uuid = sr_uuid
[ 248, 176 ]
def METHOD_NAME(ntree, arrange=True): nodeoutput = outputnode_search(ntree) if nodeoutput is None: # print ("nodeoutput is None") return None a = [] a.append([]) for i in nodeoutput: a[0].append(i) level = 0 while a[level]: a.append([]) for node in a[level]: inputlist = [i for i in node.inputs if i.is_linked] if inputlist: for input in inputlist: for nlinks in input.links: node1 = nlinks.from_node a[level + 1].append(node1) else: pass level += 1 del a[level] level -= 1 # remove duplicate nodes at the same level, first wins for x, nodes in enumerate(a): a[x] = list(OrderedDict(zip(a[x], repeat(None)))) # remove duplicate nodes in all levels, last wins top = level for row1 in range(top, 1, -1): for col1 in a[row1]: for row2 in range(row1 - 1, 0, -1): for col2 in a[row2]: if col1 == col2: a[row2].remove(col2) break """ for x, i in enumerate(a): print (x) for j in i: print (j) #print() """ """ #add node frames to nodelist frames = [] print ("Frames:") print ("level:", level) print ("a:",a) for row in range(level, 0, -1): for i, node in enumerate(a[row]): if node.parent: print ("Frame found:", node.parent, node) #if frame already added to the list ? frame = node.parent #remove node del a[row][i] if frame not in frames: frames.append(frame) #add frame to the same place than node was a[row].insert(i, frame) pprint.pprint(a) """ # return None ######################################## if not arrange: nodelist = [j for i in a for j in i] nodes_odd(ntree, nodelist=nodelist) return None ######################################## levelmax = level + 1 level = 0 values.x_last = 0 while level < levelmax: values.average_y = 0 nodes = [x for x in a[level]] # print ("level, nodes:", level, nodes) nodes_arrange(nodes, level) level = level + 1 return None
[ 480, 3972 ]
def METHOD_NAME(self): """Test that to_rdkit_mol returns correct indices and atom mappings.""" bond_order_dict = {"SINGLE": 1, "DOUBLE": 2, "TRIPLE": 3, "AROMATIC": 1.5} mol = Molecule().from_smiles("C1CCC=C1C=O") rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=False, return_mapping=True) for atom in mol.atoms: # Check that all atoms are found in mapping assert atom in rd_atom_indices # Check that all bonds are in rdkitmol with correct mapping and order for connected_atom, bond in atom.bonds.items(): bond_type = str(rdkitmol.GetBondBetweenAtoms(rd_atom_indices[atom], rd_atom_indices[connected_atom]).GetBondType()) rdkit_bond_order = bond_order_dict[bond_type] assert bond.order == rdkit_bond_order # Test for remove_h = True rdkitmol2, rd_atom_indices2 = to_rdkit_mol(mol, remove_h=True, return_mapping=True) for atom in mol.atoms: # Check that all non-hydrogen atoms are found in mapping if atom.symbol != "H": assert atom in rd_atom_indices2 # Check that all bonds connected to non-hydrogen have the correct mapping and order for connected_atom, bond in atom.bonds.items(): if connected_atom.symbol != "H": bond_type = str(rdkitmol2.GetBondBetweenAtoms(rd_atom_indices2[atom], rd_atom_indices2[connected_atom]).GetBondType()) rdkit_bond_order = bond_order_dict[bond_type] assert bond.order == rdkit_bond_order
[ 9, 4637, 445, 1170 ]
def METHOD_NAME(self, entry): text = entry.get_text().strip() if self._required and len(text) == 0: entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "gtk-cancel") else: entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "gtk-apply")
[ 69, 475, 1180 ]
def METHOD_NAME(error): print('D-Bus call failed: ' + str(error)) mainloop.quit()
[ 1680, 168, 905 ]
def METHOD_NAME(self): self.thread_pool_executor_patch = patch("samcli.lib.sync.sync_flow_executor.ThreadPoolExecutor") self.thread_pool_executor_mock = self.thread_pool_executor_patch.start() self.thread_pool_executor = self.thread_pool_executor_mock.return_value self.thread_pool_executor.__enter__.return_value = self.thread_pool_executor self.lock_distributor_patch = patch("samcli.lib.sync.sync_flow_executor.LockDistributor") self.lock_distributor_mock = self.lock_distributor_patch.start() self.lock_distributor = self.lock_distributor_mock.return_value self.executor = ContinuousSyncFlowExecutor()
[ 0, 1 ]
def METHOD_NAME(self) -> dict: return METHOD_NAME.loads(self.payload)
[ 763 ]
def METHOD_NAME(self, sync_bb, sync_gl, sync_gh): sync_gh.side_effect = SyncServiceError sync_bb.side_effect = SyncServiceError r = sync_remote_repositories(self.user.pk) self.assertIn("GitHub", r["error"]) self.assertIn("Bitbucket", r["error"]) self.assertNotIn("GitLab", r["error"]) sync_bb.assert_called_once() sync_gl.assert_called_once() sync_gh.assert_called_once()
[ 9, 164, 1230, 13867, 4138, 489, 206 ]
def METHOD_NAME(self): root = pyhit.load(os.path.join('..', '..', 'test_files', 'test.hit')) self.assertEqual(len(root(1)), 2) sec = root(1).insert(0, 'B-3') self.assertEqual(len(root(1)), 3) self.assertIs(root(1)(0), sec) self.assertIsNone(sec.get('year')) sec['year'] = 1980 self.assertEqual(sec.get('year'), 1980)
[ 9, 408 ]
def METHOD_NAME(self) -> list[str]: ...
[ 19, 551, 219, 3301 ]
def METHOD_NAME(self):
[ 9, 8302, 4177, 157 ]
async def METHOD_NAME(next_link=None): request = prepare_request(next_link) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response
[ 19, 243 ]
def METHOD_NAME(): input_variable_name = "input" input_variable_value = 42 output_variable_name = "output" sparkcommand = SendStringToSparkCommand( input_variable_name, input_variable_value, output_variable_name ) with pytest.raises( BadUserDataException, ): sparkcommand.to_command( "invalid", input_variable_name, input_variable_value, output_variable_name, )
[ 9, 24, 462, 532 ]
def METHOD_NAME(): global model global device # Get the path where the deployed model can be found. model_filename = 'cifar10torch.pt' model_file_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename) device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') model = SimpleDLA() model.to(device) saved_state_dict = torch.load(model_file_path, map_location=device) cleaned_dict = OrderedDict() for key, value in saved_state_dict.items(): if key.startswith("module"): first_dot = key.find(".") key = key[first_dot + 1:] cleaned_dict[key] = value model.load_state_dict(cleaned_dict) model.eval()
[ 176 ]
def METHOD_NAME(self, form_data): sender = MessageSender(self.object, self.message, form_data) sent_message = sender.send() return redirect( reverse( "letter-sent", kwargs={"letter_id": self.object.id, "message_id": sent_message.id}, ) )
[ 353, 3371 ]
def METHOD_NAME(self) -> str: """ Resource location """ return pulumi.get(self, "location")
[ 708 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME( requests: Requests, info_resource_list: List[tuple], registration_enabled: bool, search_enabled: bool, repeater_id: str, ) -> List[tuple]: info_resource_list_to_send = [] for info, resource in info_resource_list: if resource['resourceType'] != 'Patient': info_resource_list_to_send.append((info, resource)) continue if info.extra_fields['external_id']: # Patient is already registered info_resource_list_to_send.append((info, resource)) continue if search_enabled: patient = find_patient(requests, resource) # Raises DuplicateWarning else: patient = None if patient: _set_external_id(info, patient['id'], repeater_id) info_resource_list_to_send.append((info, resource)) elif registration_enabled: patient = register_patient(requests, resource) _set_external_id(info, patient['id'], repeater_id) # Don't append `resource` to `info_resource_list_to_send` # because the remote service has all its data now. return info_resource_list_to_send
[ 372, 13881 ]
def METHOD_NAME(h, s, v): if s == 0.0: return v, v, v i = int(h * 6.0) f = (h * 6.0) - i p = v * (1.0 - s) q = v * (1.0 - s * f) t = v * (1.0 - s * (1.0 - f)) i = i % 6 if i == 0: return v, t, p if i == 1: return q, v, p if i == 2: return p, v, t if i == 3: return p, q, v if i == 4: return t, p, v if i == 5: return v, p, q
[ 4612, 24, 2310 ]
def METHOD_NAME() -> None: p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter) p.add_argument('filenames', nargs='+') p.add_argument('--passes', type=int, default=1, help="Number of passes (0 means unlimited)") p.add_argument('--pattern', required=True, action='append', default=[], dest='patterns', help="Pattern to scan for") p.add_argument('--inplace', '-i', action='store_true', help="Patch file in place") p.add_argument('--dry-run', action='store_true', help="Don't patch files or print patching results") p.add_argument('--force', '-f', action='store_true', help="Perform changes even if not completely safe") p.add_argument('--diff', action='store_true', help="Print diff output on stdout") p.add_argument('--debug', '-d', action='store_true', help="Enable debugging") p.add_argument('--verbose', '-v', action='store_true', help="Verbose logging on stderr") p.add_argument('--table', action='store_true', help="Print CSV table of type information") p.add_argument_group("Valid pattern names", PATTERN_HELP) args = p.parse_args() loglevel = (logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.WARN) logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel) DBG("args: %r", args) process_all_files(p, args)
[ 57 ]
def METHOD_NAME(self, stages, path, scenario): """Validate the all of the stages.""" matches = [] categories = set() if len(stages) < 1: # pylint: disable=C1801 self.logger.debug( "Stages was empty. Should have been caught by generic linting." ) return matches for sidx, stage in enumerate(stages): for aidx, action in enumerate(stage.get("Actions", [])): action_type_id = action.get("ActionTypeId") categories.add(action_type_id.get("Category")) if sidx > 0 and action_type_id.get("Category") == "Source": message = ( "Only the first stage of a pipeline may contain source actions." ) matches.append( RuleMatch( path + [sidx, "Actions", aidx], self._format_error_message(message, scenario), ) ) if not categories - set(["Source"]): message = "At least one stage in pipeline must contain an action that is not a source action." matches.append( RuleMatch(path, self._format_error_message(message, scenario)) ) return matches
[ 250, 1458, 1116 ]
def METHOD_NAME(): """Dummy implementation of _thread.exit().""" raise SystemExit
[ 538 ]
def METHOD_NAME(topology_st): """ Configure managed entries plugins(tempalte/definition), then perform a modrdn(deleteoldrdn 1), and make sure the server does not crash. """ GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX USER_NEWRDN = 'uid=\+user1' # # First enable dynamic plugins # try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) assert False topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) # # Add our org units (they should already exist, but do it just in case) # try: topology_st.standalone.add_s(Entry((PEOPLE_OU, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((GROUP_OU, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) assert False # # Add the template entry # try: topology_st.standalone.add_s(Entry((TEMPLATE_DN, { 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), 'cn': 'MEP Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] }))) except ldap.LDAPError as e: log.fatal('test_mep: Failed to add template entry: error ' + e.args[0]['desc']) assert False # # Add the definition entry # try: topology_st.standalone.add_s(Entry((CONFIG_DN, { 'objectclass': 'top extensibleObject'.split(), 'cn': 'config', 'originScope': PEOPLE_OU, 'originFilter': 'objectclass=posixAccount', 'managedBase': GROUP_OU, 'managedTemplate': TEMPLATE_DN }))) except ldap.LDAPError as e: log.fatal('test_mep: Failed to add config entry: error ' + e.args[0]['desc']) assert False # # Add an entry that meets the MEP scope # try: topology_st.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top posixAccount extensibleObject'.split(), 'uid': 'user1', 'cn': 'user1', 'uidNumber': '1', 'gidNumber': '1', 'homeDirectory': '/home/user1', 'description': 'uiser description' }))) except ldap.LDAPError as e: log.fatal('test_mep: Failed to user1: error ' + e.args[0]['desc']) assert False # # Perform a modrdn on USER_DN # try: topology_st.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) except ldap.LDAPError as e: log.error('Failed to modrdn: error ' + e.args[0]['desc']) assert False log.info('Test complete')
[ 9, -1 ]
def METHOD_NAME(path): ''' Returns True if the directory exists and is empty; False otherwise ''' try: next(os.scandir(str(path))) except StopIteration: return True except FileNotFoundError: pass return False
[ 1190, 35 ]
def METHOD_NAME(arch_check: Callable): """ Add extra checks for pattern checks in architecture checker. :param arch_check: pattern checker function. :param check_kind: Check kind. Different check kind requires different arguments. """ ArchChecker._pattern_checks.append(arch_check)
[ 238, 652, 250 ]
def METHOD_NAME(self): """Return table_id with version suffix stripped.""" return "_".join(self.bq_table.split("_")[:-1])
[ 3578, 410, -1 ]
def METHOD_NAME( lines: str, version_patterns: list[Pattern[str]], ignore: list[Pattern[str]] ) -> str: """Search a set of lines to find a match for the given regex""" new_guess = "" for pattern in version_patterns: match = pattern.search(lines) if match: new_guess = match.group(1).strip() for i in ignore: if str(i) in str(new_guess) or str(new_guess) in str(i): new_guess = "" break if new_guess != "": new_guess = new_guess.replace("_", ".") return new_guess.replace("-", ".") else: return "UNKNOWN"
[ 211, 416 ]
def METHOD_NAME(self) -> SecurityRequirement: # pragma: no cover """Return OpenAPI 3.1. :data:`SecurityRequirement <.openapi.spec.SecurityRequirement>` for the auth backend. Returns: An OpenAPI 3.1 :data:`SecurityRequirement <.openapi.spec.SecurityRequirement>` dictionary. """ raise NotImplementedError
[ 2326, 6577 ]
def METHOD_NAME(table_name, column_names, suffix=""): """ COPIED FROM https://github.com/django/django/blob /ba9ced3e9a643a05bc521f0a2e6d02e3569de374/django/db/backends/base/schema.py#L989 Generate a unique name for an index/unique constraint. The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = "%s%s" % ( names_digest(table_name, *column_names, length=8), suffix, ) max_length = connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[: max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = "%s_%s_%s" % ( table_name[:other_length], "_".join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name
[ 775, 5063, 2026, 724, 156, 8688 ]
def METHOD_NAME(): # fgd_path = r"F:\SteamLibrary\steamapps\common\Black Mesa\bin\bms.fgd" # fgd_path = r"H:\SteamLibrary\SteamApps\common\Team Fortress 2\bin\base.fgd" # fgd_path = r"H:\SteamLibrary\SteamApps\common\Team Fortress 2\bin\tf.fgd" # fgd_path = r"D:\SteamLibrary\steamapps\common\Portal\bin\portal.fgd" # fgd_path = r"D:\SteamLibrary\steamapps\common\Counter-Strike Global Offensive\bin\csgo.fgd" # fgd_path = r"D:\SteamLibrary\steamapps\common\Portal 2\bin\portal2.fgd" # fgd_path = r"H:\SteamLibrary\SteamApps\common\Left 4 Dead 2\bin\left4dead2.fgd" # fgd_path = r"F:\SteamLibrary\steamapps\common\Half-Life 2\bin\halflife2.fgd" fgd_path = r"H:\SteamLibrary\SteamApps\common\SourceFilmmaker\game\bin\swarm.fgd" ContentManager().scan_for_content(fgd_path) fgd: Fgd = FgdParse(fgd_path) processed_classes = [] buffer = '' buffer += """
[ 57 ]
def METHOD_NAME(): # Initialise display lcd_byte(0x33,LCD_CMD) lcd_byte(0x32,LCD_CMD) lcd_byte(0x0C,LCD_CMD) lcd_byte(0x06,LCD_CMD) lcd_byte(0x28,LCD_CMD) lcd_byte(0x01,LCD_CMD) time.sleep(E_DELAY)
[ 7488, 176 ]
def METHOD_NAME(self) -> None: connection = MysqlConnection( username="name", hostPort="hostPort", ) get_connection_fn = import_connection_fn( connection=connection, function_name="get_connection" ) self.assertIsNotNone(get_connection_fn) self.assertRaises( DynamicImportException, import_connection_fn, connection=connection, function_name="random", )
[ 9, 512, 19, 550 ]
def METHOD_NAME(self): super().METHOD_NAME() self.users.update({ 'staff_blogpost_edit_own': create_user( username='staff_blogpost_edit_own', is_staff=True, user_permissions=('change_blogpost',), ), 'staff_blogpost_edit_all': create_user( username='staff_blogpost_edit_all', is_staff=True, user_permissions=('change_blogpost', 'edit_all_post'), ), 'org_member': create_user( username='org_member', ), }) self.users['staff_organization_admin'].profile.organizations.add(self.organizations['open']) self.users['org_member'].profile.organizations.add(self.organizations['open']) self.basic_blogpost = create_blogpost( title='basic', authors=('staff_blogpost_edit_own',), ) self.visible_blogpost = create_blogpost( title='visible', visible=True, ) self.visible_blogpost_in_org = create_blogpost( title='visible_org', visible=True, global_post=False, organization=self.organizations['open'], authors=('staff_organization_admin',), ) self.non_visible_blogpost_in_org = create_blogpost( title='non_visible_org', visible=False, global_post=False, organization=self.organizations['open'], authors=('staff_organization_admin',), )
[ 0, 1, 9, 365 ]
def METHOD_NAME(request): """ Check whether a DAT file can be imported body: data { "data_file(required)": "template data file in DAT format" } return: check result { "can_override": "whether an override operation can be performed (bool)", "new_template": [ { "id": "ID of a template that can be created (integer)", "name": "name of a template that can be created (string)" } ], "override_template": [ { "id": "ID of a template that can be overridden (integer)", "name": "name of a template that can be overridden (string)", "template_id": "template UUID (string)" } ] } """ return base_check_before_import(request, CommonTemplate, [])
[ 250, 1553, 512 ]
def METHOD_NAME(self, timeout=None): with Timeout(timeout): sleep(0.1) while self.motor_state == MotorStates.MOVING: sleep(0.1)
[ 618, 1798, 47, 132 ]
def METHOD_NAME(self): self.check_args('/NumberField/?signature=%5B0%2C3%5D&galois_group=S3', '6.0.177147.2') self.check_args('/NumberField/?signature=%5B3%2C0%5D&galois_group=S3', '3.3.229.1') self.check_args('/NumberField/?signature=[4%2C0]&galois_group=C2xC2&class_number=3%2C6','4.4.1311025.1') self.check_args('/NumberField/?signature=[4%2C0]&galois_group=C2xC2&class_number=6%2C3','4.4.1311025.1') self.check_args('/NumberField/?signature=[4%2C0]&galois_group=C2xC2&class_number=5-6%2C3','4.4.485809.1')
[ 9, 265, 4352 ]
def METHOD_NAME(self, evt):
[ 69, 1462 ]
async def METHOD_NAME( by_address: bool, macos_use_bdaddr: bool, addresses: Iterable[str], uuids: Iterable[str], ): lock = asyncio.Lock() await asyncio.gather( *( connect_to_device(lock, by_address, macos_use_bdaddr, address, uuid) for address, uuid in zip(addresses, uuids) ) )
[ 57 ]
def METHOD_NAME(self): vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"] vocab_items = ["a", "b", "d", "e", "<UNK>"] self.assertCountEqual(vocab_counts, list(self.vocab.counts.keys())) self.assertCountEqual(vocab_items, list(self.vocab))
[ 9, 3259, 84, 7526, 4833 ]
def METHOD_NAME(sr_tokenizer): text = "(Један, два, три, четири, проба)." tokens = sr_tokenizer(text) assert tokens[len(tokens) - 1].text == "."
[ 9, 2457, 1345, 3286, 4283, 688 ]
def METHOD_NAME(self, name): self._p("%s CHARREF" % name)
[ 276, 13872 ]
def METHOD_NAME(x): return x + 1
[ -1 ]
def METHOD_NAME(self, line): self.writer.Write(line+"\n") i = 0 while i < 40 and not self.proc.HasExited: Thread.CurrentThread.Join(100) i += 1 return self.proc.ExitCode
[ 750, 61, 538 ]
def METHOD_NAME(output_wave,input_wave,input_flux,required_resolution,input_ivar=None,order=3,max_resolution=None): """Performs spline fit of input_flux vs. input_wave and resamples at output_wave Args: output_wave : 1D array of output wavelength samples input_wave : 1D array of input wavelengths input_flux : 1D array of input flux density required_resolution (float) : resolution for spline knot placement (same unit as wavelength) Options: input_ivar : 1D array of weights for input_flux order (int) : spline order max_resolution (float) : if not None and first fit fails, try once this resolution Returns: output_flux : 1D array of flux sampled at output_wave """ if input_ivar is not None : selection=np.where(input_ivar>0)[0] if selection.size < 2 : log=get_logger() log.error("cannot do spline fit because only {0:d} values with ivar>0".format(selection.size)) raise ValueError w1=input_wave[selection[0]] w2=input_wave[selection[-1]] else : w1=input_wave[0] w2=input_wave[-1] res=required_resolution n=int((w2-w1)/res) res=(w2-w1)/(n+1) knots=w1+res*(0.5+np.arange(n)) ## check that nodes are close to pixels dknots = abs(knots[:,None]-input_wave) mins = np.amin(dknots,axis=1) w=mins<res knots = knots[w] try : toto=scipy.interpolate.splrep(input_wave,input_flux,w=input_ivar,k=order,task=-1,t=knots) output_flux = scipy.interpolate.splev(output_wave,toto) except ValueError as err : log=get_logger() if max_resolution is not None and required_resolution < max_resolution : log.warning("spline fit failed with resolution={}, retrying with {}".format(required_resolution,max_resolution)) return METHOD_NAME(output_wave,input_wave,input_flux,max_resolution,input_ivar=input_ivar,order=3,max_resolution=None) else : log.error("spline fit failed") raise ValueError return output_flux
[ 6990, 90 ]
def METHOD_NAME(waveform_extractor, peak_sign: str = "neg"): """ In some situations spike sorters could return a spike index with a small shift related to the waveform peak. This function estimates and return these alignment shifts for the mean template. This function is internally used by `compute_spike_amplitudes()` to accurately retrieve the spike amplitudes. Parameters ---------- waveform_extractor: WaveformExtractor The waveform extractor peak_sign: str Sign of the template to compute best channels ('neg', 'pos', 'both') Returns ------- shifts: dict Dictionary with unit ids as keys and shifts as values """ sorting = waveform_extractor.sorting unit_ids = sorting.unit_ids extremum_channels_ids = get_template_extremum_channel(waveform_extractor, peak_sign=peak_sign) shifts = {} templates = waveform_extractor.get_all_templates(mode="average") for unit_ind, unit_id in enumerate(unit_ids): template = templates[unit_ind, :, :] chan_id = extremum_channels_ids[unit_id] chan_ind = waveform_extractor.channel_ids_to_indices([chan_id])[0] if peak_sign == "both": peak_pos = np.argmax(np.abs(template[:, chan_ind])) elif peak_sign == "neg": peak_pos = np.argmin(template[:, chan_ind]) elif peak_sign == "pos": peak_pos = np.argmax(template[:, chan_ind]) shift = peak_pos - waveform_extractor.nbefore shifts[unit_id] = shift return shifts
[ 19, 671, -1, 307, 4626, 929 ]
def METHOD_NAME(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags")
[ 114 ]
def METHOD_NAME(self): return self.renderer.METHOD_NAME( self.vert_pos, self.vert_col, self.vert_rad, torch.cat([self.cam_pos, self.cam_rot, self.cam_sensor]), self.gamma, 45.0, )
[ 76 ]
def METHOD_NAME(self): self.cr.fit(self.X, self.y, **{'confounder': self.multiple_confounders}) X_transformed = self.cr.transform(self.X, **{'confounder': self.multiple_confounders}) np.testing.assert_array_almost_equal(X_transformed[0], self.X_transformed)
[ 9, 107, -1 ]
def METHOD_NAME( num_datapoints: int, num_dims: int, negative: bool, jit_compile: bool, key_val: int ): key = jr.PRNGKey(key_val) D = build_data(num_datapoints, num_dims, key, binary=True) # Build model p = Prior( kernel=gpx.RBF(active_dims=list(range(num_dims))), mean_function=gpx.Constant() ) likelihood = Bernoulli(num_datapoints=num_datapoints) post = p * likelihood mll = NonConjugateMLL(negative=negative) assert isinstance(mll, AbstractObjective) if jit_compile: mll = jax.jit(mll) evaluation = mll(post, D) assert isinstance(evaluation, jax.Array) assert evaluation.shape == () mll2 = LogPosteriorDensity(negative=negative) if jit_compile: mll2 = jax.jit(mll2) assert mll2(post, D) == evaluation
[ 9, 256, 6374, -1 ]
def METHOD_NAME(fn): stateName = "__poll_" + fn.__name__ + "__" return _Descriptor(fn, stateName)
[ 103 ]
def METHOD_NAME(self, gsparams=None, **kwargs): """Create a version of the current object with the given gsparams .. note:: Unless you set ``propagate_gsparams=False``, this method will also update the gsparams of the wrapped component object. """ if gsparams == self.gsparams: return self from copy import copy ret = copy(self) ret._gsparams = GSParams.check(gsparams, self.gsparams, **kwargs) if self._propagate_gsparams: ret._orig_obj = self._orig_obj.METHOD_NAME(ret._gsparams) return ret
[ 41, 3350, 434 ]
def METHOD_NAME(self, mock_get): mock_get().status_code = 429 self.assertRaises(guardian.APILimitError, self.api.search, 'Slovenia')
[ 9, 58, 1467, 168 ]
def METHOD_NAME(self, bs: bytes, sz: int) -> int: return { 1: lambda x: x[0], 2: self.unpack16, 4: self.unpack32, 8: self.unpack64, }.get(sz)(bs)
[ 4739, 789 ]
async def METHOD_NAME(self, sockets: Optional[List[socket.socket]] = None) -> None: """Overridden startup that also sends connection information""" await super().METHOD_NAME(sockets) if not self.started: return write_to_pipe(self.connection_info)
[ 4294 ]
def METHOD_NAME(filters): entries = get_stock_ledger_entries_for_batch_no(filters) entries += get_stock_ledger_entries_for_batch_bundle(filters) return entries
[ 19, 2006, 5224, 109 ]
def METHOD_NAME(chdb, category_name_id_and_page_ids): def insert(cursor, chunk): cursor.executemany(''' INSERT IGNORE INTO categories VALUES (%s, %s) ''', ((category_id, category_name) for category_name, category_id, _ in chunk)) cursor.executemany(''' INSERT INTO articles_categories VALUES (%s, %s) ''', ((pageid, catid) for _, catid, pageids in chunk for pageid in pageids)) database.populate_snippets_links(cursor, category_ids = (cid for (_, cid, _) in chunk)) for c in ichunk(category_name_id_and_page_ids, 4096): chdb.execute_with_retry(insert, list(c)) chdb.execute_with_retry_s(''' INSERT INTO category_article_count SELECT category_id, COUNT(*) AS article_count FROM articles_categories GROUP BY category_id''')
[ 86, -1, 1267 ]
def METHOD_NAME(init_context): connection = init_context.resource_config["connection"] return DatabaseConnection(connection)
[ 1267, 191 ]
async def METHOD_NAME(loop_services: List[LoopService]): """ Only one signal handler can be active at a time, so this function takes a list of loop services and runs all of them with a global signal handler. """ def stop_all_services(self, *_): for service in loop_services: service._stop() signal.signal(signal.SIGINT, stop_all_services) signal.signal(signal.SIGTERM, stop_all_services) await asyncio.gather(*[service.start() for service in loop_services])
[ 22, 107, 3186 ]
def METHOD_NAME(self, wallet_file): """Gets birthheight of a wallet on node0""" with open(os.path.join(self.nodes[0].datadir, wallet_file), 'r', encoding="utf8") as f: for line in f: if line.startswith('# * Best block at time of backup'): wallet_birthheight = int(line.split(' ')[9]) return wallet_birthheight
[ 19, -1 ]
def METHOD_NAME(self, family: int, daddr: Tuple[int, int]) -> str: """ _ip_addr_to_str returns a string representation of an IPv4 or IPv6 address. It caches results in an LRU cache to reduce cost of conversion Args: family: socket.AF_INET (v4) or socket.AF_INET6 (v6) daddr: For IPv4, uint32 representation of address as the first item in a tuple. For IPv6, 16-byte array representation of address. Returns: String representation of IP address, e.g., '127.0.0.1' """ if family == AF_INET: return inet_ntop(AF_INET, pack('I', daddr[0])) elif family == AF_INET6: # noinspection PyTypeChecker return inet_ntop(AF_INET6, self.Addr(*daddr)) else: raise Exception("No valid socket family given!")
[ 1213, 990, 24, 3 ]
def METHOD_NAME(self) -> Optional[str]: pass
[ -1, 156 ]
def METHOD_NAME(protocol, connection, config): class TDMConnection(connection): def on_spawn(self, pos): self.send_chat(self.explain_game_mode()) self.send_chat(self.protocol.get_kill_count()) return connection.on_spawn(self, pos) def on_flag_take(self): if REMOVE_INTEL.get(): return False return connection.on_flag_take(self) def on_flag_capture(self): result = connection.on_flag_capture(self) self.team.kills += INTEL_POINTS.get() self.protocol.check_end_game(self) return result def on_kill(self, killer, type, grenade): result = connection.on_kill(self, killer, type, grenade) self.protocol.check_end_game(killer) return result def explain_game_mode(self): msg = 'Team Deathmatch: Kill the opposing team.' if not REMOVE_INTEL.get(): msg += ' Intel is worth %s kills.' % INTEL_POINTS.get() return msg class TDMProtocol(protocol): game_mode = CTF_MODE def on_flag_spawn(self, x, y, z, flag, entity_id): if REMOVE_INTEL.get(): return HIDE_COORD return protocol.on_flag_spawn(self, x, y, z, flag, entity_id) def get_kill_count(self): green_kills = self.green_team.kills blue_kills = self.blue_team.kills diff = green_kills - blue_kills if green_kills > blue_kills: return ("%s leads %s-%s (+%s, %s left). Playing to %s kills." % (self.green_team.name, green_kills, blue_kills, diff, KILL_LIMIT.get() - green_kills, KILL_LIMIT.get())) elif green_kills < blue_kills: return ("%s leads %s-%s (+%s, %s left). Playing to %s kills." % (self.blue_team.name, blue_kills, green_kills, -diff, KILL_LIMIT.get() - blue_kills, KILL_LIMIT.get())) else: return ("%s-%s, %s left. Playing to %s kills." % (green_kills, blue_kills, KILL_LIMIT.get() - green_kills, KILL_LIMIT.get())) # since its a team based game, we gonna split the caps # for all players in the team def do_captures(self, team, caps): while (team.score < caps): for player in team.get_players(): if team.score >= caps: break team.score += 1 intel_capture = IntelCapture() intel_capture.player_id = player.player_id intel_capture.winning = False self.broadcast_contained(intel_capture) def check_end_game(self, player): if SCORE_PERCENTAGE.get() and player: team = player.team caps_percent = math.floor( self.max_score*team.kills/KILL_LIMIT.get()) if caps_percent > team.score: self.do_captures(team, caps_percent) if self.green_team.kills >= KILL_LIMIT.get(): self.broadcast_chat("%s Team Wins, %s - %s" % (self.green_team.name, self.green_team.kills, self.blue_team.kills)) self.reset_game(player) protocol.on_game_end(self) elif self.blue_team.kills >= KILL_LIMIT.get(): self.broadcast_chat("%s Team Wins, %s - %s" % (self.blue_team.name, self.blue_team.kills, self.green_team.kills)) self.reset_game(player) protocol.on_game_end(self) return TDMProtocol, TDMConnection
[ 231, 782 ]
def METHOD_NAME(dataset, cat_list=None): def _has_valid_annotation(anno): # if it's empty, there is no annotation if len(anno) == 0: return False # if more than 1k pixels occupied in the image return sum(obj["area"] for obj in anno) > 1000 ids = [] for ds_idx, img_id in enumerate(dataset.ids): ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None) anno = dataset.coco.loadAnns(ann_ids) if cat_list: anno = [obj for obj in anno if obj["category_id"] in cat_list] if _has_valid_annotation(anno): ids.append(ds_idx) dataset = torch.utils.data.Subset(dataset, ids) return dataset
[ 777, 188, 3669, 529, 5360 ]
def METHOD_NAME(self, page: int) -> str: result = self._getSubpageItem(page, WhatsNewPagesModel.image_key) return "file:///" + (result if result else Resources.getPath(Resources.Images, "cura-icon.png"))
[ 19, 10916, 660, 1458 ]
def METHOD_NAME(self): finalVector = Vector(3, 3) finalVector -= 2 self.assertEqual(finalVector.x, 1) self.assertEqual(finalVector.y, 1)
[ 9, 1066, 5920, 1997 ]
def METHOD_NAME(jsonData, filename, sortKeys=True): logger.info('Writing Json File : {0!s:s}'.format(filename)) dir = os.path.dirname(filename) if len(dir) > 0 and not os.path.exists(dir): os.makedirs(dir) with closing(open(filename, 'w')) as outfile: json.dump(jsonData, outfile, ensure_ascii=True, sort_keys=sortKeys, indent=2, separators=(',', ': ')) logger.debug(jsonData) return
[ 77, 763, 171 ]
def METHOD_NAME( self, audio: Union[str, torch.Tensor], tgt_lang: Optional[str] = None, synthesize_speech: bool = False, ) -> Union[str, Tuple[str, Tuple[torch.Tensor, int]]]: # `audio` is either a file path or a 1xT Tensor # return either text or (text, synthetic speech) sample = self.get_model_input(self.task, audio) return self.get_prediction( self.task, self.model, self.generator, sample, tgt_lang=tgt_lang, synthesize_speech=synthesize_speech, )
[ 2103 ]
def METHOD_NAME(field, value): return False
[ 89, 353, 664, 471 ]
def METHOD_NAME(self): return f"{self.dlc_account_id}.dkr.ecr.{self.dlc_region}.amazonaws.com/{self.dlc_repository}:{self.dlc_tag}"
[ 660 ]
def METHOD_NAME(self): self.enabled = False
[ 193 ]
def METHOD_NAME(self, op, print_ctx, page_num, pixbuf):
[ 74, 1100, 1174 ]
def METHOD_NAME(home_team): game = get_game_setup(home_team) team = game.state.home_team if home_team else game.state.away_team proc = game.get_procedure() assert type(proc) == Setup # Top Wing place_player(game, team, 10, 1) assert not game.is_setup_legal(team) assert game.is_setup_legal_wings(team) place_player(game, team, 10, 2) assert not game.is_setup_legal(team) assert game.is_setup_legal_wings(team) place_player(game, team, 10, 3) assert not game.is_setup_legal(team) assert not game.is_setup_legal_wings(team) clear_board(game) # Bottom Wing place_player(game, team, 10, 12) assert not game.is_setup_legal(team) assert game.is_setup_legal_wings(team) place_player(game, team, 10, 13) assert not game.is_setup_legal(team) assert game.is_setup_legal_wings(team) place_player(game, team, 10, 14) assert not game.is_setup_legal(team) assert not game.is_setup_legal_wings(team) clear_board(game)
[ 9, 8952, 102 ]
def METHOD_NAME(self): for value in self.next(): yield value
[ 3368, 370 ]
def METHOD_NAME(address, key): """ Pair the bluetooth adapter with a device CLI Example: .. code-block:: bash salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234 Where DE:AD:BE:EF:CA:FE is the address of the device to pair with, and 1234 is the passphrase. TODO: This function is currently broken, as the bluez-simple-agent program no longer ships with BlueZ >= 5.0. It needs to be refactored. """ if not salt.utils.validate.net.mac(address): raise CommandExecutionError("Invalid BD address passed to bluetooth.pair") try: int(key) except Exception: # pylint: disable=broad-except raise CommandExecutionError( "bluetooth.pair requires a numerical key to be used" ) addy = address_() cmd = "echo {} | bluez-simple-agent {} {}".format( shlex.quote(addy["device"]), shlex.quote(address), shlex.quote(key) ) out = __salt__["cmd.run"](cmd, python_shell=True).splitlines() return out
[ 637 ]
def METHOD_NAME(port): return (port & NVE_MASK) != 0
[ 137, 653 ]
def METHOD_NAME(self) -> None: d = DottedDict() d.assign({ "a": "b", "c": { "x": "a", "y": "b" }, "d": { "e": { "f": { "a": "b", "c": "d" } } } }) self.verify(d, "a", "b") self.verify(d, "c.x", "a") self.verify(d, "c.y", "b") self.verify(d, "d.e.f.a", "b") self.verify(d, "d.e.f.c", "d") self.verify(d, "d.e.f", {"a": "b", "c": "d"}) self.verify(d, "d.e", {"f": {"a": "b", "c": "d"}}) self.verify(d, "d", {"e": {"f": {"a": "b", "c": "d"}}})
[ 9, 1283 ]
def METHOD_NAME(commands: dict, unimplemented: dict): gh = GithubData() for group in unimplemented: if group in IGNORE_GROUPS: continue print(f'### Creating issues for {group} commands') for cmd in unimplemented[group]: if cmd.upper() in IGNORE_COMMANDS: continue summary = commands[cmd]['summary'] gh.create_issue(group, cmd, summary)
[ 38, 1000, 2458 ]
def METHOD_NAME( pex_project_dir, # type: str shared_integration_test_tmpdir, # type: str ): # type: (...) -> str pex_bdist_chroot = os.path.join(shared_integration_test_tmpdir, "pex_bdist_chroot") wheels_dir = os.path.join(pex_bdist_chroot, "wheels_dir") with atomic_directory(pex_bdist_chroot) as chroot: if not chroot.is_finalized(): pex_pex = os.path.join(chroot.work_dir, "pex.pex") run_pex_command( args=[pex_project_dir, "-o", pex_pex, "--include-tools"] ).assert_success() extract_dir = os.path.join(chroot.work_dir, "wheels_dir") subprocess.check_call( args=[pex_pex, "repository", "extract", "-f", extract_dir], env=make_env(PEX_TOOLS=True), ) wheels = glob.glob(os.path.join(wheels_dir, "pex-*.whl")) assert 1 == len(wheels) return wheels[0]
[ 12165, 6711 ]
def METHOD_NAME(METHOD_NAME: imgui.GUI): global active_word_list if show_help: METHOD_NAME.text("Homophone help - todo") else: METHOD_NAME.text("Select a homophone") METHOD_NAME.line() index = 1 for word in active_word_list: if METHOD_NAME.button(f"Choose {index}: {word}"): actions.insert(actions.user.homophones_select(index)) actions.user.homophones_hide() index = index + 1 if METHOD_NAME.button("Phones hide"): actions.user.homophones_hide()
[ 2139 ]
def METHOD_NAME(args, model, device, train_loader, optimizer, epoch): model.METHOD_NAME() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % args.log_interval == 0 and args.rank == 0: print( "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch, batch_idx * len(data) * args.world_size, len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.item(), ) ) if args.verbose: print("Batch", batch_idx, "from rank", args.rank)
[ 849 ]
def METHOD_NAME(self): self.cpp_info.names["cmake_find_package"] = "LibRHash" self.cpp_info.names["cmake_find_package_multi"] = "LibRHash" self.cpp_info.names["pkg_config"] = "librhash" self.cpp_info.libs = ["rhash"]
[ 360, 100 ]
def METHOD_NAME(item): if item.isTracked(): return 'tracked' else: return 'untracked'
[ -1 ]
def METHOD_NAME(self, onboarding_agent: "OnboardingAgent"): """No cleanup required yet for ending onboarding in mocks""" pass
[ 950, 4809 ]
def METHOD_NAME(node_data): children_time = 0 for entry in node_data.values(): if type(entry) == dict: children_time += totalTime(entry) return children_time
[ 2189, 104 ]
def METHOD_NAME(spike_test_pair): data = spike_test_pair spike_indexes = np.array([725, 3382]) peak_indexes = np.array([812, 3478]) t = data[:, 0] v = data[:, 1] clipped = spkd.find_clipped_spikes(v, t, spike_indexes, peak_indexes, end_index=3550, tol=1) print((clipped, np.array([False, True]))) print((clipped.dtype, np.array([False, True]).dtype)) assert np.array_equal(clipped, [False, True]) # last spike is clipped clipped = spkd.find_clipped_spikes(v, t, spike_indexes, peak_indexes, end_index=3600, tol=1) print((clipped, np.array([False, False]))) assert np.array_equal(clipped, [False, False]) # last spike is Ok spike_indexes = np.array([]) peak_indexes = np.array([]) t = data[1500:3000, 0] v = data[1500:3000, 1] clipped = spkd.find_clipped_spikes(v, t, spike_indexes, peak_indexes, end_index=3600, tol=1) assert np.array_equal(clipped, []) # no spikes in the trace
[ 9, 416, 5625, 2238 ]
def METHOD_NAME(self, orth): """ :param str orth: """ orth_words = parse_orthography(orth, prefix=[], postfix=[], word_based=True) self.seq_count += 1 if self.options.dump_orth: print("Orth:", orth_words, file=log.v3) self.words.update(orth_words) self.total_word_len += len(orth_words) # Show some progress if it takes long. if time.time() - self.process_last_time > 2: self.process_last_time = time.time() print("Collect process, total word len so far:", human_size(self.total_word_len), file=log.v3)
[ 1076 ]
def METHOD_NAME(self) -> None: pass
[ 950 ]
def METHOD_NAME(self): self.writer.start('test', {'name': u'\xA7', u'\xE4': u'\xA7'}) self._verify(u'<test name="\xA7" \xE4="\xA7">\n')
[ 9, 447, 41, 256, 4428, 177 ]
async def METHOD_NAME(next_link=None): request = prepare_request(next_link) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response
[ 19, 243 ]
def METHOD_NAME(): test_files = [('_testing/test_list2/file_%d' % i, b'testing ... \n') for i in range(10)] clusterIO.put_files(test_files, 'TES1') time.sleep(2) listing = clusterIO.listdirectory('_testing/test_list2/', 'TES1',timeout=.00001) listing = clusterIO.listdirectory('_testing/test_list2/', 'TES1', timeout=5) assert (len(listing) == 10)
[ 9, 245, 1887, 659 ]
def METHOD_NAME(self, sid): sign, timestamp = self._sign_payload([sid]) params = { "client_id": self.client_id, "sid": sid, "timestamp": timestamp, "sign": sign, } # For unregister error in SPARCSSSO try: return self._post_data(self.URLS["unregister"], params)["success"] except RuntimeError: return True
[ 2468 ]
def METHOD_NAME(): hoomd.conftest.logging_check(hoomd.md.Integrator, ("md",), { "linear_momentum": { "category": hoomd.logging.LoggerCategories.sequence } })
[ 9, 663 ]
def METHOD_NAME(self, queries_data, topk, logger=None): chunked_queries = utils.chunk_it(queries_data, self.num_threads) for idx, arg in enumerate(self.arguments): arg["queries_data"] = chunked_queries[idx] arg["topk"] = topk arg["logger"] = logger
[ 6115, 365 ]
def METHOD_NAME(self): # Checks if node is in PATH, errors if it isn't try: run(["node", "-v"], stdout=DEVNULL, check=True) except (CalledProcessError, FileNotFoundError, PermissionError) as err: raise RuntimeError( "Couldn't execute node. Please ensure you have node.js installed and in PATH. " "See https://nodejs.org/ for instructions. " f"Original error is {err}" ) rfbrowser_dir = Path(__file__).parent installation_dir = rfbrowser_dir / "wrapper" # This second application of .parent is necessary to find out that a developer setup has node_modules correctly project_folder = rfbrowser_dir.parent subfolders = os.listdir(project_folder) + os.listdir(installation_dir) if "node_modules" in subfolders: return raise RuntimeError( f"Could not find node dependencies in installation directory `{installation_dir}.` " "Run `rfbrowser init` to install the dependencies." )
[ 602, 1716, 2410 ]