Columns: text — string, lengths 15 to 7.82k characters; ids — sequence, lengths 1 to 7.
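Each row below pairs a text value (a Python function with its name masked as METHOD_NAME) with an ids list. As a minimal sketch of how such rows could be consumed — assuming the Hugging Face datasets library, and using "user/method-name-corpus" as a hypothetical placeholder path, not the real source:

    # A minimal sketch, assuming the `datasets` library is installed.
    # "user/method-name-corpus" is a hypothetical placeholder path.
    from datasets import load_dataset

    ds = load_dataset("user/method-name-corpus", split="train")
    for row in ds:
        # Each record pairs a masked function body with its ids list.
        print(len(row["text"]), row["ids"])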
def METHOD_NAME(self):
    if not self.ip:
        self.initialize_geo()
    self.mp.people_set(
        self.distinct_user_id,
        {
            "$name": "",
            "arch": platform.machine(),
            "$os": platform.system(),
            "os-full": platform.platform(aliased=True),
        },
    )
[ 15, 21, 748 ]
def METHOD_NAME(sig_num, frame):
    """ kill all child processes """
    pid = os.getpid()
    pgid = os.getpgid(os.getpid())
    logger.info("main proc {} exit, kill process group "
                "{}".format(pid, pgid))
    os.killpg(pgid, signal.SIGKILL)
    return
[ 3108, 7820 ]
def METHOD_NAME(self): """Entry point for running application. """ args = self._parse_command_line() self.get_fault_impulses(args.filename_fault) self.get_station_responses(args.filename_responses) self.get_station_observed(args.filename_observed) self.penalties = list(map(float, args.penalties.split(","))) results = self.invert() self.write_results(self.filename_output, results)
[ 57 ]
def METHOD_NAME(request):
    result = wikidata.send_wikidata_query(request.replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL))
    if result is not None:
        for r in result['results']['bindings']:
            yield r
[ 14273, 377, 1571, 640 ]
def METHOD_NAME() -> str: """ The mandatory Cobbler module registration hook. """ return "manage"
[ 372 ]
def METHOD_NAME(self) -> str: """ State of the local gateway. """ return pulumi.get(self, "state")
[ 551 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_header_param(
            "Accept", "application/json",
        ),
    }
    return parameters
[ 572, 386 ]
def METHOD_NAME(self, state):
[ 8895, 12759 ]
def METHOD_NAME():
    container = self._GetContainerInstance()['containers'][0]
    state = container['instanceView']['currentState']['state']
    if state != 'Terminated':
        raise container_service.RetriableContainerException(
            f'Container in ({state}). Not yet in expected state Terminated.')
    return container
[ 618, 43, 538 ]
def METHOD_NAME(self, release_filters: list["Filter"]) -> bool:
    """
    Filter releases and removes releases that fail the filters
    """
    releases = list(self.releases.keys())
    for version in releases:
        release_data = {
            "version": version,
            "releases": self.releases,
            "info": self.info,
        }
        if not all(plugin.filter(release_data) for plugin in release_filters):
            del self.releases[version]
    if releases:
        return True
    return False
[ 527, 75, 205 ]
def METHOD_NAME(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        drop_schema_sql = """DROP SCHEMA public CASCADE;
            CREATE SCHEMA public;
            GRANT ALL ON SCHEMA public TO postgres;
            GRANT ALL ON SCHEMA public TO public;
        """
        sess = _db.session()
        sess.execute(drop_schema_sql)
        sess.commit()

        # ############################################
        # There are 2 approaches, an empty database, or the same one that the app will use
        # create the tables
        # _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in legal_api!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade
        import sys
        auth_api_folder = [folder for folder in sys.path if 'auth-api' in folder][0]
        migration_path = auth_api_folder.replace('/auth-api/src', '/auth-api/migrations')

        Migrate(app, _db, directory=migration_path)
        upgrade()

        return _db
[ 1267 ]
def METHOD_NAME(self) -> Optional['outputs.TimelineResultsMetadataResponse']:
    """
    The metadata from the timeline operation results.
    """
    return pulumi.get(self, "meta_data")
[ 1094, 365 ]
def METHOD_NAME(self):
    return {
        'url': self.config.base_uri,
    }
[ 19, 549, 773 ]
def METHOD_NAME(url):
    for sleep_time in [10, 30, 0]:
        reply = requests.METHOD_NAME(url, auth=auth)
        api_limit = (
            "message" in reply.json()
            and "API rate limit exceeded" in reply.json()["message"]
        )
        if not api_limit:
            break
        print("API rate limit exceeded, waiting..")
        time.sleep(sleep_time)

    reply.raise_for_status()
    return reply
[ 19 ]
def METHOD_NAME(self) -> str: """ The name of the ARM resource. """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(builder, authmethod):
    return WelcomeAddAuthmethod(builder, authmethod)
[ 238, 14733 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.fabric_name = AAZStrArg(
        options=["-n", "--name", "--fabric-name"],
        help="ASR fabric to purge.",
        required=True,
        id_part="child_name_1",
    )
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.vault_name = AAZStrArg(
        options=["--vault-name"],
        help="The name of the recovery services vault.",
        required=True,
        id_part="name",
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(self):
    return [
        self.OutputItem(
            name=_("响应内容"),
            key="data",
            type="object",
            schema=ObjectItemSchema(description=_("HTTP 请求响应内容,内部结构不固定"), property_schemas={}),
        ),
        self.OutputItem(
            name=_("状态码"),
            key="status_code",
            type="int",
            schema=IntItemSchema(description=_("HTTP 请求响应状态码"))
        ),
    ]
[ 141, 275 ]
def METHOD_NAME(self, oToi):
    if self.value == 'ignore':
        return []
    lTokens = oToi.get_tokens()
    iLine = oToi.get_line_number()
    lReturn = []
    for iToken, oToken in enumerate(lTokens):
        if rules_utils.token_exists_in_token_type_list(oToken, self.analysis_options):
            if rules_utils.token_at_beginning_of_line_in_token_list(iToken, lTokens) and self.value == 'no':
                sSolution = 'jcl-fix this'
                iViolation_line_number = iLine + rules_utils.number_of_carriage_returns(lTokens[0:iToken])
                iStartIndex = utils.find_previous_non_whitespace_token(iToken - 1, lTokens) + 1
                oMyToi = oToi.extract_tokens(iStartIndex, iToken - 1)
                oViolation = violation.New(iViolation_line_number, oMyToi, 'jcl-fix this')
                dAction = {}
                dAction['action'] = 'remove_new_line'
                oViolation.set_action(dAction)
                lReturn.append(oViolation)
            elif not rules_utils.token_at_beginning_of_line_in_token_list(iToken, lTokens) and self.value == 'yes':
                sSolution = 'jcl-fix this'
                iViolation_line_number = iLine + rules_utils.number_of_carriage_returns(lTokens[0:iToken])
                iStartIndex = utils.find_previous_non_whitespace_token(iToken - 1, lTokens) + 1
                oMyToi = oToi.extract_tokens(iStartIndex, iToken - 1)
                oViolation = violation.New(iViolation_line_number, oMyToi, 'jcl-fix this')
                dAction = {}
                dAction['action'] = 'add_new_line'
                oViolation.set_action(dAction)
                lReturn.append(oViolation)
    return lReturn
[ 250, 43, 15824, 1413, 1553, 466 ]
def METHOD_NAME(self):
    self._set_status(_("Loading Podcasts..."))
    if not os.path.exists(self.podcast_file):
        self._set_status(_("No configuration present yet"))
        return
    try:
        with open(self.podcast_file) as fp:
            lines = (line.strip() for line in fp.readlines())
            self.podcasts = []
            for line in lines:
                (url, title) = line.split('\t')
                self.podcasts.append((title, url))
    except (IOError, OSError):
        logger.warning('Could not open podcast file')
        self._set_status('')
        return
    self._done_loading_podcasts()
[ 557, -1 ]
def METHOD_NAME(self):
    with self.app.app_context():
        # check that the sources are otherwise unchanged
        sources = db.engine.execute(text("SELECT * FROM sources")).fetchall()
        assert len(sources) == len(self.original_sources)
        for source in sources:
            assert hasattr(source, "flagged")
            original_source = self.original_sources[source.uuid]
            assert source.id == original_source.id
            assert source.journalist_designation == original_source.journalist_designation
            assert source.last_updated == original_source.last_updated
            assert source.pending == original_source.pending
            assert source.interaction_count == original_source.interaction_count
            assert not hasattr(original_source, "flagged")
            assert source.flagged is None

            source_submissions = db.engine.execute(
                text("SELECT * FROM submissions WHERE source_id = :source_id"),
                source_id=source.id,
            ).fetchall()
            assert source_submissions == self.source_submissions[source.id]
[ 250, 1502 ]
def METHOD_NAME(self) -> None:
    model = BasicModel_MultiLayer()
    add_input = torch.tensor([[-1.0, 2.0, 2.0]])
    input = torch.tensor([[1.0, 6.0, -3.0]])
    labels = torch.tensor([0])
    mask = torch.tensor([[0, 0, 1]])
    loss_func = CrossEntropyLoss(reduction="none")
    adv = FGSM(model, loss_func)
    perturbed_input = adv.perturb(
        input, 0.2, labels, additional_forward_args=(add_input,), mask=mask
    )
    assertTensorAlmostEqual(
        self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
    )
[ 9, 2402, 2062, 1572, 973 ]
def METHOD_NAME(self) -> None:
    net = BasicModel_MultiLayer_MultiInput()
    inp1 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 80.0, 0.0]])
    inp2 = torch.tensor([[0.0, 6.0, 14.0], [0.0, 20.0, 0.0]])
    inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
    self._ig_input_test_assert(
        net,
        net.model.relu,
        (inp1, inp2),
        lambda x: torch.sum(x),
        (
            [[0.0, 10.5, 24.5], [0.0, 160.0, 0.0]],
            [[0.0, 10.5, 24.5], [0.0, 40.0, 0.0]],
        ),
        (inp3, 0.5),
    )
[ 9, 53, 4305, 457, 362, 3680, 2277 ]
def METHOD_NAME(load_inpxml, simple_xpath, filters, expected):
    """
    Test the xpathbuilder with a variety of different Xpath expressions and filters
    """
    from masci_tools.util.xml.xpathbuilder import XPathBuilder
    from masci_tools.util.xml.common_functions import eval_xpath

    xmltree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)

    xpath = XPathBuilder(simple_xpath, filters=filters)
    print(f'Complex XPath: {str(xpath)}')
    res = eval_xpath(xmltree, xpath)

    assert res == expected
    assert str(xpath) != simple_xpath  # make sure the path is not the same as the original
[ 9, 2720, 41, 1171 ]
def METHOD_NAME(group, url=url):
    print(f'🛰 Getting datasets for {group}')
    u = url + f'?observatoryGroup={group}'
    res = requests.get(u, headers=_CDAS_HEADERS)
    datasets = res.json()['DatasetDescription']
    dataset_ids = {ds['Id']: ds['Label'] for ds in datasets}
    all_datasets.update(dataset_ids)
[ 1047, 13506, 126 ]
def METHOD_NAME(): pass
[ 921, 441 ]
def METHOD_NAME(access_levels: tuple[str, ...], dataset_key: str = "dataset_name", trait_key: str = "name") -> Callable:
    def __build_access_checker__(func: Callable):
        @wraps(func)
        def __checker__(*args, **kwargs):
            def __error__(err):
                error = process_error(err)
                raise AuthorisationError(
                    f"{error['error']}: {error['error_description']}",
                    session_info()["user"])

            def __success__(priv_info):
                if all(priv in priv_info[0]["privileges"] for priv in access_levels):
                    return func(*args, **kwargs)
                missing = tuple(f"'{priv}'" for priv in access_levels
                                if priv not in priv_info[0]["privileges"])
                raise AuthorisationError(
                    f"Missing privileges: {', '.join(missing)}",
                    session_info()["user"])

            dataset_name = kwargs.get(
                dataset_key,
                request.args.get(dataset_key, request.form.get(dataset_key, "")))
            if not bool(dataset_name):
                raise AuthorisationError(
                    "DeveloperError: Dataset name not provided. It is needed "
                    "for the authorisation checks.",
                    session_info()["user"])

            trait_name = kwargs.get(
                trait_key,
                request.args.get(trait_key, request.form.get(trait_key, "")))
            if not bool(trait_name):
                raise AuthorisationError(
                    "DeveloperError: Trait name not provided. It is needed for "
                    "the authorisation checks.",
                    session_info()["user"])

            return client.post(
                "oauth2/data/authorisation",
                json={"traits": [f"{dataset_name}::{trait_name}"]}).either(
                    __error__, __success__)
        return __checker__
    return __build_access_checker__
[ 984, 1089 ]
def METHOD_NAME(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(grid):
    global _groups, _grid
    _grid = grid

    assert _grid.pipe_parallel_size > 1, "There is no pipeline parallelism"

    if not can_send_recv():
        _groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups]
[ 176, 356, 861 ]
def METHOD_NAME(folder_path):
    """
    Provides a list of tuples of zip_file_path, real_file_path
    """
    file_paths = []
    # the _ is a list of directories discovered in the walk
    for root, _, filenames in os.walk(folder_path):
        try:
            operational_root = root.split("/", 1)[1]
        except IndexError:
            operational_root = ""
        for filename in filenames:
            zip_path = path_join(operational_root, filename)
            file_path = path_join(root, filename)
            file_paths.append((zip_path, file_path))
    return file_paths
[ 19, 171, 3336, 43, -1 ]
def METHOD_NAME(part):
    return (part != b'' and
            part != b'\r\n' and
            part[:4] != b'--\r\n' and
            part != b'--')
[ 9, 995 ]
def METHOD_NAME(self):
    gray_data = empty(shape=(5, 4))
    gray_data[:] = array([0.0, 0.25, 0.5, 0.75, 1.0]).reshape(5, 1)
    self.colormap = DiscreteColorMapper.from_palette_array(gray_data)
    a = array([0, 2, 3])
    b = self.colormap.map_uint8(a)
    self.assertEqual(b.shape, (3, 4))
    self.assertEqual(b.dtype, uint8)
    for i in range(4):
        assert_array_almost_equal(b[:, i], array([0, 128, 192]))
[ 9, 1139, 7985, 422, 5493 ]
def METHOD_NAME():
    global FORCE
    # remove default drivers to avoid modprobe order conflicts
    status, output = exec_cmd("rmmod i2c_ismt ", 1)
    status, output = exec_cmd("rmmod i2c-i801 ", 1)
    status, output = exec_cmd("rmmod gpio_ich ", 1)
    status, output = exec_cmd("rmmod lpc_ich ", 1)

    # insert extra module
    status, output = exec_cmd("insmod /lib/modules/4.9.0-11-2-amd64/kernel/drivers/gpio/gpio-ich.ko gpiobase=0", 1)

    # install drivers
    for i in range(0, len(drivers)):
        status, output = exec_cmd("modprobe " + drivers[i], 1)
        if status:
            print output
            if FORCE == 0:
                return status

    # instantiate devices
    for i in range(0, len(instantiate)):
        # time.sleep(1)
        status, output = exec_cmd(instantiate[i], 1)
        if status:
            print output
            if FORCE == 0:
                return status
[ 112, 428 ]
def METHOD_NAME(dev_project: str, nom_tag: str) -> RollbackStep:
    """Syncs the target release artifacts to the live folder.

    By convention the gs://{dev_project}-deploy/live folder should contain
    the artifacts from the currently serving release. For Domain Registry
    team members, this step updates the nomulus tool installed on corp
    desktops.
    """
    artifacts_folder = f'gs://{dev_project}-deploy/{nom_tag}'
    live_folder = f'gs://{dev_project}-deploy/live'
    return RollbackStep(
        f'Syncing {artifacts_folder} to {live_folder}.',
        ('gsutil', '-m', 'rsync', '-d', artifacts_folder, live_folder))
[ 164, 1824, 586 ]
def METHOD_NAME(request, integration_name, endpoint=None):
    """Communicate with an integrated service using POST"""
    integration = get_object_or_404(Integration, name=integration_name)
    if integration is None:
        return HttpResponseNotFound(f'<h1>404 - Integration configuration not found.</h1>')

    communication = set_integration(integration_name)
    if communication is None:
        return HttpResponseNotFound(f'<h1>404 - Integration not found.</h1>')

    identified = communication.identify()
    data = communication.post_response(endpoint)

    # add results into Endpoint model
    ep, created = Endpoint.objects.update_or_create(
        integration=integration,
        endpoint_path=endpoint,
        data=data
    )

    return HttpResponse(f"Attempting to communicate using POST with '{integration}' integration: {identified}. endpoint: {endpoint}. <br> Returned data: {data}")
[ 1911, 841, 72 ]
def METHOD_NAME(self, group_pattern: str) -> tuple[str, dict[str, str]]: ...
[ 1303 ]
def METHOD_NAME(self, tx_hash, wallet):
    tx = self.wallets[wallet]["wallet"].transactions.get(tx_hash)
    if tx is None:
        raise Exception("No such blockchain transaction")
    delta = self.wallets[wallet]["wallet"].get_wallet_delta(tx)
    return format_satoshis(delta.fee)
[ 19, 1304, 2364 ]
def METHOD_NAME(self, x: paddle.Tensor, t: paddle.Tensor, c: paddle.Tensor):
    """Denoise mel-spectrogram.

    Args:
        x(Tensor): Shape (B, C_in, T), The input mel-spectrogram.
        t(Tensor): Shape (B), The timestep input.
        c(Tensor): Shape (B, C_aux, T'). The auxiliary input (e.g. fastspeech2 encoder output).

    Returns:
        Tensor: Shape (B, C_out, T), the pred noise.
    """
    assert c.shape[-1] == x.shape[-1]

    if t.shape[0] != x.shape[0]:
        t = t.tile([x.shape[0]])
    t_emb = self.first_t_emb(t)
    t_embs = [
        t_emb_layer(t_emb)[..., None] for t_emb_layer in self.t_emb_layers
    ]

    x = self.first_conv(x)
    x = self.first_act(x)
    skips = 0
    for f, t in zip(self.conv_layers, t_embs):
        x = x + t
        x, s = f(x, c)
        skips += s
    skips *= math.sqrt(1.0 / len(self.conv_layers))

    x = self.last_conv_layers(skips)
    return x
[ 76 ]
def METHOD_NAME(options):
    with pytest.raises(IOError):
        options.add_extension(path.join(path.abspath(path.curdir), "fakepath"))
[ 9, 45, 442, 217, 2916, 870, 130 ]
def METHOD_NAME(self): return { "id": self.id, "websocketId": self.websocketId, "userId": self.userId, "token": self.token, "servername": self.servername, }
[ 24, 553 ]
def METHOD_NAME():
    return Labels.load_file(
        TEST_LEGACY_GRID_LABELS, video_search=TEST_LEGACY_GRID_LABELS
    )
[ 3116, 753, 415 ]
def METHOD_NAME(self):
    torch.manual_seed(0)
    model_kwargs = {
        "num_attention_heads": 2,
        "attention_head_dim": 12,
        "embedding_dim": self.text_embedder_hidden_size,
        "num_layers": 1,
    }

    model = PriorTransformer(**model_kwargs)
    # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
    # will always return 0 - set clip_std to be 1 so it won't return 0
    model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
    return model
[ 784, 1559 ]
def METHOD_NAME(self, model):
    m = model

    # Create the state block and pull in the default metadata
    m.fs.stream = m.fs.properties.build_state_block([0])
    metadata = m.fs.properties.get_metadata().properties

    # Check that constraints are unscaled
    for p in metadata.list_supported_properties():
        var_str = p.name
        if p.method is not None and m.fs.stream[0].is_property_constructed(var_str):
            if hasattr(self, "eq_" + var_str):
                con = getattr(self, "eq_" + var_str)
                unscaled_constraint_list = list(unscaled_constraints_generator(con))
                assert len(unscaled_constraint_list) == 1

    # Scale constraints
    # #TODO: need to replace with transform_property_constraints
    for p in metadata.list_supported_properties():
        var_str = p.name
        if p.method is not None and m.fs.stream[0].is_property_constructed(var_str):
            var = getattr(self, var_str)
            if not isinstance(var, pyo.Var):
                continue  # properties that are not vars do not have constraints
            # adding a conditional to check if a constraint exists for property; in the case
            # when we only add and object reference, there would not be a constraint
            if hasattr(self, "eq_" + var_str):
                con = getattr(self, "eq_" + var_str)
                for ind, c in con.items():
                    sf = iscale.get_scaling_factor(
                        var[ind], default=1, warning=True
                    )
                    iscale.constraint_scaling_transform(c, sf)

    # Check that constraints are scaled
    for p in metadata.list_supported_properties():
        var_str = p.name
        if p.method is not None and m.fs.stream[0].is_property_constructed(var_str):
            if hasattr(self, "eq_" + var_str):
                con = getattr(self, "eq_" + var_str)
                unscaled_constraint_list = list(unscaled_constraints_generator(con))
                assert len(unscaled_constraint_list) == 0
[ 9, 1126, 500 ]
def METHOD_NAME(self):
    for a in range(-50, 50):
        for m in range(-50, 50):
            with self.subTest(a=a, m=m):
                if m != 0 and math.gcd(a, m) == 1:
                    # Exponent -1 should give an inverse, with the
                    # same sign as m.
                    inv = pow(a, -1, m)
                    self.assertEqual(inv, inv % m)
                    self.assertEqual((inv * a - 1) % m, 0)

                    # Larger exponents
                    self.assertEqual(pow(a, -2, m), pow(inv, 2, m))
                    self.assertEqual(pow(a, -3, m), pow(inv, 3, m))
                    self.assertEqual(pow(a, -1001, m), pow(inv, 1001, m))
                else:
                    with self.assertRaises(ValueError):
                        pow(a, -1, m)
                    with self.assertRaises(ValueError):
                        pow(a, -2, m)
                    with self.assertRaises(ValueError):
                        pow(a, -1001, m)
[ 9, 2927, 3430 ]
def METHOD_NAME(query: str, agent: Agent, num_results: int = 8) -> str | list[str]:
    """Return the results of a Google search using the official Google API

    Args:
        query (str): The search query.
        num_results (int): The number of results to return.

    Returns:
        str: The results of the search.
    """
    from googleapiclient.discovery import build
    from googleapiclient.errors import HttpError

    try:
        # Get the Google API key and Custom Search Engine ID from the config file
        api_key = agent.config.google_api_key
        custom_search_engine_id = agent.config.google_custom_search_engine_id

        # Initialize the Custom Search API service
        service = build("customsearch", "v1", developerKey=api_key)

        # Send the search query and retrieve the results
        result = (
            service.cse()
            .list(q=query, cx=custom_search_engine_id, num=num_results)
            .execute()
        )

        # Extract the search result items from the response
        search_results = result.get("items", [])

        # Create a list of only the URLs from the search results
        search_results_links = [item["link"] for item in search_results]

    except HttpError as e:
        # Handle errors in the API call
        error_details = json.loads(e.content.decode())

        # Check if the error is related to an invalid or missing API key
        if error_details.get("error", {}).get(
            "code"
        ) == 403 and "invalid API key" in error_details.get("error", {}).get(
            "message", ""
        ):
            raise ConfigurationError(
                "The provided Google API key is invalid or missing."
            )
        raise

    # google_result can be a list or a string depending on the search results

    # Return the list of search result URLs
    return safe_google_results(search_results_links)
[ 3399 ]
def METHOD_NAME(self):
[ 19, 181, 2253, 623, 13292 ]
def METHOD_NAME(chi):
    c = tc.fgs.FGSSimulator(N, [0])
    for i in range(N - 1):
        c.evol_hp(i, i + 1, chi[i])
    cm = c.get_cmatrix()
    return -tc.backend.real(1 - cm[N, N])
[ 474 ]
def METHOD_NAME(self):
    if self.xpoints and self.ypoints is not None:
        self.report_items((
            ("Number of points in the X direction", int(self.xpoints)),
            ("Number of points in the Y direction", int(self.ypoints))
        ))
    else:
        return
[ 353, 339 ]
def METHOD_NAME(self, text: str) -> None:
    """Enter the specified text using multi-press on the numeric keypad.

    :param str text: The text to enter. The case doesn't matter (uppercase
        and lowercase are treated the same).
    """
    from stbt_core import debug, press

    text = text.lower()

    # Raise exception early, so we don't enter half the text
    for c in text:
        if c not in self.keys:
            raise ValueError("Don't know how to enter %r" % (c,))

    debug("MultiPress.enter_text: %r" % (text,))

    prev_key = None
    for c in text:
        key, n = self.keys[c]
        if prev_key == key:
            time.sleep(self.interletter_delay_secs)
        for _ in range(n):
            press(key, interpress_delay_secs=self.interpress_delay_secs)
        prev_key = key
[ 7576, 526 ]
def METHOD_NAME(self, bar):
    """add color as pango markup to a bar"""
    if bar in ["▁", "▂"]:
        color = self.theme.color("green", "green")
    elif bar in ["▃", "▄"]:
        color = self.theme.color("yellow", "yellow")
    elif bar in ["▅", "▆"]:
        color = self.theme.color("orange", "orange")
    elif bar in ["▇", "█"]:
        color = self.theme.color("red", "red")
    colored_bar = '<span foreground="{}">{}</span>'.format(color, bar)
    return colored_bar
[ 238, 36 ]
def METHOD_NAME(self): """ Test function of _get_num_available_resources_by_type """ worker = GeonodeLegacyHarvester(remote_url=self.remote_url, harvester_id=1) self.assertEqual(worker._get_num_available_resources_by_type(), test_resources) self.assertEqual( worker._get_total_records(GeoNodeResourceType.DATASET), test_resources[GeoNodeResourceType.DATASET] ) self.assertEqual( worker._get_total_records(GeoNodeResourceType.DOCUMENT), test_resources[GeoNodeResourceType.DOCUMENT] ) self.assertEqual(worker._get_total_records(GeoNodeResourceType.MAP), test_resources[GeoNodeResourceType.MAP])
[ 9, 19, 181, 1272, 1614, 604, 44 ]
def METHOD_NAME(self, cpu_offload: CPUOffload):
    self._test_fsdp_parity(
        NestedWrappedModule,
        FSDPInitMode.RECURSIVE,
        cuda_init_mode=CUDAInitMode.CUDA_BEFORE,
        # Run one iteration to avoid NaN without a gradient scaler
        num_iters=1,
        cpu_offload=cpu_offload,
        use_pure_fp16=True,
    )
[ 9, 5260, 23, 2685 ]
def METHOD_NAME(path, section):
    """Returns True or False indicating whether the path is covered by the current section."""
    if not 'file' in section:
        return False
    for pattern in section['file']:
        regex = pattern_to_regex(pattern)
        match = re.match(regex, path)
        if match:
            # Check if there is an exclude pattern that applies
            for pattern in section['exclude']:
                regex = pattern_to_regex(pattern)
                match = re.match(regex, path)
                if match:
                    return False
            return True
    return False
[ 157, 623, 1287 ]
def METHOD_NAME(self) -> Optional[str]:
    return pulumi.get(self, "display_name")
[ 52, 156 ]
def METHOD_NAME(session):
    """Verify that setup.py is valid (including RST check)."""
    session.install("docutils", "pygments")
    session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
[ 3060, 102, 1739 ]
def METHOD_NAME(self, responses, username, ip):
    for response, _ in responses:
        if self.checkUserPass(username, response, ip):
            return defer.succeed(username)
    return defer.fail(UnauthorizedLogin())
[ 905, 250, 6051, 21 ]
def METHOD_NAME(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
    eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
    eval_dataloader = self.get_eval_dataloader(eval_dataset)
    eval_examples = self.eval_examples if eval_examples is None else eval_examples

    # Temporarily disable metric computation, we will do it in the loop here.
    compute_metrics = self.compute_metrics
    self.compute_metrics = None
    eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
    start_time = time.time()
    try:
        output = eval_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
    finally:
        self.compute_metrics = compute_metrics
    total_batch_size = self.args.eval_batch_size * self.args.world_size
    if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
        start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
    output.metrics.update(
        speed_metrics(
            metric_key_prefix,
            start_time,
            num_samples=output.num_samples,
            num_steps=math.ceil(output.num_samples / total_batch_size),
        )
    )

    if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
        # Only the main node write the results by default
        eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
        metrics = self.compute_metrics(eval_preds)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
    else:
        metrics = output.metrics

    if self.args.should_log:
        # Only the main node log the results by default
        self.log(metrics)

    if self.args.tpu_metrics_debug or self.args.debug:
        # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
        xm.master_print(met.metrics_report())

    self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
    return metrics
[ 1195 ]
def METHOD_NAME(self):
    if self.pretrained is not None:
        utils.load_entire_model(self, self.pretrained)
[ 176, 1336 ]
def METHOD_NAME(self, x, y, button, modifiers):
    if button == arcade.MOUSE_BUTTON_LEFT:
        # Release the item we are holding (if any)
        self.shape_being_dragged = None
[ 69, 2571, 586 ]
def METHOD_NAME(self):
    '''
    The interface object for the
    :doc:`Tenable Identity Exposure Dashboard APIs <dashboard>`.
    '''
    return DashboardAPI(self)
[ 3029 ]
def METHOD_NAME(self, value): ...
[ 19, 671, 4282, 199 ]
def METHOD_NAME(self):
    # auth session
    try:
        return self._open_url()
    except HTTPError as e:
        # I need the 500 because pydap re-raises HTTPError without setting the code
        if not (e.code != 400 or e.code != 300 or e.code != 500):
            raise e

    # Check Url (probably inefficient..., but worth a try to get authenticated)
    try:
        self.session.get(self.source + ".dds")
        return self._open_url()
    except HTTPError as e:
        if e.code != 400:
            raise e
        _logger.exception("Error opening PyDap url '%s'" % self.source)
        raise HTTPError("Could not open PyDap url '%s'.\nCheck login credentials." % self.source)
[ 126 ]
def METHOD_NAME(self_obj):
    status = False
    status_message = "sending e-mail failed"
    return status, status_message
[ 248, 353, 487, 180 ]
def METHOD_NAME(self) -> Any:
    """
    Integration runtime properties.
    """
    return pulumi.get(self, "properties")
[ 748 ]
def METHOD_NAME(cls): """Return a comparable version object If no version found, use LooseVersion('0.0.0') """ return LooseVersion(cls.version() or "0.0.0")
[ -1 ]
def METHOD_NAME(): """The main function. Hier spielt die Musik. """ # parse the command line, exit with UNKNOWN if it fails try: args = parse_args() except SystemExit: sys.exit(STATE_UNKNOWN) # fetch data if args.TEST is None: result = lib.nodebb.get_data(args, '/api/admin/development/info') else: # do not call the command, put in test data import json stdout, stderr, retc = lib.test.test(args.TEST) result = json.loads(stdout) # init some vars msg = '' state = STATE_OK perfdata = '' # analyze data for proc in result['info']: if proc['process']['title'].endswith('/node'): try: heap_used_percent = round(float(proc['process']['memoryUsage']['heapUsed']) / float(proc['process']['memoryUsage']['heapTotal']) * 100, 1) heap_state = lib.base.get_state(heap_used_percent, args.WARN, args.CRIT) state = lib.base.get_worst(state, heap_state) msg += 'NodeBB {}, {} {}, Heap {}% used ({} of {}){}, RSS {}, Up {}'.format( proc['id'], proc['process']['title'], proc['process']['version'], heap_used_percent, lib.human.bytes2human(proc['process']['memoryUsage']['heapUsed']), lib.human.bytes2human(proc['process']['memoryUsage']['heapTotal']), lib.base.state2str(heap_state, prefix=' '), lib.human.bytes2human(proc['process']['memoryUsage']['rss']), lib.human.seconds2human(proc['process']['uptime']), ) perfdata += lib.base.get_perfdata('nodebb_heap_used_percent', heap_used_percent, '%', args.WARN, args.CRIT, 0, 100) perfdata += lib.base.get_perfdata('nodebb_heap_used', proc['process']['memoryUsage']['heapUsed'], 'B', None, None, 0, proc['process']['memoryUsage']['heapTotal']) perfdata += lib.base.get_perfdata('nodebb_rss', proc['process']['memoryUsage']['rss'], 'B', None, None, 0, None) perfdata += lib.base.get_perfdata('nodebb_uptime', proc['process']['uptime'], 's', None, None, 0, None) except: pass # over and out lib.base.oao(msg, state, perfdata, always_ok=args.ALWAYS_OK)
[ 57 ]
def METHOD_NAME(self, index):
    if not index.isValid():
        return self.INVALID_INDEX_FLAGS
    row = self.model[index.row()]
    column = self.model._columns.column_by_index(index.column())
    return self._getFlags(row, column)
[ 1106 ]
def METHOD_NAME(self, node):
    """Compiles a node, recursively.

    This is one big switch on the node type.
    """
    # XXX Optimize certain Wildcard-containing-Wildcard patterns
    # that can be merged
    if node.type == self.syms.Matcher:
        node = node.children[0]  # Avoid unneeded recursion

    if node.type == self.syms.Alternatives:
        # Skip the odd children since they are just '|' tokens
        alts = [self.METHOD_NAME(ch) for ch in node.children[::2]]
        if len(alts) == 1:
            return alts[0]
        p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
        return p.optimize()

    if node.type == self.syms.Alternative:
        units = [self.METHOD_NAME(ch) for ch in node.children]
        if len(units) == 1:
            return units[0]
        p = pytree.WildcardPattern([units], min=1, max=1)
        return p.optimize()

    if node.type == self.syms.NegatedUnit:
        pattern = self.compile_basic(node.children[1:])
        p = pytree.NegatedPattern(pattern)
        return p.optimize()

    assert node.type == self.syms.Unit

    name = None
    nodes = node.children
    if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
        name = nodes[0].value
        nodes = nodes[2:]
    repeat = None
    if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
        repeat = nodes[-1]
        nodes = nodes[:-1]

    # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
    pattern = self.compile_basic(nodes, repeat)

    if repeat is not None:
        assert repeat.type == self.syms.Repeater
        children = repeat.children
        child = children[0]
        if child.type == token.STAR:
            min = 0
            max = pytree.HUGE
        elif child.type == token.PLUS:
            min = 1
            max = pytree.HUGE
        elif child.type == token.LBRACE:
            assert children[-1].type == token.RBRACE
            assert len(children) in (3, 5)
            min = max = self.get_int(children[1])
            if len(children) == 5:
                max = self.get_int(children[3])
        else:
            assert False
        if min != 1 or max != 1:
            pattern = pattern.optimize()
            pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)

    if name is not None:
        pattern.name = name
    return pattern.optimize()
[ 296, 1716 ]
def METHOD_NAME(tmp_h5py_file, data, expected):
    """Test formatter with h5py attributes"""
    tmp_h5py_file.attrs['attr'] = data

    formatter = TextFormatter()
    result = formatter.toString(tmp_h5py_file.attrs['attr'])
    assert result == expected
[ 9, 2931, 11307, 864 ]
def METHOD_NAME(self):
    return self.sd.METHOD_NAME
[ 181, 450 ]
def METHOD_NAME(jca, mockReplyInput, expected):
    """Testing JobCleaningAgent().removeDeletedJobs()"""
    mockReply.return_value = mockReplyInput
    result = jca.removeDeletedJobs()
    assert result == expected
[ 9, 188, 494, 604, 452 ]
def METHOD_NAME():
    try:
        NotificationTemplate.objects.get(type=NotificationType.EVENT_PUBLISHED).delete()
    except NotificationTemplate.DoesNotExist:
        pass
    template = NotificationTemplate.objects.create(
        type=NotificationType.EVENT_PUBLISHED,
        subject="event published subject, event name: {{ event.name }}!",
        body="event published body, event name: {{ event.name }}!",
        html_body="event published <b>HTML</b> body, event name: {{ event.name }}!",
    )
    return template
[ 417, 5892, 857, 671 ]
def METHOD_NAME(file, lnum, line):
    """Handle a DEFHEADING directive"""
    # The input should be "DEFHEADING(some string)", though note that
    # the 'some string' could be the empty string. If the string is
    # empty we ignore the directive -- these are used only to add
    # blank lines in the plain-text content of the --help output.
    #
    # Return the heading text. We strip out any trailing ':' for
    # consistency with other headings in the rST documentation.
    match = re.match(r'DEFHEADING\((.*?):?\)', line)
    if match is None:
        serror(file, lnum, "Invalid DEFHEADING line")
    return match.group(1)
[ 214, 10979 ]
def METHOD_NAME(uqcsbot: MockUQCSBot):
    """
    test minuscule string reply to thread and channel
    """
    uqcsbot.post_message(TEST_CHANNEL_ID, "NEUROMANCER", user=TEST_USER_ID)
    assert count_messages(uqcsbot) == 1
    thread = float(uqcsbot.test_messages.get(TEST_CHANNEL_ID, [])[-1].get('ts', 0))
    uqcsbot.post_message(TEST_CHANNEL_ID, "wintermute", reply_broadcast=True,
                         thread_ts=thread, user=TEST_USER_ID)
    sleep(1)
    assert count_messages(uqcsbot) == 3
[ 9, 600, 16093, 14673 ]
def METHOD_NAME(self):
    x = np.array([1])
    y = np.array([2])
    theta_E = 1.0
    gamma = 2.0
    f_sis = self.SIS.function(x, y, theta_E)
    f_spp = self.SPP.function(x, y, theta_E, gamma)
    f_x_sis, f_y_sis = self.SIS.derivatives(x, y, theta_E)
    f_x_spp, f_y_spp = self.SPP.derivatives(x, y, theta_E, gamma)
    f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = self.SIS.hessian(x, y, theta_E)
    f_xx_spp, f_xy_spp, f_yx_spp, f_yy_spp = self.SPP.hessian(x, y, theta_E, gamma)
    npt.assert_almost_equal(f_sis[0], f_spp[0], decimal=7)
    npt.assert_almost_equal(f_x_sis[0], f_x_spp[0], decimal=7)
    npt.assert_almost_equal(f_y_sis[0], f_y_spp[0], decimal=7)
    npt.assert_almost_equal(f_xx_sis[0], f_xx_spp[0], decimal=7)
    npt.assert_almost_equal(f_yy_sis[0], f_yy_spp[0], decimal=7)
    npt.assert_almost_equal(f_xy_sis[0], f_xy_spp[0], decimal=7)
[ 9, 979, 13613 ]
def METHOD_NAME(self) -> service_account.Credentials:
    """Obtaining creds based on Service account scenario"""
    credentials_json = self._raw_credentials.get("credentials_json")
    admin_email = self._raw_credentials.get("email")
    account_info = self._load_account_info(credentials_json)
    creds = service_account.Credentials.from_service_account_info(account_info, scopes=SCOPES)
    self._creds = creds.with_subject(admin_email)
[ 11591, 549, 598, 6471 ]
def METHOD_NAME(
    paths: Iterable[Path],
    exclude: Optional[Pattern],
    extend_exclude: Optional[Pattern],
    gitignore: Optional[PathSpec],
) -> Iterator[Path]:
    for path in paths:
        if not should_parse_path(path, exclude, extend_exclude, gitignore):
            continue

        if path.is_dir():
            yield from METHOD_NAME(
                path.iterdir(),
                exclude,
                extend_exclude,
                gitignore + get_gitignore(path) if gitignore is not None else None,
            )
        elif path.is_file():
            yield path
[ 3972, 1190 ]
def METHOD_NAME(name: str, exclude_id: Optional[int],
                webhook_type: str = schemas.WebhookType.webhook,
                tenant_id: Optional[int] = None) -> bool:
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            f"""SELECT EXISTS(SELECT 1
                              FROM public.webhooks
                              WHERE name ILIKE %(name)s
                                AND deleted_at ISNULL
                                AND type=%(webhook_type)s
                                {"AND webhook_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
            {"name": name, "exclude_id": exclude_id, "webhook_type": webhook_type})
        cur.execute(query)
        row = cur.fetchone()
    return row["exists"]
[ 954, 604, 156 ]
def METHOD_NAME(self):
    mapping = {-2: 0, -3: 1, -4: 2}
    a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    a = nx.attribute_mixing_matrix(
        self.N, "margin", mapping=mapping, normalized=False
    )
    np.testing.assert_equal(a, a_result)
    a = nx.attribute_mixing_matrix(self.N, "margin", mapping=mapping)
    np.testing.assert_equal(a, a_result / float(a_result.sum()))
[ 9, 309, 11704, 430, 2927 ]
def METHOD_NAME(self):
    return self._qsfp_ports
[ 5093, 907 ]
def METHOD_NAME(
    self, engine_type: str = "postgres"
) -> t.Tuple[str, t.List[t.Any]]:
    """
    Compiles the template ready for the engine - keeping the arguments
    separate from the template.
    """
    if self._frozen_compiled_strings is not None:
        return self._frozen_compiled_strings

    _, bundled, combined_args = self.bundle(
        start_index=1, bundled=[], combined_args=[]
    )
    if engine_type in ("postgres", "cockroach"):
        string = "".join(
            fragment.prefix + ("" if fragment.no_arg else f"${fragment.index}")
            for fragment in bundled
        )
    elif engine_type == "sqlite":
        string = "".join(
            fragment.prefix + ("" if fragment.no_arg else "?")
            for fragment in bundled
        )
    else:
        raise Exception("Engine type not recognised")

    return (string, combined_args)
[ 296, 144 ]
def METHOD_NAME(request, scope, scope_category, name, template_name="site_settings/list.html"):
    if not has_perm(request.user, 'site_settings.change_setting'):
        raise Http403

    settings = Setting.objects.filter(scope=scope, scope_category=scope_category, name=name).order_by('label')
    if not settings:
        raise Http404

    if request.method == 'POST':
        form = build_settings_form(request.user, settings)(request.POST, request.FILES)
        if form.is_valid():
            # this save method is overriden in the forms.py
            form.save()
            try:
                if form.cleaned_data['theme']:
                    from django.core.management import call_command
                    call_command('hide_settings', 'theme')
                    call_command('update_settings', 'themes.%s' % form.cleaned_data['theme'].lstrip())
            except:
                pass

            EventLog.objects.log()
            msg_string = 'Successfully saved %s settings' % name.replace('_', ' ').title()
            messages.add_message(request, messages.SUCCESS, _(msg_string))

            redirect_to = request.POST.get('next', '')
            if redirect_to:
                return HttpResponseRedirect(redirect_to)
    else:
        form = build_settings_form(request.user, settings)()

    return render_to_resp(request=request, template_name=template_name,
                          context={'form': form})
[ 97, 1333 ]
def METHOD_NAME(self, config: MetricsIngestConfiguration) -> TimeseriesCardinalityLimiter:
    namespace = config.cardinality_limiter_namespace
    if namespace not in self.rate_limiters:
        limiter = TimeseriesCardinalityLimiter(
            namespace,
            RedisCardinalityLimiter(**config.cardinality_limiter_cluster_options)
        )
        self.rate_limiters[namespace] = limiter

    return self.rate_limiters[namespace]
[ 19, -1 ]
def METHOD_NAME(self, cli):
    """
    Test clearing specific queues via ``jobs clear``.
    """
    job1 = self.enqueue()
    job2 = self.enqueue(queue=u"q1")
    self.enqueue(queue=u"q2")
    self.enqueue(queue=u"q2")
    self.enqueue(queue=u"q3")
    stdout = cli.invoke(ckan, [u"jobs", u"clear", u"q2", u"q3"]).output
    assert u"q2" in stdout
    assert u"q3" in stdout
    assert jobs.DEFAULT_QUEUE_NAME not in stdout
    assert u"q1" not in stdout
    all_jobs = self.all_jobs()
    assert set(all_jobs) == {job1, job2}
[ 9, 537, 3303, 6138 ]
def METHOD_NAME(self): """ Returns a list of all spike input buffers defined in the model. :return: a list of all spike input buffers. :rtype: list(VariableSymbol) """ ret = list() for BUFFER in self.get_input_buffers(): if BUFFER.is_spike_buffer(): ret.append(BUFFER) return ret
[ 19, 945, 2935 ]
def METHOD_NAME(op): """Find all the 'tir.For' nodes whose extent can be divided by 8.""" if isinstance(op, tvm.tir.For): if isinstance(op.extent, tvm.tir.IntImm): if op.extent.value % 8 == 0: loops.append(op)
[ 416, 14684 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.vnet_name = AAZStrArg(
        options=["--vnet-name"],
        help="The virtual network (VNet) name.",
        required=True,
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(self):
    staging = Repo.objects.get(name__iexact='staging')
    pkg = self.create_pkg()
    staging_pkg = self.create_pkg(repo=staging, pkgrel='2')
    FlagRequest.objects.create(pkgbase=pkg.pkgbase, repo=pkg.repo,
                               pkgver=pkg.pkgver, epoch=pkg.epoch,
                               ip_address='1.1.1.1')
    FlagRequest.objects.create(pkgbase=staging_pkg.pkgbase, repo=staging_pkg.repo,
                               pkgver=staging_pkg.pkgver, epoch=staging_pkg.epoch,
                               ip_address='1.1.1.1')

    with patch('devel.management.commands.reporead.logger') as logger:
        call_command('reporead', 'x86_64', 'devel/fixtures/core.db.tar.gz')
    logger.info.assert_called()

    objects = FlagRequest.objects.all()
    self.assertEqual(len(objects), 1)
    self.assertEqual(objects[0].pkgver, staging_pkg.pkgver)
[ 9, -1, 11189 ]
def METHOD_NAME(): """ Get a list of updates installed on the machine Returns: list: A list of installed updates CLI Example: .. code-block:: bash salt '*' wusa.list """ kbs = [] ret = salt.utils.win_pwsh.run_dict("Get-HotFix | Select HotFixID") for item in ret: kbs.append(item["HotFixID"]) return kbs
[ 245 ]
def METHOD_NAME(benchmark_spec):
    """Runs the Stencil2D benchmark. GPU clock speeds must be set already.

    Args:
        benchmark_spec: The benchmark specification. Contains all data that is
            required to run the benchmark.

    Returns:
        A list of sample.Sample objects.
    """
    vms = benchmark_spec.vms
    num_gpus = benchmark_spec.num_gpus
    master_vm = vms[0]
    num_iterations = FLAGS.stencil2d_iterations
    problem_sizes = FLAGS.stencil2d_problem_sizes
    num_processes = len(vms) * num_gpus

    metadata = {}
    metadata.update(cuda_toolkit.GetMetadata(master_vm))
    metadata['benchmark_version'] = BENCHMARK_VERSION
    metadata['num_iterations'] = num_iterations
    metadata['num_nodes'] = len(vms)
    metadata['num_processes'] = num_processes

    results = []
    for problem_size in problem_sizes:
        results.extend(
            _RunSingleIteration(master_vm, problem_size, num_processes,
                                num_iterations, metadata))
    return results
[ 22 ]
def METHOD_NAME(strip, scraperobj):
    if not scraperobj.stripUrl:
        # no indexing support
        return
    # test that the stripUrl regex matches the retrieved strip URL
    urlmatch = re.escape(scraperobj.stripUrl)
    urlmatch = PRINTF_MATCH.sub('.+', urlmatch)
    urlmatch = ARCHIVE_ORG_MATCH.sub(r'/\\d+/', urlmatch)
    ro = re.compile(urlmatch)
    mo = ro.match(strip.strip_url)
    if not mo:
        warnings.warn('strip URL {!r} does not match stripUrl pattern {}'.format(
            strip.strip_url, urlmatch))
[ 250, -1 ]
def METHOD_NAME(imgs, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), w=640, h=480):
    """
    Function adapted from gluoncv.data.transforms.presets.ssd, resizes the image to a preset size.

    :param imgs:
    :type imgs:
    :param mean:
    :type mean:
    :param std:
    :type std:
    :param w: Desired width of the output tensor.
    :type w: int
    :param h: Desired height of the output tensor.
    :type h: int
    :return:
    :rtype:
    """
    if isinstance(imgs, mx.nd.NDArray):
        imgs = [imgs]
    for im in imgs:
        assert isinstance(im, mx.nd.NDArray), "Expect NDArray, got {}".format(type(im))

    tensors = []
    origs = []
    for img in imgs:
        img = timage.imresize(img, w, h)
        orig_img = img.asnumpy().astype('uint8')
        img = mx.nd.image.to_tensor(img)
        img = mx.nd.image.normalize(img, mean=mean, std=std)
        tensors.append(img.expand_dims(0))
        origs.append(orig_img)
    if len(tensors) == 1:
        return tensors[0], origs[0]
    return tensors, origs
[ 1053, 9, 1128 ]
def METHOD_NAME(block_diag_operator):
    # We take the cholesky of each block on the diagonal.
    return linear_operator_block_diag.LinearOperatorBlockDiag(
        operators=[
            operator.cholesky() for operator in block_diag_operator.operators],
        is_non_singular=True,
        is_self_adjoint=False,
        is_square=True)
[ 3448, 573, 3449 ]
def METHOD_NAME(self):
    self.assertComputeFails(UnaryOperatorStream(type="sqrt"), ([0, -1, 1]))
[ 9, 1118, 2927 ]
def METHOD_NAME(mocker):
    fake_client = mocker.patch(
        "snapcraft.store.StoreClientCLI.close",
        autospec=True,
    )
    return fake_client
[ 1278, 1308, 1462 ]
def METHOD_NAME(filename: str):
    return any(filename.endswith(ext) for ext in ['.csv', '.csv.gz', '.csv.bz2'])
[ 137, 732 ]
def METHOD_NAME(self): """ Retrieves the operational status of the device Returns: A boolean value, True if device is operating properly, False if not """ if self.is_psu: temp_file = self.psu_hwmon_path + "psu_temp_fault" return self.get_presence() and (not int( self.__read_txt_file(temp_file))) file_str = "temp{}_input".format(self.ss_index) file_path = os.path.join(self.hwmon_path, file_str) raw_txt = self.__read_txt_file(file_path) if raw_txt is None: return False else: return int(raw_txt) != 0
[ 19, 452 ]
def METHOD_NAME(self, ignore_discard=True, ignore_expires=True):
    """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.

    ignore_discard and ignore_expires: see docstring for FileCookieJar.save
    """
    now = time.time()
    r = []
    for cookie in self:
        if not ignore_discard and cookie.discard:
            continue
        if not ignore_expires and cookie.is_expired(now):
            continue
        r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
    return "\n".join(r + [""])
[ 947, 6034, 3 ]
def METHOD_NAME(): """DEPRECATED Return the effective number of CPUs in the system as an integer. This cross-platform function makes an attempt at finding the total number of available CPUs in the system, as returned by various underlying system and python calls. If it can't find a sensible answer, it returns 1 (though an error *may* make it return a large positive number that's actually incorrect). """ import warnings warnings.warn( "`num_cpus` is deprecated since IPython 8.0. Use `os.cpu_count` instead.", DeprecationWarning, stacklevel=2, ) return os.cpu_count() or 1
[ 181, 938 ]
def METHOD_NAME(
    data_or_fn: Union[T, Callable[[], T]],
    stage_name: Optional[str] = None,
    pg: Optional[dist.ProcessGroup] = None,
) -> List[T]:
    """
    A simple all_gather primitive with basic synchronization guard logic,
    by checking payload from all ranks has the same stage name.

    Args:
        data_or_fn: the data to be all gathered across ranks or function to be executed
        stage_name: the sync stage name for out-of-sync protection
        pg: the process group for sync
    Throws:
        RuntimeError from original exception trace
    Returns:
        a list of synced data from all ranks

    Example usage:
    >> all_ids = all_gather(data_or_fn=allocate_id, pg=ext_pg.my_pg)
    """
    payload: Optional[T] = None
    exception: Optional[Exception] = None
    success = True
    # determine if it is an executable function or data payload only
    if callable(data_or_fn):
        try:
            payload = data_or_fn()
        except Exception as e:
            success = False
            exception = e
    else:
        payload = data_or_fn

    sync_obj = SyncPayload(
        stage_name=stage_name,
        success=success,
        payload=payload,
        exception=exception,
    )

    if pg is not None:
        # List of success/failure across all ranks.
        total_list = [None] * dist.get_world_size(pg)
        all_gather_object_enforce_type(pg, total_list, sync_obj)
        # Each rank will throw RuntimeError in case of failure on any rank.
        stage_name = cast(SyncPayload[T], total_list[0]).stage_name
        exception_list: List[Tuple[int, Exception]] = []
        ret_list: List[T] = []
        error_msg: str = ""

        for i, sp in enumerate(cast(List[SyncPayload[T]], total_list)):
            if sp.stage_name != stage_name:
                error_msg += (
                    f"Unexpected stage name received from rank {i}: {sp.stage_name} "
                )
                continue
            if not sp.success and sp.exception is not None:
                exception_list.append((i, sp.exception))
                continue
            ret_list.append(sp.payload)

        if len(exception_list) > 0:
            raise RuntimeError(  # type: ignore[misc]
                error_msg, exception_list) from exception_list[0]
        return ret_list
    else:
        if not sync_obj.success:
            raise RuntimeError(
                f"all_gather failed with exception {sync_obj.exception}",
            ) from sync_obj.exception
        return [sync_obj.payload]  # type: ignore[list-item]
[ 75, 1432 ]