Columns: text (string, lengths 15 to 7.82k); ids (sequence, lengths 1 to 7)
def METHOD_NAME(text, indent_string=" "):
    """Indent each line of text with the given indent string."""
    return os.linesep.join(f"{indent_string}{x}" for x in text.splitlines())
[ 4 ]
def METHOD_NAME(self):
    self.upstream_spec = self.get_upstream_provider().spec

    # clear bounding boxes of all provided arrays and points --
    # SpecifiedLocation does know its locations at setup (checks on the fly)
    for key, spec in self.spec.items():
        spec.roi.shape = (None,) * spec.roi.dims
        self.updates(key, spec)
[ 102 ]
def METHOD_NAME(self, chat_id, message):
    """
    Send message to chat_id
    :param chat_id: int
    :param message: string
    :return: Boolean
    """
    url = self.tg_url_bot_general + self.token + "/sendMessage"
    params = {"chat_id": chat_id, "text": message, "parse_mode": self.parse_mode,
              "disable_notification": self.disable_notification}
    srv.logging.debug("Trying to /sendMessage: {url}".format(url=url))
    srv.logging.debug("post params: %r", params)
    res = requests.post(url, params=params)
    answer = res.text
    answer_json = json.loads(answer)
    if not answer_json["ok"]:
        srv.logging.warn(answer_json)
        return False
    else:
        return answer_json
[ 353, 277 ]
def METHOD_NAME(fd, settings): ssize = struct.calcsize("<iLiiLL") currtty, prevtime, prefdir = 0, 0, 0 color = None stdout = sys.stdout.buffer while 1: try: (op, tty, length, dir, sec, usec) = struct.unpack("<iLiiLL", fd.read(ssize)) data = fd.read(length) except struct.error: if settings["tail"]: prevtime = 0 time.sleep(0.1) settings["maxdelay"] = 0 continue break if currtty == 0: currtty = tty if str(tty) == str(currtty) and op == OP_WRITE: # the first stream seen is considered 'output' if prefdir == 0: prefdir = dir # use the other direction if settings["input_only"]: prefdir = TYPE_INPUT if dir == TYPE_INPUT: prefdir = TYPE_OUTPUT if dir == TYPE_INTERACT: color = b"\033[36m" elif dir == TYPE_INPUT: color = b"\033[33m" if dir == prefdir or settings["both_dirs"]: curtime = float(sec) + float(usec) / 1000000 if prevtime != 0: sleeptime = curtime - prevtime if sleeptime > settings["maxdelay"]: sleeptime = settings["maxdelay"] if settings["maxdelay"] > 0: time.sleep(sleeptime) prevtime = curtime if settings["colorify"] and color: stdout.write(color) stdout.write(data) if settings["colorify"] and color: stdout.write(b"\033[0m") color = None sys.stdout.flush() elif str(tty) == str(currtty) and op == OP_CLOSE: break
[ -1 ]
def METHOD_NAME(rawtext, app, type, slug, options):
    """Create a link to a github resource.

    :param rawtext: Text being replaced with link node.
    :param app: Sphinx application context
    :param type: Link type (issues, changeset, etc.)
    :param slug: ID of the thing to link to
    :param options: Options dictionary passed to role func.
    """
    try:
        base = app.config.github_project_url
        if not base:
            raise AttributeError
        if not base.endswith('/'):
            base += '/'
    except AttributeError as err:
        raise ValueError(
            'github_project_url configuration value is not set (%s)' % str(err)
        )

    ref = base + type + '/' + slug + '/'
    set_classes(options)
    prefix = '#'
    if type == 'pull':
        prefix = 'PR ' + prefix
    node = nodes.reference(
        rawtext, prefix + utils.unescape(slug), refuri=ref, **options
    )
    return node
[ 93, 548, 1716 ]
def METHOD_NAME(self, task, config):
    """Purge remembered entries if the config has changed."""
    with Session() as session:
        # See if the task has changed since last run
        old_task = (
            session.query(db.RememberTask).filter(db.RememberTask.name == task.name).first()
        )
        if not task.is_rerun and old_task and task.config_modified:
            logger.debug('Task config has changed since last run, purging remembered entries.')
            session.delete(old_task)
            old_task = None
        if not old_task:
            # Create this task in the db if not present
            session.add(db.RememberTask(name=task.name))
        elif not task.is_rerun:
            # Delete expired items if this is not a rerun
            deleted = (
                session.query(db.RememberEntry)
                .filter(db.RememberEntry.task_id == old_task.id)
                .filter(db.RememberEntry.expires < datetime.now())
                .delete()
            )
            if deleted:
                logger.debug('{} entries have expired from remember_rejected table.', deleted)
                task.config_changed()
[ 69, 758, 447 ]
def METHOD_NAME(n, a, b):
    diagonals = [-a * 2 * (n + 1) ** 2 * np.ones((n,)),
                 (a * (n + 1) ** 2 + b * (n + 1) / 2) * np.ones((n - 1,)),
                 (a * (n + 1) ** 2 - b * (n + 1) / 2) * np.ones((n - 1,))]
    A = sps.diags(diagonals, [0, -1, 1])
    return A
[ 2443, 1306, 2481, 2808 ]
def METHOD_NAME(file: str) -> str:
    path = os.path.join(os.path.dirname(__file__), '..', 'data', file)
    with open(path) as f:
        return f.read()
[ 19, 2485, 4108, 146 ]
def METHOD_NAME(self):
    self.cpp_info.libs = ["libmysql" if self.options.shared and self.settings.os == "Windows" else "mysqlclient"]
    if not self.options.shared:
        stdcpp_library = tools.stdcpp_library(self)
        if stdcpp_library:
            self.cpp_info.system_libs.append(stdcpp_library)
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs.append('m')
[ 360, 100 ]
def METHOD_NAME(self, init_dict):
    """
    Sets the orbital_dict, which can vary depending on the particular
    implementation of this base class.

    :param init_dict: the initialization dictionary
    """
    self._orbital_dict = self._validate_keys(init_dict)
[ 0, 12378, 553 ]
def METHOD_NAME(keypoints, images=None):
    return keypoints
[ 695, 1952 ]
def METHOD_NAME(self) -> ConfigurationList:
    warn(
        "configuration is deprecated. Use v1.configuration instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.v1.METHOD_NAME
[ 830 ]
def METHOD_NAME(self, cli):
    """Command shouldn't raise SystemExit when fullname arg not present."""
    args = [
        u"user",
        u"add",
        u"berty",
        u"password=password123",
        u"[email protected]",
    ]
    result = cli.invoke(ckan, args)
    assert not result.exit_code
[ 9, 615, 21, 238, 654, 9582 ]
def METHOD_NAME(mock_get_backend_for_scheme: mock.Mock) -> None:
    _direct_url_from_link = partial(direct_url_from_link, source_dir="...")
    direct_url = _direct_url_from_link(Link("git+https://g.c/u/p.git"))
    assert direct_url.url == "https://g.c/u/p.git"
    assert isinstance(direct_url.info, VcsInfo)
    assert direct_url.info.vcs == "git"
    direct_url = _direct_url_from_link(Link("git+https://g.c/u/p.git#egg=pkg"))
    assert direct_url.url == "https://g.c/u/p.git"
    direct_url = _direct_url_from_link(
        Link("git+https://g.c/u/p.git#egg=pkg&subdirectory=subdir")
    )
    assert direct_url.url == "https://g.c/u/p.git"
    assert direct_url.subdirectory == "subdir"
    direct_url = _direct_url_from_link(Link("git+https://g.c/u/p.git@branch"))
    assert direct_url.url == "https://g.c/u/p.git"
    assert isinstance(direct_url.info, VcsInfo)
    assert direct_url.info.requested_revision == "branch"
    direct_url = _direct_url_from_link(Link("git+https://g.c/u/p.git@branch#egg=pkg"))
    assert direct_url.url == "https://g.c/u/p.git"
    assert isinstance(direct_url.info, VcsInfo)
    assert direct_url.info.requested_revision == "branch"
    direct_url = _direct_url_from_link(Link("git+https://[email protected]/u/p.git"))
    assert direct_url.to_dict()["url"] == "https://g.c/u/p.git"
[ 9, 280, 548, 1623 ]
def METHOD_NAME(*args, **kwargs):
    try:
        func(*args, **kwargs)
    except KeyboardInterrupt:
        pass
[ 291 ]
def METHOD_NAME(scope, field_value):
    """List of values to search for; `field_value`, plus possibly variants on it"""
    if scope in ["congressional_code", "county_code"]:
        try:
            # Congressional and county codes are not uniform and contain multiple variables
            # In the location table Ex congressional code (01): '01', '1.0', '1'
            return [str(int(field_value)), field_value, str(float(field_value))]
        except ValueError:
            # if filter causes an error when casting to a float or integer
            # Example: 'ZZ' for an area without a congressional code
            pass
    return [field_value]
[ 19, 342, 245 ]
def METHOD_NAME(self) -> str:
    """
    The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
    """
    return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(self):
    if self.input_fd is not None or self.executable_cmd.split(os.pathsep)[-1] in CommandConfig._ANSIBLE_NON_INERACTIVE_CMDS:
        self.runner_mode = 'subprocess'
    else:
        self.runner_mode = 'pexpect'
[ 0, 1102, 854 ]
def METHOD_NAME(language):
    f1 = multi_rank.mul_by_vector_dim_1_C_C
    f2 = epyccel( f1, language = language )

    x1 = np.array(rand(3,5)*10, dtype=int)
    x2 = np.copy(x1)

    f1(x1)
    f2(x2)

    assert np.array_equal( x1, x2 )
[ 9, 1998, 604, 798, 3014, 1170, 2629 ]
def METHOD_NAME(process):
    from L1Trigger.Configuration.L1Trigger_custom import customiseL1Menu
    process = customiseL1Menu(process)
    return process
[ 5317 ]
def METHOD_NAME(self) -> str:
    """
    A unique read-only string that changes whenever the resource is updated.
    """
    return pulumi.get(self, "etag")
[ 431 ]
def METHOD_NAME(fleet_of_highperf_mocked_ursulas,
                highperf_mocked_alice,
                highperf_mocked_bob):
    mocks = (
        mock_secret_source(),
        mock_cert_loading,
        mock_metadata_validation,
        mock_message_verification,
    )

    with contextlib.ExitStack() as stack:
        for mock in mocks:
            stack.enter_context(mock)

        policy = highperf_mocked_alice.grant(
            highperf_mocked_bob, b"any label", threshold=20, shares=30,
            expiration=maya.when('next week'))

        # TODO: Make some assertions about policy.
        total_verified = sum(node.verified_node for node in highperf_mocked_alice.known_nodes)
        # Alice may be able to verify more than `n`, but certainly not less,
        # otherwise `grant()` would fail.
        assert total_verified >= 30
        _POLICY_PRESERVER.append(policy)
[ 9, 14648, 6418, 15840, 7852, 623, 104 ]
def METHOD_NAME(self) -> None:
    menu: "Menu" = self.parent.Plugins.Menu
    self._mitems: List[MenuItem] = []

    menu.unregister(self)
    menu.add(self, 52, text=_("Recent _Connections"), icon_name="document-open-recent-symbolic",
             sensitive=False, callback=lambda: None)

    for (idx, item) in enumerate(self.__menuitems):
        self._mitems.append(menu.add(self, (53, idx), **item))

    menu.add(self, 59)
[ 2040, 2470 ]
def METHOD_NAME():
    # Tests a case in which a child node adds another light.
    lattr1 = core.LightAttrib.make()
    lattr1 = lattr1.add_on_light(spot)

    lattr2 = core.LightAttrib.make()
    lattr2 = lattr2.add_on_light(point)

    lattr3 = lattr1.compose(lattr2)
    assert lattr3.get_num_on_lights() == 2
    assert spot in lattr3.on_lights
    assert point in lattr3.on_lights
[ 9, -1, 166, 238 ]
def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(self):
    patch = create_patch(
        name='Patch name with " character',
        project=self.project
    )
    PatchChangeNotification(patch=patch, orig_state=patch.state).save()
    self._expire_notifications()

    errors = send_notifications()
    self.assertEqual(errors, [])
    self.assertEqual(len(mail.outbox), 1)

    msg = mail.outbox[0]
    self.assertEqual(msg.to, [patch.submitter.email])
    self.assertNotIn('&quot;', msg.body)
[ 9, 857, 10850 ]
def METHOD_NAME(self) -> None:
    """
    Test Publish functionality with no data
    """
    with patch('builtins.open', mock_open(read_data='')) as mock_file:
        publisher = ElasticsearchPublisher()
        publisher.init(conf=Scoped.get_scoped_conf(conf=self.conf, scope=publisher.get_scope()))

        # assert mock was called with test_file_path and test_file_mode
        mock_file.assert_called_with(self.test_file_path, self.test_file_mode)

        publisher.publish()
        # no calls should be made through elasticseach_client when there is no data
        self.assertTrue(self.mock_es_client.call_count == 0)
[ 9, 2411, 41, 654, 365 ]
def METHOD_NAME(self, other):
    lnum = to_int32(self)
    rnum = to_uint32(other)
    shiftCount = rnum & 0x1F
    return float(to_int32(float(lnum >> shiftCount)))
[ 6222, 11013, 441 ]
def METHOD_NAME(self, axis):
    """Test the `Logsoftmax` substitution.

    Check that ``Log(Softmax(x))`` is substituted with ``Logsoftmax(x)``. Note that
    only the forward pass is checked (i.e., doesn't check the gradient)
    """
    x = matrix("x")
    sm = softmax(x, axis=axis)
    logsm = log(sm)

    fgraph = FunctionGraph([x], [logsm])
    _fast_run_rewrites.rewrite(fgraph)

    assert isinstance(fgraph.outputs[0].owner.op, LogSoftmax)
    assert check_stack_trace(fgraph, ops_to_check=LogSoftmax)
    assert check_stack_trace(fgraph, ops_to_check="all")
[ 9, 125, 12336, 2887 ]
def METHOD_NAME(test_data_dir, offline_keys): filename = "root.json" filepath = Path(test_data_dir) / filename timestamp = datetime.now(timezone.utc) # avoid failing test due to expired role expiration = timestamp + timedelta(days=365) json_role = { "signatures": {}, "signed": { "delegations": { "key_mgr": { "pubkeys": [offline_keys["key_mgr"][0]], "threshold": 1, }, "root": { "pubkeys": [offline_keys["root"][0]], "threshold": 1, }, }, "expiration": expiration.strftime('%Y-%m-%dT%H:%M:%SZ'), "metadata_spec_version": "0.6.0", "timestamp": timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'), "type": "root", "version": 1, }, } signature = libmamba_api.sign( json.dumps(json_role["signed"], indent=2), offline_keys["root"][1] ) json_role["signatures"][offline_keys["root"][0]] = {"signature": signature} with open(filepath, "w") as f: json.dump(json_role, f, indent=2) return filepath
[ 1563, 1018, 171 ]
def METHOD_NAME(self):
    # nand Z to inv A
    z1_pin = self.nand_inst.get_pin("Z")
    a2_pin = self.driver_inst.get_pin("A")
    if OPTS.tech_name == "sky130":
        mid1_point = vector(a2_pin.cx(), z1_pin.cy())
    else:
        mid1_point = vector(z1_pin.cx(), a2_pin.cy())
    self.add_path(self.route_layer,
                  [z1_pin.center(), mid1_point, a2_pin.center()])
[ 2476, 4160 ]
def METHOD_NAME(self, arg):
    self.onTheFly = True
    return DIRAC.S_OK()
[ 0, 69, 983, 13978 ]
def METHOD_NAME(self, type_or_type_name):
    """ Returns the full dotted path for a type. For example:
    from traits.api import HasTraits
    _get_type_name(HasTraits) == 'traits.has_traits.HasTraits'

    If the type is given as a string (e.g., for lazy loading), it is just
    returned.
    """
    if isinstance(type_or_type_name, str):
        type_name = type_or_type_name
    else:
        type_name = "{module}.{name}".format(
            module=type_or_type_name.__module__,
            name=type_or_type_name.__name__,
        )
    return type_name
[ 19, 44, 156 ]
def METHOD_NAME(self, X): """Learn unique column names from transaction DataFrame Parameters ------------ X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] """ unique_items = set() for transaction in X: for item in transaction: unique_items.add(item) self.columns_ = sorted(unique_items) columns_mapping = {} for col_idx, item in enumerate(self.columns_): columns_mapping[item] = col_idx self.columns_mapping_ = columns_mapping return self
[ 90 ]
def METHOD_NAME(self, addr, val):
    msg = DC_Write32()
    msg.addr = addr
    msg.data = val
    msg.unk_10 = 0
    msg.unk_14 = 0
    msg.unk_18 = 0
    msg.unk_1c = 0
    print(msg)
    self.send_message(msg)
[ 17714 ]
def METHOD_NAME(expected: str, par: DocParagraph, reason: str):
    bs = par_list_to_html_list(
        [par],
        settings=d.document.get_settings(),
        view_ctx=default_view_ctx,
    )
    self.assertEqual(expected.strip(), bs[0], reason)
[ 250, 382 ]
async def METHOD_NAME( client: TestClient, storage_s3_client: StorageS3Client, storage_s3_bucket: S3BucketName, user_id: UserID, location_id: LocationID, ) -> Callable[..., Awaitable[None]]: async def _dir_remover(directory_file_upload: FileUploadSchema) -> None: assert directory_file_upload.urls[0].path directory_file_id = directory_file_upload.urls[0].path.strip("/") assert client.app delete_url = ( client.app.router["delete_file"] .url_for( location_id=f"{location_id}", file_id=urllib.parse.quote(directory_file_id, safe=""), ) .with_query(user_id=user_id) ) response = await client.delete(f"{delete_url}") await assert_status(response, web.HTTPNoContent) # NOTE: ensures no more files are left in the directory, # even if one file is left this will detect it files = await storage_s3_client.list_files( bucket=storage_s3_bucket, prefix=directory_file_id ) assert len(files) == 0 return _dir_remover
[ 34, 2851 ]
def METHOD_NAME(family_id):
    """
    Return the number of types in a loaded family.
    """
    if isinstance(family_id, int):
        family_id = DB.ElementId(family_id)
    elif not isinstance(family_id, DB.ElementId):
        raise TypeError()
    family = revit.doc.GetElement(family_id)
    return len(list(family.GetFamilySymbolIds()))
[ 6498, 44, 29 ]
def METHOD_NAME():
    training_args, model_args = PdArgumentParser([TrainingArguments, ModelArguments]).parse_args_into_dataclasses()
    training_args: TrainingArguments = training_args
    model_args: ModelArguments = model_args

    training_args.print_config(model_args, "Model")
    training_args.print_config(training_args, "Training")

    model_args.task_name = model_args.task_name.lower()
    sentence1_key, sentence2_key = task_to_keys[model_args.task_name]

    train_ds = load_dataset("glue", model_args.task_name, split="train")
    columns = train_ds.column_names
    is_regression = model_args.task_name == "stsb"
    label_list = None
    if not is_regression:
        label_list = train_ds.features["label"].names
        num_labels = len(label_list)
    else:
        num_labels = 1

    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)

    def preprocess_function(examples):
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],)
            if sentence2_key is None
            else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*texts, max_length=model_args.max_seq_length, truncation=True)

        if "label" in examples:
            # In all cases, rename the column to labels because the model will expect that.
            result["labels"] = examples["label"]
        return result

    train_ds = train_ds.map(preprocess_function, batched=True, remove_columns=columns)
    data_collator = DataCollatorWithPadding(tokenizer)

    if model_args.task_name == "mnli":
        dev_ds_matched, dev_ds_mismatched = load_dataset(
            "glue", model_args.task_name, split=["validation_matched", "validation_mismatched"]
        )
        dev_ds_matched = dev_ds_matched.map(preprocess_function, batched=True, remove_columns=columns)
        dev_ds_mismatched = dev_ds_mismatched.map(preprocess_function, batched=True, remove_columns=columns)
        dev_ds = {"matched": dev_ds_matched, "mismatched": dev_ds_mismatched}
    else:
        dev_ds = load_dataset("glue", model_args.task_name, split="validation")
        dev_ds = dev_ds.map(preprocess_function, batched=True, remove_columns=columns)

    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, num_labels=num_labels)

    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        if is_regression:
            preds = np.squeeze(preds)
        preds = paddle.to_tensor(preds)
        label = paddle.to_tensor(p.label_ids)

        metric = METRIC_CLASSES[model_args.task_name]()
        result = metric.compute(preds, label)
        metric.update(result)
        if isinstance(metric, AccuracyAndF1):
            acc, precision, recall, f1, _ = metric.accumulate()
            return {"accuracy": acc, "precision": precision, "recall": recall, "f1": f1}
        elif isinstance(metric, Mcc):
            mcc = metric.accumulate()
            return {"mcc": mcc[0]}
        elif isinstance(metric, PearsonAndSpearman):
            pearson, spearman, _ = metric.accumulate()
            return {"pearson": pearson, "spearman": spearman}
        elif isinstance(metric, Accuracy):
            acc = metric.accumulate()
            return {"accuracy": acc}

    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_ds if training_args.METHOD_NAME else None,
        eval_dataset=dev_ds,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )

    # training
    if training_args.METHOD_NAME:
        train_result = trainer.train()
        metrics = train_result.metrics
        trainer.save_model()
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    if training_args.do_eval:
        if model_args.task_name == "mnli":
            for _, eval_dataset in dev_ds.items():
                eval_metrics = trainer.evaluate(eval_dataset)
                trainer.log_metrics("eval", eval_metrics)
                trainer.save_metrics("eval", eval_metrics)
        else:
            eval_metrics = trainer.evaluate(dev_ds)
            trainer.log_metrics("eval", eval_metrics)
            trainer.save_metrics("eval", eval_metrics)
[ 74, 849 ]
def METHOD_NAME():
    return {
        'schema_version': '1',
        'status': 'active',
        'title': 'Test pipeline',
    }
[ 1148, 1170 ]
def METHOD_NAME(self):
    return self.driver_type == "bpf"
[ 137, 9417 ]
def METHOD_NAME(self):
    try:
        import awscrt.auth  # noqa
        assert HAS_CRT
    except ImportError:
        assert not HAS_CRT
[ 9, 220, 2159, 285 ]
def METHOD_NAME(self):
    filenames = [
        "safety_checker/pytorch_model.bin",
        "safety_checker/model.safetensors",
        "vae/diffusion_pytorch_model.bin",
        "vae/diffusion_pytorch_model.safetensors",
        "text_encoder/pytorch_model.bin",
        # Removed: 'text_encoder/model.safetensors',
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
    ]
    self.assertFalse(is_safetensors_compatible(filenames))
[ 9, 1052, 578, 137, 130, 3892 ]
def METHOD_NAME(): params = { "nodes": [ { "id": "id", "op_type": "Conv", "input_names": ["in1", "in2"], "output_names": ["out1"], "input_shapes": [[16, 3, 3, 3]], "output_shapes": [[16, 16, 2, 2]], "flops": 27712, "params": 448, "prunable": True, "prunable_params_zeroed": 0, "weight_name": "conv.section.1.weight", "weight_shape": [16, 3, 3, 3], "bias_name": "conv.section.1.bias", "bias_shape": [16], "attributes": {"kernel": 1}, "prunable_equation_sensitivity": None, }, { "id": "id2", "op_type": "Gemm", "input_names": ["in1"], "output_names": ["out1"], "input_shapes": [[16, 32]], "output_shapes": [[16, 10]], "flops": 650, "params": 330, "prunable": True, "prunable_params_zeroed": 0, "weight_name": "conv.section.1.weight", "weight_shape": [10, 32], "bias_name": "conv.section.1.bias", "bias_shape": [10], "attributes": {"kernel": 1}, "prunable_equation_sensitivity": None, }, ] } nodes = [ NodeAnalyzer(model=None, node=None, **params["nodes"][0]), NodeAnalyzer(model=None, node=None, **params["nodes"][1]), ] analyzer = ModelAnalyzer.from_dict(params) assert sorted(analyzer.nodes, key=lambda node: node.id_) == sorted( nodes, key=lambda node: node.id_ )
[ 9, 854, 2224, 763 ]
def METHOD_NAME(zip_path, extract_path, password=b"infected", recursion_depth=1): """Extracts a nested ZIP file. @param zip_path: ZIP path @param extract_path: where to extract @param password: ZIP password @param recursion_depth: how deep we are in a nested archive """ # Test if zip file contains a file named as itself. if is_overwritten(zip_path): log.debug("ZIP file contains a file with the same name, original will be overwritten") # TODO: add random string. new_zip_path = f"{zip_path}.old" shutil.move(zip_path, new_zip_path) zip_path = new_zip_path # requires bytes not str if isinstance(password, str): password = password.encode() # Extraction. with ZipFile(zip_path, "r") as archive: # Check if the archive is encrypted for zip_info in archive.infolist(): is_encrypted = zip_info.flag_bits & 0x1 # If encrypted and the user didn't provide a password # set to default value if is_encrypted and (password in (b"", b"infected")): log.debug("Archive is encrypted, using default password value: infected") if password == b"": password = b"infected" # Else, either password stays as user specified or archive is not encrypted try: archive.extractall(path=extract_path, pwd=password) except BadZipfile as e: raise CuckooPackageError("Invalid Zip file") from e except RuntimeError: # Try twice, just for kicks try: archive.extractall(path=extract_path, pwd=password) except RuntimeError as e: raise CuckooPackageError(f"Unable to extract Zip file: {e}") from e finally: if recursion_depth < 4: # Extract nested archives. for name in archive.namelist(): if name.endswith(".zip"): # Recurse. try: METHOD_NAME( os.path.join(extract_path, name), extract_path, password=password, recursion_depth=recursion_depth + 1, ) except BadZipfile: log.warning("Nested file '%s' name ends with .zip extension is not a valid Zip. Skip extraction", name) except RuntimeError as run_err: log.error("Error extracting nested Zip file %s with details: %s", name, run_err)
[ 297, 1426 ]
def METHOD_NAME(env): # Nuitka: Check for MinGW64. key_program = 'gcc' # First search in the SCons path path=env.WhereIs(key_program) if (path): return path # then the OS path: path=SCons.Util.WhereIs(key_program) if (path): return path # If that doesn't work try default location for mingw save_path = env['ENV']['PATH'] # This should allow installing both into the same place and picking arch # just automatically. if env["TARGET_ARCH"] == "x86_64": env.AppendENVPath('PATH',r'c:\MinGW64\mingw64\bin') env.AppendENVPath('PATH',r'\MinGW64\mingw64\bin') else: env.AppendENVPath('PATH',r'c:\MinGW64\mingw32\bin') env.AppendENVPath('PATH',r'\MinGW64\mingw32\bin') # Older versions of MinGW just has this. env.AppendENVPath('PATH',r'c:\MinGW64\bin') env.AppendENVPath('PATH',r'\MinGW64\bin') path =env.WhereIs(key_program) if not path: env['ENV']['PATH'] = save_path return path
[ 416 ]
def METHOD_NAME(pos_list_elem, swap_axes):
    # retrieve the number of elements per point
    dims = int(pos_list_elem.attrib.get('srsDimension', '2'))

    parts = [float(coord) for coord in pos_list_elem.text.strip().split()]

    ring = []
    i = 0
    while i < len(parts):
        ring.append(
            (parts[i + 1], parts[i]) if swap_axes else (parts[i], parts[i + 1])
        )
        i += dims
    return ring
[ 214, 934, 245 ]
def METHOD_NAME(self, cloud_provider, skip_dashboard=False): # Clean up existing findings for service in cloud_provider.services: cloud_provider.services[service][self.ruleset.rule_type] = {} # Process each rule for finding_path in self._filter_rules(self.rules, cloud_provider.service_list): for rule in self.rules[finding_path]: if not rule.enabled: # or rule.service not in []: # TODO: handle this... continue print_debug(f'Processing {rule.service} rule "{rule.description}" ({rule.filename})') finding_path = rule.path path = finding_path.split('.') service = path[0] manage_dictionary(cloud_provider.services[service], self.ruleset.rule_type, {}) cloud_provider.services[service][self.ruleset.rule_type][rule.key] = {} cloud_provider.services[service][self.ruleset.rule_type][rule.key]['description'] = rule.description cloud_provider.services[service][self.ruleset.rule_type][rule.key]['path'] = rule.path for attr in ['level', 'id_suffix', 'class_suffix', 'display_path']: if hasattr(rule, attr): cloud_provider.services[service][self.ruleset.rule_type][rule.key][attr] = getattr(rule, attr) try: setattr(rule, 'checked_items', 0) cloud_provider.services[service][self.ruleset.rule_type][rule.key]['items'] = recurse( cloud_provider.services, cloud_provider.services, path, [], rule, True) if skip_dashboard: continue cloud_provider.services[service][self.ruleset.rule_type][rule.key]['dashboard_name'] = \ rule.dashboard_name cloud_provider.services[service][self.ruleset.rule_type][rule.key]['checked_items'] = \ rule.checked_items cloud_provider.services[service][self.ruleset.rule_type][rule.key]['flagged_items'] = \ len(cloud_provider.services[service][self.ruleset.rule_type][rule.key]['items']) cloud_provider.services[service][self.ruleset.rule_type][rule.key]['service'] = rule.service cloud_provider.services[service][self.ruleset.rule_type][rule.key]['rationale'] = \ rule.rationale if hasattr(rule, 'rationale') else None cloud_provider.services[service][self.ruleset.rule_type][rule.key]['remediation'] = \ rule.remediation if hasattr(rule, 'remediation') else None cloud_provider.services[service][self.ruleset.rule_type][rule.key]['compliance'] = \ rule.compliance if hasattr(rule, 'compliance') else None cloud_provider.services[service][self.ruleset.rule_type][rule.key]['references'] = \ rule.references if hasattr(rule, 'references') else None except Exception as e: print_exception(f'Failed to process rule defined in {rule.filename}: {e}') # Fallback if process rule failed to ensure report creation and data dump still happen cloud_provider.services[service][self.ruleset.rule_type][rule.key]['checked_items'] = 0 cloud_provider.services[service][self.ruleset.rule_type][rule.key]['flagged_items'] = 0
[ 22 ]
def METHOD_NAME(self):
    # Repeat the experiments with lastPatchMode='repeat'. Now if there
    # are remaining frames they will be looped into a final patch.
    # The found shape will be equal or bigger than the expected one.
    # Found values will be trimmed to fit the expected shape.

    # No remaining frames.
    numberOfFrames = 43
    found, expected = self.identityOperation(patchSize=numberOfFrames, lastPatchMode='repeat')
    self.assertAlmostEqualMatrix(found, expected, 1e-8)

    # Some remaining frames.
    found, expected = self.identityOperation(frameSize=256, hopSize=128, lastPatchMode='repeat')
    self.assertAlmostEqualMatrix(found[:expected.shape[0], :], expected, 1e-8)

    # Increase the patch size.
    found, expected = self.identityOperation(frameSize=256, hopSize=128, patchSize=300, lastPatchMode='repeat')
    self.assertAlmostEqualMatrix(found[:expected.shape[0], :], expected, 1e-8)
[ 9, 1427, 24, 768, 61, 318, 24 ]
def METHOD_NAME(batch_size, device, tile, ratio, angle):
    pipe = get_pipeline(device, batch_size, tile, ratio, angle)
    pipe.build()
    results, inputs = pipe.run()
    if device == 'gpu':
        results, inputs = results.as_cpu(), inputs.as_cpu()
    for i in range(batch_size):
        check(results[i], inputs[i], tile, ratio, angle)
[ 22, 9 ]
def METHOD_NAME(self):
    if not self.xbmc_monitor.abortRequested():
        self.listitem_monitor.clear_properties()
        get_property('ServiceStarted', clear_property=True)
        get_property('ServiceStop', clear_property=True)
    del self.player_monitor
    del self.update_monitor
    del self.listitem_monitor
    del self.xbmc_monitor
[ 69, 538 ]
def METHOD_NAME(input_path, output_path):
    if not os.path.exists(input_path):
        print("Path doesn't exist: %s" % (input_path))
        sys.exit(2)

    if not os.path.exists(output_path):
        print("Path doesn't exist: %s" % (output_path))
        sys.exit(2)

    logo_files = get_logo_files(input_path=input_path)
    setup(output_path=output_path)
    resized_images = resize_images(logo_files=logo_files, output_path=output_path)
    assemble_final_image(resized_images=resized_images, output_path=output_path)
[ 57 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    The published version of the archive.
    """
    return pulumi.get(self, "published_version")
[ 5892, 281 ]
def METHOD_NAME(self):
    self._test_polyarithm_basic(polysub, ignore_sign_on_zero=True)
[ 9, 16126, 756 ]
def METHOD_NAME(self):
    """
    Test :func:`colour.models.rgb.prismatic.Prismatic_to_RGB` definition
    nan support.
    """
    cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
    cases = np.array(list(set(product(cases, repeat=3))))
    Prismatic_to_RGB(cases)
[ 9, 4082, 6622, 24, 2310 ]
def METHOD_NAME(self, result):
    result_atr = []
    path = []
    path_node = []
    for r in result:
        self.result_attrib = r.attrib
        self.path_attrib = r.find('Path').attrib
        self.path_node = self.get_path_node_info(r.find('Path').findall('PathNode'))
        result_atr.append(self.result_attrib)
        path.append(self.path_attrib)
        path_node.append(self.path_node)
    return result_atr, path, path_node
[ 19, 1571 ]
def METHOD_NAME(self, result, shape_list):
    outputs = result
    for idx, ops in enumerate(self.postprocessor):
        if idx == len(self.postprocessor) - 1:
            outputs = ops(outputs, shape_list, self.output_keys)
        else:
            outputs = ops(outputs)
    return outputs
[ 1710 ]
def METHOD_NAME(self, data):
    return {
        name: (board, state, player_1, player_2)
        for name, board, state, player_1, player_2 in [
            game.split(',') for game in data.decode().split('|')
        ]
    }
[ 1268, 365 ]
def METHOD_NAME(self):
    command = './bp-sim-64 --pcap -f cap2/dns.yaml --client_cfg automation/regression/cfg/client_cfg_vlan_mac.yaml -o generated/bp_sim_dns_vlans_gen.pcap'
    ret, out = run_command(command, cwd = CTRexScenario.scripts_path)
    if ret:
        print('\nOutput:\n%s' % out)
        raise Exception('Non zero return status of Valgrind gtests (%s)' % ret)
    compare_caps(output = os.path.join(CTRexScenario.scripts_path, 'generated/bp_sim_dns_vlans_gen.pcap'),
                 golden = 'functional_tests/golden/bp_sim_dns_vlans.pcap')
[ 9, 4848, 1919, 340, 2610 ]
def METHOD_NAME(
    X, orientation='bottom', labels=None, method='complete', metric='euclidean', colorscale=None
):
    d = Dendrogram(X, orientation, labels, colorscale, method=method, metric=metric)
    return {'layout': d.layout, 'data': d.data}
[ 129, -1 ]
def METHOD_NAME():
    quarantine_element_dict = {
        'scenarios': ['all'],
        'platforms': ['dummy platform'],
        'architectures': [],
        'simulations': ['dummy simulation', 'another simulation']
    }
    quarantine_element = QuarantineElement(
        platforms=['dummy platform'],
        architectures=[]
    )
    quarantine_element.scenarios = []
    quarantine_element.simulations = ['dummy simulation', 'another simulation']

    quarantine_data_qlist = [quarantine_element, quarantine_element_dict]
    quarantine_data = QuarantineData(quarantine_data_qlist)

    assert quarantine_data.qlist[0] == quarantine_data.qlist[1]
[ 9, 11895, 72, 176 ]
def METHOD_NAME(self):
    self._result = self._func(*self._args, **self._kwargs)
[ 22 ]
def METHOD_NAME(self):
    try:
        yield
    except IOError as err:
        if err.errno not in (errno.EPIPE, errno.EINVAL, errno.EBADF):
            raise
[ 5691, 31, 890, 168 ]
def METHOD_NAME(self, batch, batch_nb, train=True): # pylint: disable=W0613 """Common forward step between training and validation. The function of this method is to unpack the data given by the loader, forward the batch through the model and compute the loss. Pytorch-lightning handles all the rest. Args: batch: the object returned by the loader (a list of torch.Tensor in most cases) but can be something else. _batch_nb (int): The number of the batch in the epoch. _train (bool): Whether in training mode. Needed only if the training and validation steps are fundamentally different, otherwise, pytorch-lightning handles the usual differences. Returns: :class:`torch.Tensor` : The loss value on this batch. .. note:: This is typically the method to overwrite when subclassing ``System``. If the training and validation steps are somehow different (except for ``loss.backward()`` and ``optimzer.step()``), the argument ``train`` can be used to switch behavior. Otherwise, ``training_step`` and ``validation_step`` can be overwriten. """ inputs, targets = batch est_targets = self(inputs) loss = self.loss_func(est_targets, targets) return loss
[ 67, 367 ]
def METHOD_NAME(function_identifier):
    return "MAKE_ASYNCGEN_" + function_identifier
[ 19, 10153, 8054, 769 ]
def METHOD_NAME(self):
[ 9, 128 ]
def METHOD_NAME(self, config: Dict[str, Scalar]) -> NDArrays:
    """Returns the parameters of the current net."""
    return [val.cpu().numpy() for _, val in self.net.state_dict().items()]
[ 19, 386 ]
def METHOD_NAME(self):
    while (self.is_running):
        message = None
        try:
            # no sleep, since it's a block call with INTERVAL_SECONDS second timeout
            message = self.send_queue.get(True, INTERVAL_SECONDS)
        except Empty:
            # do nothing, if no command received.
            pass

        if message is not None:
            self._inner_send(message)
[ 353, 1751 ]
def METHOD_NAME(self, event_x, event_y):
[ 238, 1669 ]
def METHOD_NAME(self, __hint: int = ...) -> List[bytes]:
    counter = 0
    result = []
    while counter < __hint or __hint < 0:
        line = self.readline()
        counter += len(line)
        result += [line]
    return result
[ 5357 ]
def METHOD_NAME(self, enc_data: Dict[str, pd.DataFrame]) -> None:
    """
    Evaluate the quality of mixers within an ensemble of models.

    :param enc_data: Pre-processed and featurized data, split into the relevant train/test splits.
    """
    pass
[ 902, 4700 ]
def METHOD_NAME(self):
    es2 = ElementStats.from_data(["megnet_1", "megnet_3"], stats=["shifted_geometric_mean:100"])
    d = es2.transform_one("Fe2O3")
    assert d.shape == (1, 32)
[ 9, 5957, 314 ]
def METHOD_NAME(encoding): # Cache lookup entry = _cache.get(encoding, _unknown) if entry is not _unknown: return entry # Import the module: # # First try to find an alias for the normalized encoding # name and lookup the module using the aliased name, then try to # lookup the module using the standard import scheme, i.e. first # try in the encodings package, then at top-level. # norm_encoding = normalize_encoding(encoding) aliased_encoding = _aliases.get(norm_encoding) or \ _aliases.get(norm_encoding.replace('.', '_')) if aliased_encoding is not None: modnames = [aliased_encoding, norm_encoding] else: modnames = [norm_encoding] for modname in modnames: if not modname or '.' in modname: continue try: # Import is absolute to prevent the possibly malicious import of a # module with side-effects that is not in the 'encodings' package. mod = __import__('encodings.' + modname, fromlist=_import_tail, level=0) except ImportError: pass else: break else: mod = None try: getregentry = mod.getregentry except AttributeError: # Not a codec module mod = None if mod is None: # Cache misses _cache[encoding] = None return None # Now ask the module for the registry entry entry = getregentry() if not isinstance(entry, codecs.CodecInfo): if not 4 <= len(entry) <= 7: raise CodecRegistryError,\ 'module "%s" (%s) failed to register' % \ (mod.__name__, mod.__file__) if not hasattr(entry[0], '__call__') or \ not hasattr(entry[1], '__call__') or \ (entry[2] is not None and not hasattr(entry[2], '__call__')) or \ (entry[3] is not None and not hasattr(entry[3], '__call__')) or \ (len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \ (len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')): raise CodecRegistryError,\ 'incompatible codecs in module "%s" (%s)' % \ (mod.__name__, mod.__file__) if len(entry)<7 or entry[6] is None: entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) entry = codecs.CodecInfo(*entry) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if alias not in _aliases: _aliases[alias] = modname # Return the registry entry return entry
[ 1070, 559 ]
def METHOD_NAME(self):
    song = MusicFile(get_data_path("sine-110hz.flac"))
    self.failUnlessEqual(song("~#length"), 2)
    self.failIf(song("~replaygain_track_gain"))
    self._analyse_song(song)

    self.failUnlessAlmostEqual(song("~#replaygain_track_peak"), 1.0,
                               msg="Track peak should be 1.0")
    track_gain = song("~#replaygain_track_gain")
    self.failUnless(track_gain, msg="No Track Gain added")
    self.failUnless(re.match(r'\-[0-9]\.[0-9]{1,2}', str(track_gain)))

    # For one-song album, track == album
    self.failUnlessEqual(track_gain, song('~#replaygain_album_gain'))
[ 9, 902, -1 ]
def METHOD_NAME(self):
    """Sequence: Raise error if inputs are not iterables or Datasets"""
    # Error on construction with single Dataset
    with pytest.raises(TypeError):
        Sequence(Dataset())
    # Test for non-iterable
    with pytest.raises(TypeError):
        Sequence(1)
    # Test for invalid iterable contents
    with pytest.raises(TypeError):
        Sequence([1, 2])
[ 9, 532, 7319 ]
def METHOD_NAME(self, mock_get_definition):
    """
    Make sure we don't re-build the static filter definitions with each call.
    """
    self.client.get("/api/v1.0/filter-definitions/")
    self.client.get("/api/v1.0/filter-definitions/")
    self.assertEqual(mock_get_definition.call_count, 1)
[ 9, 4632, 527, 2706, 137, 175 ]
def METHOD_NAME(self):
    os.environ["LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE"] = "1"
    conn = LoggingConnection(host="example.com", port=80)
    r = self._get_mock_response("application/xml", "<foo><bar /></foo>")
    result = conn._log_response(r).replace("\r", "")
    self.assertTrue(EXPECTED_DATA_XML_PRETTY in result)
[ 9, 390, 17, 41, 885, 38, 399 ]
def METHOD_NAME(self):
    c = Config()
    h1 = c.get_hash()
    # save prev
    prev_log_format = str(c.general.log_format)
    # change
    c.general.log_format = ''
    h2 = c.get_hash()
    self.assertNotEqual(h1, h2)
    # set back
    c.general.log_format = prev_log_format
    h3 = c.get_hash()
    self.assertEqual(h1, h3)
[ 9, 200, 19, 1161, 9452, 1101, 99 ]
def METHOD_NAME(self) -> str:
    """
    Status of the notebook workspace. Possible values are: Creating, Online, Deleting, Failed, Updating.
    """
    return pulumi.get(self, "status")
[ 452 ]
def METHOD_NAME(self):
    self.setUpForConfig("nedis_htpl20f_heater.yaml", NEDIS_HTPL20F_PAYLOAD)
    self.subject = self.entities.get("climate")
    self.setUpTargetTemperature(
        TEMPERATURE_DPS,
        self.subject,
        min=15,
        max=35,
    )
    self.setUpBasicLock(
        LOCK_DPS,
        self.entities.get("lock_child_lock"),
    )
    self.setUpBasicNumber(
        TIMER_DPS,
        self.entities.get("number_timer"),
        max=1440,
        unit=UnitOfTime.MINUTES,
    )
    self.mark_secondary(["lock_child_lock", "number_timer"])
[ 0, 1 ]
def METHOD_NAME(): """Build and test first order linear and time-invariant coeff. ds """ ndof = 3 x0 = np.random.random(ndof) time = 1.2 ds_list = [] a_mat = np.random.random((ndof, ndof)) b_vec = np.random.random((ndof,)) ds_list.append(sk.FirstOrderLinearDS(x0)) ds_list.append(sk.FirstOrderLinearDS(x0, a_mat)) ds_list.append(sk.FirstOrderLinearDS(x0, a_mat, b_vec)) for ds in ds_list: assert ds.isLinear() assert ds.dimension() == ndof assert np.allclose(ds.x0(), x0) assert np.allclose(ds.x(), x0) assert np.allclose(ds.r(), 0.0) rhs = np.zeros_like(ds.x()) jac_ref = np.zeros((ndof, ndof), dtype=np.float64) if isinstance(ds.A(), np.ndarray): jac_ref += a_mat rhs += np.dot(a_mat, ds.x()) if isinstance(ds.b(), np.ndarray): rhs += ds.b() ds.computef(time, ds.x()) if ds.f() is not None: assert np.allclose(rhs, ds.f()) ds.initRhs(time) assert np.allclose(rhs, ds.rhs()) if ds.A() is not None: assert np.allclose(ds.jacobianRhsx(), jac_ref) assert np.allclose(ds.jacobianRhsx(), ds.jacobianfx())
[ 9, 865, 852, -1 ]
def METHOD_NAME():
    root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
    if root is not None:
        return Path(root)

    root = Path(jsonschema.__file__).parent.parent / "json"
    if not root.is_dir():  # pragma: no cover
        raise ValueError(
            (
                "Can't find the JSON-Schema-Test-Suite directory. "
                "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
                "variable or run the tests from alongside a checkout "
                "of the suite."
            ),
        )
    return root
[ 416, 482 ]
def METHOD_NAME(
    self, batch: Sequence[RequestPrepare]
) -> Mapping[str, "CreateArtifactFilesResponseFile"]:
    """Execute the prepareFiles API call.

    Arguments:
        batch: List of RequestPrepare objects

    Returns:
        dict of (save_name: ResponseFile) pairs where ResponseFile is a dict
        with an uploadUrl key. The value of the uploadUrl key is None if the
        file already exists, or a url string if the file should be uploaded.
    """
    return self._api.create_artifact_files([req.file_spec for req in batch])
[ 123, 2277 ]
def METHOD_NAME(context: Context):
    for row in parse_csv(context, "register/register.csv"):
        parse_register(context, row)
    for row in parse_csv(context, "register/register_name_history.csv"):
        parse_old_names(context, row)
    for row in parse_csv(context, "beneficial_owners/beneficial_owners.csv"):
        parse_beneficial_owners(context, row)
    for row in parse_csv(context, "officers/officers.csv"):
        parse_officers(context, row)
    for row in parse_csv(context, "members/members.csv"):
        parse_members(context, row)
    for row in parse_csv(context, "members/members_joint_owners.csv"):
        parse_joint_members(context, row)
[ 5520 ]
def METHOD_NAME(
    reference: types.Reconstruction, candidate: types.Reconstruction
) -> np.ndarray:
    common_shots = set(reference.shots.keys()).intersection(set(candidate.shots.keys()))
    errors = []
    for s in common_shots:
        pose1 = reference.shots[s].pose.get_origin()
        pose2 = candidate.shots[s].pose.get_origin()
        errors.append(pose1 - pose2)
    return np.array(errors)
[ 195, 1096 ]
def METHOD_NAME(cls, object_id):
    '''Return the number of followers of the object.'''
    return cls._get_followers(object_id).count()
[ 17614, 29 ]
def METHOD_NAME(self, job): preamble = [ self._format_option(job.name, '-J {0}'), self._format_option(job.stdout, '-o {0}'), self._format_option(job.stderr, '-e {0}'), ] if job.num_tasks is not None and job.num_tasks_per_node is not None: num_nodes = job.num_tasks // job.num_tasks_per_node preamble.append(self._format_option(num_nodes, '-nnodes {0}')) else: preamble.append(self._format_option(job.num_tasks, '-n {0}')) if job.num_cpus_per_task is not None: preamble.append(self._format_option(job.num_cpus_per_task, '-R "affinity[core({0})]"')) # add job time limit in minutes if job.time_limit is not None: preamble.append( f'{self._prefix} -W {int(job.time_limit // 60)}' ) for opt in job.sched_access: preamble.append(f'{self._prefix} {opt}') # emit the rest of the options options = job.options + job.cli_options for opt in options: if opt.startswith('#'): preamble.append(opt) else: preamble.append(self._prefix + ' ' + opt) if job.exclusive_access: preamble.append(f'{self._prefix} -x') # Filter out empty statements before returning return list(filter(None, preamble))
[ 2648, 11408 ]
def METHOD_NAME(tmp_path):
    """
    Saving a palette-based file with transparency to WebP format should work,
    and be similar to the original file.
    """
    temp_file = str(tmp_path / "temp.webp")
    file_path = "Tests/images/transparent.gif"
    with Image.open(file_path) as im:
        im.save(temp_file)

    with Image.open(temp_file) as image:
        assert image.mode == "RGBA"
        assert image.size == (200, 150)
        assert image.format == "WEBP"

        image.load()
        image.getdata()

        with Image.open(file_path) as im:
            target = im.convert("RGBA")
        assert_image_similar(image, target, 25.0)
[ 9, 77, 1950, 854, 10792 ]
def METHOD_NAME(self, runtime_results):
    """Postprocess the runtime results for PaddleClasModel

    :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
    :return: list of ClassifyResult(If the runtime_results is predict by batched samples,
             the length of this list equals to the batch size)
    """
    return self._postprocessor.METHOD_NAME(runtime_results)
[ 22 ]
def METHOD_NAME(self):
    """
    This test covers the case when the gppkg has been installed on the cluster
    and the rpm has not been installed properly on one of the segments.
    The gppkg however exists in the archive on the segment.
    """
    self.install(self.alpha_spec.get_filename())

    segment_host_list = get_host_list()[1]
    self.assertTrue(len(segment_host_list) > 0)
    host = segment_host_list[0]
    self.uninstall_rpm_remotely(self.A_spec.get_filename(), host)

    run_command(self.clean_command)

    self.check_remote_rpm_install(self.A_spec.get_package_name(), host)
    self.assertTrue(CheckRemoteFile(os.path.join(ARCHIVE_PATH, self.alpha_spec.get_filename()), host).run())
[ 14634, 654, 3466, 69, 4373 ]
def METHOD_NAME(self):
    self.source.METHOD_NAME()
    log.info("Finished")
[ 158 ]
def METHOD_NAME(self):
    """
    Returns:
        (str): The version of the insights client.

    .. note::
        This attribute returns a short version of the insights client only, to
        get the full version of the ``insights-client`` package, please use the
        :class:`insights.parsers.installed_rpms.InstalledRpms` Parser instead.
    """
    return self['client_version']
[ 340, 281 ]
def METHOD_NAME(self):
    unit = MockUnit(source="Foo\x1b")
    fix = RemoveControlChars()
    self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
    self.assertEqual(fix.fix_target(["Bar\x1b"], unit), (["Bar"], True))
    self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
[ 9, -1 ]
def METHOD_NAME(**kwargs: Any) -> bindings.v1GetExperimentResponse:
    """Get an experiment from a fixture and optionally override some fields.

    Load a sample experiment from a fixture. It's assumed that generally a caller
    cares only that the response is well-formed. If instead the caller cares about
    any particular fields, they can override them by passing them as keyword
    arguments.

    Args:
        **kwargs: Fields to override in the experiment.

    Returns:
        A bindings.v1GetExperimentResponse object with the experiment.

    NOTE: The returned object is a bindings type, *not* a ExperimentReference.
    """
    with open(FIXTURES_DIR / "experiment.json") as f:
        resp = bindings.v1GetExperimentResponse.from_json(json.load(f))

    for k, v in kwargs.items():
        setattr(resp.experiment, k, v)

    return resp
[ 734, 19, 2355 ]
def METHOD_NAME(self, version_str: VersionStr, release_info: List[dict]) -> None:
    """
    Args:
        version_str: The version we have info on.
        release_info: For each release we may have a list of files available.
    """
[ 238, 586 ]
def METHOD_NAME(self, obj):  # pylint: disable=unused-argument
    """Get UES for current user or 0."""
    # Check if the uues annotation is available
    if hasattr(obj, 'uues'):
        return obj.uues[0].status if obj.uues else 0
    return None
[ 19, 21, -1 ]
def METHOD_NAME(message):
    return [token.text for token in message.get("tokens", [])]
[ 1735, 47, 277 ]
def METHOD_NAME(domain):
    by_username = defaultdict(list)
    for user_id, username in get_all_user_id_username_pairs_by_domain(domain, include_web_users=False):
        by_username[username].append(user_id)

    dupes = {}
    for username, user_ids in by_username.items():
        if len(user_ids) > 1:
            dupes[username] = user_ids
    return dupes
[ 19, 1119, 3467 ]
def METHOD_NAME(aa, bb):
    assert aa.shape == a.shape
    tvm.testing.assert_allclose(aa.numpy(), a.numpy() + 1)
    aa.copyto(bb)
[ 1192, 8203, 877, 7640 ]
def METHOD_NAME(self) -> IpProperties | None:
    """Return ipv6 properties if any."""
    return self._ipv6
[ 1899 ]