text (string, lengths 15 to 7.82k)
ids (sequence of int, lengths 1 to 7)
def METHOD_NAME(_): return { "avatar": (str, none_type), "banner": (int, none_type), "created_at": (datetime,), "description": (str, none_type), "handle": (str,), "hidden_modules": ([str],), "link_count": (int,), "modified_at": (datetime,), "name": (str,), "summary": (str, none_type), "user_count": (int,), "visible_modules": ([str],), }
[ 4597, 119 ]
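A minimal sketch of how a type map like the one above can drive payload validation in a generated API client. The helper name is hypothetical, and none_type is assumed to be an alias for type(None), as in generated OpenAPI clients.

none_type = type(None)

def validate_payload(payload: dict, type_map: dict) -> None:
    for key, allowed in type_map.items():
        if key not in payload:
            continue
        if isinstance(allowed[0], list):  # an entry like ([str],) means "list of str"
            ok = isinstance(payload[key], list) and all(
                isinstance(v, allowed[0][0]) for v in payload[key])
        else:
            ok = isinstance(payload[key], allowed)  # tuple of accepted types
        if not ok:
            raise TypeError(f"{key}: expected {allowed!r}")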
def METHOD_NAME( self, execute_task, download_path: Path, sftp_fs: TestSFTPFileSystem ): remote_file: Path = sftp_fs.create_file('file.mkv', 100) execute_task('sftp_download_file_delete_origin_true') assert not remote_file.exists()
[ 9, 1130, 136, 171, 61, 34 ]
def METHOD_NAME(x): """ Return the contribution to the loss function of residual x. Placeholder for robust methods to come. """ return x**2
[ 1572, 717 ]
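A hedged sketch of the kind of robust method the docstring above anticipates: a Huber-style loss that agrees with x**2 near zero but grows only linearly for outliers. The threshold delta is an illustrative choice, not from the original code.

def huber(x, delta=1.0):
    a = abs(x)
    # Quadratic inside the delta band, linear outside, continuous at the join.
    return x**2 if a <= delta else 2 * delta * a - delta**2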
def METHOD_NAME(self) -> str: """ The geo-location where the resource lives """ return pulumi.get(self, "location")
[ 708 ]
def METHOD_NAME(self): if CONTIPPSETUP.niInterfaceLib is None: try: CONTIPPSETUP.niInterfaceLib = CDLL("libNiInterface.so") except: Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot load libNiInterface.so') raise mdsExceptions.TclFAILED_ESSENTIAL
[ 1032, 100 ]
async def METHOD_NAME(self) -> None: await self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(significand, exponent, stream=None): """Return a new array of floating point values composed from the entries of `significand` and `exponent`, paired together as `result = significand * 2**exponent`. """ if not significand.flags.forc or not exponent.flags.forc: raise RuntimeError( "only contiguous arrays may " "be used as arguments to this operation" ) result = gpuarray.GPUArray(significand.shape, significand.dtype) func = elementwise.get_ldexp_kernel() func.prepared_async_call( significand._grid, significand._block, stream, significand.gpudata, exponent.gpudata, result.gpudata, significand.mem_size, ) return result
[ 6199 ]
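Hypothetical usage of the kernel above, kept commented because it needs a live PyCUDA context; the values follow result = significand * 2**exponent.

# import numpy as np
# import pycuda.autoinit
# from pycuda import gpuarray
# sig = gpuarray.to_gpu(np.array([0.5, 1.5], dtype=np.float32))
# exp = gpuarray.to_gpu(np.array([3.0, 1.0], dtype=np.float32))
# METHOD_NAME(sig, exp).get()   # -> array([4., 3.], dtype=float32)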
def METHOD_NAME(self, builder): if self.parent: return self.parent.METHOD_NAME(builder) self.definitions.add(builder)
[ 238, 1208 ]
def METHOD_NAME(index: int) -> qulacs_core.ClsOneQubitGate: ...
[ 2079 ]
def METHOD_NAME( cls: typing.Type[object], class_path: str, default: typing.Any = __DEFAULT__): """ Walk a class's annotations along class_path. cls: list, dict, instance or class class_path: dotted data path, e.g. attr1.attr2.attr3 default: if provided, it is returned when the path cannot be resolved; otherwise ValueError is raised. Using the example path you get attr3 of attr2 of attr1 of the class you provide as cls. """ current = cls for param_name in class_path.split('.'): value = __DEFAULT__ if hasattr(current, '__annotations__'): value = current.__annotations__.get(param_name, __DEFAULT__) if value is __DEFAULT__: if default is __DEFAULT__: raise ValueError(f"Can't find {class_path} in data {cls.__name__}") return default current = value return current
[ 19, 2, 604, 157 ]
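Illustrative classes (not from the original) showing what the annotation walker above resolves:

class Inner:
    value: int

class Outer:
    inner: Inner

# METHOD_NAME(Outer, "inner.value")                 -> <class 'int'>
# METHOD_NAME(Outer, "inner.missing", default=None) -> None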
def METHOD_NAME(self): return self.C_hvdc_bus_f * np.arange(self.C_hvdc_bus_f.shape[1])
[ 19, 2583, 1894, 474 ]
def METHOD_NAME(name, size, ty=''): ty_annot = ' : ' + ty if ty else '' return ', '.join(f'%{name}{i}{ty_annot}' for i in range(size))
[ 811 ]
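The generator above renders comma-separated SSA-style argument lists; an equivalent standalone expression for name='arg', size=3, ty='i32':

print(', '.join(f'%arg{i} : i32' for i in range(3)))
# -> %arg0 : i32, %arg1 : i32, %arg2 : i32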
def METHOD_NAME(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) nll_loss_sum = utils.item( sum(log.get("nll_loss", 0) for log in logging_outputs) ) alignment_loss_sum = utils.item( sum(log.get("alignment_loss", 0) for log in logging_outputs) ) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_scalar( "alignment_loss", alignment_loss_sum / sample_size / math.log(2), sample_size, round=3, ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) )
[ 332, 1097 ]
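The divisions by math.log(2) above convert summed natural-log losses to bits per token, so the derived ppl is 2 raised to the per-token nll in bits. A tiny self-contained check with a made-up loss value:

import math
nll_nats_per_token = math.log(4)          # pretend average loss in nats
bits = nll_nats_per_token / math.log(2)   # -> 2.0 bits per token
assert abs(2 ** bits - 4.0) < 1e-9        # perplexity of 4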
def METHOD_NAME(self): comments = self.ogr_project.get_pr(4).get_comments( filter_regex="^regex", author="mfocko" ) assert len(comments) == 1 assert comments[0].body.endswith("test")
[ 9, 1933, 3528, 2997, 211 ]
def METHOD_NAME(self, registry, serializer): with pytest.raises(DeserializationError): registry.deserialize(MyObj, {"val": 1})
[ 9, 13057, 427, 241, 217, 654, 281 ]
def METHOD_NAME(self, app): self.client_id = app.config["SALESFORCE_CLIENT_ID"] self.username = app.config["SALESFORCE_USERNAME"] self.password = app.config["SALESFORCE_PASSWORD"] self.security_token = app.config["SALESFORCE_SECURITY_TOKEN"] self.domain = app.config["SALESFORCE_DOMAIN"] self.generic_account_id = app.config["SALESFORCE_GENERIC_ACCOUNT_ID"]
[ 176, 991 ]
def METHOD_NAME(self): form = { 'id': 'form_id', 'xmlns': TARGET_XMLNS, 'domain': 'champ_cameroon', 'form': { 'locations': { 'district': 'test district', 'cbo': 'test cbo', 'clienttype': 'fsw_test_client_type', 'userpl': 'test userpl' }, 'fiscal_year': '2017', 'target_kp_prev': 15, 'target_htc_tst': 54, 'target_htc_pos': 35, 'target_care_new': 16, 'target_tx_new': 11, 'target_tx_undetect': 20 } } district = self.get_expression('district', 'string') cbo = self.get_expression('cbo', 'string') clienttype = self.get_expression('clienttype', 'string') userpl = self.get_expression('userpl', 'string') fiscal_year = self.get_expression('fiscal_year', 'integer') target_kp_prev = self.get_expression('target_kp_prev', 'integer') target_htc_tst = self.get_expression('target_htc_tst', 'integer') target_htc_pos = self.get_expression('target_htc_pos', 'integer') target_care_new = self.get_expression('target_care_new', 'integer') target_tx_new = self.get_expression('target_tx_new', 'integer') target_tx_undetect = self.get_expression('target_tx_undetect', 'integer') self.assertEqual( district(form, EvaluationContext(form, 0)), 'test district' ) self.assertEqual( cbo(form, EvaluationContext(form, 0)), 'test cbo' ) self.assertEqual( clienttype(form, EvaluationContext(form, 0)), 'fsw' ) self.assertEqual( userpl(form, EvaluationContext(form, 0)), 'test userpl' ) self.assertEqual( fiscal_year(form, EvaluationContext(form, 0)), '2017' ) self.assertEqual( target_kp_prev(form, EvaluationContext(form, 0)), 15 ) self.assertEqual( target_htc_tst(form, EvaluationContext(form, 0)), 54 ) self.assertEqual( target_htc_pos(form, EvaluationContext(form, 0)), 35 ) self.assertEqual( target_care_new(form, EvaluationContext(form, 0)), 16 ) self.assertEqual( target_tx_new(form, EvaluationContext(form, 0)), 11 ) self.assertEqual( target_tx_undetect(form, EvaluationContext(form, 0)), 20 )
[ 9, 1030, 1029, 748 ]
def METHOD_NAME(self) -> ConflictTheater: with self.descriptor_path.open(encoding="utf-8") as descriptor_file: data = yaml.safe_load(descriptor_file) return ConflictTheater( TERRAINS_BY_NAME[data.get("pydcs_name", data["name"])], load_landmap(self.landmap_path), datetime.timezone(datetime.timedelta(hours=data["timezone"])), self._load_seasonal_conditions(data["climate"]), self._load_daytime_map(data["daytime"]), )
[ 557 ]
def METHOD_NAME(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): cache_dir = tmp_path / "cache" output_sqlite_path = os.path.join(cache_dir, "tmp.sql") dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read() SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write() original_sql = iter_sql_file(sqlite_path) expected_sql = iter_sql_file(output_sqlite_path) for row1, row2 in zip(original_sql, expected_sql): assert row1 == row2
[ 9, 126, 24, 1621, 15882 ]
def METHOD_NAME(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value
[ 214, 584, 280, 485 ]
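A short sketch of the env-flag reader above in use; strtobool semantics (distutils.util.strtobool, which accepts values like yes/no/1/0 and raises ValueError otherwise) are assumed, and the variable names are illustrative.

import os
os.environ["MY_FLAG"] = "yes"
# METHOD_NAME("MY_FLAG")           -> 1 (truthy)
# METHOD_NAME("UNSET_FLAG", False) -> False (the default is used)
# Setting MY_FLAG to "maybe" would raise:
#   ValueError: If set, MY_FLAG must be yes or no.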
def METHOD_NAME(self, test_data, device, args): model = self.model model.to(device) model.eval() metrics = { "test_correct": 0, "test_loss": 0, "test_precision": 0, "test_recall": 0, "test_total": 0, } """ stackoverflow_lr is the task of multi-label classification please refer to the following links for detailed explanations on cross-entropy and the corresponding implementation of tff research: https://towardsdatascience.com/cross-entropy-for-classification-d98e7f974451 https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/optimization/stackoverflow_lr/federated_stackoverflow_lr.py#L131 """ if args.dataset == "stackoverflow_lr": criterion = nn.BCELoss(reduction="sum").to(device) else: criterion = nn.CrossEntropyLoss().to(device) with torch.no_grad(): for batch_idx, (x, target) in enumerate(test_data): x = x.to(device) target = target.to(device) pred = model(x) loss = criterion(pred, target) # pylint: disable=E1102 if args.dataset == "stackoverflow_lr": predicted = (pred > 0.5).int() correct = predicted.eq(target).sum(axis=-1).eq(target.size(1)).sum() true_positive = ((target * predicted) > 0.1).int().sum(axis=-1) precision = true_positive / (predicted.sum(axis=-1) + 1e-13) recall = true_positive / (target.sum(axis=-1) + 1e-13) metrics["test_precision"] += precision.sum().item() metrics["test_recall"] += recall.sum().item() else: _, predicted = torch.max(pred, 1) correct = predicted.eq(target).sum() metrics["test_correct"] += correct.item() metrics["test_loss"] += loss.item() * target.size(0) if len(target.size()) == 1: # metrics["test_total"] += target.size(0) elif len(target.size()) == 2: # for tasks of next word prediction metrics["test_total"] += target.size(0) * target.size(1) return metrics
[ 9 ]
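The multi-label precision/recall arithmetic above, replayed on a tiny made-up batch:

import torch
target = torch.tensor([[1, 0, 1]])
predicted = torch.tensor([[1, 1, 0]])
true_positive = ((target * predicted) > 0.1).int().sum(axis=-1)   # -> 1
precision = true_positive / (predicted.sum(axis=-1) + 1e-13)      # -> 0.5
recall = true_positive / (target.sum(axis=-1) + 1e-13)            # -> 0.5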
def METHOD_NAME(self, sport_id): """Get the sport with the specified id. If no sport with the given id exists then None is returned.""" if sport_id is None: raise ValueError("Sport id cannot be None") try: return self._ddbb.session.query(Sport).filter(Sport.id == sport_id).one() except NoResultFound: return None
[ 19, 8053 ]
def METHOD_NAME(self): super().METHOD_NAME() vocab = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.special_tokens_map = { "unk_token": "[UNK]", "sep_token": "[SEP]", "bos_token": "[SEP]", "eos_token": "[SEP]", "cls_token": "[CLS]", "x_sep_token": "[X_SEP]", "pad_token": "[PAD]", "mask_token": "[MASK]", } self.vocab_file = os.path.join(self.tmpdirname, ProphetNetTokenizer.resource_files_names["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
[ 0, 1 ]
def METHOD_NAME(self, index=np.s_[:]): """ @brief Compute the polygon areas via the divergence theorem @note Note that the computation below is not convenient for computing the areas of only a subset of cells """ NC = self.number_of_cells() node = self.entity('node') edge = self.entity('edge') edge2cell = self.ds.edge_to_cell() t = self.edge_tangent() val = t[:, 1]*node[edge[:, 0], 0] - t[:, 0]*node[edge[:, 0], 1] a = np.zeros(NC, dtype=self.ftype) np.add.at(a, edge2cell[:, 0], val) isInEdge = (edge2cell[:, 0] != edge2cell[:, 1]) np.add.at(a, edge2cell[isInEdge, 1], -val[isInEdge]) a /= 2.0 return a[index]
[ 118, 690 ]
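The routine above is Green's theorem in disguise: for each cell, area = 0.5 * sum(x_i * y_{i+1} - x_{i+1} * y_i) over its boundary edges. A minimal standalone check on a unit square (values are illustrative):

import numpy as np
pts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
x, y = pts[:, 0], pts[:, 1]
area = 0.5 * np.sum(x * np.roll(y, -1) - np.roll(x, -1) * y)
assert abs(area - 1.0) < 1e-12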
def METHOD_NAME(m, i): return ( m.QuantitySubQuotaSold[i] + m.QuantitySuperQuotaSold[i] - (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0 )
[ 1467, 4757, -1, 446 ]
def METHOD_NAME(self): Utility.console_log('{} connected to server'.format(self.address))
[ 276, 2261 ]
def METHOD_NAME(self, method): # in theory, we can override this with # profile/etc info return getattr(self.ts, method)
[ 19, 103 ]
def METHOD_NAME(): self.session_dict[request.sid].cb('data-from-blockchain')
[ 276, 417 ]
def METHOD_NAME(y, i, q): r = y + 0.948262*i + 0.624013*q g = y - 0.276066*i - 0.639810*q b = y - 1.105450*i + 1.729860*q #if r < 0.0: r = 0.0 r = r*(r > 0.0) #if g < 0.0: g = 0.0 g = g*(g > 0.) #if b < 0.0: b = 0.0 b = b*(b > 0.) #if r > 1.0: r = 1.0 r = np.minimum(r, 1.0) #if g > 1.0: g = 1.0 g = np.minimum(g, 1.0) #if b > 1.0: b = 1.0 b = np.minimum(b, 1.0) return (r, g, b)
[ 4613, 24, 2310 ]
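Sanity check for the YIQ-to-RGB converter above, assuming the definition is in scope under its masked name: with zero chroma the matrix collapses to r = g = b = y, and the clamps leave values in [0, 1] untouched.

import numpy as np
r, g, b = METHOD_NAME(np.float64(0.5), np.float64(0.0), np.float64(0.0))
assert r == g == b == 0.5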
def METHOD_NAME(start_binary): start_binary(TELESCOPE_BINARY) out = gdb.execute("telescope -r 2", to_string=True).splitlines() rsp = pwndbg.gdblib.regs.rsp assert out == ["00:0000│ %#x ◂— 0x0" % (rsp - 8), "01:0008│ rsp %#x ◂— 0x1" % rsp]
[ 9, 11919, 462, 41, 85, 947, 29 ]
def METHOD_NAME(resultsdir): """ Retrieves the job pwd from the results directory. """ recorded_pwd = _retrieve(resultsdir, PWD_FILENAME) if recorded_pwd is None: return None with open(recorded_pwd, "r", encoding="utf-8") as pwd_file: return pwd_file.read()
[ 404, 13107 ]
def METHOD_NAME(): cases = [('abs(x)', 'Abs(x)'), ('max(x, y)', 'Max(x, y)'), ('min(x, y)', 'Min(x, y)'), ('pow(x, y)', 'Pow(x, y)')] for built_in_func_call, sympy_func_call in cases: assert parse_expr(built_in_func_call) == parse_expr(sympy_func_call) assert parse_expr('pow(38, -1)') == Rational(1, 38) # issue sympy/sympy#22322 assert parse_expr('abs(-42)', evaluate=False) == Abs(-42, evaluate=False)
[ 9, 4298 ]
def METHOD_NAME(network, outdir, dotFilename): callgraph = pydot.Dot(graph_type="graph", rankdir="LR") _dot_create(network, callgraph) dot_filename = os.path.join(outdir, dotFilename) callgraph.write(dot_filename) try: # dot crashes if the figure is extremely wide. # So avoid terminating simulation unnecessarily callgraph.write_svg(dot_filename + ".svg", prog="neato") callgraph.write_pdf(dot_filename + ".pdf", prog="neato") except: warn("failed to generate dot output from %s", dot_filename)
[ 74, 1903 ]
def METHOD_NAME(rho, sigma, N=3, method="rouwenhorst", m=2): """ Discretize a VAR(1) into a Markov chain. The autoregression matrix is supposed to be a scalar. :param rho: :param sigma: :param N: :param method: :param m: :return: """ # rho is assumed to be a scalar # sigma is a positive symmetric matrix # N number of points in each non-degenerate dimension # m : standard deviations to approximate import scipy.linalg from itertools import product d = sigma.shape[1] sigma = sigma.copy() zero_columns = np.where(sigma.sum(axis=0) == 0)[0] for i in zero_columns: sigma[i, i] = 1 L = scipy.linalg.cholesky(sigma) N = int(N) if method == "tauchen": [nodes_1d, probas_1d] = tauchen(N, 0, rho, 1, m=m) elif method == "rouwenhorst": [nodes_1d, probas_1d] = rouwenhorst(rho, 1, N) markov_nodes = np.array(list(product(*([nodes_1d] * d)))).T markov_indices = np.array(list(product(*([range(N)] * d)))).T markov_nodes = np.dot(L, markov_nodes) transition_matrix = 1 for i in range(d): transition_matrix = np.kron(transition_matrix, probas_1d) markov_nodes = np.ascontiguousarray(markov_nodes.T) for i in zero_columns: markov_nodes[:, i] = 0 return [markov_nodes, transition_matrix]
[ 9544, 18093 ]
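The joint transition matrix above is a Kronecker product of identical 1-D chains, which encodes independence of the rotated shocks across dimensions. An illustrative shape check with a made-up 2-state chain:

import numpy as np
P1 = np.array([[0.9, 0.1], [0.1, 0.9]])   # hypothetical 1-D chain, N = 2
P = np.kron(P1, P1)                        # d = 2 -> a 4 x 4 joint chain
assert P.shape == (4, 4)
assert np.allclose(P.sum(axis=1), 1.0)     # rows still sum to one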
def METHOD_NAME(): t_coeff = np.array([0.0, 1.0, 1.0, 1.0, 0.0]) base_length = 2.0 position = np.linspace(0, base_length, 10) my_spline, ctr_pts, ctr_coeffs = _bspline(t_coeff, base_length) correct_values = np.array( [ 0.0, 0.52949246, 0.82853224, 0.96296296, 0.99862826, 0.99862826, 0.96296296, 0.82853224, 0.52949246, 0.0, ] ) test_values = my_spline(position) assert_allclose(test_values, correct_values, atol=Tolerance.atol())
[ 9, 16500, 3212 ]
def METHOD_NAME(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(self): images = [] labels = [] import csv with open(self.label_path, "r") as f: reader = csv.reader(f) # skip the header _ = next(reader) for row in reader: _img = row[0] _label = row[5:] _img = os.path.join(*_img.split("/")[2:]) for i in range(len(_label)): if _label[i] == "" or float(_label[i]) == -1: _label[i] = 0 if self.policy == "zeros" else 1 else: _label[i] = int(float(_label[i])) images.append(_img) labels.append(_label) return images, labels
[ 56, 4146 ]
def METHOD_NAME(self) -> bool: ...
[ 137, 4143 ]
def METHOD_NAME(self): self.cpp_info.set_property("cmake_file_name", "oatpp-postgresql") self.cpp_info.set_property("cmake_target_name", "oatpp::oatpp-postgresql") # TODO: back to global scope in conan v2 once legacy generators removed self.cpp_info.components["_oatpp-postgresql"].includedirs = [ os.path.join("include", f"oatpp-{self.version}", "oatpp-postgresql") ] self.cpp_info.components["_oatpp-postgresql"].libdirs = [os.path.join("lib", f"oatpp-{self.version}")] if self.settings.os == "Windows" and self.options.shared: self.cpp_info.components["_oatpp-postgresql"].bindirs = [os.path.join("bin", f"oatpp-{self.version}")] else: self.cpp_info.components["_oatpp-postgresql"].bindirs = [] self.cpp_info.components["_oatpp-postgresql"].libs = ["oatpp-postgresql"] if self.settings.os in ["Linux", "FreeBSD"]: self.cpp_info.components["_oatpp-postgresql"].system_libs = ["pthread"] # TODO: to remove in conan v2 once legacy generators removed self.cpp_info.filenames["cmake_find_package"] = "oatpp-postgresql" self.cpp_info.filenames["cmake_find_package_multi"] = "oatpp-postgresql" self.cpp_info.names["cmake_find_package"] = "oatpp" self.cpp_info.names["cmake_find_package_multi"] = "oatpp" self.cpp_info.components["_oatpp-postgresql"].names["cmake_find_package"] = "oatpp-postgresql" self.cpp_info.components["_oatpp-postgresql"].names["cmake_find_package_multi"] = "oatpp-postgresql" self.cpp_info.components["_oatpp-postgresql"].set_property("cmake_target_name", "oatpp::oatpp-postgresql") self.cpp_info.components["_oatpp-postgresql"].requires = ["oatpp::oatpp", "libpq::libpq"]
[ 360, 100 ]
def METHOD_NAME(self, **kwargs): # TODO; code needs to be refactored. # example : hdl_identifer not used anywhere # original code from https://github.com/EUDAT-B2SHARE/b2share/pull/1813 input_record = request.view_args['record_id'] if input_record is None: return abort(400) linkset = [] try: rec_pid = RecordUUIDProvider.METHOD_NAME(input_record).pid record = Record.get_record(rec_pid.object_uuid) except: return abort(404, "Record not found!") try: landingpage = current_app.config.METHOD_NAME( 'PREFERRED_URL_SCHEME', '') + '://' + current_app.config.METHOD_NAME( 'JSONSCHEMAS_HOST', '') + '/records/' + input_record citations = [] doi_identifier = None for p in record.METHOD_NAME('_pid'): if p.METHOD_NAME('type') == 'DOI': doi_identifier = p.METHOD_NAME('value') cite = {'href' : 'https://doi.org/' + doi_identifier} citations.append(cite) if doi_identifier is None and record.METHOD_NAME('alternate_identifiers') is not None: for ai in record.METHOD_NAME('alternate_identifiers'): if ai.METHOD_NAME('alternate_identifier_type') == 'DOI': doi_identifier = ai.METHOD_NAME('alternate_identifier') cite = {'href' : 'https://doi.org/' + doi_identifier} citations.append(cite) hdl_identifer = None if doi_identifier is None: #Identifier is required current_app.logger.error('No alternate_identifiers for record {record_id}'.format(record_id=rec_pid.object_uuid)) rec_license = record.METHOD_NAME('license') license = {} if rec_license is not None: license = { 'href': rec_license.METHOD_NAME('license_uri')} #Todo: According to Signposting spec: License cardinality is 1. What we will do for the following case? # https://b2share.eudat.eu/api/oai2d?verb=ListRecords&metadataPrefix=oai_dc # <dc:rights>info:eu-repo/semantics/openAccess</dc:rights> # <dc:rights>GNU General Public License 3 (GPL-3.0)</dc:rights> describedbys = [] if doi_identifier is not None: describedby = {'href':'https://citation.crosscite.org/format?style=bibtex&doi=' + doi_identifier, 'type':'application/x-bibtex'} describedbys.append(describedby) items = [] fs = [] bucket = None if len(record.METHOD_NAME('_files', [])) > 0: # if record contains files, check for user's permissions to access those files bucket = record.METHOD_NAME('_files')[0].METHOD_NAME("bucket") user_has_permission = \ allow_public_file_metadata(record) if bucket \ is None else files_permission_factory( bucket, 'bucket-read').can() if user_has_permission: for file in record.METHOD_NAME('_files', []): fmimetype = fm.ObjectVersion.METHOD_NAME( file.METHOD_NAME('bucket'), file.METHOD_NAME('key'), file.METHOD_NAME('version_id')).mimetype file_location = current_app.config.METHOD_NAME( 'PREFERRED_URL_SCHEME', '') + '://' + current_app.config.METHOD_NAME( 'JSONSCHEMAS_HOST', '') + '/api/files/' + file.METHOD_NAME( 'bucket') + '/' + file.METHOD_NAME('key') file.update({'file-location': file_location}) item_file = {'href': file_location, 'type':fmimetype} items.append(item_file) item_anchor = {'anchor':file_location, 'collection':[{'href':landingpage, 'type':'text/html'}]} fs.append(item_anchor) types = [] type_about_page = {'href':'https://schema.org/AboutPage'} types.append(type_about_page) element = {'anchor': landingpage, 'type':types, 'cite-as': citations, 'item':items, 'describedby':describedbys, 'license':license} linkset.append(element) for a in fs: linkset.append(a) return {'linkset': linkset} except: return abort(404)
[ 19 ]
def METHOD_NAME(self): self.bdist_base = None self.plat_name = None self.formats = None self.dist_dir = None self.skip_build = 0 self.group = None self.owner = None
[ 15, 1881 ]
def METHOD_NAME(self): """Check different error conditions. """ for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE)
[ 964 ]
def METHOD_NAME(self, dashboard_id, cell, **kwargs): ...
[ 1276, 4774, 147, 383, 41, 721, 100 ]
def METHOD_NAME(): return MockIOManager()
[ 248, 249, 722 ]
def METHOD_NAME(a: BlockAccess | Right, curr_time: datetime): if a.duration and a.accessible_to: return min(a.duration, a.accessible_to - curr_time) return a.duration
[ 19, 2205, 4607 ]
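Stand-in values showing the cutoff logic above; BlockAccess/Right are assumed to carry a timedelta duration and a datetime accessible_to.

from datetime import datetime, timedelta
duration = timedelta(hours=2)
accessible_to = datetime(2024, 1, 1, 13)   # hypothetical expiry
curr_time = datetime(2024, 1, 1, 12)
remaining = min(duration, accessible_to - curr_time)
assert remaining == timedelta(hours=1)     # the expiry wins over the quota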
def METHOD_NAME(self): new_dict = self.gruneisen_obj.as_dict() self.gruneisen_obj2 = GruneisenParameter.from_dict(new_dict)
[ 9, 6524, 7040 ]
def METHOD_NAME(self): ...
[ 86, 459 ]
def METHOD_NAME(sender, **_kwargs): request = sender.app.webapp_bootstrap() sender.app.request = request
[ 904, 1794 ]
def METHOD_NAME(name): return name.replace(" ", "_").lower()
[ 1907 ]
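The normalizer above is a plain slug helper:

assert "My Report Name".replace(" ", "_").lower() == "my_report_name"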
def METHOD_NAME(self): return "MgmtErrorFormat"
[ 168, 275 ]
def METHOD_NAME(self): pass
[ 241, 43, 452 ]
def METHOD_NAME(self): self.dialog.ui.doubleSpinBoxCarrierFreq.setValue(1e9) self.dialog.ui.doubleSpinBoxCarrierFreq.editingFinished.emit() self.assertEqual(self.dialog.current_modulator.carrier_freq_hz, 1e9) self.dialog.ui.doubleSpinBoxCarrierPhase.setValue(100) self.dialog.ui.doubleSpinBoxCarrierPhase.editingFinished.emit() self.assertEqual(self.dialog.current_modulator.carrier_phase_deg, 100)
[ 9, 2004, 2591 ]
async def METHOD_NAME(): # total roundtrip takes ~5s print('heavylifting other io-bound jobs, e.g. download, upload, file io') await asyncio.sleep(5) print('heavylifting done after 5s')
[ 5893, 38 ]
def METHOD_NAME(self, level_req, string): if self.debug_level >= level_req: self.printfunc(string)
[ 290 ]
def METHOD_NAME(self, state, dict_, passive=attributes.PASSIVE_OFF): if self.key in dict_: return dict_[self.key] # Retrieve the session bound to the state in order to perform # a lazy query for the attribute. session = _state_session(state) if session is None: # State is not bound to a session; we cannot proceed. return None # Find class for discriminator. # TODO: Perhaps optimize with some sort of lookup? discriminator = self.get_state_discriminator(state) target_class = _get_class_registry(state.class_).METHOD_NAME(discriminator) if target_class is None: # Unknown discriminator; return nothing. return None id = self.get_state_id(state) try: target = session.METHOD_NAME(target_class, id) except AttributeError: # sqlalchemy 1.3 target = session.query(target_class).METHOD_NAME(id) # Return found (or not found) target. return target
[ 19 ]
def METHOD_NAME(self, dev_path): return self.get_dev_mode(dev_path) == 'devdax'
[ 137, -1 ]
def METHOD_NAME(im, to_bgr=False): im = np.swapaxes(im, 1, 2) im = np.swapaxes(im, 1, 0) if to_bgr: im = im[[2, 1, 0], :, :] return im
[ 2755 ]
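The two swapaxes calls above turn an HWC (height, width, channel) image into CHW layout, equivalent to a single transpose:

import numpy as np
im = np.zeros((4, 5, 3))                              # H=4, W=5, C=3
chw = np.swapaxes(np.swapaxes(im, 1, 2), 1, 0)
assert chw.shape == (3, 4, 5)
assert np.array_equal(chw, np.transpose(im, (2, 0, 1)))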
def METHOD_NAME(self): self.tmpf.close() os.unlink(self.tmpf.name)
[ 531, 481 ]
def METHOD_NAME(self): """ Tests the return of running the -f list_nodes_full command for ProfitBricks """ cmd = "-f list_nodes_full {}".format(self.PROVIDER) list_nodes = self.run_cloud(cmd) self.assertIn("state:", [i.strip() for i in list_nodes]) self.assertIn("name:", [i.strip() for i in list_nodes])
[ 9, 245, 480, 324 ]
def METHOD_NAME(self, model, positions, q, dofs, ni, o1, e1, f1): n = self.n du_axial = zeros(n, 'float64') for i in range(self.n): n0, n1 = self.node_ids[i, :] n11 = dofs[(n0, 1)] n21 = dofs[(n1, 1)] q_axial = array([ q[n11], q[n21], ]) u_axial = q_axial du_axial[i] = u_axial[0] - u_axial[1] s = self.s bi = self.B e1[ni : ni+n] = du_axial * s f1[ni : ni+n] = bi * du_axial o1[ni : ni+n] = f1[ni: ni+n] * s #return (axial_strain, axial_stress, axial_force)
[ 6619, 2903 ]
def METHOD_NAME(self) -> str: """ The DNC endpoint URL that customers can use to connect to. """ return pulumi.get(self, "dnc_endpoint")
[ 4019, 841 ]
def METHOD_NAME(i): # stop if already start machines[i].run('tmux send-keys -t python-rc C-c') time.sleep(2) machines[i].kill_detach_tmux() machines[i].run('rm -rf ~/.near') # upload keys, config, genesis machines[i].upload(str(get_node_dir(i)), f'/home/{machines[i].username}/.near') pbar.update(1)
[ 172, 3842, 1537 ]
def METHOD_NAME(binning_method): # load training data X = np.random.rand(10, 1, 150) y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) word_length = 6 alphabet_size = 4 # SFA with ANOVA one-sided test window_size = 32 p = SFA( word_length=word_length, anova=True, alphabet_size=alphabet_size, window_size=window_size, binning_method=binning_method, ).fit(X, y) assert p.breakpoints.shape == (word_length, alphabet_size) _ = p.transform(X, y) # SFA with first frequency coefficients p2 = SFA( word_length=word_length, anova=False, alphabet_size=alphabet_size, window_size=window_size, binning_method=binning_method, ).fit(X, y) assert p.dft_length != p2.dft_length assert (p.breakpoints != p2.breakpoints).any() _ = p2.transform(X, y)
[ 9, -1, 8233 ]
def METHOD_NAME(monkeypatch): # given img_data = BytesIO() image = Image.new("RGB", size=(1, 1)) image.save(img_data, format="JPEG") field = "image" error_msg = "Test syntax error" image_file_mock = Mock(side_effect=SyntaxError(error_msg)) monkeypatch.setattr( "saleor.graphql.core.validators.file._validate_image_exif", image_file_mock ) img = SimpleUploadedFile("product.jpg", img_data.getvalue(), "image/jpeg") # when with pytest.raises(ValidationError) as exc: clean_image_file({field: img}, field, ProductErrorCode) # then assert error_msg in exc.value.args[0][field].message
[ 9, 1356, 660, 171, 10093, 437, 3940 ]
def METHOD_NAME(get_fn): # no hydrogens in this file -> no hydrogen bonds t = md.load(get_fn('1bpi.pdb')) eq(np.zeros((0, 3), dtype=int), md.baker_hubbard(t))
[ 9, -1, -1, 1170 ]
def METHOD_NAME( self, data: bytes, pattern: bytes, matcher: str | None = None, matcher_kwargs: dict[str, str] | None = None ) -> bool: """Call the matcher.""" if matcher and not self._matchers.get(matcher): raise self.MatcherNotInstalled( f'No matcher installed for {matcher}' ) match_func = self._matchers[matcher or 'glob'] if matcher in self.matcher_pattern_first: first_arg = bytes_to_str(pattern) second_arg = bytes_to_str(data) else: first_arg = bytes_to_str(data) second_arg = bytes_to_str(pattern) return match_func(first_arg, second_arg, **matcher_kwargs or {})
[ 590 ]
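The default matcher above is 'glob'; its standalone equivalent is fnmatch-style pattern matching, e.g.:

from fnmatch import fnmatch
assert fnmatch("topic.events.created", "topic.events.*")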
def METHOD_NAME(self): self.pre_operations() yield self.PrivateLinkScopesDelete(ctx=self.ctx)() self.post_operations()
[ 750, 710 ]
def METHOD_NAME(self, session): pass
[ 69, 1072 ]
def METHOD_NAME(context): context.caller_stack._push_frame() try: context._push_buffer() __M_writer = context.writer() # SOURCE LINE 11 __M_writer('\n <div class="header">\n <h1><a href="/">Login</a></h1>\n </div>\n') finally: __M_buf, __M_writer = context._pop_buffer_and_writer() context.caller_stack._pop_frame() __M_writer(filters.trim(__M_buf.getvalue())) return ""
[ 338, 709 ]
def METHOD_NAME(self): return """\ font Sets this axis' title font. Note that the title's font used to be customized by the now deprecated `titlefont` attribute. text Sets the title of this axis. Note that before the existence of `title.text`, the title's contents used to be defined as the `title` attribute itself. This behavior has been deprecated. """
[ 1302, 1303 ]
def METHOD_NAME(self): if self.current_fold < self.n_folds - 1: self.current_fold = self.current_fold + 1 else: self.current_fold = 0
[ 243, 3848 ]
def METHOD_NAME(self): self.nodes[LEADER].start() self.simulator.go(config.LEADER_STARTUP_DELAY) self.assertEqual(self.nodes[LEADER].get_state(), 'leader') for i in range(2, 5): self.nodes[i].start() self.simulator.go(config.ROUTER_STARTUP_DELAY) for i in range(2, 5): self.assertEqual(self.nodes[i].get_state(), 'router') self.simulator.go(config.MAX_ADVERTISEMENT_INTERVAL) self.nodes[ED].start() self.simulator.go(5) self.assertEqual(self.nodes[ED].get_state(), 'child') self.collect_ipaddrs() addrs = self.nodes[ED].get_addrs() for addr in addrs: self.assertTrue(self.nodes[ROUTER3].ping(addr))
[ 9 ]
def METHOD_NAME(self): tocrds = podpac.Coordinates([[39.1, 39.0, 38.9], [-77.1, -77, -77.2]], dims=["lat", "lon"], crs="EPSG:4326") base = podpac.core.data.array_source.Array( source=np.random.rand(3, 3), coordinates=tocrds.transform("EPSG:32618") ) node = podpac.interpolators.Interpolate(source=base, interpolation="nearest") o = node.eval(tocrds) assert np.all((o.lat.data - tocrds["lat"].coordinates) == 0) # now check the Mixin node2 = podpac.core.data.array_source.Array( source=np.random.rand(3, 3), coordinates=tocrds.transform("EPSG:32618") ).interpolate() o = node2.eval(tocrds) assert np.all((o.lat.data - tocrds["lat"].coordinates) == 0) # now check the reverse operation tocrds = podpac.Coordinates( [podpac.clinspace(4307580, 4330177, 7), podpac.clinspace(309220, 327053, 8)], dims=["lat", "lon"], crs="EPSG:32618", ) srccrds = podpac.Coordinates( [podpac.clinspace(39.2, 38.8, 9), podpac.clinspace(-77.3, -77.0, 9)], dims=["lat", "lon"], crs="EPSG:4326" ) node3 = podpac.core.data.array_source.Array(source=np.random.rand(9, 9), coordinates=srccrds).interpolate() o = node3.eval(tocrds) assert np.all((o.lat.data - tocrds["lat"].coordinates) == 0)
[ 9, 8723, 1669, 8901, 9002 ]
def METHOD_NAME(osmid, tags, ref): """Add a new barrier to the list of barriers.""" if 'barrier' in tags: settingsSection = Settings.get_section('barrier', tags['barrier']) if settingsSection is None: return if Settings.has_option(settingsSection, 'ignore') and Settings.get(settingsSection, 'ignore') == 'TRUE': return barrier = Barrier() barrier.OSMID = osmid barrier.type = tags['barrier'] barrier.ref = ref if 'height' in tags: barrier.height = extract_float_from_string(tags['height']) elif Settings.has_option(settingsSection, 'height'): barrier.height = Settings.getfloat(settingsSection, 'height') if 'width' in tags: barrier.width = extract_float_from_string(tags['width']) elif Settings.has_option(settingsSection, 'width'): barrier.width = Settings.getfloat(settingsSection, 'width') if WebotsObject.enable3D: barrier.ref = Barrier.add_intermediate_point_where_needed(barrier.ref) Barrier.list.append(barrier)
[ 238, 24, 245 ]
def METHOD_NAME(q, k, v, extra_options): #NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q #however from my basic testing it seems that using q instead gives better results m, self.u = get_functions(q, ratio, extra_options["original_shape"]) return m(q), k, v
[ -1, 790 ]
def METHOD_NAME(img_dir, gt_dir): """Collect all images and their corresponding groundtruth files. Args: img_dir (str): The image directory gt_dir (str): The groundtruth directory Returns: files (list): The list of tuples (img_file, groundtruth_file) """ assert isinstance(img_dir, str) assert img_dir assert isinstance(gt_dir, str) assert gt_dir ann_list, imgs_list = [], [] for img in os.listdir(img_dir): imgs_list.append(osp.join(img_dir, img)) ann_list.append(osp.join(gt_dir, 'gt_' + img.replace('jpg', 'txt'))) files = list(zip(imgs_list, ann_list)) assert len(files), f'No images found in {img_dir}' print(f'Loaded {len(files)} images from {img_dir}') return files
[ 1444, 1537 ]
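The pairing convention above in concrete terms (file names are illustrative): an image 001.jpg in img_dir is matched with gt_001.txt in gt_dir.

img = "001.jpg"
assert "gt_" + img.replace("jpg", "txt") == "gt_001.txt"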
def METHOD_NAME(branch_name): outs, errs, exit_code = run_external_command(f"git switch {branch_name}") if exit_code != 0: raise RecipeException(errs)
[ 1493, 705, 24 ]
def METHOD_NAME(input_blob): result = input_blob.copy() result[result < 0] *= alpha return result,
[ 2360 ]
def METHOD_NAME( gym_env: gym.Env, discount: types.Float = 1.0, max_episode_steps: Optional[types.Int] = None, gym_env_wrappers: Sequence[types.GymEnvWrapper] = (), time_limit_wrapper: TimeLimitWrapperType = wrappers.TimeLimit, env_wrappers: Sequence[types.PyEnvWrapper] = (), spec_dtype_map: Optional[Dict[gym.Space, np.dtype]] = None, auto_reset: bool = True, render_kwargs: Optional[Dict[str, Any]] = None,
[ 503, 485 ]
def METHOD_NAME(self, steps=None): if steps is None: function_call("{command} -c=volumeup".format(command=self.playout_control), shell=True) else: function_call("{command} -c=volumeup -v={steps}".format(steps=steps, command=self.playout_control), shell=True)
[ 559, 128, 4076, 3597 ]
def METHOD_NAME(self): """Return `True` if the shutter is closed.""" return self.state.title() == "Closed"
[ 137, 4703 ]
def METHOD_NAME(kubescape_exec: str): return smoke_utils.run_command(command=[kubescape_exec, "scan", "framework", "nsa", all_files])
[ 793, 1486 ]
def METHOD_NAME(self, env, dependent_spec): env.prepend_path("XDG_DATA_DIRS", self.prefix.share) env.prepend_path("GI_TYPELIB_PATH", join_path(self.prefix.lib, "girepository-1.0"))
[ 102, 1604, 22, 1027 ]
def METHOD_NAME(self): # Check the repr, mainly to cover the code paths: assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
[ 9, 92 ]
def METHOD_NAME(self): pass
[ 72, 710 ]
def METHOD_NAME(art_warning, decision_tree_estimator): try: classifier = decision_tree_estimator() with pytest.raises(ValueError): _ = AttributeInferenceWhiteBoxLifestyleDecisionTree(classifier, attack_feature=-5) except ARTTestException as e: art_warning(e)
[ 9, 250, 434 ]
def METHOD_NAME(cls): cls.repeater.delete() cls.teardown_subscriptions() cls.domain_obj.delete() clear_plan_version_cache() super().METHOD_NAME()
[ 531, 481, 2 ]
def METHOD_NAME(loglevel=None, logfile=None): """Setup logging.""" logger = logging.getLogger() loglevel = get_loglevel(loglevel or 'ERROR') logfile = logfile if logfile else sys.__stderr__ if not logger.handlers: if hasattr(logfile, 'write'): handler = logging.StreamHandler(logfile) else: handler = WatchedFileHandler(logfile) logger.addHandler(handler) logger.setLevel(loglevel) return logger
[ 102, 663 ]
def METHOD_NAME(self): parameters = { **self.serialize_header_param( "Accept", "application/json", ), } return parameters
[ 572, 386 ]
def METHOD_NAME(self): pass
[ 250, 562, 43, 136 ]
def METHOD_NAME(image_tag_string, client): """Try to retrieve a docker image using the docker API. image_tag_string: can be in image:tag or image@digest_type:digest format""" image = check_image(image_tag_string, client) if image is None: image = pull_image(image_tag_string, client) return image
[ 19, 223, 660 ]
def METHOD_NAME(disease_id): return QueryEBIOLSExtended.__get_entity("get_disease", disease_id)
[ 19, 12834, 1067 ]
def METHOD_NAME( url: str, root: TypePath, filename: Optional[TypePath] = None, md5: Optional[str] = None, ) -> None: """Download a file from a url and place it in root. Args: url: URL to download file from root: Directory to place downloaded file in filename: Name to save the file under. If ``None``, use the basename of the URL md5: MD5 checksum of the download. If None, do not check """ root = os.path.expanduser(root) if not filename: filename = os.path.basename(url) fpath = os.path.join(root, filename) os.makedirs(root, exist_ok=True) # check if file is already present locally if not check_integrity(fpath, md5): try: print('Downloading ' + url + ' to ' + fpath) # noqa: T201 urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater(), ) except (urllib.error.URLError, OSError) as e: if url[:5] == 'https': url = url.replace('https:', 'http:') message = ( 'Failed download. Trying https -> http instead. Downloading ' + url + ' to ' + fpath ) print(message) # noqa: T201 urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater(), ) else: raise e # check integrity of downloaded file if not check_integrity(fpath, md5): raise RuntimeError('File not found or corrupted.')
[ 136, 274 ]
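Hedged usage of the downloader above, kept commented because it performs a network fetch; the URL, root path, and checksum are placeholders.

# METHOD_NAME("https://example.com/data.bin", root="~/datasets",
#             filename="data.bin", md5="d41d8cd98f00b204e9800998ecf8427e")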
def METHOD_NAME(self): submodule = import_module('ziptestdata.subdirectory') self.assertEqual( set(resources.contents(submodule)), {'__init__.py', 'binary.file'})
[ 9, 1599, 192 ]
def METHOD_NAME(): response.headers['Access-Control-Allow-Origin'] = '*' response.headers['Access-Control-Allow-Headers'] = 'origin, content-type' response.headers['Content-Type'] = 'application/json' device = checkrunning(True) if device: return { "status": "OK", "device": device, "version": VERSION } else: return { "status": "NO", "version": VERSION }
[ -1 ]
def METHOD_NAME(message, *args, **kwargs): """Log a message to specific logger instance.""" logfile = kwargs.pop("logfile", None) record = logging.LogRecord(None, logging.INFO, None, None, message, args, None, None) record.asctime = f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(record.created))},{record.msecs:03d}" record.message = record.getMessage() record.__dict__.update(kwargs) for key, value in _loggers.items(): if logfile and key == logfile: value.handle(record) if logfile is None and key.endswith(".json"): value.handle(record)
[ 2034 ]
def METHOD_NAME(self) -> str: """ The provider-assigned unique ID for this managed resource. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict', ) -> TextIO: """Return a file-like object opened for text reading of the resource.""" return TextIOWrapper( open_binary(package, resource), encoding=encoding, errors=errors )
[ 1452, 526 ]
def METHOD_NAME(self): """ Is the current xml generator gccxml (version 0.7)? Returns: bool: is gccxml 0.7 being used? """ return self._xml_generator_version == self.__gccxml_07
[ 137, 7540, 4468 ]
def METHOD_NAME() -> List[bool]: """Return possible parameter values for the `boolean` parameter type (i.e. [True, False])""" return [True, False]
[ 1176, 201 ]