text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME():
    """Create fields for param.json"""
    features = json.load(open('features.json'))
    fields_content = ''
    print(features)
    fields_content += \
        '\t\tStimulus & Location & eFeature & Mean & Std \\\\ \n'
    fields_content += '\t\t\\midrule\n'
    for stimulus, loc_list in sorted(features.items()):
        stim_field = stimulus
        for location, features in sorted(loc_list.items()):
            loc_field = location
            for feature_name, (mean, std) in sorted(features.items()):
                feature_name = feature_name.replace('_', '{\\_}')
                fields_content += '\t\t%s \\\\\n' % ' & '.join(
                    [stim_field, loc_field, feature_name, str(mean), str(std)])
                if loc_field != '':
                    loc_field = ''
                if stim_field != '':
                    stim_field = ''
    fields_content += '\t\t\\botrule\n'
    return fields_content, 6
[ 129, 964, 342 ]
def METHOD_NAME(self) -> None:
    self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(extra_config, expected_http_kwargs, instance_all_metrics):
    instance = instance_all_metrics
    instance.update(extra_config)
    c = ProxysqlCheck('proxysql', {}, [instance])
    c.get_tls_context()  # need to call this for config values to be saved by _tls_context_wrapper
    actual_options = {k: v for k, v in c._tls_context_wrapper.config.items() if k in expected_http_kwargs}
    assert expected_http_kwargs == actual_options
[ 9, 1245, 200, 3116 ]
def METHOD_NAME(n):
    # Fixed-free elastic rod
    L = 1.0
    le = L/n
    rho = 7.85e3
    S = 1.e-4
    E = 2.1e11
    mass = rho*S*le/6.
    k = E*S/le
    A = k*(diag(r_[2.*ones(n-1), 1]) - diag(ones(n-1), 1) - diag(ones(n-1), -1))
    B = mass*(diag(r_[4.*ones(n-1), 2]) + diag(ones(n-1), 1) + diag(ones(n-1), -1))
    return A, B
[ 6092, 10074 ]
def METHOD_NAME(self, m_logger):
    tx = MessageTransaction.create(**self.params)
    # We must sign the tx before validation will work.
    tx.sign(self.alice)
    # We have not touched the tx: validation should pass.
    self.assertTrue(tx.validate_or_raise())
[ 9, 187, 2543 ]
def METHOD_NAME(self): """ Initialize logs attribute for collecting logs by ducktape. After changing to property based logs, will be removed. """ setattr(self, 'logs', { "log": { "path": self.log_dir, "collect_default": True }, "config": { "path": self.config_dir, "collect_default": True }, "shared": { "path": self.shared_root, "collect_default": True }, "jfr": { "path": self.jfr_dir, "collect_default": True } })
[ 176, 1099, 309 ]
def METHOD_NAME(fn):
    num_f = getattr(num, fn)
    np_f = getattr(np, fn)
    par = (SIZES, range(NDIMS), DTYPES)
    for size, ndims, dtype in product(*par):
        shape = ndims * [size]
        xf = num_f(shape, dtype=dtype)
        yf = np_f(shape, dtype=dtype)
        assert np.array_equal(xf, yf)
        assert xf.dtype == yf.dtype
[ 9, 581, 717 ]
def METHOD_NAME(in_filename, out_filename):
    """
    If we are sure that all the separators stay the same, we can write
    this function using the join method together with a tuple, which is
    an iterable, so the separator is not repeated.
    """
    with open(in_filename, encoding="UTF-8") as in_file, \
            open(out_filename, mode='w', encoding="UTF-8") as out_file:
        for line_no, line in enumerate(in_file, 1):
            out_file.write(":".join((str(line_no),
                                     str(len(line.split())),
                                     str(len(line)),
                                     line)))
[ 13202, -1 ]
def METHOD_NAME(self):
    rval = self.loads('1', parse_int=float)
    self.assertTrue(isinstance(rval, float))
    self.assertEqual(rval, 1.0)
[ 9, 1819 ]
async def METHOD_NAME(
    view: foc.SampleCollection, input: CountValues
) -> t.Tuple[t.Callable[[t.List], CountValuesResponses], foa.CountValues]:
    field = view.get_field(input.field)
    while isinstance(field, fo.ListField):
        field = field.field

    def resolve(data: t.List):
        _, data = data
        values = [ValueCount(key=value, value=count) for value, count in data]
        if isinstance(field, fo.StringField):
            return StrCountValuesResponse(values=values)
        if isinstance(field, fo.BooleanField):
            return BoolCountValuesResponse(values=values)
        if isinstance(field, fo.IntField):
            return IntCountValuesResponse(values=values)

    return resolve, foa.CountValues(input.field, _first=LIST_LIMIT, _asc=False)
[ 29, 199 ]
def METHOD_NAME(self) -> None:
    self._id: Attribute[int] = NotSet
    self._app_id: Attribute[int] = NotSet
    self._target_id: Attribute[int] = NotSet
    self._target_type: Attribute[str] = NotSet
[ 176, 177 ]
def METHOD_NAME():
    return APIClient()
[ 340 ]
def METHOD_NAME(
    nodes_with_weights: List[NNCFNode],
    default_weight_qconfig: QuantizerConfig,
    global_weight_constraints: QuantizationConstraints = None,
    scope_overrides_dict: Dict = None,
    hw_config: HWConfig = None,
) -> Dict[NNCFNode, List[QuantizerConfig]]:
    """
    Assigns a list of possible quantizer configurations (as determined by HW config, defaults and overrides)
    to each weighted node that was passed.

    :param nodes_with_weights: The nodes in NNCFGraph that correspond to weighted operations.
    :param default_weight_qconfig: The default quantizer configuration for weights, to be used if no other
        information is given.
    :param global_weight_constraints: The constraints imposed on all weights by the NNCFConfig .json file,
        such as "all symmetric" or "all per-channel" etc.
    :param scope_overrides_dict: The dictionary of strings vs dict of overrides for a given weight quantizer.
        The strings are matched against the name of the NNCFNodes in nodes_with_weights.
    :param hw_config: The HWConfig object to be used for device-specific constraints on allowed weights.
    :return: A dict of each weighted node vs. the list of quantizer configs allowed for quantizing the
        associated weights
    """
    retval = {}  # type: Dict[NNCFNode, List[QuantizerConfig]]
    default_qconfig = deepcopy(default_weight_qconfig)
    if global_weight_constraints is not None:
        default_qconfig = global_weight_constraints.apply_constraints_to(default_qconfig)
    if scope_overrides_dict is None:
        scope_overrides_dict = {}
    weight_scope_overrides_dict = scope_overrides_dict.get("weights")
    if hw_config is not None:
        meta_vs_qconfig_map = hw_config.get_metatype_vs_quantizer_configs_map(for_weights=True)
    for node in nodes_with_weights:
        qconfig_for_current_scope = get_scoped_quantizer_config(
            default_qconfig, node.node_name, weight_scope_overrides_dict
        )
        if hw_config is None:
            qconfig_list = [qconfig_for_current_scope]
        else:
            metatype = node.metatype
            qconfig_list = meta_vs_qconfig_map[metatype]
            if HWConfig.is_wildcard_quantization(qconfig_list):  # Empty list = wildcard quantization
                qconfig_list = [default_qconfig]
            elif HWConfig.is_qconf_list_corresponding_to_unspecified_op(qconfig_list):
                continue  # The module will not have its weights quantized

            local_constraints = global_weight_constraints
            for overridden_scope, scoped_override_dict in scope_overrides_dict.items():
                if matches_any(node.node_name, overridden_scope):
                    scope_constraints = QuantizationConstraints.from_config_dict(scoped_override_dict)
                    local_constraints = local_constraints.get_updated_constraints(scope_constraints)
            qconfig_list = local_constraints.constrain_qconfig_list(
                node.node_name, hw_config.target_device, qconfig_list
            )

        retval[node] = qconfig_list
    return retval
[ 1283, 16722, 50, 24, 468 ]
def METHOD_NAME(obj): """DEPRECATED: Use jupyter_client.jsonutil.json_default""" warnings.warn( "date_default is deprecated since jupyter_client 7.0.0." " Use jupyter_client.jsonutil.json_default.", stacklevel=2, ) return json_default(obj)
[ 153, 235 ]
def METHOD_NAME(root, upgrader, biosample_characterization, biosample_characterization_4,
                publication, threadlocals, dummy_request):
    context = root.get_by_uuid(biosample_characterization['uuid'])
    dummy_request.context = context
    value = upgrader.upgrade('biosample_characterization', biosample_characterization_4,
                             target_version='5', context=context)
    assert value['schema_version'] == '5'
    assert value['references'] == [publication['uuid']]
[ 9, 2036, 2037, 738, 1004 ]
def METHOD_NAME(self):
    orig.install.METHOD_NAME(self)
    if self.root:
        self.single_version_externally_managed = True
    elif self.single_version_externally_managed:
        if not self.root and not self.record:
            raise DistutilsArgError(
                "You must specify --record or --root when building system"
                " packages"
            )
[ 977, 1881 ]
def METHOD_NAME(instrument, elasticapm_client, waiting_httpserver, status_code):
    waiting_httpserver.serve_content("", code=status_code)
    url = waiting_httpserver.url + "/hello_world"
    parsed_url = urllib.parse.urlparse(url)
    elasticapm_client.begin_transaction("transaction.test")
    with capture_span("test_request", "test"):
        httplib2.Http().request(url, "GET")
    elasticapm_client.end_transaction("MyView")
    transactions = elasticapm_client.events[TRANSACTION]
    spans = elasticapm_client.spans_for_transaction(transactions[0])
    assert spans[0]["name"].startswith("GET 127.0.0.1:")
    assert spans[0]["type"] == "external"
    assert spans[0]["subtype"] == "http"
    assert url == spans[0]["context"]["http"]["url"]
    assert status_code == spans[0]["context"]["http"]["status_code"]
    assert spans[0]["context"]["destination"]["service"] == {
        "name": "",
        "resource": "127.0.0.1:%d" % parsed_url.port,
        "type": "",
    }
    assert spans[0]["outcome"] == "failure"
[ 9, -1, 4584, 168 ]
def METHOD_NAME(cls, cfg, args):
    if args.esp_tool:
        espidf = args.esp_tool
    else:
        espidf = path.join(args.esp_idf_path, 'components', 'esptool_py',
                           'esptool', 'esptool.py')

    return Esp32BinaryRunner(
        cfg, args.esp_device,
        boot_address=args.esp_boot_address,
        part_table_address=args.esp_partition_table_address,
        app_address=args.esp_app_address,
        erase=args.erase,
        baud=args.esp_baud_rate,
        flash_size=args.esp_flash_size,
        flash_freq=args.esp_flash_freq,
        flash_mode=args.esp_flash_mode,
        espidf=espidf,
        bootloader_bin=args.esp_flash_bootloader,
        partition_table_bin=args.esp_flash_partition_table,
        no_stub=args.esp_no_stub)
[ 74, 129 ]
def METHOD_NAME(name):
    try:
        return getattr(filter_clses, name)
    except AttributeError:
        raise ValueError('Not a filter class: %r' % name)
[ 19, 527, 3847 ]
def METHOD_NAME():
    sent = None
    for call in responses.calls:
        if "search.api.globus.org" in call.request.url:
            sent = call
    assert sent is not None
    return sent
[ 679, 1070, 128 ]
def METHOD_NAME(subcommand: str,
                subcommand_args: List[str]=None,
                stdin: Optional[IO]=None,
                pwd: Union[Path, str, None]=None) -> None:
    """Run a given ArchiveBox subcommand with the given list of args"""
    subcommand_args = subcommand_args or []

    if subcommand not in meta_cmds:
        from ..config import setup_django

        cmd_requires_db = subcommand in archive_cmds
        init_pending = '--init' in subcommand_args or '--quick-init' in subcommand_args

        if cmd_requires_db:
            check_data_folder(pwd)

        setup_django(in_memory_db=subcommand in fake_db, check_db=cmd_requires_db and not init_pending)

        if cmd_requires_db:
            check_migrations()

    module = import_module('.archivebox_{}'.format(subcommand), __package__)
    module.main(args=subcommand_args, stdin=stdin, pwd=pwd)  # type: ignore
[ 22, 2649 ]
def METHOD_NAME(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
[ 9, 126, 280, 526, 265 ]
def METHOD_NAME(self, op, expected_shape, expected_dtype):
[ 638, 2989, 441 ]
def METHOD_NAME(pretrained=False, progress=True, **kwargs):
    """
    Constructs the ReXNet-lite model with width multiplier of 1.0.

    .. note::
        ReXNet-lite model with width multiplier of 1.0 from the `Rethinking Channel Dimensions for
        Efficient Model Design <https://arxiv.org/pdf/2007.00992.pdf>`_ paper.

    Args:
        pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> rexnet_lite_1_0 = flowvision.models.rexnet_lite_1_0(pretrained=False, progress=True)

    """
    model_kwargs = dict(multiplier=1.0, **kwargs)
    return _create_rexnet_lite(
        "rexnet_lite_1_0", pretrained=pretrained, progress=progress, **model_kwargs
    )
[ -1, 8609, 1170, 1506 ]
def METHOD_NAME(mocker) -> None:
    bb: bitarray = bitarray("0000 0000 0000 0000 0000 0000 0000 0011")
    rb: ReadBufferByteBased = ReadBufferByteBased(bb, ByteOrder.BIG_ENDIAN)
    assert rb.read_unsigned_int(logical_name="Byte") == 3
[ 9, 203, 2376, 203, 1715, 962, 4289 ]
def METHOD_NAME(self):
    d = {'val': {'a', 'b', 'c'}}
    ser = JsonSetSerializer(self.folder, 'test_json.json')
    ser.write(d)
    back = ser.read()
    self.assertEqual(d, back)
[ 9, 763, 10173, 0, 3834, -1 ]
def METHOD_NAME(self) -> List[str]:
    return [k for k in self._payload["channel_map_tree"].keys()]
[ 19, 6520 ]
def METHOD_NAME(a: dict(type=float, help='the dividend'),
                b: dict(type=float, help='the divisor (must be different than 0)')
                ) -> dict(type=float, help='the result of dividing a by b'):
    """Divide a by b"""
    return a / b
[ 5367 ]
def METHOD_NAME(self, obj): """Wraps the output stream, encoding Unicode strings with the specified encoding""" if isinstance(obj, str): obj = obj.encode(self.encoding) self.out.buffer.METHOD_NAME(obj)
[ 77 ]
def METHOD_NAME(self):
    self.ctx.args.backup_instance_name = self.ctx.args.backup_instance['backup_instance_name']
    self.ctx.set_var(
        "instance",
        self.ctx.args.backup_instance.to_serialized_data(),
        schema_builder=self.BackupInstancesCreateOrUpdate._build_schema_on_200_201
    )
[ 709, 710 ]
def METHOD_NAME(digest): """ Given a digested result, calculate mean tx/rx kpps Args: Digested samples Returns: a dictionary with the following format { "nsamples": 52 "result": { 0: { "TX": 2352.238 "RX": 4581.312 }, 1: ... } } """ result= {} for port in digest[0].get("packets"): result[port]= { "TX": statistics.mean( [sample["packets"][port]["tx_delta"]/ sample["time_delta"] for sample in digest]) / 1000, "RX": statistics.mean( [sample["packets"][0]["rx_delta"]/ sample["time_delta"] for sample in digest]) / 1000 } return { "nsamples": len(digest), "result": result }
[ 577 ]
def METHOD_NAME(): """Test that DWT produces the same result along each dimension.""" X = _make_nested_from_array( np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), n_instances=1, n_columns=2 ) d = DWTTransformer(num_levels=3).fit(X) res = d.transform(X) orig = convert_list_to_dataframe( [ [ 9 * math.sqrt(2), -4 * math.sqrt(2), -2, -2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, ], [ 9 * math.sqrt(2), -4 * math.sqrt(2), -2, -2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, -math.sqrt(2) / 2, ], ] ) orig.columns = X.columns assert check_if_dataframes_are_equal(res, orig)
[ 9, -1, 11942, 11943, 9589, 1353, 3014 ]
def METHOD_NAME() -> argparse.ArgumentParser:
    p = argparse.ArgumentParser(description="Test run options")

    p.add_argument('-n', '--dry-run', action='store_true',
                   help='show me, do not run tests')
    p.add_argument('-j', dest='jobs', type=int, default=1,
                   help='run tests in multiple parallel jobs')
    p.add_argument('-d', dest='debug', action='store_true', help='debug')
    p.add_argument('-p', dest='print', action='store_true',
                   help='redirects qemu\'s stdout and stderr to '
                        'the test output')
    p.add_argument('-gdb', action='store_true',
                   help="start gdbserver with $GDB_OPTIONS options "
                        "('localhost:12345' if $GDB_OPTIONS is empty)")
    p.add_argument('-valgrind', action='store_true',
                   help='use valgrind, sets VALGRIND_QEMU environment '
                        'variable')
    p.add_argument('-misalign', action='store_true',
                   help='misalign memory allocations')
    p.add_argument('--color', choices=['on', 'off', 'auto'], default='auto',
                   help="use terminal colors. The default "
                        "'auto' value means use colors if terminal stdout detected")
    p.add_argument('-tap', action='store_true',
                   help='produce TAP output')

    g_env = p.add_argument_group('test environment options')
    mg = g_env.add_mutually_exclusive_group()
    # We don't set default for cachemode, as we need to distinguish default
    # from user input later.
    mg.add_argument('-nocache', dest='cachemode', action='store_const',
                    const='none',
                    help='set cache mode "none" (O_DIRECT), '
                         'sets CACHEMODE environment variable')
    mg.add_argument('-c', dest='cachemode',
                    help='sets CACHEMODE environment variable')
    g_env.add_argument('-i', dest='aiomode', default='threads',
                       help='sets AIOMODE environment variable')

    p.set_defaults(imgfmt='raw', imgproto='file')

    format_list = ['raw', 'bochs', 'cloop', 'parallels', 'qcow', 'qcow2',
                   'qed', 'vdi', 'vpc', 'vhdx', 'vmdk', 'luks', 'dmg']
    g_fmt = p.add_argument_group(
        ' image format options',
        'The following options set the IMGFMT environment variable. '
        'At most one choice is allowed, default is "raw"')
    mg = g_fmt.add_mutually_exclusive_group()
    for fmt in format_list:
        mg.add_argument('-' + fmt, dest='imgfmt', action='store_const',
                        const=fmt, help=f'test {fmt}')

    protocol_list = ['file', 'rbd', 'nbd', 'ssh', 'nfs', 'fuse']
    g_prt = p.add_argument_group(
        ' image protocol options',
        'The following options set the IMGPROTO environment variable. '
        'At most one choice is allowed, default is "file"')
    mg = g_prt.add_mutually_exclusive_group()
    for prt in protocol_list:
        mg.add_argument('-' + prt, dest='imgproto', action='store_const',
                        const=prt, help=f'test {prt}')

    g_bash = p.add_argument_group('bash tests options',
                                  'The following options are ignored by '
                                  'python tests.')
    # TODO: make support for the following options in iotests.py
    g_bash.add_argument('-o', dest='imgopts',
                        help='options to pass to qemu-img create/convert, '
                             'sets IMGOPTS environment variable')

    g_sel = p.add_argument_group('test selecting options',
                                 'The following options specify test set '
                                 'to run.')
    g_sel.add_argument('-g', '--groups', metavar='group1,...',
                       help='include tests from these groups')
    g_sel.add_argument('-x', '--exclude-groups', metavar='group1,...',
                       help='exclude tests from these groups')
    g_sel.add_argument('--start-from', metavar='TEST',
                       help='Start from specified test: make sorted sequence '
                            'of tests as usual and then drop tests from the first '
                            'one to TEST (not inclusive). This may be used to '
                            'rerun failed ./check command, starting from the '
                            'middle of the process.')
    g_sel.add_argument('tests', metavar='TEST_FILES', nargs='*',
                       help='tests to run, or "--" followed by a command')

    return p
[ 93, 2345 ]
def METHOD_NAME(root_dir):
    utt2spk = {}
    utt2wav = {}
    num_good_files = 0
    num_bad_files = 0
    noise_dir = os.path.join(root_dir, "noise")
    for root, dirs, files in os.walk(noise_dir):
        for file in files:
            file_path = os.path.join(root, file)
            if file.endswith(".wav"):
                utt = str(file).replace(".wav", "")
                utt2wav[utt] = file_path
                utt2spk[utt] = utt
    utt2spk_str = ""
    utt2wav_str = ""
    for utt in utt2spk:
        if utt in utt2wav:
            utt2spk_str = utt2spk_str + utt + " " + utt2spk[utt] + "\n"
            utt2wav_str = utt2wav_str + utt + " " + utt2wav[utt] + "\n"
            num_good_files += 1
        else:
            print("Missing file {}".format(utt))
            num_bad_files += 1
    print("In noise directory, processed {} files: {} had missing wav data".format(
        num_good_files, num_bad_files))
    return utt2spk_str, utt2wav_str
[ 123, 802 ]
def METHOD_NAME():
    pattern_analyzer = _PatternAnalyzer(name="test_analyzer", flags=RegexFlags.canon_eq)
    analyzers = []
    analyzers.append(pattern_analyzer)
    pattern_tokenizer = _PatternTokenizer(name="test_tokenizer", flags=RegexFlags.canon_eq)
    tokenizers = []
    tokenizers.append(pattern_tokenizer)
    index = SearchIndex(name="test", fields=None, analyzers=analyzers, tokenizers=tokenizers)

    result = SearchIndex._from_generated(index)
    assert isinstance(result.analyzers[0], PatternAnalyzer)
    assert isinstance(result.analyzers[0].flags, list)
    assert result.analyzers[0].flags[0] == "CANON_EQ"
    assert isinstance(result.tokenizers[0], PatternTokenizer)
    assert isinstance(result.tokenizers[0].flags, list)
    assert result.tokenizers[0].flags[0] == "CANON_EQ"
[ 9, 789, 1070, 724, 1206 ]
def METHOD_NAME(rnames, translation=dict(),
                symbol_r2python=default_symbol_r2python,
                symbol_resolve=default_symbol_resolve):
    """
    :param names: an iterable of rnames
    :param translation: a mapping for R name->python name
    :param symbol_r2python: a function to translate an R symbol into a
        (presumably valid) Python symbol
    :param symbol_resolve: a function to check a prospective set of
        translation and resolve conflicts if needed
    """
    symbol_mapping = defaultdict(list)
    for rname in rnames:
        if rname in translation:
            rpyname = translation[rname]
        else:
            rpyname = symbol_r2python(rname)
        symbol_mapping[rpyname].append(rname)
    conflicts, resolutions = symbol_resolve(symbol_mapping)
    return (symbol_mapping, conflicts, resolutions)
[ 422, 872 ]
def METHOD_NAME(request):
    return request.param
[ 1028, 615, 16412 ]
def METHOD_NAME():
    # COO graph:
    # [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
    # [2, 4, 2, 3, 0, 1, 1, 0, 0, 1]
    # [1, 1, 1, 1, 0, 0, 0, 0, 0] -> edge type.
    # num_nodes = 5, num_n1 = 2, num_n2 = 3
    ntypes = {"n1": 0, "n2": 1}
    etypes = {"n1:e1:n2": 0, "n2:e2:n1": 1}
    metadata = gb.GraphMetadata(ntypes, etypes)
    indptr = torch.LongTensor([0, 2, 4, 6, 8, 10])
    indices = torch.LongTensor([2, 4, 2, 3, 0, 1, 1, 0, 0, 1])
    type_per_edge = torch.LongTensor([1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
    node_type_offset = torch.LongTensor([0, 2, 5])
    return gb.from_csc(
        indptr,
        indices,
        node_type_offset=node_type_offset,
        type_per_edge=type_per_edge,
        metadata=metadata,
    )
[ 19, 7271, 303 ]
def METHOD_NAME():
    assert run_gtmg_mixed_poisson() < 1e-5
[ 9, 1474, 1638, 12907 ]
def METHOD_NAME(self, tb: str, column_property: ColumnProperty) -> None:
    from c2cgeoportal_commons.models import DBSession  # pylint: disable=import-outside-toplevel

    column = column_property.columns[0]
    proxy = column.info["association_proxy"]
    attribute = column_property.class_attribute
    cls = attribute.parent.entity
    association_proxy = getattr(cls, proxy)
    relationship_property = class_mapper(cls).get_property(association_proxy.target)
    target_cls = relationship_property.argument
    query = DBSession.query(getattr(target_cls, association_proxy.value_attr))
    if association_proxy.order_by is not None:
        query = query.order_by(getattr(target_cls, association_proxy.order_by))
    attrs = {}
    if association_proxy.nullable:
        attrs["minOccurs"] = "0"
        attrs["nillable"] = "true"
    attrs["name"] = proxy
    with tag(tb, "xsd:element", attrs) as tb2:
        with tag(tb2, "xsd:simpleType") as tb3:
            with tag(tb3, "xsd:restriction", {"base": "xsd:string"}) as tb4:
                for (value,) in query:
                    with tag(tb4, "xsd:enumeration", {"value": value}):
                        pass
                self.element_callback(tb4, column)
[ 238, 2055, 127, 8503 ]
def METHOD_NAME(self): """Return the verbose status message string """ clean_str = self.DEFAULT if self.clean_state == self.STATUS_OK: clean_str = 'clean sandbox' elif self.clean_state == self.DIRTY: clean_str = 'modified sandbox' sync_str = 'on {0}'.format(self.current_version) if self.sync_state != self.STATUS_OK: sync_str = '{current} --> {expected}'.format( current=self.current_version, expected=self.expected_version) msg = ' {clean}, {sync}'.format(clean=clean_str, sync=sync_str) printlog(msg)
[ 3832, 452, 277 ]
def METHOD_NAME(self) -> 'outputs.CreatorPropertiesResponse':
    """
    The Creator resource properties.
    """
    return pulumi.get(self, "properties")
[ 748 ]
def METHOD_NAME(self, node):
    if isinstance(node, tuple):
        element, key, parents, flag = node
    else:
        element, key, parents, flag = node, None, [], None
    if flag in ("text", "tail"):
        return None
    else:
        if element.text:
            return element, key, parents, "text"
        elif len(element):
            parents.append(element)
            return element[0], 0, parents, None
        else:
            return None
[ 19, 865, 186 ]
def METHOD_NAME(x, y, n_out, sampled_x, offset):
    a = 0
    for i in range(n_out - 3):
        o0, o1, o2 = offset[i], offset[i + 1], offset[i + 2]
        a = (
            _argmax_area(
                x[a], y[a],
                x[o1:o2].mean(), y[o1:o2].mean(),
                x[o0:o1], y[o0:o1],
            )
            + offset[i]
        )
        sampled_x[i + 1] = a

    # ------------ EDGE CASE ------------
    # next-average of last bucket = last point
    sampled_x[-2] = (
        _argmax_area(
            x[a], y[a],
            x[-1],  # last point
            y[-1],
            x[offset[-2] : offset[-1]],
            y[offset[-2] : offset[-1]],
        )
        + offset[-2]
    )
[ -1, 921 ]
def METHOD_NAME(self, vocab_path):
    self._freq_dict = json.loads(
        open(vocab_path + '.freq.json', 'r', encoding='utf-8').read())
    self._word2idx = json.loads(
        open(vocab_path + '.word2idx.json', 'r', encoding='utf-8').read())
    self._idx2word = {}
    for w, idx in self._word2idx.items():
        self._idx2word[idx] = w
    self.vocab_size_oov = len(self._idx2word)
    logger.info('vocab file loaded from "' + vocab_path + '"')
    logger.info('Vocabulary size including oov: %d' % (self.vocab_size_oov))
[ 557, 3259 ]
def METHOD_NAME(
    self,
    widget: QtWidgets.QWidget,
    name: str,
    icon_or_path: Union[Path, QtGui.QIcon],
    tool_tip: Optional[str] = None,
) -> None:
    """
    Add widget as tab with icon and tool tip.

    :param widget: Widget to add as new tab
    :param name: Tab name
    :param icon_or_path: Icon file path or QIcon instance
    :param tool_tip: Optional tab tooltip. If unspecified, a tooltip will be
        determined from the icon name.
    """
    # Store original icon for rotation adjustments
    if isinstance(icon_or_path, Path):
        icon = get_icon(icon_or_path)
    else:
        icon = icon_or_path
    self._tab_icons[id(widget)] = icon

    # Add new tab, with icon oriented upward
    tab_pos = self.tabs.tabPosition()
    upright_icon = self._rotate_icon(icon, tab_pos)
    tab_idx = self.tabs.addTab(widget, upright_icon, "")
    self.tabs.setTabToolTip(tab_idx, tool_tip or name)
[ 238, 5678 ]
def METHOD_NAME(path):
    with open(path, 'rb') as fp:
        au = sunau.open(fp)
        rate = au.getframerate()
        nchannels = au.getnchannels()
        encoding = au._encoding
        fp.seek(0)
        data = fp.read()
    if encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
        raise RuntimeError("Expect .au file with 8-bit mu-law samples")
    # Convert the data to 16-bit signed.
    data = audioop.ulaw2lin(data, 2)
    return (data, rate, 16, nchannels)
[ 203, 4463, 171 ]
def METHOD_NAME(random_state, alpha, size=None, chunk_size=None, gpu=None, dtype=None):
    r"""
    Draw samples from the Dirichlet distribution.

    Draw `size` samples of dimension k from a Dirichlet distribution. A
    Dirichlet-distributed random variable can be seen as a multivariate
    generalization of a Beta distribution. Dirichlet pdf is the conjugate
    prior of a multinomial in Bayesian inference.

    Parameters
    ----------
    alpha : array
        Parameter of the distribution (k dimension for sample of dimension k).
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. Default is None, in which case a
        single value is returned.
    chunk_size : int or tuple of int or tuple of ints, optional
        Desired chunk size on each dimension
    gpu : bool, optional
        Allocate the tensor on GPU if True, False as default
    dtype : data-type, optional
        Data-type of the returned tensor.

    Returns
    -------
    samples : Tensor
        The drawn samples, of shape (size, alpha.ndim).

    Raises
    -------
    ValueError
        If any value in alpha is less than or equal to zero

    Notes
    -----
    .. math:: X \approx \prod_{i=1}^{k}{x^{\alpha_i-1}_i}

    Uses the following property for computation: for each dimension,
    draw a random sample y_i from a standard gamma generator of shape
    `alpha_i`, then
    :math:`X = \frac{1}{\sum_{i=1}^k{y_i}} (y_1, \ldots, y_n)` is
    Dirichlet distributed.

    References
    ----------
    .. [1] David McKay, "Information Theory, Inference and Learning
           Algorithms," chapter 23,
           http://www.inference.phy.cam.ac.uk/mackay/
    .. [2] Wikipedia, "Dirichlet distribution",
           http://en.wikipedia.org/wiki/Dirichlet_distribution

    Examples
    --------
    Taking an example cited in Wikipedia, this distribution can be used if
    one wanted to cut strings (each of initial length 1.0) into K pieces
    with different lengths, where each piece had, on average, a designated
    average length, but allowing some variation in the relative sizes of
    the pieces.

    >>> import mars.tensor as mt

    >>> s = mt.random.dirichlet((10, 5, 3), 20).transpose()

    >>> import matplotlib.pyplot as plt
    >>> plt.barh(range(20), s[0].execute())
    >>> plt.barh(range(20), s[1].execute(), left=s[0].execute(), color='g')
    >>> plt.barh(range(20), s[2].execute(), left=(s[0]+s[1]).execute(), color='r')
    >>> plt.title("Lengths of Strings")
    """
    if isinstance(alpha, Iterable):
        alpha = tuple(alpha)
    else:
        raise TypeError("`alpha` should be an array")
    if dtype is None:
        dtype = np.random.RandomState().METHOD_NAME(alpha, size=(0,)).dtype
    size = random_state._handle_size(size)
    seed = gen_random_seeds(1, random_state.to_numpy())[0]
    op = TensorDirichlet(seed=seed, alpha=alpha, size=size, gpu=gpu, dtype=dtype)
    return op(chunk_size=chunk_size)
[ 5124 ]
def METHOD_NAME(self):
    dask_array = da.zeros((5, 6, 7, 8, 9, 9), chunks=(2, 2, 2, 2, 2, 2))
    with pytest.raises(NotImplementedError):
        lt._get_dask_chunk_slice_list(dask_array)
[ 9, 1327, 3014, 168 ]
def METHOD_NAME(self, painter, rect):
    if not self.det_mode:
        return
    if self.coords is not None and self.coords != QPointF(-1, -1):
        painter.setClipRect(rect)
        painter.setPen(self.pen)
        painter.drawLine(
            int(self.coords.x()), int(rect.top()),
            int(self.coords.x()), int(rect.bottom() + 1))
        painter.drawLine(
            int(rect.left()), int(self.coords.y()),
            int(rect.right() + 1), int(self.coords.y()))
[ 1100, 9451 ]
def METHOD_NAME(self):
    self.assertEqual(pip_utils.version_satisfies_spec(None, "blah"), True)
    self.assertEqual(pip_utils.version_satisfies_spec("blah", None), False)
    self.assertEqual(pip_utils.version_satisfies_spec(">=1.2.3", "1.2.4"), True)
    self.assertEqual(pip_utils.version_satisfies_spec(">=1.2.3", "1.2.4.dev987"), False)
    self.assertEqual(pip_utils.version_satisfies_spec(">=1.0", "1.1.dev1"), False)
    self.assertEqual(pip_utils.version_satisfies_spec(">=1.0,>=0.0.dev0", "1.1.dev1"), True)
[ 9, 281, -1, 1457 ]
def METHOD_NAME(library):
    library.add_checkbox("checkbox-field", "Checkbox value")
    assert len(library.elements) == 1
    assert library.elements[0] == {
        "type": "input-checkbox",
        "name": "checkbox-field",
        "label": "Checkbox value",
        "default": False,
    }
[ 9, 238, 992 ]
def METHOD_NAME(self):
    return self._apply(lambda t: t.METHOD_NAME() if t.is_floating_point() else t)
[ 627 ]
def METHOD_NAME(self, app, session):
    # UK GeoIP with single DE cell
    cell = CellShardFactory(mcc=262)
    session.flush()

    query = self.model_query(cells=[cell])
    res = self._call(app, body=query, ip=self.test_ip)
    self.check_model_response(res, cell, region="DE")
[ 9, 118, 3217, 8594 ]
def METHOD_NAME(self):
    names = set(job.name for job in self.jobs)
    for f in logs.iterdir():
        stem, suffix = f.name.rsplit(".", 1)
        if suffix == "sid":
            if stem not in names:
                sid = get_sid(stem)
                if sid is not None:
                    print(f"GCing {stem} / {sid}")
                    cancel(sid)
                f.unlink()
[ 6752 ]
def METHOD_NAME(self): raise NotImplementedError("This method is irrelevant for this backend")
[ 11591, 4100, 43, 1299, 245 ]
def METHOD_NAME(self, predictions, product_store=None):
    if product_store is None:
        product_store = self.fake_product_store()
    return import_insights(
        predictions, DEFAULT_SERVER_TYPE, product_store=product_store
    )
[ 22, 512 ]
def METHOD_NAME(self) -> 'outputs.ReplicationExtensionModelPropertiesResponse':
    """
    Replication extension model properties.
    """
    return pulumi.get(self, "properties")
[ 748 ]
def METHOD_NAME(iterator, env=None):
    """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
    suffix = dyld_image_suffix(env)
    if suffix is None:
        return iterator

    def _inject(iterator=iterator, suffix=suffix):
        for path in iterator:
            if path.endswith('.dylib'):
                yield path[:-len('.dylib')] + suffix + '.dylib'
            else:
                yield path + suffix
            yield path

    return _inject()
[ 7028, 660, 4064, 1070 ]
def METHOD_NAME(self): """Closes resources associated with the transport. .. warning:: Only call this method if the transport is NOT shared with other clients - this may cause errors in other clients! """ raise NotImplementedError()
[ 1462 ]
def METHOD_NAME(self, polymorphic_subtypes: List["ModelType"]) -> None:
    from .model_type import ModelType

    if isinstance(self.type, ModelType):
        self.type.METHOD_NAME(polymorphic_subtypes)
[ 19, 4434, 4288 ]
def METHOD_NAME(content: str) -> set[str]:
    snapshot = rule_runner.make_snapshot({"subdir/f.sh": content})
    return set(
        rule_runner.request(
            ParsedShellImports, [ParseShellImportsRequest(snapshot.digest, "subdir/f.sh")]
        )
    )
[ 214 ]
def METHOD_NAME(self):
    dll = CDLL(_ctypes_test.__file__)
    for i in range(1, 11):
        fields = [(f"f{f}", c_char) for f in range(1, i + 1)]

        class S(Structure):
            _fields_ = fields

        f = getattr(dll, f"TestSize{i}")
        f.restype = S
        res = f()
        for i, f in enumerate(fields):
            value = getattr(res, f[0])
            expected = bytes([ord('a') + i])
            self.assertEqual(value, expected)
[ 9, 3120 ]
def METHOD_NAME(test, checks=None):
    if checks is None:
        checks = []
    test.cmd('az offazure hyperv host list '
             '--resource-group "{rg}" '
             '--site-name "{hyperv_site}" ',
             checks=checks)
[ 367, 3645, 1806, 245 ]
def METHOD_NAME(pgconn):
    pgconn.send_prepare(b"prep", b"select $1::int + $2::int")
    (res,) = execute_wait(pgconn)
    assert res.status == pq.ExecStatus.COMMAND_OK, res.error_message

    pgconn.send_query_prepared(b"prep", [b"3", b"5"])
    (res,) = execute_wait(pgconn)
    assert res.get_value(0, 0) == b"8"

    pgconn.finish()
    with pytest.raises(psycopg.OperationalError):
        pgconn.send_prepare(b"prep", b"select $1::int + $2::int")
    with pytest.raises(psycopg.OperationalError):
        pgconn.send_query_prepared(b"prep", [b"3", b"5"])
[ 9, 353, 123 ]
def METHOD_NAME(self):
    delete = self.jobs_t.delete()
    self.engine.execute(delete)
[ 188, 75, 494 ]
def METHOD_NAME():
[ 904 ]
def METHOD_NAME(self):
    if not self.batch_data_indices:
        return BATCH_TEST_DATA_IDX
    return self.batch_data_indices
[ 19, 2277, 365, 1894 ]
def METHOD_NAME(self, update_config):
    store = {}

    def remove(key):
        if key in store:
            print("del {}".format(key))
            del store[key]

    def fake_update_config(k, s, v, a, t):
        store[k] = v

    update_config.side_effect = fake_update_config

    store = {}
    mpathcount.match_bySCSIid = False
    mpathcount.check_devconfig(
        {},
        {},
        {'mpath-3600a098038303973743f486833396d40': '[2, 4]'},
        remove, None)
    self.assertNotIn('mpath-3600a098038303973743f486833396d40', store)

    store = {}
    mpathcount.match_bySCSIid = False
    mpathcount.check_devconfig(
        {},
        {'SCSIid': '3600a098038303973743f486833396d40,3600a098038303973743f486833396d41'},
        {'mpath-3600a098038303973743f486833396d40': '[2, 4]'},
        remove, None)
    self.assertIn('mpath-3600a098038303973743f486833396d40', store)
    self.assertIn('mpath-3600a098038303973743f486833396d41', store)
    self.assertEqual('[2, 4]', store['mpath-3600a098038303973743f486833396d40'],
                     msg="Store value incorrect for key 'mpath-3600a098038303973743f486833396d40'")
    self.assertEqual('', store['mpath-3600a098038303973743f486833396d41'],
                     msg="Store value incorrect for key 'mpath-3600a098038303973743f486833396d41'")

    store = {}
    mpathcount.match_bySCSIid = False
    mpathcount.check_devconfig(
        {'SCSIid': '3600a098038303973743f486833396d40'},
        {},
        {'mpath-3600a098038303973743f486833396d40': '[2, 4]'},
        remove, None)
    self.assertIn('mpath-3600a098038303973743f486833396d40', store)
    self.assertEqual('[2, 4]', store['mpath-3600a098038303973743f486833396d40'],
                     msg="Store value incorrect for key 'mpath-3600a098038303973743f486833396d40'")

    store = {}
    mpathcount.match_bySCSIid = False
    mpathcount.check_devconfig(
        {'provider': 'present', 'ScsiId': '3600a098038303973743f486833396d40'},
        {},
        {'mpath-3600a098038303973743f486833396d40': '[2, 4]'},
        remove, None)
    self.assertIn('mpath-3600a098038303973743f486833396d40', store)
    self.assertEqual('[2, 4]', store['mpath-3600a098038303973743f486833396d40'],
                     msg="Store value incorrect for key 'mpath-3600a098038303973743f486833396d40'")

    store = {
        'mpath-3600a098038303973743f486833396d40': '[2, 4]',
        'multipathed': True
    }
    mpathcount.match_bySCSIid = False
    mpathcount.mpath_enabled = False
    mpathcount.check_devconfig(
        {},
        {'SCSIid': '3600a098038303973743f486833396d40,3600a098038303973743f486833396d41'},
        {'mpath-3600a098038303973743f486833396d40': '[2, 4]'},
        remove, None)
    self.assertNotIn('multipathed', store)
    self.assertNotIn('mpath-3600a098038303973743f486833396d40', store)
[ 9, 250, -1 ]
def METHOD_NAME(self):
    configs = {
        'bert_name': self.bert_name,
        'cache_dir': self.model_dir,
        'dropout': 0.1
    }
    model = USE(configs)
    return model
[ 176, 578 ]
def METHOD_NAME(self, *args): """Append one or more arguments to a Hostlist Args may be either a Hostlist or any valid argument to Hostlist() """ count = 0 for arg in args: if not isinstance(arg, Hostlist): arg = Hostlist(arg) count += self.pimpl.append_list(arg) return count
[ 1459 ]
def METHOD_NAME(self, pid: typing.Optional[int]):
    if not pid:
        return
    if pid in self._pids:
        self._pids.pop(pid)
        self._proto.worker_disconnected(pid)
[ 69, 2243, 7959 ]
def METHOD_NAME(self, token, obj):
    if self.is_value(token):
        if isinstance(obj.last_item, Attribute):
            obj.last_item.name = token
        else:
            obj.last_item.value = token
    return self.state_6, obj
[ 551, 822 ]
def METHOD_NAME(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' # Some people pass in a list of pathname parts to operate in an OS-agnostic # fashion; don't try to translate in that case as that's an abuse of the # API and they are already doing what they need to be OS-agnostic and so # they most likely won't be using an os.PathLike object in the sublists. if not isinstance(m[0], (list, tuple)): m = tuple(map(os.fspath, m)) s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1
[ 16629 ]
def METHOD_NAME(operation_name, **operation_options):
    try:
        result = aws.make_api_call('codecommit', operation_name, **operation_options)
    except ServiceError as ex:
        if ex.code == 'AccessDeniedException':
            io.echo(
                "EB CLI does not have the right permissions to access CodeCommit."
                " List of IAM policies needed by EB CLI, please configure and try again.\n"
                " codecommit:CreateRepository\n"
                " codecommit:CreateBranch\n"
                " codecommit:GetRepository\n"
                " codecommit:ListRepositories\n"
                " codecommit:ListBranches\n"
                "To learn more, see Docs: "
                "http://docs.aws.amazon.com/codecommit/latest/userguide/access-permissions.html"
            )
        raise ex
    return result
[ 93, 58, 128 ]
def METHOD_NAME(activation_key, username, registry):
    """Store a validation_key in the cache."""
    settings = registry.settings
    hmac_secret = settings["userid_hmac_secret"]
    cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_VALIDATION_CACHE_KEY.format(username))
    # Store an activation key for 7 days by default.
    cache_ttl = int(
        settings.get(
            "account_validation.validation_key_cache_ttl_seconds",
            DEFAULT_VALIDATION_KEY_CACHE_TTL_SECONDS,
        )
    )
    cache = registry.cache
    cache_result = cache.set(cache_key, activation_key, ttl=cache_ttl)
    return cache_result
[ 596, 437, 59 ]
def METHOD_NAME(self): "Prints a graphviz diagram of the BM automaton(for debugging)" print("digraph g{") def print_node(node): for subnode_key in node.transition_table.keys(): subnode = node.transition_table[subnode_key] print("%d -> %d [label=%s] //%s" % (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers))) if subnode_key == 1: print(subnode.content) print_node(subnode) print_node(self.root) print("}")
[ 38, 1412 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self):
    from operator import delitem
    for ct, pt in zip(ctype_types, python_types):
        i = ct(42)
        p = pointer(i)
[ 9, 5675 ]
def METHOD_NAME(endpoint, oci_cred_body):
    headers = {"Content-Type": "application/json",
               "Accept": "application/json",
               "Authorization": "Bearer " + USER_TOKEN}
    oke_version_url = CATTLE_TEST_URL + endpoint
    response = requests.post(oke_version_url, json=oci_cred_body,
                             verify=False, headers=headers)
    return response
[ 19, 10358, 1094, 17 ]
def METHOD_NAME(self):
    dashboard = Dashboard(dashboard_title=f'{self.questionnaire.title} - {self.current_datetime}')
    resp = self.client.create_dashboard(data=dashboard.post_body())
    dashboard_id = resp.get('id')
    resp = self.client.get_dashboard(dashboard_id)
    result = resp.get('result', {})
    dashboard_url = result.get('url')
    owner_id = result.get('owners', [{}])[0].get('id')
    return dashboard_id, f'{settings.SUPERSET_BASE_URL}{dashboard_url}', owner_id
[ 129, 3029 ]
def METHOD_NAME(self, request, pk, version):
    """
    Return a single course by ID
    """
    # Wrap the ES get in a try/catch so we control the exception we emit — it would
    # raise and end up in a 500 error otherwise
    try:
        query_response = ES_CLIENT.get(index=self._meta.indexer.index_name, id=pk)
    except NotFoundError:
        return Response(status=404)

    # Format a clean course object as a response
    return Response(self._meta.indexer.format_es_object_for_api(query_response))
[ 404 ]
def METHOD_NAME(self) -> str: """ A base64-encoded 256-bit primary key for signing and validating the SAS token. """ return pulumi.get(self, "primary_key")
[ 1379, 59 ]
def METHOD_NAME():
    subscription_id = os.environ['SUBSCRIPTION_ID']
    resource_group = os.environ['RESOURCE_GROUP']
    acr_url = os.environ['DOCKER_PREFIX']
    hail_oauth_scope = os.environ['HAIL_AZURE_OAUTH_SCOPE']
    assert acr_url.endswith('azurecr.io'), acr_url
    return AzureWorkerAPI(subscription_id, resource_group, acr_url, hail_oauth_scope)
[ 280, 485 ]
def METHOD_NAME(rf, module):
    class FakeProjectContextViewUrlOverwrite(FakeProjectContextView):
        module_lookup_field = "id"
        module_url_kwarg = "module_id"

    request = rf.get("/url")
    response, view = dispatch_view(
        FakeProjectContextViewUrlOverwrite, request, module_id=module.id
    )
    assert view.module == module
[ 9, 155, 2815, 298, 274, 3345 ]
def METHOD_NAME(cur: "psycopg2.cursor", user_id: str):
    print("Clearing connection_log...")
    cur.execute("""
[ 537, 550, 390 ]
def METHOD_NAME(self, splits: List[str]) -> List[str]:
    if self.is_fork:
        # we got pagure fork but SSH url
        self.username = splits[0]
        return splits[1:-1]

    # path contains username/reponame
    # or some/namespace/reponame
    # or fork/username/some/namespace/reponame
    self.is_fork = (
        splits[0] in ("fork", "forks") and len(splits) >= 3
    )  # pagure fork
    if self.is_fork:
        # fork/username/namespace/repo format
        self.username = splits[1]
        return splits[2:-1]
    if self.username:
        return [self.username] + splits[:-1]

    self.username = splits[0]
    return splits[:-1]
[ 250, 6939 ]
def METHOD_NAME(*args, **kwargs):
    return np.random.randint(-10, 10, in_shape).astype(np.int32)
[ 567, 362, 5790 ]
def METHOD_NAME(
    self, algorithm: "EncryptionAlgorithm", plaintext: bytes, iv: "Optional[bytes]" = None
) -> EncryptResult:
    self._raise_if_unsupported(KeyOperation.METHOD_NAME, algorithm)

    # If an IV isn't provided with AES-CBCPAD encryption, try to create one
    if iv is None and algorithm.value.endswith("CBCPAD"):
        try:
            iv = os.urandom(16)
        except NotImplementedError as ex:
            raise ValueError(
                "An IV could not be generated on this OS. Please provide your own cryptographically random, "
                "non-repeating IV for local cryptography."
            ) from ex

    ciphertext = self._internal_key.METHOD_NAME(plaintext, algorithm=algorithm.value, iv=iv)
    return EncryptResult(
        key_id=self._key.kid, algorithm=algorithm, ciphertext=ciphertext, iv=iv  # type: ignore[attr-defined]
    )
[ 2196 ]
def METHOD_NAME(self): """Enables to get outputs of the operator by evaluating it Returns -------- outputs : OutputsWireframe """ return super().METHOD_NAME
[ 141 ]
def METHOD_NAME(self, spec, prefix):
    pass
[ 334 ]
def METHOD_NAME():
    flag = os.environ.get('BUILD_DOCSET', '')
    return flag.lower() not in ("0", "no", "")
[ 19, 56, -1 ]
def METHOD_NAME(self) -> None:
    bot_response = (
        "Here is what I found for `funny cats` : \n"
        "Cats are so funny you will die laughing - "
        "Funny cat compilation - [Watch now](https://www.youtube.com/watch?v=5dsGWM5XGdg)"
    )
    with self.mock_config_info(self.normal_config), self.mock_http_conversation("test_single"):
        self.verify_reply("funny cats", bot_response)
[ 9, 97 ]
def METHOD_NAME(self):
    self.run(sql10.format(proj=self.config.options["proj"]),
             lambda res: {
                 "class": 1,
                 "data": [self.way_full, self.way, self.positionAsText],
                 "text": T_("`{0}` inside `{1}`", "building=" + res[3], 'landuse=' + res[4])
             })
[ 322, 323, 67 ]
def METHOD_NAME(audit_companies, dynamodb, company_table):
    """
    Test missing date_created column
    """
    company_id = str(uuid.uuid4())
    # table = dynamodb.Table("cla-test_companies")
    company_table.put_item(Item={"company_id": company_id})
    record = get_company_table(company_table, company_id)
    result = audit_companies.validate_date_created(record)
    expected_result = {
        "company_id": company_id,
        "is_valid": False,
        "error_type": ErrorType.NULL,
        "column": "date_created",
        "data": None,
    }
    assert result == expected_result

    company_table.put_item(
        Item={"company_id": company_id, "date_created": str(datetime.now())}
    )
    record = get_company_table(company_table, company_id)
    result = audit_companies.validate_date_created(record)
    expected_result = {
        "company_id": company_id,
        "is_valid": True,
        "column": "date_created",
    }
    assert result == expected_result
[ 9, 1038, 153, 152 ]
def METHOD_NAME(mock_spec, user_spec): """ :rtype: User """ return create_model_type(mock_spec, 'User', user_spec)
[ 21, 44 ]
def METHOD_NAME(
    column_count: int,
) -> Iterable[Tuple[int, Optional[RenderableType]]]:
    item_count = len(renderables)
    if self.column_first:
        width_renderables = list(zip(renderable_widths, renderables))

        column_lengths: List[int] = [item_count // column_count] * column_count
        for col_no in range(item_count % column_count):
            column_lengths[col_no] += 1

        row_count = (item_count + column_count - 1) // column_count
        cells = [[-1] * column_count for _ in range(row_count)]
        row = col = 0
        for index in range(item_count):
            cells[row][col] = index
            column_lengths[col] -= 1
            if column_lengths[col]:
                row += 1
            else:
                col += 1
                row = 0
        for index in chain.from_iterable(cells):
            if index == -1:
                break
            yield width_renderables[index]
    else:
        yield from zip(renderable_widths, renderables)

    # Pad odd elements with spaces
    if item_count % column_count:
        for _ in range(column_count - (item_count % column_count)):
            yield 0, None
[ 84, 10137 ]
def METHOD_NAME(*args, **kwargs):
    raise RuntimeError(err_msg)
[ 129, -1 ]
async def METHOD_NAME(request):
    handler = PerspectiveAIOHTTPHandler(manager=MANAGER, request=request, chunk_size=500)
    await handler.run()
[ 4389, 1519 ]
def METHOD_NAME(self):
    doc = self.create_slides()

    url = urlreverse('ietf.doc.views_material.edit_material', kwargs=dict(name=doc.name, action="title"))
    login_testing_unauthorized(self, "secretary", url)

    # post
    r = self.client.post(url, dict(title="New title"))
    self.assertEqual(r.status_code, 302)

    doc = Document.objects.get(name=doc.name)
    self.assertEqual(doc.title, "New title")
[ 9, 2004, 2893 ]