text: string (lengths 15 – 7.82k)
ids: sequence (lengths 1 – 7)
def METHOD_NAME(self):
    meta = self.db.put(BytesIO(b"content"), meta=self.new_meta())
    with self.assertRaises(TypeError):
        self.db.delete()
    with self.db.get(meta=meta) as fh:
        self.assertEqual(fh.read(), b"content")
    self.assertTrue(self.db.delete(key=meta.key))
[ 9, 34, 654, 335 ]
def METHOD_NAME(self): return len(self.children) == 0
[ 137, 3802 ]
def METHOD_NAME(cls):
    """Check that all m2m fields have a defined ``through`` model.

    Returns
    -------
    List[checks.CheckMessage]
        A list of the check messages representing problems found on the model.

    """
    fields_by_model = _get_fields_by_source_model(cls)
    errors = []
    model_full_name = f"{cls._meta.app_label}.{cls._meta.object_name}"

    m2m_fields = [
        field
        for field in cls._meta.get_fields()
        if isinstance(field, models.ManyToManyField)
    ]

    for field in m2m_fields:
        # ignore if defined in a model outside the scope
        if fields_by_model[field.name]._meta.app_label not in CHECKED_APPS:
            continue

        if field.remote_field.through._meta.auto_created:
            errors.append(
                checks.Error(
                    f"The field '{field.name}' of the model '{model_full_name}' is a "
                    "ManyToManyField but without a 'through' model defined",
                    hint=f"Add the attribute 'through' to the field '{field.name}' of the "
                    f"model '{model_full_name}' and define the appropriate model",
                    obj=cls,
                    id="marsha.models.E009",
                )
            )

    return errors
[ 250, 1061, 379 ]
def METHOD_NAME(tor_path = None):
    """
    Provides the version of tor we're testing against.

    :param str tor_path: location of tor executable to check the version of

    :returns: :class:`~stem.version.Version` of tor invoked by our integration tests
    """
    global TOR_VERSION

    if TOR_VERSION is None or tor_path:
        TOR_VERSION = stem.version.get_system_tor_version(tor_path)

    return TOR_VERSION
[ 9783, 281 ]
def METHOD_NAME(self): return self._liveness
[ 10639 ]
def METHOD_NAME(): """ Get the model resources element key :return: the model resources element key """ return 'resources'
[ 19, 578, 1614, 59 ]
def METHOD_NAME() -> tuple[int, ...] | None: return glibc_version or _get_version()
[ 19, 281 ]
def METHOD_NAME(output_name, node_name):
[ 74, 422 ]
def METHOD_NAME(self):
    with pytest.raises(ValueError, match="top_k must be > 0, but got -2"):
        MemoryRetriever(MemoryDocumentStore(), top_k=-2, scale_score=False)
[ 9, 176, 41, 532, 1635, 4407, 511 ]
def METHOD_NAME(self): """ Tests the return of pprint-formatted data """ expected = ["{'local': True}"] ret = self.run_call("test.ping --out=pprint") self.assertEqual(ret, expected)
[ 9, 146, 3742 ]
def METHOD_NAME(self):
    synchroniser = PilotCStoJSONSynchronizer()
    res = synchroniser.getCSDict()
    assert res["OK"], res["Message"]
    res = synchroniser.getCSDict(includeMasterCS=False)
    assert res["OK"], res["Message"]
[ 9, 1434 ]
def METHOD_NAME(self):
    self.assertHolidays(
        Barbados(years=2023),
        ("2023-01-01", "New Year's Day"),
        ("2023-01-02", "New Year's Day (Observed)"),
        ("2023-01-21", "Errol Barrow Day"),
        ("2023-04-07", "Good Friday"),
        ("2023-04-10", "Easter Monday"),
        ("2023-04-28", "National Heroes Day"),
        ("2023-05-01", "May Day"),
        ("2023-05-29", "Whit Monday"),
        ("2023-07-31", "50th Anniversary of CARICOM Holiday"),
        ("2023-08-01", "Emancipation Day"),
        ("2023-08-07", "Kadooment Day"),
        ("2023-11-30", "Independence Day"),
        ("2023-12-25", "Christmas Day"),
        ("2023-12-26", "Boxing Day"),
    )
[ 9, 13667 ]
def METHOD_NAME(
    summary_metric: Optional['Summary'],
) -> Union[nullcontext, 'Timer']:
    """
    helper function to either get a time context or a nullcontext if the summary metric is None

    :param summary_metric: An optional metric
    :return: either a Timer context or a nullcontext
    """
    return summary_metric.time() if summary_metric else nullcontext()
[ 19, 2718, 104, 198, 894, 1051 ]
def METHOD_NAME(key: str, *values: Any, ex: Optional[int] = None) -> None:
    if REDIS is not None:
        REDIS.METHOD_NAME(key, *values)
        if ex is not None:
            REDIS.expire(key, ex)
[ 9710 ]
def METHOD_NAME(self) -> Optional[str]: """ The identity that created the resource. """ return pulumi.get(self, "created_by")
[ 152, 604 ]
def METHOD_NAME(self) -> Generator[PlayerInput, None, None]:
    """
    Send virtual input events which release held buttons/axis.

    After this frame, held/triggered inputs will return to previous state.

    Yields:
        Inputs to release all buttons.
    """
    for value in self._inputs.values():
        for inp in value:
            yield from inp.virtual_stop_events()
[ 586, 3566 ]
def METHOD_NAME(self, name: str): """Overwrite this method if it is more complex to load an embedding by name.""" assert self.name_field is not None kwargs = dict(self.default_args) kwargs.pop(self.name_field) return self.embedding_cls(name, **kwargs) # type: ignore[call-arg]
[ 129, 1632, 280, 156 ]
def METHOD_NAME(context: Context, row: Item) -> Optional[str]:
    name = h.make_name(
        full=row.get("name"),
        first_name=row.get("forename"),
        last_name=row.get("surname"),
    )
    return context.make_id(
        "person",
        name,
        row["latvian_identity_number_masked"],
        row["birth_date"],
    )
[ 1349, 147 ]
def METHOD_NAME(pk):
    try:
        query = UIQuery.objects.get(id=pk)
    except UIQuery.DoesNotExist:
        raise generic_api_exceptions.NotFoundAPIException(
            NotFound,
            error_code=ErrorCodes.QueryNotFound.value,
            message="Query doesn't exist"
        )
    return query
[ 19, 539, 894, 2121 ]
def METHOD_NAME(self): self.asr_result = ""
[ 537, 11459 ]
def METHOD_NAME(self, alphanumeric):
    for _ in range(0, 5):
        result = virtool.utils.random_alphanumeric(excluded=["87e9wa"])
        assert result != "87e9wa"
        assert len(result) == 6
        assert all(a in alphanumeric for a in result)
[ 9, 2428 ]
def METHOD_NAME(first, second): return im.METHOD_NAME(first).METHOD_NAME(second)
[ 3386 ]
def METHOD_NAME(self):
    expected_pool_names = (
        'prefork',
        'eventlet',
        'gevent',
        'solo',
        'processes',
        'threads',
        'custom',
    )
    with patch.dict(sys.modules, {'concurrent.futures': Mock()}):
        importlib.reload(concurrency)
        assert concurrency.get_available_pool_names() == expected_pool_names
[ 9, 5604, 4259, 610, 1573, 1567, 156 ]
def METHOD_NAME(
    self,
    *,
    operations: Set[PrimitiveOperation],
    key_types: Set[KeyType],
    client_parameters: ClientParameters,
) -> Dict[Parameter, int]:
    """
    Count the amount of specified operations in the program and group by parameters.

    Args:
        operations (Set[PrimitiveOperation]):
            set of operations used to filter the statistics

        key_types (Set[KeyType]):
            set of key types used to filter the statistics

        client_parameters (ClientParameters):
            client parameters required for grouping by parameters

    Returns:
        Dict[Parameter, int]:
            number of specified operations per parameter in the program
    """

    result = {}
    for statistic in self.statistics:
        if statistic.operation not in operations:
            continue
        for key_type, key_index in statistic.keys:
            if key_type not in key_types:
                continue
            parameter = Parameter(client_parameters, key_type, key_index)
            if parameter not in result:
                result[parameter] = 0
            result[parameter] += statistic.count
    return result
[ 29, 2735, 511 ]
def METHOD_NAME(self, s):
    _s = os.path.basename(s)
    _svcname = re.sub(r'^[SK][0-9]+', '', _s)
    _seq = re.sub(r'[KS](\d+).+', r'\1', _s)
    if _s[0] == 'S':
        _state = 'on'
    elif _s[0] == 'K':
        _state = 'off'
    else:
        raise InitError("unexpected service name: %s" % s)
    return _state, _seq, _svcname
[ 19, -1 ]
def METHOD_NAME(p, app_config, monkeypatch_function_return, create_tvshow):
    # Given
    show = p['show']
    show_obj = None
    if show:
        show_obj = create_tvshow(indexerid=12, name=show)
    expected = p['expected']
    app_config('EMAIL_LIST', p['EMAIL_LIST'])
    if show:
        monkeypatch_function_return(p['mocks'])

    # When
    actual = Notifier._generate_recipients(show_obj)

    # Then
    assert actual == expected
[ 9, 567, 6043 ]
def METHOD_NAME(device, batch_size, input_shape, axis, dct_type, lifter, n_mfcc, norm, msg):
    with assert_raises(RuntimeError, regex=msg):
        eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
        pipe = MFCCPipeline(device, batch_size, iter(eii1), axis=axis, dct_type=dct_type,
                            lifter=lifter, n_mfcc=n_mfcc, norm=norm)
        pipe.build()
        pipe.run()
[ 250, 837, 3582, 909, 335 ]
def METHOD_NAME(self): return os.path.dirname(self._mainfile)
[ -1 ]
async def METHOD_NAME(self, payload): """go to the first page""" await self.show_page(0)
[ 1515, 24, 865, 1174 ]
def METHOD_NAME(self) -> None:
    super().METHOD_NAME()
    self.swap_webhook_secrets_return_none = self.swap_to_always_return(
        secrets_services, 'get_secret', None)
    self.swap_webhook_secrets_return_secret = self.swap_with_checks(
        secrets_services,
        'get_secret',
        lambda _: 'secret',
        expected_args=[
            ('ANDROID_BUILD_SECRET',),
            ('ANDROID_BUILD_SECRET',),
        ]
    )
[ 0, 1 ]
def METHOD_NAME(expr): """Flatten T(a, b, T(c, d), T2(e)) to T(a, b, c, d, T2(e)).""" cls = operator(expr) args = [] for arg in arguments(expr): if operator(arg) == cls: args.extend(arguments(arg)) else: args.append(arg) return term(cls, args)
[ 247 ]
def METHOD_NAME(gds: GraphDataScience) -> None:
    with pytest.raises(SyntaxError, match="There is no 'gds.pipeline' to call"):
        gds.pipeline(42, 1337)
[ 9, 5276, 1148 ]
def METHOD_NAME():
    # Stub to overload, pretending to be `float`. The real `float` function is
    # not used as multiple registrations can collide.
    def mock_float(x):
        pass

    return mock_float
[ 370, 248, 1819 ]
def METHOD_NAME(
    self, tokens: Tokens, inline: InlineParser[RendererT], state: State
) -> Iterator[DataT]:
    ...
[ 84, 338 ]
def METHOD_NAME(tmpdir_factory, archive_file_and_extension, compr_support_check):
    # actually run test
    archive_file, _ = archive_file_and_extension
    util = scomp.decompressor_for(archive_file)
    tmpdir = tmpdir_factory.mktemp("system_comp_test")
    with working_dir(str(tmpdir)):
        assert not os.listdir(os.getcwd())
        util(archive_file)
        files = os.listdir(os.getcwd())
        assert len(files) == 1
        with open(files[0], "r") as f:
            contents = f.read()
        assert "TEST" in contents
[ 9, 112, 10045 ]
def METHOD_NAME(
    self,
    input_np,
    pool_type,
    layout,
):
    """Generate expected output."""
    ref_np = tvm.topi.testing.adaptive_pool(
        input_np,
        (1, 1),
        pool_type,
        layout,
    )
    return ref_np
[ 391, 146, 2212 ]
def METHOD_NAME(self, data: dict[str, Any]) -> str: return "id" # todo make better
[ 129, 1007 ]
def METHOD_NAME(): """ Import des données brutes de la base bdc_status en base Puis traitement des données de façon à les ventiler dans les différentes tables """ truncate_bdc_statuts() import_bdc_statuts_v16(logger)
[ 512, 61, 275, 14536, 452 ]
def METHOD_NAME(self):
    @njit
    def inner(x):
        return x.dtype.type(x)

    inputs = [
        (np.bool_, True),
        (np.float32, 12.3),
        (np.float64, 12.3),
        (np.int64, 12),
        (np.complex64, 2j+3),
        (np.complex128, 2j+3),
        (np.timedelta64, np.timedelta64(3, 'h')),
        (np.datetime64, np.datetime64('2016-01-01')),
        ('<U3', 'ABC'),
    ]
    for (T, inp) in inputs:
        x = np.array(inp, dtype=T)
        self.assertEqual(inner(x), x[()])
[ 9, -1, 791, 24, 791 ]
def METHOD_NAME(self): self.sim.integrator = "ias15" jupyr = 11.86*2.*math.pi self.sim.dt = jupyr*1e-7 e0 = self.sim.energy() self.assertNotEqual(e0,0.) self.sim.integrate(self.sim.dt*1.001) self.sim.dt = jupyr self.sim.integrate(1e3*jupyr) e1 = self.sim.energy() self.assertLess(math.fabs((e0-e1)/e1),1e-14)
[ 9, -1, 565, 2471, 3475 ]
def METHOD_NAME(self) -> str: pass
[ 459, 44 ]
def METHOD_NAME(self):
[ 9, 5453, 1322, 44 ]
def METHOD_NAME(self):
    # [module]
    with util.mock_spec('pkg.__init__', 'pkg.module') as importer:
        with util.import_state(meta_path=[importer]):
            module = self.__import__('pkg', fromlist=['module'])
            self.assertEqual(module.__name__, 'pkg')
            self.assertTrue(hasattr(module, 'module'))
            self.assertEqual(module.module.__name__, 'pkg.module')
[ 9, 298, 280, 360 ]
def METHOD_NAME(self, node):
    node_serialized = {}

    # adding ignoring knob keys
    _ignoring_keys = ['invert_mask', 'help', 'mask', 'xpos', 'ypos', 'layer',
                      'process_mask', 'channel', 'channels', 'maskChannelMask',
                      'maskChannelInput', 'note_font', 'note_font_size',
                      'unpremult', 'postage_stamp_frame', 'maskChannel',
                      'export_cc', 'select_cccid', 'mix', 'version', 'matrix']

    # loop through all knobs and collect not ignored
    # and any with any value
    for knob in node.knobs().keys():
        # skip nodes in ignore keys
        if knob in _ignoring_keys:
            continue

        # get animation if node is animated
        if node[knob].isAnimated():
            # grab animation including handles
            knob_anim = [node[knob].getValueAt(i)
                         for i in range(self.clip_in_h, self.clip_out_h + 1)]
            node_serialized[knob] = knob_anim
        else:
            node_serialized[knob] = node[knob].value()

    return node_serialized
[ 1716, 2109 ]
def METHOD_NAME(self, session): pass
[ 69, 2333 ]
def METHOD_NAME(self) -> SubnetConfig:
    """
    :return: the active configuration.
    """
    return self._m_handler.METHOD_NAME()
[ 19, 923, 200 ]
def METHOD_NAME(self): return "dpd"
[ 2591, 156 ]
def METHOD_NAME(
    self,
    cluster: clusterlib.ClusterLib,
    payment_addrs: tp.List[clusterlib.AddressRecord],
    request: SubRequest,
) -> tp.Tuple[str, tp.List[clusterlib.UTXOData], tp.List[clusterlib.UTXOData]]:
    """Fund a Plutus script and create the necessary Tx outputs."""
    algorithm = request.param
    temp_template = f"{common.get_test_id(cluster)}_{algorithm}"

    payment_addr = payment_addrs[0]
    dst_addr = payment_addrs[1]

    amount = 2_000_000

    script_file = (
        plutus_common.SECP256K1_LOOP_ECDSA_PLUTUS_V2
        if algorithm == "ecdsa"
        else plutus_common.SECP256K1_LOOP_SCHNORR_PLUTUS_V2
    )

    script_address = cluster.g_address.gen_payment_addr(
        addr_name=temp_template, payment_script_file=script_file
    )

    execution_units = (
        plutus_common.SECP256K1_ECDSA_LOOP_COST
        if algorithm == "ecdsa"
        else plutus_common.SECP256K1_SCHNORR_LOOP_COST
    )

    redeem_cost = plutus_common.compute_cost(
        execution_cost=execution_units,
        protocol_params=cluster.g_query.get_protocol_params(),
    )

    tx_files = clusterlib.TxFiles(
        signing_key_files=[payment_addr.skey_file],
    )
    txouts = [
        clusterlib.TxOut(
            address=script_address,
            amount=amount + redeem_cost.fee + spend_raw.FEE_REDEEM_TXSIZE,
            inline_datum_file=plutus_common.DATUM_42_TYPED,
        ),
        # for collateral
        clusterlib.TxOut(address=dst_addr.address, amount=redeem_cost.collateral),
    ]

    tx_raw_output = cluster.g_transaction.send_tx(
        src_address=payment_addr.address,
        tx_name=f"{temp_template}_step1",
        txouts=txouts,
        tx_files=tx_files,
    )

    txid = cluster.g_transaction.get_txid(tx_body_file=tx_raw_output.out_file)
    script_utxos = cluster.g_query.get_utxo(txin=f"{txid}#0")
    assert script_utxos, "No script UTxO"

    collateral_utxos = cluster.g_query.get_utxo(txin=f"{txid}#1")
    assert collateral_utxos, "No collateral UTxO"

    return algorithm, script_utxos, collateral_utxos
[ 5981, 782, 18116 ]
def METHOD_NAME(self):
[ 9, 54, 551, 1457, 137, 35 ]
def METHOD_NAME(tmp_path):
    mds = MetadataStore(db_filename=tmp_path / 'test.db',
                        channels_dir=tmp_path / 'channels',
                        my_key=TEST_PERSONAL_KEY,
                        disable_sync=True)
    yield mds
    mds.shutdown()
[ 773, 1308 ]
def METHOD_NAME(
    data: StereoExpData,
    min_x=None,
    max_x=None,
    min_y=None,
    max_y=None,
    inplace=True
):
    """
    filter cells based on the coordinates of cells.

    :param data: StereoExpData object.
    :param min_x: Minimum of x for a cell pass filtering.
    :param max_x: Maximum of x for a cell pass filtering.
    :param min_y: Minimum of y for a cell pass filtering.
    :param max_y: Maximum of y for a cell pass filtering.
    :param inplace: whether inplace the original data or return a new data.

    :return: StereoExpData object
    """
    data = data if inplace else copy.deepcopy(data)
    none_param = [i for i in [min_x, min_y, max_x, max_y] if i is None]
    if len(none_param) == 4:
        raise ValueError('Provide at least one of the optional parameters `min_x`, `min_y`, `max_x`, `max_y`.')
    pos = data.position
    obs_subset = np.full(pos.shape[0], True)
    if min_x:
        obs_subset &= pos[:, 0] >= min_x
    if min_y:
        obs_subset &= pos[:, 1] >= min_y
    if max_x:
        obs_subset &= pos[:, 0] <= max_x
    if max_y:
        obs_subset &= pos[:, 1] <= max_y
    data.sub_by_index(cell_index=obs_subset)
    cal_genes_indicators(data)
    return data
[ 527, 4645 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_query_param(
            "api-version", "2023-04-01",
            required=True,
        ),
    }
    return parameters
[ 539, 386 ]
def METHOD_NAME(evm: Evm) -> None:
    """
    Pushes the address of the original transaction sender to the stack.
    The origin address can only be an EOA.

    Parameters
    ----------
    evm :
        The current EVM frame.

    """
    # STACK
    pass

    # GAS
    charge_gas(evm, GAS_BASE)

    # OPERATION
    push(evm.stack, U256.from_be_bytes(evm.env.METHOD_NAME))

    # PROGRAM COUNTER
    evm.pc += 1
[ 1788 ]
def METHOD_NAME(self, project, scope, object, type=None):
    """Resolves the given processing issues.

    If no type is given, all processing issues for scope and object are
    resolved regardless of the type.
    """
    checksum = get_processing_issue_checksum(scope, object)
    q = ProcessingIssue.objects.filter(project=project, checksum=checksum)
    if type is not None:
        q = q.filter(type=type)
    q.delete()
[ 1014, 3613, 946 ]
def METHOD_NAME(s: str, quote: bool | None = None) -> str: ...
[ 4748 ]
def METHOD_NAME(
    self, outputhierarchy, outputnode, inputhierarchy, inputnode
):
    if isinstance(outputnode, str):
        outputnode = outputhierarchy[-1].get_node(outputnode)
    if isinstance(inputnode, str):
        inputnode = inputhierarchy[-1].get_node(inputnode)

    inputattrs = set(inputnode.inputs.copyable_trait_names())
    outputattrs = set(outputnode.outputs.copyable_trait_names())
    attrs = inputattrs & outputattrs  # find common attr names

    for attr in attrs:
        self.connect_attr(
            outputhierarchy, outputnode, attr, inputhierarchy, inputnode, attr
        )

    return attrs
[ 707, 67, 1685 ]
def METHOD_NAME(path): """add date path (year/mm/dd)""" return f"{path}/{date_path}"
[ 238, 153, 157 ]
def METHOD_NAME(prompt_series, max_frames, seed, frame_idx):
    max_f = max_frames - 1
    pattern = r'`.*?`'
    regex = re.compile(pattern)
    prompt_parsed = prompt_series
    for match in regex.finditer(prompt_parsed):
        matched_string = match.group(0)
        parsed_string = matched_string.replace('t', f'{frame_idx}').replace("max_f", f"{max_f}").replace('`', '')
        parsed_value = numexpr.evaluate(parsed_string)
        prompt_parsed = prompt_parsed.replace(matched_string, str(parsed_value))

    prompt_to_print, *after_neg = prompt_parsed.strip().split("--neg")
    prompt_to_print = prompt_to_print.strip()
    after_neg = "".join(after_neg).strip()

    print(f"\033[32mSeed: \033[0m{seed}")
    print(f"\033[35mPrompt: \033[0m{prompt_to_print}")
    if after_neg and after_neg.strip():
        print(f"\033[91mNeg Prompt: \033[0m{after_neg}")
        prompt_to_print += f"--neg {after_neg}"

    # set value back into the prompt
    return prompt_to_print
[ 123, 2995 ]
def METHOD_NAME(
    ql_expr: qlast.Base,
    args: Mapping[str, qlast.Base]
) -> None:
    inliner = ParameterInliner(args)
    inliner.visit(ql_expr)
[ 1817, 386 ]
def METHOD_NAME(self, key: str) -> Type[T]:
    self._materialize_entrypoints()
    if _is_importable(key):
        # If the key contains a dot or colon, we treat it as an import path and attempt
        # to load it dynamically.
        MyClass = import_path(key)
        self._check_cls(MyClass)
        return MyClass

    if key in self._aliases:
        real_key, fn = self._aliases[key]
        fn()
        return self.METHOD_NAME(real_key)

    if key not in self._mapping:
        raise KeyError(f"Did not find a registered class for {key}")

    tp = self._ensure_not_lazy(key)
    if isinstance(tp, ModuleNotFoundError):
        raise ConfigurationError(
            f"{key} is disabled; try running: pip install '{__package_name__}[{key}]'"
        ) from tp
    elif isinstance(tp, Exception):
        raise ConfigurationError(
            f"{key} is disabled due to an error in initialization"
        ) from tp
    else:
        # If it's not an exception, then it's a registered type.
        return tp
[ 19 ]
def METHOD_NAME():
    remote = {'url': 'http://openneuro.org.s3.amazonaws.com/',
              'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
    catFile = io.StringIO(""":::99fe93bfea62c16a10488593da870df25d09be81 1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""")
    url = read_rmet_file(remote, catFile)
    assert url == 'http://openneuro.org.s3.amazonaws.com/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
[ 9, 203, 7673, 171 ]
def METHOD_NAME(self): self._status["state"] = "stop"
[ 631 ]
def METHOD_NAME(x): """ Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purpose, as it allows mapping 2 negatives values in the GeLU spectrum. For more information on this trick, please refer to https://arxiv.org/abs/2004.09602 Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 :param x: :return: """ return tf.clip_by_value(_gelu(x), -10, 10)
[ 5772, 4280 ]
def METHOD_NAME(
    testdata_dir: pathlib.Path, tmp_trestle_dir: pathlib.Path, monkeypatch: MonkeyPatch
) -> None:
    """Test execution of assemble plan fails."""

    def execute_mock(*args, **kwargs):
        raise err.TrestleError('execution failed')

    test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'
    catalogs_dir = pathlib.Path('catalogs/')

    # Copy files from test/data/split_merge/step4
    shutil.rmtree(catalogs_dir)
    shutil.rmtree(pathlib.Path('dist'))
    shutil.copytree(test_data_source, catalogs_dir)

    monkeypatch.setattr(Plan, 'execute', execute_mock)
    with pytest.raises(err.TrestleError):
        AssembleCmd().assemble_model(
            'catalog',
            argparse.Namespace(trestle_root=tmp_trestle_dir, name='mycatalog', extension='json', verbose=1)
        )
[ 9, 1893, 2046, 374 ]
def METHOD_NAME(self, size): raise NotImplementedError
[ 19, 596, 59 ]
def METHOD_NAME(p, X, Y, Z): """3D PSF model function with constant background - parameter vector [A, x0, y0, z0, background]""" A, x0, y0, z0, wxy, wz, b = p #return A*scipy.exp(-((X-x0)**2 + (Y - y0)**2)/(2*s**2)) + b #print X.shape return A*np.exp(-((X-x0)**2 + (Y - y0)**2)/(2*wxy**2) - ((Z-z0)**2)/(2*wz**2)) + b
[ 474, 12765 ]
def METHOD_NAME(self) -> InsightsSettingsCommentList:
    if self._insights_settings_comment is None:
        self._insights_settings_comment = InsightsSettingsCommentList(self)
    return self._insights_settings_comment
[ 1689, 817, 1591 ]
def METHOD_NAME(self): self._clearButton.setVisible(self._hasClearableContent())
[ 86, 537, 1974 ]
def METHOD_NAME(*args, **kwargs):
    func._qt_thread = QFireThread(func, args, kwargs)
    func._qt_thread.start()
    return func._qt_thread
[ -1 ]
def METHOD_NAME(self): self.text = self.Text()
[ 0, 1 ]
def METHOD_NAME(self):
    self.pre_operations()
    self.LinkedStorageAccountsGet(ctx=self.ctx)()
    self.post_operations()
[ 750, 710 ]
def METHOD_NAME(self):
    if self.path_includes_course:
        root = os.path.join(self.coursedir.course_id, self.coursedir.assignment_id)
        other_path = os.path.join(self.coursedir.course_id, "*")
    else:
        root = self.coursedir.assignment_id
        other_path = "*"
    self.src_path = os.path.abspath(os.path.join(self.assignment_dir, root))
    self.coursedir.assignment_id = os.path.split(self.src_path)[-1]
    if not os.path.isdir(self.src_path):
        self._assignment_not_found(self.src_path, os.path.abspath(other_path))
[ 176, 1339 ]
def METHOD_NAME(self):
[ 156 ]
def METHOD_NAME(self) -> Optional[str]: """ The reason for approval/rejection of the connection. """ return pulumi.get(self, "description")
[ 1067 ]
def METHOD_NAME(self, edit, ev): """Return True or False, handling the event if applicable. This method is called by eventFilter() for KeyPress events. edit: Q(Plain)TextEdit instance ev: the KeyPress QEvent """ result = None if self.handle_home: result = self.handleHome(edit, ev) if result is None and self.handle_horizontal: result = self.handleHorizontal(edit, ev) if result is None and self.handle_vertical: result = self.handleVertical(edit, ev) return bool(result) # None becomes False
[ 276 ]
def METHOD_NAME():
    preprocessing_parameters = {"fill_value": "2013-02-26"}
    invalid_date_str = "2012abc-02"
    datetime_format = None
    assert date_feature.DateInputFeature.date_to_list(
        invalid_date_str, datetime_format, preprocessing_parameters
    ) == [
        2013,
        2,
        26,
        1,
        57,
        0,
        0,
        0,
        0,
    ]
[ 9, 153, 24, 245, 1916, 1917, 99 ]
def METHOD_NAME(self, run_server1, run_server2, run_server3):
    sdf = ShareDataFrame("data_id1", self.qmpc_request)
    expected = ShareDataFrame("job_uuid", self.qmpc_request,
                              True, ShareDataFrameStatus.EXECUTE)
    assert sdf.variance([1, 2, 3]) == expected
[ 9, 2873 ]
def METHOD_NAME(model_name, rec_val, batch_size, calibration_samples):
    val_data, _ = get_val_data(model_name, rec_val=rec_val, batch_size=batch_size)
    val_data.reset()
    for i, batch in enumerate(val_data):
        if i * batch_size >= calibration_samples:
            break
        data = batch.data[0].asnumpy()
        yield {"data": data}
[ 4478, 126 ]
def METHOD_NAME(vec, i: int, value: int): INTEGER(vec)[i] = value
[ 0, 4143, 11214, 1008 ]
def METHOD_NAME(self, source):
    offset = 0
    tokens = ['# -*- coding: %s -*-' % self.encoding]
    start, end = self.delimiters
    escaped = (re.escape(start), re.escape(end))
    regex = re.compile('%s(.*?)%s' % escaped, re.DOTALL)
    for i, part in enumerate(regex.split(source)):
        part = part.replace('\\'.join(start), start)
        part = part.replace('\\'.join(end), end)
        if i % 2 == 0:
            if not part:
                continue
            part = part.replace('\\', '\\\\').replace('"', '\\"')
            part = '\t' * offset + 'write("""%s""")' % part
        else:
            part = part.rstrip()
            if not part:
                continue
            part_stripped = part.lstrip()
            if part_stripped.startswith(':'):
                if not offset:
                    raise SyntaxError('no block statement to terminate: ${%s}$' % part)
                offset -= 1
                part = part_stripped[1:]
                if not part.endswith(':'):
                    continue
            elif self.autowrite.match(part_stripped):
                part = 'write(%s)' % part_stripped
            lines = part.splitlines()
            margin = min(len(l) - len(l.lstrip()) for l in lines if l.strip())
            part = '\n'.join('\t' * offset + l[margin:] for l in lines)
            if part.endswith(':'):
                offset += 1
        tokens.append(part)
    if offset:
        raise SyntaxError('%i block statement(s) not terminated' % offset)
    return compile('\n'.join(tokens), self.file or '<string>', 'exec')
[ 296 ]
def METHOD_NAME(
    x: np.ndarray,
    y: np.ndarray,

    # units-per-pixel-x(dimension)
    uppx: float,

    # XXX: troll zone / easter egg..
    # want to mess with ur pal, pass in the actual
    # pixel width here instead of uppx-proper (i.e. pass
    # in our ``pg.GraphicsObject`` derivative's ``.px_width()``
    # gto mega-trip-out ur bud). Hint, it used to be implemented
    # (wrongly) using "pixel width", so check the git history ;)

    xrange: Optional[float] = None,
[ 2491, 6694 ]
def METHOD_NAME(self): return self.action_setup
[ 137, 102 ]
def METHOD_NAME(self): pass
[ 72, 710 ]
def METHOD_NAME(): return json.load(open(SCHEMA_DIR / "dataset_schema.json"))
[ 4942, 126, 135 ]
def METHOD_NAME(self) -> None:
    with self.mock_config_info(
        {"api_key": "12345678", "bot_info": "team"}
    ), self.mock_http_conversation("api_noop"):
        self.verify_reply(
            "",
            "Sorry, I don't understand what your trying to say. Use `@mention help` to see my help. "
            "I can't understand the command you sent me :confused: ",
        )
[ 9, 1227, 7225, 24, 35, 277 ]
def METHOD_NAME(self, attributes: Dict[str, Any]) -> None: if "admin" in attributes: # pragma no branch self._admin = self._makeBoolAttribute(attributes["admin"]) if "maintain" in attributes: # pragma no branch self._maintain = self._makeBoolAttribute(attributes["maintain"]) if "pull" in attributes: # pragma no branch self._pull = self._makeBoolAttribute(attributes["pull"]) if "push" in attributes: # pragma no branch self._push = self._makeBoolAttribute(attributes["push"]) if "triage" in attributes: # pragma no branch self._triage = self._makeBoolAttribute(attributes["triage"])
[ 1080, 177 ]
def METHOD_NAME(v):
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Unsupported value encountered.')
[ 12244 ]
def METHOD_NAME(self):
    # collect all machine types except 'none', 'isapc', 'microvm'
    with QEMUMachine(self.qemu_bin) as vm:
        vm.launch()
        machines = [m['name'] for m in vm.command('query-machines')]
        vm.shutdown()
    machines.remove('none')
    machines.remove('isapc')
    machines.remove('microvm')

    for dev_type in DEV_TYPES:
        # create the list of machine types and their parameters.
        mtypes = list()
        for m in machines:
            if self.seg_max_adjust_enabled(m):
                enabled = 'true'
            else:
                enabled = 'false'
            mtypes.append({'name': m,
                           DEV_TYPES[dev_type]['seg_max_adjust']: enabled})

        # test each machine type for a device type
        for mt in mtypes:
            self.check_mt(mt, dev_type)
[ 9, 1600, 119 ]
def METHOD_NAME(test_case):
    arg_dict = OrderedDict()
    arg_dict["test_fun"] = [
        _test_logical_and,
        _test_tensor_logical_and,
    ]
    arg_dict["shape"] = [(2, 3), (2, 4, 5)]
    arg_dict["dtype"] = [flow.float32, flow.int32]
    arg_dict["device"] = ["cpu", "cuda"]
    for arg in GenArgList(arg_dict):
        arg[0](test_case, *arg[1:])
[ 9, 1692, 61 ]
def METHOD_NAME(
    input_list: Iterable, *, slice_size: NonNegativeInt
) -> Generator[tuple[Any, ...], None, None]:
    """
    Given an iterable and the slice_size yields tuples containing
    slice_size elements in them.

    Inputs:
        input_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        slice_size = 5
    Outputs:
        [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13)]
    """
    if not input_list:
        yield ()

    yield from toolz.partition_all(slice_size, input_list)
[ 2312, 370 ]
def METHOD_NAME(quiz_id: str, bot_handler: BotHandler) -> str: return bot_handler.storage.get(quiz_id)
[ 19, 13463, 280, 147 ]
def METHOD_NAME(self): cmake_layout(self, src_folder="src")
[ 571 ]
def METHOD_NAME(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:
    r"""The post process. Will save audio to file, if the output_path is given.

    Args:
        inputs: a dict containing the following keys:
            - output_pcm: generated audio array
        kwargs: accept 'output_path' which is the path to write generated audio

    Returns:
        output_pcm: generated audio array
    """
    if 'output_path' in kwargs.keys():
        wav.write(
            kwargs['output_path'],
            self.preprocessor.SAMPLE_RATE,
            np.frombuffer(inputs[OutputKeys.OUTPUT_PCM], dtype=np.int16))
    return inputs
[ 1710 ]
def METHOD_NAME(word, start, level, window_ind, window_size, series_length, level_bits):
    num_quadrants = pow(2, level)
    quadrant = start + int(
        (window_ind + int(window_size / 2)) / int(series_length / num_quadrants)
    )
    return (word << level_bits) | quadrant, num_quadrants
[ 238, 33 ]
def METHOD_NAME(self):
    # Other tests-- not very systematic
    self.assertEqual(pow(3,3) % 8, pow(3,3,8))
    self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
    self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
    self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
    self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
    self.assertEqual(pow(5,2) % -8, pow(5,2,-8))

    self.assertEqual(pow(3,3) % 8, pow(3,3,8))
    self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
    self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
    self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
    self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
    self.assertEqual(pow(5,2) % -8, pow(5,2,-8))

    for i in range(-10, 11):
        for j in range(0, 6):
            for k in range(-7, 11):
                if j >= 0 and k != 0:
                    self.assertEqual(pow(i,j) % k, pow(i,j,k))
                if j >= 0 and k != 0:
                    self.assertEqual(pow(int(i),j) % k, pow(int(i),j,k))
[ 9, 2395 ]
def METHOD_NAME(name, **kw):
    callback_ = kw.pop("callback", None)
    if callback_:

        class CallableAction(argparse.Action):
            def __call__(self, parser, namespace, values, option_string=None):
                callback_(option_string, values, parser)

        kw["action"] = CallableAction
    group.addoption(name, **kw)
[ 93, 1335 ]
async def METHOD_NAME(
    self,
    url: str,
    headers: Optional[Dict[str, str]] = None,
) -> Response:
    return await self.request(url, "get", headers=headers)
[ 19 ]
def METHOD_NAME(
    keybinding: Union[str, Iterable[str]], command: str, mode: modes.Mode = modes.GLOBAL
) -> Callable[[customtypes.FuncT], customtypes.FuncT]:
    """Decorator to add a new keybinding.

    Args:
        keybinding: Key sequence(s) to bind as string (Iterable of strings).
        command: Command to bind to.
        mode: Mode in which the keybinding is valid.
    """

    def decorator(function: customtypes.FuncT) -> customtypes.FuncT:
        if isinstance(keybinding, str):
            bind(keybinding, command, mode)
        else:
            for binding in keybinding:
                bind(binding, command, mode, override=False)
        return function

    return decorator
[ 372 ]
async def METHOD_NAME(self, unit: str) -> SystemdUnit:
    """Return systemd unit for unit name."""
    obj_path = await self.dbus.Manager.call_get_unit(unit)
    unit = SystemdUnit(obj_path)
    await unit.connect(self.dbus.bus)
    return unit
[ 19, 805 ]
def METHOD_NAME(self, retrieve_list, exception, match):
    """Test the :meth:`aiida.orm.nodes.process.calculation.calcjob.CalcJobNode.set_retrieve_list`."""
    node = CalcJobNode()

    if exception:
        with pytest.raises(exception, match=match):
            node.set_retrieve_list(retrieve_list)
    else:
        node.set_retrieve_list(retrieve_list)
        assert node.get_retrieve_list() == retrieve_list
[ 9, 0, 404, 245 ]