text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(self, trakt_pin=None, refresh=False, count=0):
    if count > 3:
        settings.TRAKT_ACCESS_TOKEN = ""
        return False
    elif count > 0:
        time.sleep(2)

    data = {"client_id": settings.TRAKT_API_KEY,
            "client_secret": settings.TRAKT_API_SECRET,
            "redirect_uri": "urn:ietf:wg:oauth:2.0:oob"}

    if refresh:
        data["grant_type"] = "refresh_token"
        data["refresh_token"] = settings.TRAKT_REFRESH_TOKEN
    else:
        data["grant_type"] = "authorization_code"
        if trakt_pin:
            data["code"] = trakt_pin

    headers = CaseInsensitiveDict({"Content-Type": "application/json"})

    resp = self.traktRequest("oauth/token", data=data, headers=headers,
                             url=self.auth_url, method="POST", count=count)

    if "access_token" in resp:
        settings.TRAKT_ACCESS_TOKEN = resp["access_token"]
        if "refresh_token" in resp:
            settings.TRAKT_REFRESH_TOKEN = resp["refresh_token"]
        return True
    return False
[ 12844, 466 ]
def METHOD_NAME(self, name: str, default: Any | None = None) -> Any: ...
[ 19, 1967, 100 ]
def METHOD_NAME(self, index):
    r"""
    The antipode on the basis element indexed by ``index``.

    INPUT:

    - ``index`` -- an element of the index set

    For a graded connected Hopf algebra, we can define an antipode
    recursively by

    .. MATH::

        S(x) := -\sum_{x^L \neq x} S(x^L) \times x^R

    when `|x| > 0`, and by `S(x) = x` when `|x| = 0`.

    TESTS::

        sage: # needs sage.modules
        sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
        sage: H.monomial(0).antipode()  # indirect doctest
        P0
        sage: H.monomial(1).antipode()  # indirect doctest
        -P1
        sage: H.monomial(2).antipode()  # indirect doctest
        P2
        sage: H.monomial(3).antipode()  # indirect doctest
        -P3
    """
    if self.monomial(index) == self.one():
        return self.one()

    S = self.METHOD_NAME
    x__S_Id = tensor([self, self]).module_morphism(
        lambda ab: S(ab[0]) * self.monomial(ab[1]),
        codomain=self)
    return -x__S_Id(
        self.monomial(index).coproduct()
        - tensor([self.monomial(index), self.one()])
    )
[ 6325, 69, 1189 ]
def METHOD_NAME(self, word):
    """All edits that are one edit away from `word`."""
    letters = 'abcdefghijklmnopqrstuvwxyz'
    if self.config.general.diacritics:
        letters += 'àáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)
[ -1 ]
def METHOD_NAME(
    native_image_config, native_image, expected_image_reference
):
    """
    Given
        - native image configuration file, native image name:
            1. A native image that exists in the configuration file
            2. A native image that doesn't exist in the configuration file

    When
        - running the get_native_image_reference() function

    Then
        - make sure the right docker reference is extracted
            1. The matched reference from the configuration file
            2. None
    """
    assert (
        native_image_config.get_native_image_reference(native_image)
        == expected_image_reference
    )
[ 9, 19, 1577, 660, 272 ]
def METHOD_NAME(Dvalue):
    '''
    Returns the zero field splitting matrix D for spin 1 system.
    (not most general case)
    '''
    D = matrix([[Dvalue, 0, 0],
                [0, 0, 0],
                [0, 0, Dvalue]])
    return D
[ -1 ]
def METHOD_NAME(time_series_sensor_data: np.ndarray,
                detection_geometry: DetectionGeometryBase,
                speed_of_sound_in_m_per_s: int = 1540,
                time_spacing_in_s: float = 2.5e-8,
                sensor_spacing_in_mm: float = 0.1,
                recon_mode: str = Tags.RECONSTRUCTION_MODE_PRESSURE,
                apodization: str = Tags.RECONSTRUCTION_APODIZATION_BOX) -> np.ndarray:
    """
    Convenience function for reconstructing time series data using Delay and Sum algorithm implemented in PyTorch

    :param time_series_sensor_data: (2D numpy array) sensor data of shape (sensor elements, time steps)
    :param detection_geometry: detection geometry of the device that recorded the time series data
    :param speed_of_sound_in_m_per_s: (int) speed of sound in medium in meters per second (default: 1540 m/s)
    :param time_spacing_in_s: (float) time between sampling points in seconds (default: 2.5e-8 s which is equal to 40 MHz)
    :param sensor_spacing_in_mm: (float) space between sensor elements in millimeters (default: 0.1 mm)
    :param recon_mode: SIMPA Tag defining the reconstruction mode - pressure default OR differential
    :param apodization: SIMPA Tag defining the apodization function (default box)
    :return: (2D numpy array) reconstructed image as 2D numpy array
    """
    # create settings
    settings = create_reconstruction_settings(speed_of_sound_in_m_per_s, time_spacing_in_s,
                                              sensor_spacing_in_mm, recon_mode, apodization)

    adapter = SignedDelayMultiplyAndSumAdapter(settings)
    return adapter.reconstruction_algorithm(time_series_sensor_data, detection_geometry)
[ 5122, 3000, 1344, 6685, 61, 912, 3299 ]
def METHOD_NAME():
    import logging
    from sys import stdout

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler(stdout)
    logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
    console_handler.setFormatter(logFormatter)
    logger.addHandler(console_handler)
    return logger
[ 102, 2034 ]
def METHOD_NAME():
    scenario_platforms = ["multi_platform_rhel", "multi_platform_debian"]
    benchmark_cpes = {"cpe:/o:redhat:enterprise_linux:8"}
    assert common.matches_platform(scenario_platforms, benchmark_cpes) is True
[ 9, 245, 457, 2773, 590, 865 ]
def METHOD_NAME(
        client,
        item_lib_martigny,  # on shelf
        item_lib_fully,  # on loan
        librarian_martigny
):
    """Test record retrieval."""
    login_user(client, librarian_martigny)

    data = call_api_permissions(client, 'items', item_lib_fully.pid)
    assert not data['delete']['can']
    assert not data['update']['can']

    data = call_api_permissions(client, 'items', item_lib_martigny.pid)
    assert data['delete']['can']
    assert data['update']['can']

    response = client.get(
        url_for(
            'api_blueprint.permissions',
            route_name='items',
            record_pid='dummy_item_pid'
        )
    )
    assert response.status_code == 404
[ 9, 1768, 804 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_url_param(
            "networkSecurityGroupName", self.ctx.args.name,
            required=True,
        ),
        **self.serialize_url_param(
            "resourceGroupName", self.ctx.args.resource_group,
            required=True,
        ),
        **self.serialize_url_param(
            "subscriptionId", self.ctx.subscription_id,
            required=True,
        ),
    }
    return parameters
[ 274, 386 ]
def METHOD_NAME():
    support.run_unittest(AllTest)
[ 9, 57 ]
def METHOD_NAME(
    self,
    mock_ensure_dir,
    mock_get_available_apis,
):
    mock_get_available_apis.return_value = [RECOMMENDED_TARGET_API]
    context = Context()
    context.setup_dirs(os.getcwd())
    context.prepare_build_environment(
        user_sdk_dir='sdk',
        user_ndk_dir='ndk',
        user_android_api=None,
        user_ndk_api=None,
    )
    # The context was supplied with relative SDK and NDK dirs. Check
    # that it resolved them to absolute paths.
    real_sdk_dir = os.path.join(os.getcwd(), 'sdk')
    real_ndk_dir = os.path.join(os.getcwd(), 'ndk')
    assert context.sdk_dir == real_sdk_dir
    assert context.ndk_dir == real_ndk_dir

    context_paths = context.env['PATH'].split(':')
    assert context_paths[0:3] == [
        f'{real_ndk_dir}/toolchains/llvm/prebuilt/{context.ndk.host_tag}/bin',
        real_ndk_dir,
        f'{real_sdk_dir}/tools'
    ]
[ 9, 966, -1, 3336 ]
def METHOD_NAME(self):
    return self.value
[ 19, 99 ]
def METHOD_NAME():
    function_path = str(pathlib.Path(__file__).parent / "assets" / "handler.py")
    project = mlrun.new_project("test-handler", save=False)
    project.set_function(
        function_path, "myfunc", handler="myhandler", image="mlrun/mlrun"
    )
    run = project.run_function("myfunc", handler="handler2", local=True)
    # verify that the 2nd handler was running (not the default)
    assert run.output("handler") == "2"
[ 9, 235, 1519 ]
def METHOD_NAME(self): """Returns a copy of the indices of atoms.""" return self._indices.copy()
[ 19, 1894 ]
def METHOD_NAME(parser):
    _addopts(parser)
    parser.set_defaults(**default_config)
[ 238, 2766, 61, 1618 ]
def METHOD_NAME(self):
    # Create existing token: RemoteToken, RemoteAccount, UserIdentity.
    with db.session.begin_nested():
        # Create RemoteToken and RemoteAccount.
        RemoteToken.create(
            user_id=self.user.id,
            client_id=current_app.config['ORCID_APP_CREDENTIALS']['consumer_key'],
            token=self.token,
            secret=None,
            extra_data={
                'orcid': self.orcid,
                'full_name': self.name,
                'allow_push': True,
            }
        )
        user_identity = UserIdentity(
            id=self.orcid,
            method='orcid',
            id_user=self.user.id
        )
        db.session.add(user_identity)

    with mock.patch('inspirehep.modules.orcid.tasks.oauth_link_external_id') as mock_oauth_link_external_id:
        # Mocking `oauth_link_external_id` is necessary because when running
        # with `isolated_app` it raises
        # "FlushError: New instance ... with identity key (...) conflicts with persistent instance ..."
        # rather than the standard and expected `IntegrityError` (which
        # is raised instead when run without `isolated_app`).
        mock_oauth_link_external_id.side_effect = AlreadyLinkedError(self.user, self.orcid)
        _link_user_and_token(self.user, self.name, self.orcid, self.token)

    self._assert_remote_account_and_remote_token_and_user_identity()
[ 9, 9023, 466 ]
def METHOD_NAME(
    project_name: str, project_type: Optional[str] = 'legacy'
) -> None:
    osutils = OSUtils()
    all_projects = list_available_projects(TEMPLATES_DIR, osutils)
    project = [p for p in all_projects if p.key == project_type][0]
    template_kwargs = {
        'app_name': project_name,
        'chalice_version': chalice_version,
    }
    project_creator = ProjectCreator(osutils)
    project_creator.create_new_project(
        os.path.join(TEMPLATES_DIR, project.dirname),
        project_name,
        template_kwargs=template_kwargs,
    )
[ 129, 80, 155, 8256 ]
def METHOD_NAME():
    parser = argparse.ArgumentParser(description='paddle-rec run')
    parser.add_argument("-m", "--config_yaml", type=str)
    args = parser.METHOD_NAME()
    args.config_yaml = get_abs_model(args.config_yaml)
    return args
[ 214, 335 ]
def METHOD_NAME(self, path):
    self._line('include %s' % path)
[ 1872 ]
def METHOD_NAME(tmp_dir, dvc, mocker):
    tmp_dir.dvc_gen({"dir": {"foo": "foo", "bar": "bar", "subdir": {"data": "data"}}})

    fs = DataFileSystem(index=dvc.index.data["repo"])
    hash_file_spy = mocker.spy(dvc_data.hashfile.hash, "hash_file")

    assert fs.info("dir")["md5"] == "8761c4e9acad696bee718615e23e22db.dir"
    assert not hash_file_spy.called
[ 9, 19, 1161, 1190 ]
def METHOD_NAME(tmpdir, centered_pair_predictions_slp_path):
    cli = (
        "--tracking.tracker simple "
        "--frames 200-300 "
        f"-o {tmpdir}/simpletracks.slp "
        f"{centered_pair_predictions_slp_path}"
    )
    inference_cli(cli.split(" "))

    labels = sleap.load_file(f"{tmpdir}/simpletracks.slp")
    assert len(labels.tracks) == 27
[ 9, 53, 4102 ]
def METHOD_NAME(self, implementation):
    self.unitroller._setPendingImplementation(implementation)
    assert self.unitroller.pendingComptrollerImplementation() == implementation

    comptroller = interface.IRariComptroller(
        implementation, owner=self.safe.account
    )
    comptroller._become(self.unitroller.address)
    assert self.unitroller.comptrollerImplementation() == implementation
[ 738, -1 ]
def METHOD_NAME(self, short: bool=False) -> list[str]:
    if not self.is_built:
        return [
            f'<{self.__class__.__name__}>; table_name={self.table_name!r}\n',
            f' ntimes: {self.ntimes:d}\n',
            f' ntotal: {self.ntotal:d}\n',
        ]
    #ngrids = len(self.gridTypes)
    msg = []

    ntimes = len(self._times)  #len(self.node_gridtype)
    #nnodes, two = self.node_gridtype.shape
    nelements = 0
    ntimes = self.data.shape[0]
    if self.nonlinear_factor not in (None, np.nan):  # transient
        msg.append(' type=%s ntimes=%s nelements=%s\n'
                   % (self.__class__.__name__, ntimes, nelements))
    else:
        msg.append(' type=%s nelements=%s\n' % (self.__class__.__name__, nelements))
    msg.append(' data: [f1, f2, f3, m1, m2, m3] shape=%s dtype=%s\n'
               % ([int(i) for i in self.data.shape], self.data.dtype))
    msg.append(' sources, eids\n')
    msg += self.get_data_code()
    return msg
[ 19, 577 ]
def METHOD_NAME(p):
    '''toktype : TOKEN
               | LEFT
               | RIGHT
               | NONASSOC'''
    p[0] = p[1]
[ 2054, -1 ]
def METHOD_NAME(self):
    d = self.new_dict(key='value')

    # pop() must change the version if the key exists
    self.check_version_changed(d, d.pop, 'key')

    # pop() must not change the version if the key does not exist
    self.check_version_dont_change(d, self.assertRaises, KeyError,
                                   d.pop, 'key')
[ 9, 760 ]
def METHOD_NAME(self) -> str: """:class:`str`: Returns the URL of the emoji, if it is custom. If this isn't a custom emoji then an empty string is returned """ if self.is_unicode_emoji(): return '' fmt = 'gif' if self.animated else 'png' return f'{Asset.BASE}/emojis/{self.id}.{fmt}'
[ 274 ]
def METHOD_NAME(self) -> str:
[ 86, 250, 551, 171 ]
def METHOD_NAME(state_obj):
    """
    Test if slots work with "parallel: true".
    """
    high_data = {
        "always-changes-and-succeeds": {
            "test": [
                {"changes": True},
                {"comment": "__slot__:salt:test.echo(fun_return)"},
                {"parallel": True},
                "configurable_test_state",
                {"order": 10000},
            ],
            "__env__": "base",
            "__sls__": "parallel_slots",
        }
    }
    state_obj.jid = "123"
    res = state_obj.call_high(high_data)
    state_obj.jid = None
    [(_, data)] = res.items()
    assert data["comment"] == "fun_return"
[ 9, 275, 2827, 1498 ]
def METHOD_NAME(self):
    return self.snapshot_folder / f"{self.test_name}.txt"
[ 171, 156, 310 ]
def METHOD_NAME(rna_path):
    if not rna_path.startswith("pose.bones["):
        return None
    # rna_path_bone = rna_path[:rna_path.index("]") + 1]
    # return obj.path_resolve(rna_path_bone)
    bone_name = rna_path.split("[")[1].split("]")[0]
    return obj.pose.bones[bone_name[1:-1]]
[ 3599, 157, 947, -1 ]
def METHOD_NAME(self, subcmd, *args, **kwargs):
    'Run git command with *args*, and post a status message.'
    import sh
    args = list(subcmd.split()) + list(args)
    vd.warning('git ' + ' '.join(str(x) for x in args))
    return sh.git(*args, _cwd=self.gitRootPath, **kwargs)
[ -1 ]
def METHOD_NAME(self, market, trade):
    if trade.buyer.name != self.owner.name or \
            market.time_slot != self.lower_market.time_slot:
        return

    positive_balancing_energy = \
        trade.traded_energy * self.balancing_spot_trade_ratio + \
        self.lower_market.unmatched_energy_upward
    negative_balancing_energy = \
        trade.traded_energy * self.balancing_spot_trade_ratio + \
        self.lower_market.unmatched_energy_downward

    self._trigger_balancing_trades(positive_balancing_energy,
                                   negative_balancing_energy)
[ 1593, 61, 2007, 11171, 5121 ]
def METHOD_NAME(hacs):
    """Fixture for HACS theme repository object"""
    repository_obj = HacsThemeRepository(hacs, "test/test")
    yield dummy_repository_base(hacs, repository_obj)
[ 1230, 344 ]
def METHOD_NAME(event_loop=None):
    """Notarization poller entry point: get everything set up, then enter the main loop.

    Args:
        event_loop (asyncio.BaseEventLoop, optional): the event loop to use.
            If None, use ``asyncio.get_event_loop()``. Defaults to None.

    """
    event_loop = event_loop or asyncio.get_event_loop()
    config = get_config_from_cmdln(sys.argv[1:])
    update_logging_config(config)

    log.info("Notarization poller starting up at {} UTC".format(arrow.utcnow().format()))
    log.info("Worker FQDN: {}".format(socket.getfqdn()))
    rm(config["work_dir"])
    makedirs(config["work_dir"])
    running_tasks = RunTasks(config)

    async def _handle_sigterm():
        log.info("SIGTERM received; shutting down")
        await running_tasks.cancel()

    def _handle_sigusr1():
        """Stop accepting new tasks."""
        log.info("SIGUSR1 received; no more tasks will be taken")
        running_tasks.is_stopped = True

    event_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.ensure_future(_handle_sigterm()))
    event_loop.add_signal_handler(signal.SIGUSR1, _handle_sigusr1)

    try:
        event_loop.run_until_complete(running_tasks.invoke())
    except Exception:
        log.critical("Fatal exception", exc_info=1)
        raise
    finally:
        log.info("Notarization poller stopped at {} UTC".format(arrow.utcnow().format()))
        log.info("Worker FQDN: {}".format(socket.getfqdn()))
[ 57 ]
def METHOD_NAME(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        metadata.drop_all()
        _db.drop_all()

        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """

        sess = _db.session()
        for seq in [name for (name,) in sess.execute(text(sequence_sql))]:
            try:
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
            except Exception as err:  # pylint: disable=broad-except # noqa: B902
                print(f'Error: {err}')
        sess.commit()

        # ##############################################
        # There are 2 approaches, an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in legal_api!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        legal_api_dir = os.path.abspath('..').replace('queue_services', 'legal-api')
        legal_api_dir = os.path.join(legal_api_dir, 'migrations')
        Migrate(app, _db, directory=legal_api_dir)
        upgrade()

        return _db
[ 1267 ]
def METHOD_NAME(self) -> str:
    headers, _ = self._requester.requestBlobAndCheck("GET", f"{self.url}/logs")
    return headers["location"]
[ 1099, 274 ]
def METHOD_NAME(self, algorithm, metric):
    # given the algorithm string + metric string, choose the optimal
    # algorithm to compute the result.
    if algorithm == 'auto':
        # use KD Tree if possible
        if metric in KDTree.valid_metrics:
            return 'kd_tree'
        elif metric in BallTree.valid_metrics:
            return 'ball_tree'
        else:
            raise ValueError("invalid metric: '{0}'".format(metric))
    elif algorithm in TREE_DICT:
        if metric not in TREE_DICT[algorithm].valid_metrics:
            raise ValueError("invalid metric for {0}: "
                             "'{1}'".format(TREE_DICT[algorithm], metric))
        return algorithm
    else:
        raise ValueError("invalid algorithm: '{0}'".format(algorithm))
[ 7367, 4089 ]
def METHOD_NAME(module, name, that):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kw):
            with MonkeyPatchScope([(module, name, that)]):
                return f(*args, **kw)
        return wrapper
    return decorator
[ 3093, 1575 ]
async def METHOD_NAME(self) -> None:
    KEY = os.environ["HEALTHINSIGHTS_KEY"]
    ENDPOINT = os.environ["HEALTHINSIGHTS_ENDPOINT"]

    # Create a Trial Matcher client
    # <client>
    trial_matcher_client = ClinicalMatchingClient(endpoint=ENDPOINT,
                                                  credential=AzureKeyCredential(KEY))
    # </client>

    # Construct Patient
    # <PatientConstructor>
    patient1 = self.get_patient_from_fhir_patient()
    # </PatientConstructor>

    # Create registry filter
    registry_filters = models.ClinicalTrialRegistryFilter()
    # Limit the trial to a specific patient condition ("Non-small cell lung cancer")
    registry_filters.conditions = ["Non-small cell lung cancer"]
    # Limit the clinical trial to a certain phase, phase 1
    registry_filters.phases = [models.ClinicalTrialPhase.PHASE1]
    # Specify the clinical trial registry source as ClinicalTrials.Gov
    registry_filters.sources = [models.ClinicalTrialSource.CLINICALTRIALS_GOV]
    # Limit the clinical trial to a certain location, in this case Gilbert, Arizona, USA
    registry_filters.facility_locations = [
        models.GeographicLocation(country_or_region="United States", city="Gilbert", state="Arizona")]
    # Limit the trial to a specific study type, interventional
    registry_filters.study_types = [models.ClinicalTrialStudyType.INTERVENTIONAL]

    # Construct ClinicalTrial instance and attach the registry filter to it.
    clinical_trials = models.ClinicalTrials(registry_filters=[registry_filters])

    # Create TrialMatcherRequest
    configuration = models.TrialMatcherModelConfiguration(clinical_trials=clinical_trials)
    trial_matcher_data = models.TrialMatcherData(patients=[patient1], configuration=configuration)

    # Health Insights Trial match trials
    try:
        poller = await trial_matcher_client.begin_match_trials(trial_matcher_data)
        trial_matcher_result = await poller.result()
        self.print_results(trial_matcher_result)
    except Exception as ex:
        print(str(ex))
        return
[ 590, 4231, 958 ]
def METHOD_NAME(self):
    cm = self._configure_cmake()
    cm.install()
    tools.rmdir(os.path.join(self.package_folder, "share"))
    self.copy("COPYING", src=self._source_subfolder, dst="licenses")
[ 360 ]
def METHOD_NAME(self, targets):
    return NormalizedDict(
        (html_escape(key), self._encode_uri_component(value))
        for key, value in targets.items()
    )
[ 4748, 61, 421, 465 ]
def METHOD_NAME(e1, p1, e2, p2):
    de = e1 - e2
    dp = deltaPhi(p1, p2)
    return de*de + dp*dp
[ 1364, 8343 ]
def METHOD_NAME(self):
    # clear previous overflow record
    self._found_overflow.fill_(self.module.overflow_counter.item())
    return self._found_overflow.item() > 0
[ 250, 1482 ]
def METHOD_NAME(result: CompletedProcess) -> List[dict]:
    """
    Common logic to assert a successful execution of a run_playbook action.
    Returns the stdout deserialized
    """
    assert result.returncode == 0
    assert not result.stderr
    output = jsonify_output(result.stdout.decode())
    if output:
        assert output[-1]["event_data"]["ok"]
        assert not output[-1]["event_data"]["failures"]
    return output
[ 638, 4338, 146 ]
def METHOD_NAME(Dom):
    try:
        return Dom.firstChild.data.strip()
    except:
        return ""
[ 399, 669, 365 ]
def METHOD_NAME(inp, fold_ratio, num_channels):
    if fold_ratio == 1:
        return inp
    dec0_u = np.zeros((num_channels, inp.shape[1]*fold_ratio, inp.shape[2]*fold_ratio))
    print(inp.shape)
    print(dec0_u.shape)
    for i in range(fold_ratio):
        for j in range(fold_ratio):
            dec0_u[:, i::fold_ratio, j::fold_ratio] = \
                inp[num_channels*(i*fold_ratio + j):num_channels*(i*fold_ratio + j + 1), :, :]
    return dec0_u
[ 5142, 660 ]
def METHOD_NAME(self):
    self.assertTrue(filter.is_archive(RES_DIR + "application.zip"))
    self.assertFalse(filter.is_archive(RES_DIR + "text.txt"))
[ 9, 137, 1622 ]
def METHOD_NAME(self):
    bool_data = [{"a": "True", "b": "False"}, {"a": "True", "b": "True"}]
    tbl = Table(bool_data)
    assert tbl.size() == 2
    assert tbl.schema() == {"a": bool, "b": bool}
[ 9, 410, 1852, 863, 3 ]
async def METHOD_NAME(self, ctx, num_per_row=None):
[ -1 ]
def METHOD_NAME(self, new_cfg: Dict) -> None:
    self.learner.set_new_cfg(new_cfg)
[ 69, 86, 2610 ]
def METHOD_NAME(self):
    args = []
    if self.spec.satisfies("@6:") and self.pkg.run_tests:
        args.append("--with-external-gtest")
    if self.spec.satisfies("@7:"):
        if "+tiff" in self.spec:
            args.append("--enable-tiff")
        else:
            args.append("--disable-tiff")
        if "+curl" in self.spec:
            args.append("--with-curl=" + self.spec["curl"].prefix.bin.join("curl-config"))
        else:
            args.append("--without-curl")
    return args
[ 111, 335 ]
def METHOD_NAME(self): r"multi-character newlines, split across chunks, are converted" input = 'a\nb\r\nc\rd\n\re' for splitpoint in range(1, len(input) - 1): a, b = input[:splitpoint], input[splitpoint:] lines_info = [] lines_info.append(self.lbf.append(a, 2.0)) lines_info.append(self.lbf.append(b, 2.0)) lines_info.append(self.lbf.flush()) lines_info = [e for e in lines_info if e is not None] joined_line_info = lines_info[0] for line_info in lines_info[1:]: joined_line_info = join_line_info(joined_line_info, line_info) self.assertEqual(joined_line_info, ('a\nb\nc\nd\n\ne\n', [1, 3, 5, 7, 8, 10], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0]))
[ 9, 265, 2788 ]
def METHOD_NAME(self, all_user_bots_raw_data):
    self._all_user_bots_raw_data = all_user_bots_raw_data
[ 0, 75, 21, 13903, 772, 365 ]
def METHOD_NAME(self, *args):
    self.texture = self.canvas.children[-1].texture
[ 3600, 176 ]
def METHOD_NAME(uuid: Optional[_UUID_typing] = None) -> tuple:
    header = '{"type": "bundle", "spec_version": "2.0", "id":'
    if uuid is None:
        uuid = uuid4()
    return f'{header} "bundle--{uuid}", "objects": [', ', ', json_footer
[ -1, -1 ]
def METHOD_NAME(self):
    assert dict(nx.bfs_predecessors(self.G, source=0)) == {1: 0, 2: 1, 3: 1, 4: 2}
[ 9, 15464 ]
def METHOD_NAME(self): """Package can't be found in repo.""" not_existing_pkg_name = "not-existing-aur-package-7h68712683h1628h1" result = pikaur( f"-S {not_existing_pkg_name} --repo", capture_stderr=True, ) self.assertEqual(result.returncode, 6) self.assertIn(MSG_CANNOT_BE_FOUND, result.stderr) self.assertEqual( result.stderr.splitlines()[-1].strip(), not_existing_pkg_name, )
[ 9, 428, 130, 622, 522 ]
def METHOD_NAME(x):
[ 1576, 1889 ]
def METHOD_NAME(self): """Returns the value returned by the node when evaluated.""" entries_list = self.entries.tolist() return sympy.Array(entries_list)
[ 24, 7824 ]
def METHOD_NAME(self, avatarId):
    comp = components.Componentized()
    user = self.userFactory(comp, avatarId)
    sess = self.sessionFactory(comp)

    sess.transportFactory = self.transportFactory
    sess.chainedProtocolFactory = self.chainedProtocolFactory

    comp.setComponent(iconch.IConchUser, user)
    comp.setComponent(iconch.ISession, sess)

    return user
[ 19, 5994 ]
def METHOD_NAME(rabbitmq_container):
    """
    Test rabbitmq_plugin.enabled and rabbitmq_plugin_disabled

    First try to disable the plugin.
    Second enable the plugin again.
    Third disable the plugin.
    """
    with patch.object(rabbitmq, "_get_rabbitmq_plugin", mock_get_rabbitmq_plugin):
        # Try to disable the plugin
        ret = rabbitmq_plugin.disabled("rabbitmq_auth_backend_http")
        expected = {
            "name": "rabbitmq_auth_backend_http",
            "result": True,
            "comment": "Plugin 'rabbitmq_auth_backend_http' is already disabled.",
            "changes": {},
        }
        assert ret == expected

        # Enable the plugin
        ret = rabbitmq_plugin.enabled("rabbitmq_auth_backend_http")
        expected = {
            "name": "rabbitmq_auth_backend_http",
            "result": True,
            "comment": "Plugin 'rabbitmq_auth_backend_http' was enabled.",
            "changes": {"old": "", "new": "rabbitmq_auth_backend_http"},
        }
        assert ret == expected

        # Disable the plugin
        ret = rabbitmq_plugin.disabled("rabbitmq_auth_backend_http")
        expected = {
            "name": "rabbitmq_auth_backend_http",
            "result": True,
            "comment": "Plugin 'rabbitmq_auth_backend_http' was disabled.",
            "changes": {"new": "", "old": "rabbitmq_auth_backend_http"},
        }
        assert ret == expected
[ 9, 1295 ]
def METHOD_NAME(self, parts, words, results):
    if not parts and not words:
        return True
    if not parts or (not words and parts != ["*"]):
        return False
    if parts[0] == "*":
        for index in range(len(words), -1, -1):
            results.append(words[:index])
            if self.METHOD_NAME(parts[1:], words[index:], results):
                return True
            results.pop()
        return False
    elif parts[0].startswith("@"):
        root = parts[0][1:]
        if root not in self.synons:
            raise ValueError("Unknown synonym root {}".format(root))
        if not words[0].lower() in self.synons[root]:
            return False
        results.append([words[0]])
        return self.METHOD_NAME(parts[1:], words[1:], results)
    elif parts[0].lower() != words[0].lower():
        return False
    else:
        return self.METHOD_NAME(parts[1:], words[1:], results)
[ 590, 8118, 3264 ]
def METHOD_NAME(self, requests):
    """
    Basic text preprocessing, based on the user's choice of application mode.

    Args:
        requests (list): A list of dictionaries with a "data" or "body" field,
            each containing the input text to be processed.

    Returns:
        tuple: A tuple with two tensors: the batch of input ids and the batch
            of attention masks.
    """
    input_texts = [data.get("data") or data.get("body") for data in requests]
    input_ids_batch, attention_mask_batch = [], []
    for input_text in input_texts:
        input_ids, attention_mask = self.encode_input_text(input_text)
        input_ids_batch.append(input_ids)
        attention_mask_batch.append(attention_mask)
    input_ids_batch = torch.cat(input_ids_batch, dim=0)
    attention_mask_batch = torch.cat(attention_mask_batch, dim=0)
    return input_ids_batch, attention_mask_batch
[ 666 ]
def METHOD_NAME(s):
    sys.stderr.write("m: " + s + "\n")
    sys.stderr.flush()
[ 390 ]
def METHOD_NAME(scope, context, ownership, privilege, membership, data):
    if privilege == "admin":
        return True

    rules = list(filter(lambda r: scope == r["scope"], simple_rules))
    rules = list(filter(lambda r: r["context"] == "na" or context == r["context"], rules))
    rules = list(filter(lambda r: r["ownership"] == "na" or ownership == r["ownership"], rules))
    rules = list(
        filter(
            lambda r: r["membership"] == "na"
            or ORG_ROLES.index(membership) <= ORG_ROLES.index(r["membership"]),
            rules,
        )
    )
    rules = list(filter(lambda r: GROUPS.index(privilege) <= GROUPS.index(r["privilege"]), rules))

    resource = data["resource"]
    rules = list(filter(lambda r: eval(r["limit"], {"resource": resource}), rules))

    return bool(rules)
[ 1171, 446 ]
def METHOD_NAME(*, x_ms_client_request_id: str, **kwargs: Any) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/azurespecials/overwrite/x-ms-client-request-id/via-param/method/")

    # Construct headers
    _headers["x-ms-client-request-id"] = _SERIALIZER.header(
        "x_ms_client_request_id", x_ms_client_request_id, "str"
    )
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
[ 56, 49, 19, 377 ]
def METHOD_NAME(test):  # pylint: disable=missing-function-docstring
    level_all = test.get_measurement('level_all')
    assert level_all.value == 9

    test_attachment = test.get_attachment('test_attachment')
    assert test_attachment.data == b'This is test attachment data.'

    lots_of_dims = test.get_measurement('lots_of_dims')
    assert lots_of_dims.value.value == [
        (1, 21, 101, 123),
        (2, 22, 102, 126),
        (3, 23, 103, 129),
        (4, 24, 104, 132),
    ]
    test.logger.info('Pandas dataframe of lots_of_dims:\n%s',
                     lots_of_dims.value.to_dataframe())
[ 689 ]
def METHOD_NAME(item): """ Get the file_on_disk url """ return generate_storage_url("{}.{}".format(item["checksum"], item["file_format"]))
[ 404, 948, 274 ]
def METHOD_NAME():
    raise AttributeError("dummy exception")
[ 241, 168 ]
def METHOD_NAME(self, card, comment=''):
    """
    Adds a CSHEAR card from ``BDF.add_card(...)``

    Parameters
    ----------
    card : BDFCard()
        a BDFCard object
    comment : str; default=''
        a comment for the card
    """
    eid = integer(card, 1, 'eid')
    pid = integer_or_blank(card, 2, 'pid', eid)
    nids = [integer_or_blank(card, 3, 'n1'),
            integer_or_blank(card, 4, 'n2'),
            integer_or_blank(card, 5, 'n3'),
            integer_or_blank(card, 6, 'n4')]
    assert len(card) <= 7, 'len(CSHEAR card) = %i\ncard=%s' % (len(card), card)
    self.add(eid, pid, nids)
[ 238, 5427 ]
def METHOD_NAME(
    self, update: object
) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, Dict[Any, Any]]]]]]:
    """Determines whether an update should be passed to this handler's :attr:`callback`.

    Args:
        update (:class:`telegram.Update` | :obj:`object`): Incoming update.

    Returns:
        :obj:`list`: The list of args for the handler.

    """
    if isinstance(update, Update) and update.effective_message:
        message = update.effective_message

        if message.text:
            text_list = message.text.split()
            if text_list[0].lower() not in self.commands:
                return None
            filter_result = self.filters.METHOD_NAME(update)
            if filter_result:
                return text_list[1:], filter_result
            return False
    return None
[ 250, 86 ]
def METHOD_NAME():
    r = minimize(func, (1.5, 1.7, 1.5), method="simplex", tol=1e-4)
    assert r.success
    assert_allclose(r.x, (0, 1, 2), atol=2e-3)
[ 9, 8135 ]
def METHOD_NAME(header):
    includes = []
    with open(header, 'r', encoding='utf-8') as file:
        for line in file:
            if not line.startswith('#include'):
                continue
            match = include_parser.match(line)
            if match:
                includes.append(match.group(1))
    return normalized_paths(includes)
[ 3403, 280, 171 ]
def METHOD_NAME(self):
    return self.scene().participants_dict[self.model_item.METHOD_NAME]
[ 3836 ]
def METHOD_NAME(in_wrapper):
    print('print_parameters')
    in_wrapper.on_post_processing_delegate.remove_callable(METHOD_NAME)

    # Print the ramp points directly
    print('heightramp: num points {0}:'.format(in_wrapper.get_ramp_parameter_num_points('heightramp')))
    heightramp_data = in_wrapper.get_float_ramp_parameter_points('heightramp')
    if not heightramp_data:
        print('\tNone')
    else:
        for idx, point_data in enumerate(heightramp_data):
            print('\t\t{0}: position={1:.6f}; value={2:.6f}; interpolation={3}'.format(
                idx, point_data.position, point_data.value, point_data.interpolation
            ))

    print('colorramp: num points {0}:'.format(in_wrapper.get_ramp_parameter_num_points('colorramp')))
    colorramp_data = in_wrapper.get_color_ramp_parameter_points('colorramp')
    if not colorramp_data:
        print('\tNone')
    else:
        for idx, point_data in enumerate(colorramp_data):
            print('\t\t{0}: position={1:.6f}; value={2}; interpolation={3}'.format(
                idx, point_data.position, point_data.value, point_data.interpolation
            ))

    # Print all parameter values
    param_tuples = in_wrapper.get_parameter_tuples()
    print('parameter tuples: {}'.format(len(param_tuples) if param_tuples else 0))
    if param_tuples:
        for param_tuple_name, param_tuple in param_tuples.items():
            print('parameter tuple name: {}'.format(param_tuple_name))
            print('\tbool_values: {}'.format(param_tuple.bool_values))
            print('\tfloat_values: {}'.format(param_tuple.float_values))
            print('\tint32_values: {}'.format(param_tuple.int32_values))
            print('\tstring_values: {}'.format(param_tuple.string_values))
            if not param_tuple.float_ramp_points:
                print('\tfloat_ramp_points: None')
            else:
                print('\tfloat_ramp_points:')
                for idx, point_data in enumerate(param_tuple.float_ramp_points):
                    print('\t\t{0}: position={1:.6f}; value={2:.6f}; interpolation={3}'.format(
                        idx, point_data.position, point_data.value, point_data.interpolation
                    ))
            if not param_tuple.color_ramp_points:
                print('\tcolor_ramp_points: None')
            else:
                print('\tcolor_ramp_points:')
                for idx, point_data in enumerate(param_tuple.color_ramp_points):
                    print('\t\t{0}: position={1:.6f}; value={2}; interpolation={3}'.format(
                        idx, point_data.position, point_data.value, point_data.interpolation
                    ))
[ 38, 386 ]
def METHOD_NAME(self, num_replicas=1):
    config = super().METHOD_NAME(num_replicas=num_replicas)
    return config_util.merge_config(
        config,
        {
            "infer": {
                "length_bucket_width": 1  # To ensure fixed length in each batch.
            }
        },
    )
[ 803, 200 ]
def METHOD_NAME( self ) :
[ 9, 750 ]
def METHOD_NAME(user_metadata, info, username, email):
    from allaccess.compat import get_user_model
    User = get_user_model()
    user_metadata.update({
        User.USERNAME_FIELD: email,
    })
    return user_metadata
[ 1053, 21, 773 ]
def METHOD_NAME():
    bpy.utils.unregister_class(VectorPolarInNode)
[ 2468 ]
def METHOD_NAME(self, principal, password, options=None, env=None):
    args = ["kinit", principal]
    if options:
        args.extend(options)
    return self._run_in_env(args, password.encode('utf-8'), env)
[ 10819 ]
def METHOD_NAME(self):
    if self.codebook_indices is None:
        from itertools import product

        p = [range(self.num_vars)] * self.groups
        inds = list(product(*p))
        self.codebook_indices = torch.tensor(
            inds, dtype=torch.long, device=self.vars.device
        ).flatten()

        if not self.combine_groups:
            self.codebook_indices = self.codebook_indices.view(
                self.num_vars ** self.groups, -1
            )
            for b in range(1, self.groups):
                self.codebook_indices[:, b] += self.num_vars * b
            self.codebook_indices = self.codebook_indices.flatten()
    return self.codebook_indices
[ 19, 10543, 1894 ]
def METHOD_NAME(line):
    """Returns (job_id, location) from matched log."""
    job_id_pattern = re.compile(_DATAFLOW_JOB_ID_PATTERN)
    matched_job_id = job_id_pattern.search(line or '')
    if matched_job_id:
        return (
            matched_job_id.group('job_id').decode(),
            matched_job_id.group('location').decode(),
        )
    return (None, None)
[ 297, 202, 147, 61, 708 ]
def METHOD_NAME(tmp_path, video_clip):
    x1, x2, y1, y2 = 0, 50, 0, 100
    video_clip.set_bbox(x1, x2, y1, y2)
    file = video_clip.crop(dest_folder=str(tmp_path))
    vid = VideoWriter(file)
    assert vid.dimensions == (x2 - x1, y2 - y1)
[ 9, 797, 712 ]
def METHOD_NAME(): """Pop-up the existing menu near the mouse cursor.""" menu = _get_menu() cursor = QtGui.QCursor() point = cursor.pos() menu.exec_(point)
[ 2102 ]
def METHOD_NAME(): """ Retrieves and return the Mattermost's configured hook :return: String: the hook string """ hook = __salt__["config.get"]("mattermost.hook") or __salt__["config.get"]( "mattermost:hook" ) if not hook: raise SaltInvocationError("No Mattermost Hook found") return hook
[ 19, 1021 ]
def METHOD_NAME(self, show_key, **kwargs):
[ 19, 561, 8351 ]
def METHOD_NAME(backend):
    # fix seed to use the same samples in every execution
    np.random.seed(123)
    obs0 = 2 * Z(0) * Z(1) + Z(0) * Z(2)
    obs1 = 2 * Z(0) * Z(1) + Z(0) * Z(2) * I(3)
    h_sym = hamiltonians.SymbolicHamiltonian(obs0, backend=backend)
    h_dense = hamiltonians.Hamiltonian(3, h_sym.matrix, backend=backend)
    h1 = hamiltonians.SymbolicHamiltonian(obs1, backend=backend)

    c = Circuit(4)
    c.add(gates.RX(0, np.random.rand()))
    c.add(gates.RX(1, np.random.rand()))
    c.add(gates.RX(2, np.random.rand()))
    c.add(gates.RX(3, np.random.rand()))
    c.add(gates.M(0, 1, 2))

    nshots = 10**5
    result = backend.execute_circuit(c, nshots=nshots)
    expval_sym = result.expectation_from_samples(h_sym)
    expval_dense = result.expectation_from_samples(h_dense)
    expval = h1.expectation(result.state())

    backend.assert_allclose(expval_sym, expval_dense)
    backend.assert_allclose(expval_sym, expval, atol=10 / np.sqrt(nshots))
[ 9, 2908, 280, 700 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    parent path to the subvolume
    """
    return pulumi.get(self, "parent_path")
[ 935, 157 ]
def METHOD_NAME(self, message, level="info", **kw):
    """Add status message to the messages list
    """
    record = dict(message=message, level=level, **kw)
    self.add_record_to("messages", record)
[ 238, 452, 277 ]
def METHOD_NAME(path: str, frames: Iterable, shared_filesystem: bool, gds=False) -> dict:
    """Write frames to disk

    Parameters
    ----------
    path: str
        File path
    frames: Iterable
        The frames to write to disk
    shared_filesystem: bool
        Whether the target filesystem is shared between all workers or not.
        If True, the filesystem must support the `os.link()` operation.
    gds: bool
        Enable the use of GPUDirect Storage. Notice, the consecutive
        `disk_read()` must enable GDS as well.

    Returns
    -------
    header: dict
        A dict of metadata
    """
    cuda_frames = tuple(hasattr(f, "__cuda_array_interface__") for f in frames)
    if gds and any(cuda_frames):
        import kvikio

        # Write each frame consecutively into `path` in parallel
        with kvikio.CuFile(path, "w") as f:
            file_offsets = itertools.accumulate(map(nbytes, frames), initial=0)
            futures = [f.pwrite(b, file_offset=o) for b, o in zip(frames, file_offsets)]
            for each_fut in futures:
                each_fut.get()
    else:
        with open(path, "wb") as f:
            os.writev(f.fileno(), frames)  # type: ignore
    return {
        "method": "stdio",
        "path": SpillToDiskFile(path),
        "frame-lengths": tuple(map(nbytes, frames)),
        "shared-filesystem": shared_filesystem,
        "cuda-frames": cuda_frames,
    }
[ 113, 77 ]
def METHOD_NAME(pipeline_uuid: str) -> str:
    content = None
    trigger_file_path = get_triggers_file_path(pipeline_uuid)

    if os.path.exists(trigger_file_path):
        with open(trigger_file_path) as fp:
            content = fp.read()

    return content
[ 557, 6927, 171, 459 ]
def METHOD_NAME(self):
    Business.objects.create(cc_id=5, cc_name="name_5", cc_owner="owner", cc_company="company")
    with patch(CORE_MODEL_PROJECT_SYNC_PROJECT, MagicMock(side_effect=IntegrityError)):
        project.sync_projects_from_cmdb("user")

    Project.objects.update_business_project_status.assert_called_once_with(
        archived_cc_ids=set(), active_cc_ids={1, 4, 5}
    )
    self.assertEqual(len(Project.objects.all()), 0)

    project.sync_projects_from_cmdb("user")
    self.assertEqual(len(Project.objects.all()), 3)
[ 9, 164, 2847, 280, -1, 155, 1985 ]
def METHOD_NAME(self):
[ 19, 1458, 1318 ]
async def METHOD_NAME(init_scheduler):
    from nonebot_plugin_saa import TargetQQGroup

    from nonebot_bison.types import Target as TTarget
    from nonebot_bison.config.db_config import SubscribeDupException, config

    await config.add_subscribe(
        TargetQQGroup(group_id=123),
        target=TTarget("weibo_id"),
        target_name="weibo_name",
        platform_name="weibo",
        cats=[],
        tags=[],
    )
    with pytest.raises(SubscribeDupException):
        await config.add_subscribe(
            TargetQQGroup(group_id=123),
            target=TTarget("weibo_id"),
            target_name="weibo_name",
            platform_name="weibo",
            cats=[],
            tags=[],
        )
[ 9, 238, 2976, 1066 ]
def METHOD_NAME(dut, shell='sonic', skip_error_check=True, **kwargs):
    """
    To perform config save.
    Author : Prudvi Mangadu ([email protected])
    :param dut: single or list of duts
    :return:
    """
    cli_type = kwargs.get('cli_type', st.get_ui_type(dut, **kwargs))
    cli_type = 'klish' if cli_type in ['rest-put', 'rest-patch'] else cli_type
    dut_li = list(dut) if isinstance(dut, list) else [dut]
    st.log("Performing config save", dut=dut)
    if shell == 'sonic':
        command = 'config save -y'
        [retvals, exceps] = utils.exec_foreach(True, dut_li, st.config, command)
    if shell == "vtysh" or cli_type == 'click':
        command = 'do copy running-config startup-config'
        [retvals, exceps] = utils.exec_foreach(True, dut_li, st.config, command,
                                               type="vtysh", skip_error_check=skip_error_check)
    if cli_type == 'klish':
        # Need to execute write mem in case of klish also. Once all klish
        # conversion is complete, only one command will be executed.
        command = "do write memory"
        [retvals, exceps] = utils.exec_foreach(True, dut_li, st.config, command,
                                               type=cli_type, skip_error_check=skip_error_check)
    st.debug([retvals, exceps])
    return True
[ 200, 73 ]
def METHOD_NAME(process):
    reEmul(process, useStatic=True, ntuple=False)
    return process
[ 1259, 7274, 628 ]
def METHOD_NAME(self):
    repMap = self.getRepMap()
    parameters = "root://eoscms//eos/cms" + ",root://eoscms//eos/cms".join(repMap["resultFiles"])
    mergedoutputfile = "root://eoscms//eos/cms%(finalResultFile)s" % repMap
    return ('root -x -b -q -l "TkAlOfflineJobsMerge.C(\\\"'
            + parameters + '\\\",\\\"' + mergedoutputfile + '\\\")"')
[ 1459, 24, 411 ]
def METHOD_NAME(self):
    self._bell = True
[ 1241 ]