text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
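Each row below pairs a text field, a Python function whose name has been replaced by the METHOD_NAME placeholder, with an ids field, a short list of integers. A minimal sketch of that schema, using the Hugging Face datasets library and two made-up rows (both the code strings and the id lists are illustrative assumptions, not values taken from the data below):

from datasets import Dataset

# Two invented rows in the same shape as the dump that follows.
rows = {
    "text": [
        "def METHOD_NAME(self):\n    return self._value",
        "def METHOD_NAME(a, b):\n    return a + b",
    ],
    "ids": [[69, 462], [1537]],
}
ds = Dataset.from_dict(rows)

for row in ds:
    assert "METHOD_NAME" in row["text"]   # every text field masks the function name
    assert 1 <= len(row["ids"]) <= 7      # sequence lengths reported in the header above

Iterating the real dataset the same way yields one masked function and its id sequence per row.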
async def METHOD_NAME(self, ctx: Context): record = await Alert.filter(active=True).order_by("-created_at").first() if not record: return if await record.reads.filter(user_id=ctx.author.id).exists(): return user_prompts = await record.prompts.filter(user_id=ctx.author.id).order_by("-prompted_at") if len(user_prompts) >= 3: return if user_prompts and user_prompts[0].prompted_at > (ctx.bot.current_time - timedelta(minutes=5)): return _e = discord.Embed( color=self.bot.color, title="You have an unread alert!", description="Click `Read Now` to read it." ) _e.set_thumbnail(url="https://cdn.discordapp.com/attachments/851846932593770496/1031240353489109112/alert.gif") v = PromptView(ctx, record) v.message = await ctx.message.reply(embed=_e, view=v) prompt = await Prompt.create(user_id=ctx.author.id) await record.prompts.add(prompt)
[ 69, 462, 1323 ]
def METHOD_NAME(include_links=False): devices = glob.glob('/dev/ttyS*') # built-in serial ports devices.extend(glob.glob('/dev/ttyUSB*')) # usb-serial with own driver devices.extend(glob.glob('/dev/ttyXRUSB*')) # xr-usb-serial port exar (DELL Edge 3001) devices.extend(glob.glob('/dev/ttyACM*')) # usb-serial with CDC-ACM profile devices.extend(glob.glob('/dev/ttyAMA*')) # ARM internal port (raspi) devices.extend(glob.glob('/dev/rfcomm*')) # BT serial devices devices.extend(glob.glob('/dev/ttyAP*')) # Advantech multi-port serial controllers if include_links: devices.extend(list_ports_common.list_links(devices)) return [info for info in [SysFS(d) for d in devices] if info.subsystem != "platform"] # hide non-present internal serial ports
[ -1 ]
def METHOD_NAME(self): pretty = Pretty(snippets.PYTHON_DICT, justify="center") self.console.print(pretty)
[ 104, 885, 16314, 1262 ]
def METHOD_NAME(self) -> "Traversable": """Return a Traversable object for the loaded package."""
[ 1537 ]
def METHOD_NAME(h=None): """Generates a random color in RGB format.""" if not h: h = random.random() s = 0.5 l = 0.5 return _hls2hex(h, l, s)
[ 236, 36 ]
def METHOD_NAME(self, X, _transform=False): """ Fit the model with X. Parameters ---------- X : dask cuDF input """ n_cols = X.shape[1] data = DistributedDataHandler.create(data=X, client=self.client) self.datatype = data.datatype if "svd_solver" in self.kwargs and self.kwargs["svd_solver"] == "tsqr": comms = Comms(comms_p2p=True) else: comms = Comms(comms_p2p=False) comms.init(workers=data.workers) data.calculate_parts_to_sizes(comms) worker_info = comms.worker_info(comms.worker_addresses) parts_to_sizes, _ = parts_to_ranks( self.client, worker_info, data.gpu_futures ) total_rows = data.total_rows models = dict( [ ( data.worker_info[wf[0]]["rank"], self.client.submit( self._create_model, comms.sessionId, self._model_func, self.datatype, **self.kwargs, pure=False, workers=[wf[0]], ), ) for idx, wf in enumerate(data.worker_to_parts.items()) ] ) pca_fit = dict( [ ( wf[0], self.client.submit( DecompositionSyncFitMixin._func_fit, models[data.worker_info[wf[0]]["rank"]], wf[1], total_rows, n_cols, parts_to_sizes, data.worker_info[wf[0]]["rank"], _transform, pure=False, workers=[wf[0]], ), ) for idx, wf in enumerate(data.worker_to_parts.items()) ] ) wait(list(pca_fit.values())) raise_exception_from_futures(list(pca_fit.values())) comms.destroy() self._set_internal_model(list(models.values())[0]) if _transform: out_futures = flatten_grouped_results( self.client, data.gpu_futures, pca_fit ) return to_output(out_futures, self.datatype) return self
[ 90 ]
def METHOD_NAME(self): clear_mappers()
[ 1843, 9 ]
def METHOD_NAME(self, value): """Dumps an object into a string for redis. By default it serializes integers as regular string and pickle dumps everything else. """ t = type(value) if t in integer_types: return str(value).encode("ascii") return b"!" + pickle.dumps(value)
[ 278, 279 ]
def METHOD_NAME(self): parameters = { **self.serialize_url_param( "resourceGroupName", self.ctx.args.resource_group, required=True, ), **self.serialize_url_param( "resourceGuardsName", self.ctx.args.resource_guard_name, required=True, ), **self.serialize_url_param( "subscriptionId", self.ctx.subscription_id, required=True, ), } return parameters
[ 274, 386 ]
def METHOD_NAME(self,rows,description): for col in description: colname = col[0] datatype = col[1] self.assertTrue(datatypes[datatype]) for row in rows: for col in row: foo = "" + str(col)
[ 1162, 51 ]
METHOD_NAME(self):
[ 7354, 452 ]
f METHOD_NAME(self):
[ 9, 5326 ]
def METHOD_NAME(self): test_files = [TEST_SVG_A] self.write_svgs(test_files) output = self.combine_svgs(test_files) self.assert_expected_xml(output, test_files)
[ 9, 2003, 97, 10129 ]
def METHOD_NAME(index): # Load options import yaml with open('../setup.yaml', 'r') as infile: setup_options = yaml.safe_load(infile) transformations = enumerate_transformations() print(f'Running transformation {index} / {len(transformations)}') # Select appropriate index transformation = transformations[index] override_string = [ f'{key}:{value}' for key, value in transformation.items() ] from perses.app.setup_relative_calculation import METHOD_NAME print(override_string) METHOD_NAME(yaml_filename=setup_options['perses_yaml_template'], override_string=override_string)
[ 22 ]
def METHOD_NAME(evt: Event, var: GameState, player: User, old_role: Optional[str]): if old_role == "bodyguard" and evt.data["role"] != "bodyguard": if player in GUARDED: guarded = GUARDED.pop(player) guarded.send(messages["protector_disappeared"])
[ 69, 80, 1018 ]
def METHOD_NAME(minion_id, pillar, command): """ Read in the generated libvirt keys """ key_dir = os.path.join(__opts__["pki_dir"], "libvirt", minion_id) cacert = os.path.join(__opts__["pki_dir"], "libvirt", "cacert.pem") if not os.path.isdir(key_dir): # No keys have been generated gen_hyper_keys( minion_id, pillar.get("ext_pillar_virt.country", "US"), pillar.get("ext_pillar_virt.st", "Utah"), pillar.get("ext_pillar_virt.locality", "Salt Lake City"), pillar.get("ext_pillar_virt.organization", "Salted"), pillar.get("ext_pillar_virt.expiration_days", "365"), ) ret = {} for key in os.listdir(key_dir): if not key.endswith(".pem"): continue fn_ = os.path.join(key_dir, key) with salt.utils.files.fopen(fn_, "r") as fp_: ret["libvirt.{}".format(key)] = salt.utils.stringutils.to_unicode( fp_.read() ) with salt.utils.files.fopen(cacert, "r") as fp_: ret["libvirt.cacert.pem"] = salt.utils.stringutils.to_unicode(fp_.read()) return ret
[ 1661, 5005 ]
async def METHOD_NAME(request: web.Request) -> web.Response: query_params = parse_request_query_parameters_as(SearchFilesQueryParams, request) log.debug( "received call to search_files_starting_with with %s", f"{query_params=}", ) dsm = cast( SimcoreS3DataManager, get_dsm_provider(request.app).get(SimcoreS3DataManager.get_location_id()), ) data: list[FileMetaData] = await dsm.METHOD_NAME( query_params.user_id, prefix=query_params.startswith ) log.debug("Found %d files starting with '%s'", len(data), query_params.startswith) return web.json_response( {"data": [jsonable_encoder(FileMetaDataGet.from_orm(d)) for d in data]}, dumps=json_dumps, )
[ 1070, 1537, 8466, 41 ]
def METHOD_NAME(self, other): """Boolean expression. Returns true if the column overlaps (has points in common with) the right hand operand. """ return self.expr.op('&&')(other)
[ 2740 ]
def METHOD_NAME(msg): server_log.write(f"{datetime.now().strftime('%H:%M:%S.%f')} | {msg}\n") server_log.flush()
[ -1 ]
METHOD_NAME(self):
[ 9, 238, 598, 44 ]
def METHOD_NAME(uri): """Removes password from URI but keeps username.""" p = urlsplit(uri) if p.password is not None: nl = p.hostname if p.username is not None: nl = f"{p.username}@{nl}" if p.port is not None: nl += f":{p.port}" p = p._replace(netloc=nl) return p.geturl()
[ 1360, 2897 ]
def METHOD_NAME(self): try: class Foo(object): @PropertySubSlots def spam(self): """Trying to copy this docstring will raise an exception""" return 1 except AttributeError: pass else: raise Exception("AttributeError not raised")
[ 9, 2827, 2573, 215, 442 ]
def METHOD_NAME(self): """ This method is to be overridden by the custom test case classes that inherit from the TethysTestCase class and is used to perform any set up that is applicable to every test function that is defined within the custom test class Return: None """ pass
[ 0, 1 ]
async def METHOD_NAME(self) -> None: """Load available profiles.""" for content in self.sys_config.path_apparmor.iterdir(): if not content.is_file(): continue self._profiles.add(content.name) _LOGGER.info("Loading AppArmor Profiles: %s", self._profiles) # Load profiles if self.available: for profile_name in self._profiles: try: await self._load_profile(profile_name) except HostAppArmorError: pass else: _LOGGER.warning("AppArmor is not enabled on host")
[ 557 ]
def METHOD_NAME(self, request, METHOD_NAME): return paths_filtered_by_status(request, METHOD_NAME)
[ 295 ]
def METHOD_NAME(moon_dataset): """tests saving and loading""" embedder = ParametricUMAP() embedding = embedder.fit_transform(moon_dataset) # completes successfully assert embedding is not None assert embedding.shape == (moon_dataset.shape[0], 2) # Portable tempfile model_path = tempfile.mkdtemp(suffix="_umap_model") embedder.save(model_path) loaded_model = load_ParametricUMAP(model_path) assert loaded_model is not None loaded_embedding = loaded_model.transform(moon_dataset) assert_array_almost_equal( embedding, loaded_embedding, decimal=5, err_msg="Loaded model transform fails to match original embedding", )
[ 9, 73, 557 ]
def METHOD_NAME(self, context, layout): #pass col = layout.column(align=True) row = col.row(align=True) row.prop(self, 'mode', expand=True) row = col.row(align=True) row.prop(self, 'regime', expand=True) if self.regime == 'G': row = col.row(align=True) row.prop(self, 'direction', expand=True) col.prop(self, 'defgrid')
[ 1100, 1409 ]
def METHOD_NAME(self): for tmp_dir, repo, pathfunc in parameterize(): filename1 = "test1.txt" filename2 = "test2.txt" filename1 = os.path.join(tmp_dir, filename1) filename2 = os.path.join(tmp_dir, filename2) write_text = "text" with io.open(filename1, "w", encoding="utf-8", newline="\n") as fh: fh.write(write_text) self.assertTrue(os.path.exists(filename1)) self.assertFalse(os.path.exists(filename2)) if repo is not None: self.assertFalse( list(repo.index.iter_blobs(BlobFilter(filename2))) ) fio.copy_file(pathfunc(filename1), pathfunc(filename2)) self.assertTrue(os.path.exists(filename1)) self.assertTrue(os.path.exists(filename2)) if repo is not None: self.assertTrue( list(repo.index.iter_blobs(BlobFilter(filename2))) ) read_text = "" with io.open(filename2, "r", encoding="utf-8") as fh: read_text = fh.read() self.assertEqual(write_text, read_text) if repo is not None: blob = next(repo.index.iter_blobs(BlobFilter(filename2)))[1] read_text = blob.data_stream[3].read().decode("utf-8") self.assertEqual(write_text, read_text)
[ 9, 215, 171 ]
async def METHOD_NAME( app, *api_path, method='get', noauth=False, bypass_proxy=False, **kwargs ): """Make an API request""" if bypass_proxy: # make a direct request to the hub, # skipping the proxy base_url = app.hub.url else: base_url = public_url(app, path='hub') headers = kwargs.setdefault('headers', {}) if 'Authorization' not in headers and not noauth and 'cookies' not in kwargs: # make a copy to avoid modifying arg in-place kwargs['headers'] = h = {} h.update(headers) h.update(auth_header(app.db, kwargs.pop('name', 'admin'))) url = ujoin(base_url, 'api', *api_path) if 'cookies' in kwargs: # for cookie-authenticated requests, # add _xsrf to url params if "_xsrf" in kwargs['cookies'] and not noauth: url = url_concat(url, {"_xsrf": kwargs['cookies']['_xsrf']}) f = getattr(async_requests, method) if app.internal_ssl: kwargs['cert'] = (app.internal_ssl_cert, app.internal_ssl_key) kwargs["verify"] = app.internal_ssl_ca resp = await f(url, **kwargs) assert "frame-ancestors 'self'" in resp.headers['Content-Security-Policy'] assert ( ujoin(app.hub.base_url, "security/csp-report") in resp.headers['Content-Security-Policy'] ) assert 'http' not in resp.headers['Content-Security-Policy'] if not kwargs.get('stream', False) and resp.content: assert resp.headers.get('content-type') == 'application/json' return resp
[ 58, 377 ]
def METHOD_NAME(value): if hasattr(value, 'text'): value = value.text if isinstance(value, int): return value elif isinstance(value, (float, Decimal)): return int(value) elif isinstance(value, str) and value.startswith("0x"): return int(value, base=16) else: raise TypeError('invalid type {!r} for integer'.format(type(value)))
[ 696, 24, 962 ]
def METHOD_NAME(head): return \
[ 4556, 1010, 4742, 227, 1796 ]
def METHOD_NAME(monkeypatch, mock_proposals): async def mock_db_call(*args, **kwargs): return mock_proposals monkeypatch.setattr(EventDb, "fetch_proposals", mock_db_call)
[ 248, 1267, 1047, 4528 ]
def METHOD_NAME(): op.rename_table('band', 'guest_group') op.rename_table('band_bio', 'guest_bio') if is_sqlite: with op.batch_alter_table('guest_bio', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_bio', 'band_id', new_column_name='guest_id') op.rename_table('band_charity', 'guest_charity') if is_sqlite: with op.batch_alter_table('guest_charity', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_charity', 'band_id', new_column_name='guest_id') op.rename_table('band_info', 'guest_info') if is_sqlite: with op.batch_alter_table('guest_info', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_info', 'band_id', new_column_name='guest_id') op.rename_table('band_merch', 'guest_merch') if is_sqlite: with op.batch_alter_table('guest_merch', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_merch', 'band_id', new_column_name='guest_id') op.rename_table('band_panel', 'guest_panel') if is_sqlite: with op.batch_alter_table('guest_panel', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_panel', 'band_id', new_column_name='guest_id') op.rename_table('band_stage_plot', 'guest_stage_plot') if is_sqlite: with op.batch_alter_table('guest_stage_plot', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_stage_plot', 'band_id', new_column_name='guest_id') op.rename_table('band_taxes', 'guest_taxes') if is_sqlite: with op.batch_alter_table('guest_taxes', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.alter_column('band_id', new_column_name='guest_id') else: op.alter_column('guest_taxes', 'band_id', new_column_name='guest_id')
[ 738 ]
def METHOD_NAME(self): options = [ self.define("BLAS_LIBRARIES", self.spec["blas"].libs.joined(";")), self.define("LAPACK_LIBRARIES", self.spec["lapack"].libs.joined(";")), self.define_from_variant("BUILD_SHARED_LIBS", "shared"), self.define_from_variant("PLASMA_DETECT_LUA", "lua"), ] for package, provider in ( ("openblas", "openblas"), ("intel-mkl", "mkl"), ("netlib-lapack", "netlib"), ): if package in self.spec: for lib in ("CBLAS", "LAPACKE"): options.append(self.define("{}_PROVIDER".format(lib), provider)) if "cray-libsci" in self.spec: for lib in ("CBLAS", "LAPACKE"): libsci_prefix = self.spec["cray-libsci"].package.external_prefix options.append(self.define("{}_PROVIDER".format(lib), "generic")) options.append( self.define("{}_INCLUDE_DIRS".format(lib), join_path(libsci_prefix, "include")) ) options.append( self.define("{}_LIBRARIES".format(lib), self.spec["blas"].libs.joined(";")) ) options.append(self.define("CBLAS_ADD_TYPEDEF", True)) return options
[ 334, 335 ]
def METHOD_NAME( self, mock__get_client, mock_has_client_policy, mock_get_action_value ): """Google Authenticator url with default issuer and label with this empty setting - the tokenissuer should become 'LinOTP' and - the tokenlabel should become the serial using hmac non defaults: SHA256, 8 digits """ mock__get_client.return_value = "localhost" mock_has_client_policy.return_value = {} mock_get_action_value.return_value = "" param = { "hashlib": "SHA256", "otpkey": "cc5bad98a76279171a08a5d18fd400e748945c2b", "serial": "HOTP1234", "otplen": "8", "type": "hmac", } url = create_google_authenticator(param=param) assert url.startswith("otpauth://hotp/LinOTP:HOTP1234?") assert "counter=0" in url assert "digits=8" in url assert "algorithm=SHA256" in url param["user.login"] = "hugo" url = create_google_authenticator(param=param) assert url.startswith("otpauth://hotp/LinOTP:hugo?") param["user.realm"] = "realm" url = create_google_authenticator(param=param) assert url.startswith("otpauth://hotp/LinOTP:hugo?") param["description"] = "description" url = create_google_authenticator(param=param) assert url.startswith("otpauth://hotp/LinOTP:hugo?")
[ 9, 466, 636, 5500, 235 ]
def METHOD_NAME(self, doc): if hasattr(self.field, "__doc__") and self.field.__doc__: doc.write(self.field.__doc__ + "\n\n") # pragma: no cover
[ 24, 2092, 791, 2573 ]
def METHOD_NAME(self): """ check for convergence Args: self (object): create by prep Returns: converged?: True if converged, False otherwise """ primal_gap = self._compute_primal_convergence() dual_gap = self._compute_dual_residual() self.prev_xbars = self._get_xbars() ret_val = max(primal_gap, dual_gap) <= self.convergence_threshold if self._verbose and self._rank == 0: print(f"primal gap = {round(primal_gap, 5)}, dual gap = {round(dual_gap, 5)}") if ret_val: print("Dual convergence check passed") else: print("Dual convergence check failed " f"(requires primal + dual gaps) <= {self.convergence_threshold}") if self.tracking and self._rank == 0: self.tracker.add_row([self._ph._PHIter, primal_gap, dual_gap]) self.tracker.write_out_data() return ret_val
[ 137, 6880 ]
def METHOD_NAME(self): return b''.join(bytes(k) for k in self.chunks())
[ 2522 ]
f METHOD_NAME(self):
[ 9, 1571, 3209 ]
def METHOD_NAME(self): p = self.Kind() p.pack1(Gtk.Button()) p.pack2(Gtk.Button()) p.set_relative(0.75) self.failUnlessAlmostEqual(p.get_relative(), 0.75) with visible(p, width=200, height=200) as p: self.failUnlessAlmostEqual(p.get_relative(), 0.75, 2)
[ 9, 2999, 709, 102, 2189 ]
def METHOD_NAME(self, class_, attr, schema): if not re.match('^[A-Za-z0-9_.-]+(\\.%l)?$', schema) \ or schema in {'.', '..'}: self.bad_filename_schemas.append("%s.%s" % (class_, attr))
[ 250, 1147, 135 ]
def METHOD_NAME(self): if self.instance.enabled: self.instance.enabled = False self.instance.save() return True
[ 193 ]
def METHOD_NAME( self, mock_PipelineTask): component_op(input1='hello', input2=100) mock_PipelineTask.assert_called_once_with( component_spec=component_op.component_spec, args={ 'input1': 'hello', 'input2': 100, })
[ 9, 3974, 1007, -1, 134, 41, 235 ]
def METHOD_NAME(): '''Test parameters that validate should accept.''' _, invoke = get_invoke("test27_loop_swap.f90", "gocean1.0", name="invoke_loop1") schedule = invoke.schedule my_rt = MyRegionTrans() # Check that correct ordering works: node_list = [schedule.children[0], schedule.children[1], schedule.children[2]] my_rt.validate(node_list) # Check that a single Node is accepted my_rt.validate(schedule.children[0]) # Check that a single Schedule is accepted my_rt.validate(schedule)
[ 9, 187, 1217 ]
def METHOD_NAME(graph: Graph) -> Document: parsed_fields: Dict[str, Any] = dict() logger = Logger() creation_info, doc_node = parse_creation_info(graph) parsed_fields["creation_info"] = creation_info for element, triple, parsing_method in [ ("packages", (None, RDF.type, SPDX_NAMESPACE.Package), parse_package), ("files", (None, RDF.type, SPDX_NAMESPACE.File), parse_file), ("snippets", (None, RDF.type, SPDX_NAMESPACE.Snippet), parse_snippet), ]: elements = [] for element_node, _, _ in get_correctly_typed_triples(logger, graph, *triple): try: elements.append(parsing_method(element_node, graph, creation_info.document_namespace)) except SPDXParsingError as err: logger.extend(err.get_messages()) parsed_fields[element] = elements for element, triple, parsing_method in [ ("annotations", (None, SPDX_NAMESPACE.annotation, None), parse_annotation), ("relationships", (None, SPDX_NAMESPACE.relationship, None), parse_relationship), ]: elements = [] for parent_node, _, element_node in graph.triples(triple): try: elements.append(parsing_method(element_node, graph, parent_node, creation_info.document_namespace)) except SPDXParsingError as err: logger.extend(err.get_messages()) parsed_fields[element] = elements for triple, relationship_type in [ ((None, SPDX_NAMESPACE.hasFile, None), RelationshipType.CONTAINS), ((None, SPDX_NAMESPACE.describesPackage, None), RelationshipType.DESCRIBES), ]: for parent_node, _, element_node in get_correctly_typed_triples(logger, graph, *triple): try: relationship = parse_implicit_relationship( parent_node, relationship_type, element_node, graph, creation_info.document_namespace ) if relationship not in parsed_fields["relationships"]: parsed_fields["relationships"].append(relationship) except SPDXParsingError as err: logger.extend(err.get_messages()) extracted_licensing_infos = [] for _, _, extracted_licensing_info_node in get_correctly_typed_triples( logger, graph, None, SPDX_NAMESPACE.hasExtractedLicensingInfo ): try: extracted_licensing_infos.append( parse_extracted_licensing_info(extracted_licensing_info_node, graph, creation_info.document_namespace) ) except SPDXParsingError as err: logger.extend(err.get_messages()) parsed_fields["extracted_licensing_info"] = extracted_licensing_infos raise_parsing_error_if_logger_has_messages(logger) document = construct_or_raise_parsing_error(Document, parsed_fields) return document
[ 711, 303, 24, 352 ]
def METHOD_NAME(self) -> None: code = """a = 1 #XXX """ with self.assertAddsMessages( MessageTest(msg_id="fixme", line=2, args="XXX", col_offset=17) ): self.checker.process_tokens(_tokenize_str(code))
[ 9, 10150, 529, 173 ]
def METHOD_NAME(self) -> int: return int(self._time_elapsed)
[ 104, 3229 ]
def METHOD_NAME(self, model): """ Call Block Triangularization solver on model. """ if self.config.block_solver is not None: solver = SolverFactory(self.config.block_solver) solver.options.update(self.config.block_solver_options) else: solver = get_solver(options=self.config.block_solver_options) if model.is_indexed(): for d in model.values(): self._solve_block_data(d, solver) else: self._solve_block_data(model, solver)
[ 7319, 2266 ]
def METHOD_NAME(patternA, patternB, expectedPattern): pattern = patternIntersect(patternA, patternB) assert expectedPattern == pattern
[ 9, 652, 3801 ]
def METHOD_NAME(item) -> Mapping: mapping = {} for item_key in item_keys: try: mapping[item_key] = item[item_key] except (AttributeError, KeyError): pass try: # get datetime in UTC dt = datetime.utcfromtimestamp(timegm(item.published_parsed)) # make sure that the output string is labeled as UTC dt_tz = dt.replace(tzinfo=pytz.UTC) mapping["published"] = dt_tz.isoformat() except (AttributeError, KeyError): pass return mapping
[ 197, 1024, 24, 445 ]
def METHOD_NAME(self, name, procs, db): # Run time on one proc on machine with scale factor == 1.0 return 35.0
[ 22, 104 ]
def METHOD_NAME(self) -> int: return self._fock.METHOD_NAME
[ 293, 4085 ]
def METHOD_NAME(stat): return (_W_INT(stat)&0177777)
[ -1 ]
def METHOD_NAME(tmp_path) -> str: n = 4 X = np.random.rand(n) y = np.random.rand(n) clf = DummyClassifier(strategy="prior") clf.fit(X, y) METHOD_NAME = os.path.join(tmp_path, "sklearn-model.joblib") joblib.dump(clf, METHOD_NAME) return METHOD_NAME
[ 578, 354 ]
def METHOD_NAME(): return _num_samples
[ 181, 700 ]
def METHOD_NAME(self): if len(self.eval_splits) == 1: return self.plugin.prepare_dataloader(self.dataset["test"], batch_size=self.eval_batch_size) elif len(self.eval_splits) > 1: return [ self.plugin.prepare_dataloader(self.dataset[x], batch_size=self.eval_batch_size) for x in self.eval_splits ]
[ 9, 568 ]
def METHOD_NAME(self): """Verify that loop_setup with offset and size specified works as expected""" # now test with the offset and size succ, self.loop = BlockDev.loop_setup(self.dev_file, 10 * 1024**2, 50 * 1024**2) self.assertTrue(succ) self.assertTrue(self.loop) # should have size as specified with open("/sys/block/%s/size" % self.loop, "r") as f: size = int(f.read()) * 512 self.assertEqual(size, 50 * 1024**2) succ = BlockDev.loop_teardown(self.loop) self.assertTrue(succ)
[ 9, 1751, 102, 41, 1540, 61, 1318 ]
def METHOD_NAME(self): t = testRepository(1) while not t.isReadyForCheck(): pass returnable = t.checkTest() from GangaCore.Core.InternalServices.Coordinator import enableInternalServices, disableInternalServices disableInternalServices() enableInternalServices() return returnable
[ 9, 17354 ]
def METHOD_NAME(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 100 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = -self.scale * self.zero_point clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) weight = ( torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() ) # return output output = self._conv_forward(input, weight) return output
[ 76 ]
def METHOD_NAME(self) -> Iterator[GraphNode]: """ Create watermark nodes :return: """ for part in self.parts: part_node = GraphNode( key=self.get_watermark_model_key(), label=Watermark.LABEL, attributes={ 'partition_key': part[0], 'partition_value': part[1], 'create_time': self.create_time } ) yield part_node
[ 129, 1716, 640 ]
def METHOD_NAME(): # Create an existing config if not os.path.exists(f"{DATA_DIR}\\conf"): os.makedirs(f"{DATA_DIR}\\conf") with open(f"{DATA_DIR}\\conf\\minion", "w") as f: # \n characters are converted to os.linesep f.writelines(existing_content)
[ 1153, 200 ]
f METHOD_NAME(self, c, url):
[ 377, 176 ]
def METHOD_NAME(self, log_file: Path) -> None: if ANALYTICS_SERVER == "": return if SITE_KEY == "": return if global_settings.GlobalEnviromentSettings().read_property("DisableCrashAndAnalyticsReporting") is True: return if not log_file.exists(): return if self.constants.commit_info[0].startswith("refs/tags"): # Avoid being overloaded with crash reports return commit_info = self.constants.commit_info[0].split("/")[-1] + "_" + self.constants.commit_info[1].split("T")[0] + "_" + self.constants.commit_info[2].split("/")[-1] crash_data= { "KEY": SITE_KEY, "APPLICATION_VERSION": self.version, "APPLICATION_COMMIT": commit_info, "OS_VERSION": self.os, "MODEL": self.model, "TIMESTAMP": self.date, "CRASH_LOG": log_file.read_text() } network_handler.NetworkUtilities().post(CRASH_URL, json = crash_data)
[ 353, 2653, 339 ]
def METHOD_NAME(): """Create a group.""" groups = GroupSchema(strict=True, many=True).load(request.json, partial=True).data created_groups = [] for group in groups: ensure_unused_group_name(group['name']) group['owner_id'] = request.user.user_id group['user_defined'] = True group = local.model.METHOD_NAME(group) local.model.add_user_in_group(request.user.user_id, group['uuid'], is_admin=True) created_groups.append(group) return GroupSchema(many=True).dump(created_groups).data
[ 129, 846 ]
f METHOD_NAME(self):
[ 9, 2423, 1068, 11932, 176 ]
def METHOD_NAME(self): """ Tests that both user-facing parameter/gradient dtypes and internal saved dtype attributes are as expected when using an FP16 model possibly with explicit mixed precision enabled. """ self.run_subtests( { "to_half_before_fsdp_init": [False, True], "use_orig_params": [False, True], "mixed_precision": [ MixedPrecision(), MixedPrecision( param_dtype=torch.float16, reduce_dtype=torch.float32, ), MixedPrecision( param_dtype=torch.float32, ), ], }, self._test_fp16_dtypes, )
[ 9, 23, 4303 ]
def METHOD_NAME(op, data, _, aggcontext=None, **kwargs): return aggcontext.agg(data, "std", ddof=variance_ddof[op.how])
[ 750, 6603, 4045, 2834, 1396 ]
def METHOD_NAME(self): conflicting_aur_package1 = "resvg" conflicting_aur_package2 = "resvg-git" self.remove_if_installed( conflicting_aur_package1, conflicting_aur_package2, ) self.assertEqual( pikaur( f"-S {conflicting_aur_package1}" f" {conflicting_aur_package2}", ).returncode, 131, ) self.assertNotInstalled(conflicting_aur_package1) self.assertNotInstalled(conflicting_aur_package2)
[ 9, 5170, 12439, 2975 ]
def METHOD_NAME(filename, dimension, classes=2, data_feature_name="data"): cur_class = randint(0, classes - 1) loc = int(cur_class - (classes / 2)) arr = np.random.normal(loc=loc, size=(dimension,)) feature = {"labels": label_feature(cur_class), data_feature_name: string_feature(arr)} example = tf.train.Example(features=tf.train.Features(feature=feature)) with open(filename, "wb") as f: f.write(example.SerializeToString())
[ 56, 97, 148, 171 ]
def METHOD_NAME(self): """Test QFI's options""" a = Parameter("a") qc = QuantumCircuit(1) qc.rx(a, 0) qgt = LinCombQGT(estimator=self.estimator, options={"shots": 100}) with self.subTest("QGT"): qfi = QFI(qgt=qgt) options = qfi.options result = qfi.run([qc], [[1]]).result() self.assertEqual(result.options.get("shots"), 100) self.assertEqual(options.get("shots"), 100) with self.subTest("QFI init"): qfi = QFI(qgt=qgt, options={"shots": 200}) result = qfi.run([qc], [[1]]).result() options = qfi.options self.assertEqual(result.options.get("shots"), 200) self.assertEqual(options.get("shots"), 200) with self.subTest("QFI update"): qfi = QFI(qgt, options={"shots": 200}) qfi.update_default_options(shots=100) options = qfi.options result = qfi.run([qc], [[1]]).result() self.assertEqual(result.options.get("shots"), 100) self.assertEqual(options.get("shots"), 100) with self.subTest("QFI run"): qfi = QFI(qgt=qgt, options={"shots": 200}) result = qfi.run([qc], [[0]], shots=300).result() options = qfi.options self.assertEqual(result.options.get("shots"), 300) self.assertEqual(options.get("shots"), 200)
[ 9, 1881 ]
def METHOD_NAME(self, signature): func_id = ABI.function_selector(signature) func_name = str(signature.split("(")[0]) if func_name in self.__dict__ or func_name in {"add_function", "address", "name_"}: raise EthereumError(f"Function name ({func_name}) is internally reserved") entry = HashesEntry(signature, func_id) if func_name in self.__hashes: self.__hashes[func_name].append(entry) return if func_id in {entry.func_id for entries in self.__hashes.values() for entry in entries}: raise EthereumError(f"A function with the same hash as {func_name} is already defined") self.__hashes[func_name] = [entry]
[ 238, 559 ]
METHOD_NAME(self):
[ 401, 462, 426 ]
def METHOD_NAME(self): self.basic_star_test(('*',))
[ 9, 15796, 947, 1815 ]
async def METHOD_NAME(self): ret = ( await Book.annotate(avg=Avg("rating")) .filter(avg__gt=3) .group_by("author_id") .values_list("author_id", "avg") ) self.assertEqual(len(ret), 1) self.assertEqual(ret[0][1], 4.5)
[ 9, 1654, 199, 245, 527, 846, 604 ]
def METHOD_NAME(): """git tags can be UTF-8 encoded""" with mock.patch( "dulwich.client.get_transport_and_path", autospec=True, return_value=( mock.Mock( **{"fetch_pack.return_value": {"☃".encode("UTF-8"): b"deadbeef"}} ), "path", ), ): with mock.patch("time.sleep", autospec=True): ret = remote_git.list_remote_refs("git-url") assert ret == {"☃": "deadbeef"}
[ 9, 256, 4428, 114 ]
def METHOD_NAME(self): if not self.dependencies["libzen"].options.enable_unicode: raise ConanInvalidConfiguration("This package requires libzen with unicode support")
[ 187 ]
def METHOD_NAME(a: np.ndarray, b: np.ndarray) -> np.ndarray: # Previously, this was implemented as: # left_mass = mass_case_left(a, 0) # right_mass = mass_case_right(0, b) # return _log_sum(left_mass, right_mass) # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1. # Correct for this with an alternative formulation. # We're not concerned with underflow here: if only one term # underflows, it was insignificant; if both terms underflow, # the result can't accurately be represented in logspace anyway # because sc.log1p(x) ~ x for small x. return np.log1p(-_ndtr(a) - _ndtr(-b))
[ 2858, 331, 11727 ]
def METHOD_NAME(string, vocab_path): punc_set = set() with open(vocab_path, "r") as f: for token in f: punc_set.add(token.strip()) punc_set.add(" ") for ascii_num in range(65296, 65306): punc_set.add(chr(ascii_num)) for ascii_num in range(48, 58): punc_set.add(chr(ascii_num)) res = [] temp = "" for c in string: if c in punc_set: if temp != "": res.append(temp) temp = "" res.append(c) else: temp += c if temp != "": res.append(temp) return res
[ 189, 265 ]
def METHOD_NAME(req, poolname, details): action = get_action() return action.diskpool_get_info(req, poolname, details)
[ 1806, 19, 113, 100 ]
def METHOD_NAME(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: """The targets of the metrics. Targets include things like the qubits (i.e. strings like `q0_1`), but may also be empty when the metric applies globally. """
[ 465 ]
def METHOD_NAME(self, num_items: int) -> Tuple[bytes, ...]: # # Note: This function is optimized for speed over readability. # if num_items > len(self.values): raise InsufficientStack( "Wanted %d stack items, only had %d", num_items, len(self.values), ) else: neg_num_items = -1 * num_items all_popped = reversed(self.values[neg_num_items:]) del self.values[neg_num_items:] type_cast_popped = [] # Convert any non-matching types to the requested type (int) # This doesn't use the @to_tuple(generator) pattern, for added performance for item_type, popped in all_popped: if item_type is int: type_cast_popped.append(int_to_big_endian(popped)) # type: ignore elif item_type is bytes: type_cast_popped.append(popped) # type: ignore else: raise _busted_type(item_type, popped) return tuple(type_cast_popped)
[ 760, 321 ]
def METHOD_NAME(project: Optional[pulumi.Input[Optional[str]]] = None, repository: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRepositoryIamPolicyResult]: """ Retrieves the current IAM policy data for repository ## example ```python import pulumi import pulumi_gcp as gcp policy = gcp.sourcerepo.get_repository_iam_policy(project=google_sourcerepo_repository["my-repo"]["project"], repository=google_sourcerepo_repository["my-repo"]["name"]) ``` :param str project: The ID of the project in which the resource belongs. If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. :param str repository: Used to find the parent resource to bind the IAM policy to """ ...
[ 19, 1230, 1694, 54, 146 ]
def METHOD_NAME( a_true: str, a_sub: str, variables: List[str] ) -> Tuple[float, str]: sym_true, sym_true_source = phs.convert_string_to_sympy_with_source( a_true, variables, allow_complex=False, allow_trig_functions=False ) sym_sub, sym_sub_source = phs.convert_string_to_sympy_with_source( a_sub, variables, allow_complex=False, allow_trig_functions=False ) if sym_true_source == sym_sub_source: return (1, CORRECT_UNCONDITIONAL_FEEDBACK) try: if sym_true.equals(sym_sub): return (1.0, CORRECT_COMPLEX_FEEDBACK) elif sympy.limit(sym_sub, sympy.Symbol(variables[0]), sympy.oo) < sympy.sympify( 0 ): return (0.0, NEGATIVE_FEEDBACK) L = sympy.limit( sympy.simplify(sym_true / sym_sub), sympy.Symbol(variables[0]), sympy.oo ) if L < sympy.sympify(0): return (0.0, NEGATIVE_FEEDBACK) elif L == sympy.oo: return (0.0, INCORRECT_FEEDBACK) elif L == sympy.sympify(0): return (0.25, TOO_LOOSE_FEEDBACK) elif L == sympy.sympify(1): return (0.5, LOWER_ORDER_TERMS_FEEDBACK) return (0.5, CONSTANT_FACTORS_FEEDBACK) except TypeError: return (0.0, TYPE_ERROR_FEEDBACK)
[ 5560, 1857, 1120 ]
def METHOD_NAME(self, node_field, pattern_field): """ Check if two fields match. Field match if: - If it is a list, all values have to match. - If if is a node, recursively check it. - Otherwise, check values are equal. """ if isinstance(pattern_field, list): return self.check_list(node_field, pattern_field) if isinstance(pattern_field, AST): return Check(node_field, self.placeholders).visit(pattern_field) return Check.strict_eq(pattern_field, node_field)
[ 101, 590 ]
def METHOD_NAME(pkt): res = {} if pkt is not None: res["DF"] = "Y" if pkt.flags.DF else "N" res["W"] = "%X" % pkt.window res["ACK"] = "S++" if pkt.ack == 2 else "S" if pkt.ack == 1 else "O" res["Flags"] = str(pkt[TCP].flags)[::-1] res["Ops"] = "".join(x[0][0] for x in pkt[TCP].options) else: res["Resp"] = "N" return res
[ 10485, 10486, 5136 ]
def METHOD_NAME(self): if self.facet_providers is None: self._load_facets_configuration() return self.facet_providers.values()
[ 19, 2190 ]
def METHOD_NAME(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.METHOD_NAME() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].METHOD_NAME()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(CpuGeneratorCoreConfig, dict): for key, value in self.items(): result[key] = value return result
[ 24, 553 ]
def METHOD_NAME(item, is_residue=None): """ Calculate the mass for the given object. :footcite:`Meija2016` If a residue name is given, the mass values refer to the masses of the complete molecule without additional or missing protons. In case of residues in a longer chain, some atoms might be missing from the molecule. For example non-terminal residues in a protein or nucleotide chain miss the mass of a water molecule. Parameters ---------- item : str or Atom or AtomArray or AtomArrayStack The atom or molecule to get the mass for. If a string is given, it is interpreted as residue name or chemical element. If an :class:`Atom` is given the mass is taken from its element. If an :class:`AtomArray` or :class:`AtomArrayStack` is given the mass is the sum of the mass of its atoms. is_residue : bool, optional If set to true and a string is given for `item`, the string will be strictly interpreted as residue. If set to false, the string is strictly interpreted as element. By default the string will be interpreted as element at first and secondly as residue name, if the element is unknown. Returns ------- mass : float or None The mass of the given object in *u*. None if the mass is unknown. References ---------- .. footbibliography:: Examples -------- >>> print(mass(atom_array)) 2170.438 >>> first_residue = list(residue_iter(atom_array))[0] >>> print(first_residue) A 1 ASN N N -8.901 4.127 -0.555 A 1 ASN CA C -8.608 3.135 -1.618 A 1 ASN C C -7.117 2.964 -1.897 A 1 ASN O O -6.634 1.849 -1.758 A 1 ASN CB C -9.437 3.396 -2.889 A 1 ASN CG C -10.915 3.130 -2.611 A 1 ASN OD1 O -11.269 2.700 -1.524 A 1 ASN ND2 N -11.806 3.406 -3.543 A 1 ASN H1 H -8.330 3.957 0.261 A 1 ASN H2 H -8.740 5.068 -0.889 A 1 ASN H3 H -9.877 4.041 -0.293 A 1 ASN HA H -8.930 2.162 -1.239 A 1 ASN HB2 H -9.310 4.417 -3.193 A 1 ASN HB3 H -9.108 2.719 -3.679 A 1 ASN HD21 H -11.572 3.791 -4.444 A 1 ASN HD22 H -12.757 3.183 -3.294 >>> print(mass("ASN")) 132.118 >>> first_atom = first_residue[0] >>> print(first_atom) A 1 ASN N N -8.901 4.127 -0.555 >>> print(mass(first_atom)) 14.007 >>> print(mass("N")) 14.007 """ if isinstance(item, str): if is_residue is None: result_mass = _atom_masses.get(item.upper()) if result_mass is None: result_mass = _res_masses.get(item.upper()) elif not is_residue: result_mass = _atom_masses.get(item.upper()) else: result_mass = _res_masses.get(item.upper()) elif isinstance(item, Atom): result_mass = METHOD_NAME(item.element, is_residue=False) elif isinstance(item, AtomArray) or isinstance(item, AtomArrayStack): result_mass = sum( (METHOD_NAME(element, is_residue=False) for element in item.element) ) else: raise TypeError( f"Cannot calculate mass for {type(item).__name__} objects" ) if result_mass is None: raise KeyError(f"{item} is not known") return result_mas
[ 2858 ]
def METHOD_NAME(node): if node is None: return None assert isinstance(node, (ast.expr, ast.stmt)) if skip_incorrect and getattr(node, "incorrect_range", False): return None return node
[ 1217, 1716 ]
async def METHOD_NAME(self): async with assert_device_properties_set( self.subject._device, {SWITCH_DPS: True, HVACMODE_DPS: "0"}, ): await self.subject.async_set_hvac_mode(HVACMode.COOL)
[ 9, 0, 7162, 854, 24, 10350 ]
def METHOD_NAME(matchers, expected): assert repr(filters.Filter(matchers)) == expected
[ 9, 527, 92 ]
def METHOD_NAME(addr, expected): assert verify_addresses.strip_suffix(addr) == expected
[ 9, 1360, 4064, 432 ]
def METHOD_NAME(self, mocker): """ Using an IP address as input. Existing source.abuse_contact should not be overwritten. """ if self.mock: prepare_mocker(mocker) else: mocker.real_http = True self.input_message = INPUT_IP self.run_bot(parameters={'overwrite': False}) self.assertMessageEqual(0, OUTPUT_IP_NO_OVERWRITE)
[ 9, 1213, 654, 3345 ]
def METHOD_NAME(value): return datetime.datetime(value - 1, 1, 1)
[ 3351, 447 ]
def METHOD_NAME(self): return not self.active, self.time_left()
[ 266, 59 ]
def METHOD_NAME(lab: typing.Optional[selftest.SelftestHost] = None) -> None: """Test if using a path on the wrong host fails.""" with lab or selftest.SelftestHost() as lh: p = lh.workdir / "folder" / "file.txt" with tbot.acquire_lab() as lh2: raised = False try: # mypy detects that this is wrong lh2.exec0("echo", p) # type: ignore except tbot.error.WrongHostError: raised = True assert raised # It is ok to clone a machine and reuse the path with lh.clone() as lh3: lh3.exec0("echo", p) lh.exec0("mkdir", "-p", p.parent) assert p.parent.is_dir() lh.exec0("uname", "-a", linux.RedirStdout(p)) assert p.is_file() lh.exec0("rm", "-r", p.parent) assert not p.exists() assert not p.parent.exists()
[ 5028, 157, 7700 ]
def METHOD_NAME(coord: tuple): d = min(len2(coord[0], coord[1]), min(len2(coord[1], coord[2]) * 0.8, min(len2(coord[0], coord[2]) * 0.6, len3(coord) * 0.2))) return 1.0 - d
[ 1260 ]
def METHOD_NAME(opts: Dict[str, Any]) -> str: mode = opts['highlight-mode'] if mode == 'hljs': return '' elif mode == 'pygments': return 'highlight' elif mode == 'ace': return 'ace-tm'
[ 19, 8186, 2 ]
def METHOD_NAME(cls, context): return context.scene.render.engine == "PLASMA_GAME"
[ 1237 ]
def METHOD_NAME(self): """We expect a list view with `included_resources` to have three queries: 1. Primary resource COUNT query 2. Primary resource SELECT 3. Comments prefetched """ with self.assertNumQueries(3): response = self.client.get( "/entries?fields[entries]=comments&page[size]=25" ) self.assertEqual(len(response.data["results"]), 25)
[ 9, 539, 518, 1916, 3904, 1614 ]