text: stringlengths 15 – 7.82k
ids: sequencelengths 1 – 7
def METHOD_NAME(cls, org):
    """
    Check if the given organization is present in any of the site configurations.

    Returns:
        True if the given organization is present in site configurations, otherwise False.
    """
    return org in cls.get_all_orgs()
[ 220, 3411 ]
def METHOD_NAME(monkeypatch: pytest.MonkeyPatch):
    labels = {"prefect.resource.id": "the.thing"}
    labels.update({str(i): str(i) for i in range(10)})
    monkeypatch.setattr("prefect.events.schemas.MAXIMUM_LABELS_PER_RESOURCE", 10)
    with pytest.raises(ValidationError, match="maximum number of labels"):
        Resource(__root__=labels)
[ 9, 1467, 69, 415 ]
def METHOD_NAME(self, scoreboard: Scoreboard) -> Embed:
    """
    Ends the question and displays the statistics on who got the question correct,
    awards points, etc.

    Returns:
        An embed displaying the correct answers and the % of people that chose each answer.
    """
    guesses = self.question.stop()
    labels = ascii_uppercase[:len(self.question.answers)]

    answer_embed = Embed(
        title=f"The correct answer for Question {self.question.number} was...",
        description=self.question.correct
    )

    if len(guesses) != 0:
        answers_chosen = {
            answer_choice: len(
                tuple(filter(lambda x: x[0] == answer_choice, guesses.values()))
            )
            for answer_choice in labels
        }

        answers_chosen = dict(
            sorted(answers_chosen.items(), key=lambda item: item[1], reverse=True)
        )

        for answer, people_answered in answers_chosen.items():
            is_correct_answer = dict(self.question.answers)[answer[0]] == self.question.correct

            # Setting the color of answer_embed to the % of people that got it correct via the mapping
            if is_correct_answer:
                # Maps the % of people who got it right to a color, from a range of red to green
                percentage_to_color = [0xFC94A1, 0xFFCCCB, 0xCDFFCC, 0xB0F5AB, 0xB0F5AB]
                answer_embed.color = percentage_to_color[round(people_answered / len(guesses) * 100) // 25]

            field_title = (
                (":white_check_mark: " if is_correct_answer else "")
                + f"{people_answered} players ({people_answered / len(guesses) * 100:.1f}%) chose"
            )

            # The `ord` function is used here to change the letter to its corresponding position
            answer_embed.add_field(
                name=field_title,
                value=self.question.answers[ord(answer) - 65][1],
                inline=False
            )

    # Assign points to users
    for user_id, answer in guesses.items():
        if dict(self.question.answers)[answer[0]] == self.question.correct:
            scoreboard.assign_points(
                int(user_id),
                points=(1 - (answer[-1] / self.question.time) / 2) * self.question.max_points,
                speed=answer[-1]
            )
        elif answer[-1] <= 2:
            scoreboard.assign_points(
                int(user_id),
                points=-(1 - (answer[-1] / self.question.time) / 2) * self.question.max_points
            )
        else:
            scoreboard.assign_points(
                int(user_id),
                points=0
            )

    return answer_embed
[ 1798, 1745 ]
def METHOD_NAME(self):
    Config.alias("STORAGE_ALIAS", "STORAGE")
    Config.alias("STORAGE_ALIAS_ALIAS", "STORAGE_ALIAS")
    cfg = Config(STORAGE_ALIAS_ALIAS="z")
    expect(cfg.STORAGE).to_equal("z")
    expect(cfg.STORAGE_ALIAS).to_equal("z")
    expect(cfg.STORAGE_ALIAS_ALIAS).to_equal("z")
[ 9, 41, 4273, 2334 ]
def METHOD_NAME(subscription_id: str, **kwargs: Any) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-05-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallManagedRuleSets",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }

    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
[ 56, 245, 377 ]
def METHOD_NAME(self, l, s, sp): return sp * (2 * s - 1) / l
[ -1 ]
def METHOD_NAME(partner_id: int):
    """Get all members of a partner.

    Accepts Query Parameters for pagination:
        per_page: number of results per page
        page: page number
    """
    args = request.args
    q_page = args.get("page", 1, type=int)
    q_per_page = args.get("per_page", 20, type=int)
    # partner = Partner.get(partner_id)

    all_members = db.session.query(PartnerMember).filter(
        PartnerMember.partner_id == partner_id
    )

    results = all_members.paginate(
        page=q_page, per_page=q_per_page, max_per_page=100)

    return {
        "results": [
            partner_member_orm_to_json(member)
            for member in results.items
        ],
        "page": results.page,
        "totalPages": results.pages,
        "totalResults": results.total,
    }
[ 19, 7618, 3563 ]
def METHOD_NAME(self): """ Generates a GDF co-annotation graph. """ include = [*self.parameters.get("include", []), "file_name"] network_parameters = {"generated_by": "4CAT Capture & Analysis Toolkit", "source_dataset_id": self.source_dataset.key} network = nx.Graph(**network_parameters) try: min_confidence = float(self.parameters.get("min_confidence", 0)) except ValueError: min_confidence = 0 for annotations in self.source_dataset.iterate_items(self): file_annotations = {} annotations = {atype: annotations[atype] for atype in include if atype in annotations} if not annotations: continue for annotation_type, tags in annotations.items(): if self.interrupted: raise ProcessorInterruptedException("Interrupted while processing Google Vision API output") if annotation_type == "file_name": continue if annotation_type == "webDetection": # handle web entities separately, since they're structured a bit # differently for entity in [e["description"] for e in tags.get("webEntities", []) if "description" in e]: node_id = "webEntity:" + entity file_annotations[node_id] = {"node_id": node_id, "category": "webEntity", "label": entity, "confidence": -1} else: # handle the other features here, since they all have a similar # structure short_type = annotation_type.split("Annotation")[0] label_field = "name" if annotation_type == "localizedObjectAnnotations" else "description" for tag in tags: if min_confidence and "score" in tag and tag["score"] < min_confidence: # skip if we're not so sure of the accuracy continue node_id = short_type + ":" + tag[label_field] file_annotations[node_id] = {"node_id": node_id, "category": short_type, "label": tag[label_field], "confidence": float(tag.get("score", -1))} # save with a label of the format 'landmark:Eiffel Tower' for node_id, annotation in file_annotations.items(): if node_id not in network.nodes(): network.add_node(node_id, **annotation) # save pairs for from_annotation in file_annotations: for to_annotation in file_annotations: if from_annotation == to_annotation: continue edge = (from_annotation, to_annotation) if edge in network.edges(): network[from_annotation][to_annotation]["weight"] += 1 else: network.add_edge(*edge, weight=1) nx.write_gexf(network, self.dataset.get_results_path()) self.dataset.finish(len(network.nodes()))
[ 356 ]
def METHOD_NAME(self):
    # Get setting through fresh query
    ss = self.test_app.settings_set.select_subclasses().get(name="primary_condor")

    # Execute
    ret = ss.get_value()

    # Check result
    self.assertIsInstance(ret, CondorScheduler)
    self.assertEqual("test_condor_scheduler", ret.name)
    self.assertEqual("https://example.com", ret.host)
    self.assertEqual(33, ret.port)
    self.assertEqual("condor", ret.username)
    self.assertEqual("password", ret.password)
    self.assertEqual("/path/to/some/key", ret.private_key_path)
    self.assertEqual("secret", ret.private_key_pass)
[ 9, 19, 99, 12565, 549 ]
def METHOD_NAME(token, issue_number, label):
    """https://docs.github.com/en/rest/reference/issues#add-labels-to-an-issue"""
    url = f'{GITHUB_API_URL}/issues/{issue_number}/labels'
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': f'token {token}'
    }
    data = [label]
    with requests.post(url, headers=headers, data=json.dumps(data), timeout=TIMEOUT) as response:
        logging.info("add_label: %s response: %s", url, response)
[ 238, 636 ]
def METHOD_NAME(
    self,
    body: "SupportsStr",
    *,  # keyword-only args:
    icon: Optional[str] = None,
) -> "DeltaGenerator":
    """Display error message.

    Parameters
    ----------
    body : str
        The error text to display.
    icon : str or None
        An optional, keyword-only argument that specifies an emoji to use as
        the icon for the alert. Shortcodes are not allowed, please use a
        single character instead. E.g. "🚨", "🔥", "🤖", etc.
        Defaults to None, which means no icon is displayed.

    Example
    -------
    >>> import streamlit as st
    >>>
    >>> st.error('This is an error', icon="🚨")

    """
    alert_proto = AlertProto()
    alert_proto.icon = validate_emoji(icon)
    alert_proto.body = clean_text(body)
    alert_proto.format = AlertProto.ERROR
    return self.dg._enqueue("alert", alert_proto)
[ 168 ]
def METHOD_NAME(): open(SYNCFILE, "wb").close()
[ 129, 164, 171 ]
async def METHOD_NAME(manager, compute_project):
    manager._dynamips_ids[compute_project.id] = set([1, 2, 3])

    project_dir = compute_project.module_working_path(manager.module_name.lower())
    os.makedirs(project_dir)
    open(os.path.join(project_dir, "test.ghost"), "w+").close()

    await manager.project_closed(compute_project)

    assert not os.path.exists(os.path.join(project_dir, "test.ghost"))
    assert compute_project.id not in manager._dynamips_ids
[ 9, 155, 4703 ]
def METHOD_NAME(state, i, val):
    s = state.cloned()
    mv = s.measure(i)
    if mv != val:
        s.X(i)
    return s
[ 0, 99, 47, 301 ]
def METHOD_NAME(self, parser): parser.add_argument("action", type=str)
[ 238, 134 ]
def METHOD_NAME(string_values, name="text"):
    """
    Create a tensor whose value holds a list of strings.
    NOTICE: The value will be held in the CPU place.

    Args:
        string_values(list[string]): The value to be set on the tensor.
        name(string): The name of the tensor.
    """
    tensor = paddle.Tensor(core.VarDesc.VarType.STRING, [], name,
                           core.VarDesc.VarType.STRINGS, False)
    tensor.value().set_string_list(string_values)
    return tensor
[ 24, 768 ]
def METHOD_NAME(apps, schema_editor):
    """
    Add sample images to default destinations, and copy the images to the media dir
    """
    # Only run in development
    if not settings.DEBUG:
        return

    copy_default_images()

    Destination = apps.get_model('destinations', 'Destination')
    Event = apps.get_model('destinations', 'Event')

    for event in get_sample_events():
        event_destination = Destination.objects.get(name='Independence Seaport Museum')
        event['destination'] = event_destination
        sample_event = Event.objects.filter(name=event['name']).first()
        if not sample_event:
            sample_event = Event(**event)
            sample_event.save()
        paths = get_image_paths(event['name'])
        sample_event = Event.objects.filter(name=event['name']).first()
        image = paths.pop()
        image_wide = paths.pop()
        sample_event.image = image
        sample_event.image_raw = image
        sample_event.wide_image = image_wide
        sample_event.wide_image_raw = image_wide
        for path in paths:
            extra = sample_event.extraeventpicture_set.create()
            extra.image_raw = path
            extra.save()
            sample_event.extraeventpicture_set.add(extra)
        sample_event.save()
[ 238, 734, 239 ]
def METHOD_NAME(self):  # noqa: R0201  # pylint: disable=no-self-use
    return {"requires_y": True}
[ 4138, 114 ]
def METHOD_NAME(self, resource):
    params_model = {}
    current_freeform_tags = resource.get("freeform_tags")
    current_defined_tags = resource.get("defined_tags")
    if self.data.get("freeform_tags"):
        delete_tag_lists = self.data.get("freeform_tags")
        for tag in delete_tag_lists:
            if tag in current_freeform_tags:
                current_freeform_tags.pop(tag)
            else:
                log.info("%s tag does not exist.", tag)

    if self.data.get("defined_tags"):
        delete_tag_lists = self.data.get("defined_tags")
        for tag in delete_tag_lists:
            splits = tag.split(".")
            if len(splits) == 2 and (splits[0] in current_defined_tags):
                namespace = current_defined_tags.get(splits[0])
                if splits[1] in namespace:
                    namespace.pop(splits[1])
                else:
                    log.info("%s tag does not exist", splits[1])
            else:
                log.info(
                    (
                        "Defined %s namespace might be wrong or does not exist in the"
                        " resource - %s"
                    ),
                    splits[0],
                    resource.get("name"),
                )

    params_model["freeform_tags"] = current_freeform_tags
    params_model["defined_tags"] = current_defined_tags
    return params_model
[ 188, 82 ]
def METHOD_NAME(actual, desired, rtol=1e-7, atol=0, err_msg='', verbose=True):
    """Raises an AssertionError if objects are not equal up to desired tolerance.

    Args:
         actual(numpy.ndarray or cupy.ndarray): The actual object to check.
         desired(numpy.ndarray or cupy.ndarray): The desired, expected object.
         rtol(float): Relative tolerance.
         atol(float): Absolute tolerance.
         err_msg(str): The error message to be printed in case of failure.
         verbose(bool): If ``True``, the conflicting values are appended to the
             error message.

    .. seealso:: :func:`numpy.testing.assert_allclose`
    """  # NOQA
    numpy.testing.METHOD_NAME(
        cupy.asnumpy(actual), cupy.asnumpy(desired),
        rtol=rtol, atol=atol, err_msg=err_msg, verbose=verbose)
[ 638, 5362 ]
def METHOD_NAME(self): """Build pie chart.""" df = self.df width = self.width height = self.height name = self.name plt.rcParams.update({"figure.max_open_warning": 0}) category_name = df.columns[0] value_name = df.columns[1] df = df.sort_values(by=value_name, ascending=False) category_column = df[category_name] value_column = df[df.columns[1]] labels = category_column plt.gca().axis("equal") def autopct(pct): """Get percentages for the pie chart slices > 10%.""" return ("%1.0f%%" % pct) if pct > 1 else "" METHOD_NAME = plt.METHOD_NAME( value_column, startangle=0, radius=1, autopct=autopct, textprops={"color": "w", "fontsize": 7}, ) plt.legend( METHOD_NAME[0], labels, bbox_to_anchor=(1, 0.5), loc="center right", fontsize=7, bbox_transform=plt.gcf().transFigure, frameon=False, ) plt.subplots_adjust(left=0.2, wspace=0.2) plt.gcf().set_size_inches( width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR ) plt.savefig( BASE_DIR + "/assets/" + name, transparent=True, dpi=500, bbox_inches="tight" ) plt.clf()
[ 3672 ]
def METHOD_NAME(conn):
    cur = conn.cursor()
    ensure_table(cur, "id serial primary key, data text")

    data = "".join(chr(randrange(1, 256)) for i in range(10 * 1024 * 1024))
    with cur.copy("copy copy_in (data) from stdin") as copy:
        copy.write_row([data])

    cur.execute("select data from copy_in limit 1")
    assert cur.fetchone()[0] == data
[ 9, 215, 4289, 1318, 148 ]
def METHOD_NAME(operations, ranges, entity_map, entity):
    """Prepare operations dict defining operations on specific indexes.

    Data format:
    - key: index value
    - value: list of html elements that should be inserted into text on specific index
    """
    for range_date in ranges:
        tag = "a" if entity else TAG_MAPPING[range_date["style"]]
        offset = range_date["offset"]
        length = offset + range_date["length"] - 1
        if entity:
            entity_key = str(range_date["key"])
            href = entity_map[entity_key]["data"]["url"]
            start_tag = f'{tag} href="{href}"'
        else:
            start_tag = tag if tag != "code" else tag + ' class="inline-code"'
        operations[offset - 1].append(f"<{start_tag}>")
        operations[length] = [f"</{tag}>"] + operations[length]
[ 123, 710 ]
def METHOD_NAME(addr: str) -> bytearray:
    try:
        raw_address = base58.decode_check(addr, blake256d_32)
    except ValueError:
        raise DataError("Invalid address")

    w = utils.empty_bytearray(26)
    w.append(0xBD)  # OP_SSTXCHANGE
    scripts.write_output_script_p2pkh(w, raw_address[2:])
    return w
[ 146, 782, -1 ]
def METHOD_NAME(self, post):
    login_result = Mock()
    login_result.text = '...<a href="https://tr.anidub.com/index.php?action=logout">...'
    post.return_value = login_result

    with self.assertRaises(AnidubLoginFailedException) as e:
        self.tracker.login(helper.fake_login, helper.fake_password)
    self.assertEqual(e.exception.code, 2)
    self.assertEqual(e.exception.message, 'Failed to retrieve cookies')
[ 9, 273, 1423, 4177 ]
def METHOD_NAME(environment_name: Optional[pulumi.Input[str]] = None,
                registry_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegistryEnvironmentContainerResult]:
    """
    Azure Resource Manager resource envelope.

    :param str environment_name: Container name. This is case-sensitive.
    :param str registry_name: Name of Azure Machine Learning registry. This is case-insensitive
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    ...
[ 19, 510, 1027, 224, 146 ]
def METHOD_NAME(self):
    stgs = settings.Settings()
    stgs.process_config_path(self.config_file.name)
    config = stgs.as_dict()
    self.assertIsNone(config.get("foo.non_existing"))
[ 9, 256, 1153, 59 ]
def METHOD_NAME(self):
    class CustomException(Exception):
        pass

    class BadComplexLike:
        def __complex__(self):
            raise CustomException("something went wrong")

    a = self.test_class()
    with self.assertRaises(CustomException):
        a.value = BadComplexLike()
[ 9, 504, 6579 ]
def METHOD_NAME(self, ipath):
    """
    Validate a spirv module.

    Args:
        ipath: Input file path of the spirv module.
    """
    flags = []
    check_call(["spirv-val", *flags, ipath])
[ 187 ]
def METHOD_NAME(self):
    with self.mock_modules('pkg.__init__') as mock:
        with util.import_state(meta_path=[mock]):
            del mock['pkg'].__package__
            module = self.__import__('pkg')
            self.assertEqual(module.__package__, 'pkg')
[ 9, 360 ]
async def METHOD_NAME(_, e): recorded_errors.append(e)
[ 69, 462, 168, 2560 ]
def METHOD_NAME(_context, _value):
    return ExpectationResult(
        success=True,
        label="output_table_exists",
        description=f"Checked {name} exists",
    )
[ 74, 2908 ]
def METHOD_NAME(): """Command that creates a composite of several workflows.""" return ( Command().command(compose_workflow).require_migration().require_clean().with_database(write=True).with_commit() )
[ 166, 3855, 462 ]
def METHOD_NAME(self, run_namespace=None):
    rates_var = self.variables["rates"]
    if isinstance(rates_var, Subexpression):
        # Check that the units of the expression make sense
        expr = rates_var.expr
        identifiers = get_identifiers(expr)
        variables = self.resolve_all(
            identifiers, run_namespace, user_identifiers=identifiers
        )
        unit = parse_expression_dimensions(rates_var.expr, variables)
        fail_for_dimension_mismatch(
            unit,
            Hz,
            "The expression provided for "
            "PoissonGroup's 'rates' "
            "argument, has to have units "
            "of Hz",
        )
    super().METHOD_NAME(run_namespace)
[ 1553, 22 ]
def METHOD_NAME(self): "Test ML.CompObject UpdateFlops method" compObject = ML.CompObject() self.assertEqual(compObject.GetFlops(), 0.0) flops = 3.14e6 compObject.SetFlops(flops) self.assertEqual(compObject.GetFlops(), flops) compObject.UpdateFlops(flops) self.assertEqual(compObject.GetFlops(), 2*flops)
[ 9, 86, 230 ]
def METHOD_NAME(self) -> bool: return True
[ 1338 ]
async def METHOD_NAME(c, s, a, b):
    """Test Client.rebalance(). This test is just to test the TLS Client wrapper
    around Scheduler.rebalance(); for more thorough tests on the latter see
    test_scheduler.py.
    """
    assert a.address.startswith("tls://")
    futures = await c.scatter(range(100), workers=[a.address])
    assert len(a.data) == 100
    assert len(b.data) == 0
    await c.rebalance()
    assert len(a.data) == 50
    assert len(b.data) == 50
[ 9, 7565 ]
def METHOD_NAME(
    request, submission, testrun_report_id=None, model=TestRunReport
):
    qs = model.objects.filter(submission_report__submission=submission)
    if is_contest_admin(request) and testrun_report_id is not None:
        qs = qs.filter(id=testrun_report_id)
    else:
        qs = qs.filter(submission_report__status='ACTIVE')
    return get_object_or_404(qs)
[ 19, 15159, 339, 894, 2121 ]
def METHOD_NAME(path):
    name = os.path.basename(path)
    if re.match('^[0-9]+$', name):
        return int(name)
    elif re.match('^[0-9a-f]+$', name):
        return int(name, 16)
    else:
        return 0
[ 56, 59 ]
def METHOD_NAME(self): """ If the interaction fails to produce a result then L{interact} raises an exception. """ with self.assertRaises(Exception): interact(Protocol(), Protocol(), Deferred())
[ 9, 6600 ]
async def METHOD_NAME(self, *, requester: RequestType, user_id: int) -> None: return
[ 4420, 34, 365, 43, 21 ]
def METHOD_NAME(file_path) -> Image:
    image = Image(
        end_line=18,
        start_line=16,
        name='mongo:2.6.8',
        file_path=file_path,
        related_resource_id='executors(image-executor).docker.image[1](mongo:2.6.8)',
    )
    return image
[ 1263, 975, -1 ]
def METHOD_NAME(self):
    gm = GeoMGRS("test_coord_1.234")
    nt.assert_equal(str(gm), "[MGRS: test_coord_1.234]")
[ 9, 24, 3 ]
def METHOD_NAME(
    docker_registry: UrlStr,
    core_stack_namespace: str,
    ops_stack_namespace: str,
    core_stack_compose_specs: ComposeSpec,
    docker_client: DockerClient,
) -> list[Service]:
    # NOTE: the goal here is NOT to test time-to-deploy but
    # rather guaranteeing that the framework is fully deployed before starting
    # tests. Obviously in a critical state in which the framework has a problem
    # the fixture will fail
    try:
        for attempt in Retrying(
            wait=wait_fixed(5),
            stop=stop_after_delay(4 * _MINUTE),
            before_sleep=before_sleep_log(log, logging.INFO),
            reraise=True,
        ):
            with attempt:
                for service in docker_client.services.list():
                    assert_service_is_running(service)

    finally:
        for stack_namespace in (core_stack_namespace, ops_stack_namespace):
            subprocess.run(
                f"docker stack ps {stack_namespace}", shell=True, check=False
            )

    # logs table like
    # ID            NAME                IMAGE                                      NODE          DESIRED STATE  CURRENT STATE               ERROR
    # xbrhmaygtb76  simcore_sidecar.1   itisfoundation/sidecar:latest              crespo-wkstn  Running        Running 53 seconds ago
    # zde7p8qdwk4j  simcore_rabbit.1    itisfoundation/rabbitmq:3.11.2-management  crespo-wkstn  Running        Running 59 seconds ago
    # f2gxmhwq7hhk  simcore_postgres.1  postgres:10.10                             crespo-wkstn  Running        Running about a minute ago
    # 1lh2hulxmc4q  simcore_director.1  itisfoundation/director:latest             crespo-wkstn  Running        Running 34 seconds ago
    # ...

    # TODO: find a more reliable way to list services in a stack
    core_stack_services: list[Service] = list(
        docker_client.services.list(
            filters={"label": f"com.docker.stack.namespace={core_stack_namespace}"}
        )
    )

    assert (
        core_stack_services
    ), f"Expected some services in core stack '{core_stack_namespace}'"

    assert len(core_stack_compose_specs["services"].keys()) == len(core_stack_services)

    return core_stack_services
[ 12185, 1501, 8845, 3186 ]
def METHOD_NAME(self, context, layout):
    layout.label(text="Output mode:")
    layout.prop(self, "out_mode", text="")
    if self.out_mode == 'OBJ':
        layout.prop(self, 'separate')
    else:
        layout.prop(self, 'sort')
[ 1100, 1409 ]
def METHOD_NAME(self, x):
    return F.conv2d(
        x,
        self.weight,
        self.bias,
        self.stride,
        self.padding,
        self.dilation,
        self.groups,
    )
[ 76 ]
def METHOD_NAME(method: DependencyMethods, env: 'Environment', for_machine: MachineChoice) -> bool:
    """Report whether a method is valid or not.

    If the method is valid, return true, otherwise return false. This is
    used in a list comprehension to filter methods that are not possible.

    By default this only removes EXTRAFRAMEWORK dependencies for non-mac platforms.
    """
    # Extra frameworks are only valid for macOS and other apple products
    if (method is DependencyMethods.EXTRAFRAMEWORK and
            not env.machines[for_machine].is_darwin()):
        return False
    return True
[ 356, 103 ]
def METHOD_NAME(self, app):
    resp = app.get("/wmts/myrest/tms_cache/GLOBAL_MERCATOR/01/0/0.png", status=500)
    xml = resp.lxml
    assert validate_with_xsd(xml, xsd_name="ows/1.1.0/owsExceptionReport.xsd")
    assert_xpath_wmts(
        xml, "/ows:ExceptionReport/ows:Exception/@exceptionCode", "NoApplicableCode"
    )
[ 9, 19, 4161, 1458, 168 ]
def METHOD_NAME(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-02-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "location": _SERIALIZER.url("location", location, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
[ 56, 245, 604, 708, 377 ]
def METHOD_NAME(self):
    if self.catalog is not None:
        return self.catalog.uri
[ 2824, 354 ]
def METHOD_NAME(
    mock_snmp_client, exploit_host: Callable[[], Tuple[bool, bool]]
):
    exploit_host()

    assert mock_snmp_client.create_command.called
[ 9, 4714, 63, 462 ]
async def METHOD_NAME(self, user_id: int, quoted_storage_file_id: str) -> None:
    response = await self.client.delete(
        f"/locations/{self.SIMCORE_S3_ID}/files/{quoted_storage_file_id}",
        params={"user_id": user_id},
    )
    response.raise_for_status()
[ 34, 171 ]
def METHOD_NAME(self, annotation):
    client = self.get_client()
    logger.debug(f'Creating new object on {self.__class__.__name__} Storage {self} for annotation {annotation}')
    ser_annotation = self._get_serialized_data(annotation)

    # get key that identifies this object in storage
    key = RedisExportStorageLink.get_key(annotation)

    # put object into storage
    client.set(key, json.dumps(ser_annotation))

    # create link if everything ok
    RedisExportStorageLink.create(annotation, self)
[ 73, 2141 ]
def METHOD_NAME(self):
    # Arrange
    indicator = AverageTrueRange(10)

    bar = TestDataStubs.bar_5decimal()

    # Act
    indicator.handle_bar(bar)

    # Assert
    assert indicator.has_inputs
    assert indicator.value == 2.999999999997449e-05
[ 9, 276, 681, 682, 662 ]
def METHOD_NAME(name, idx): return GetItemSource(LocalSource(name), idx)
[ 1458 ]
def METHOD_NAME():
    with pytest.warns(DeprecationWarning, match="Converting to complex64") as record:
        Plan(1, (8, 8), dtype="float32")
    with pytest.warns(DeprecationWarning, match="Converting to complex128") as record:
        Plan(1, (8, 8), dtype="float64")
[ 9, 1249, 595 ]
def METHOD_NAME(self) -> bool:
    if self._is_connected():
        return True

    try:
        LOG.debug(f'Connect statistic middleware server: {self._stat_address}')
        self._stat_mng_client = AddrPickableDataClient(self._stat_address)
        return True
    except BaseException as exc:
        if not isinstance(exc, ConnectionRefusedError):
            LOG.error(f'Failed to connect statistic middleware server: {self._stat_address}', exc_info=exc)
        else:
            LOG.debug(f'Failed to connect statistic middleware server: {self._stat_address}, error: {str(exc)}')
        return False
[ 707, 3174 ]
def METHOD_NAME(self): pass
[ 709, 710 ]
def METHOD_NAME(): """Gets DB_PATHs of all existing components in Fed-BioMed root""" config_files = get_all_existing_config_files() db_names = {} for _config in config_files: config = get_component_config(_config) component_id = config['default']['id'] db_name = f"{DB_PREFIX}{component_id}" db_names = {**db_names, component_id: db_name} return db_names
[ 19, 1153, 1007, 1267, 83 ]
def METHOD_NAME(*remove, **update):
    """
    From: https://stackoverflow.com/questions/2059482/temporarily-modify-the-current-processs-environment

    Temporarily updates the ``os.environ`` dictionary in-place.
    The ``os.environ`` dictionary is updated in-place so that the modification
    is sure to work in all situations.

    :param remove: Environment variables to remove.
    :param update: Dictionary of environment variables and values to add/update.
    """
    env = os.environ
    update = update or {}
    remove = remove or []

    # List of environment variables being updated or removed.
    stomped = (set(update.keys()) | set(remove)) & set(env.keys())
    # Environment variables and values to restore on exit.
    update_after = {k: env[k] for k in stomped}
    # Environment variables and values to remove on exit.
    remove_after = frozenset(k for k in update if k not in env)

    try:
        env.update(update)
        [env.pop(k, None) for k in remove]
        yield
    finally:
        env.update(update_after)
        [env.pop(k) for k in remove_after]
[ 680, 4686 ]
def METHOD_NAME(): return CustomErrorHandler()
[ 343, 168, 1519 ]
def METHOD_NAME(self, interval=0.01):
    silent_iterations = 0
    while self.poll() is None:
        if self.stdout is not None:
            silent_iterations = 0
            self.recv()
        if self.stderr is not None:
            silent_iterations = 0
            self.recv_err()
        silent_iterations += 1
        if silent_iterations > 100:
            silent_iterations = 0
            (stdoutdata, stderrdata) = self.communicate()
            if stdoutdata:
                log.debug(stdoutdata)
            if stderrdata:
                log.error(stderrdata)
        time.sleep(interval)
[ 1237, 61, 203, 1238, 1239 ]
def METHOD_NAME(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    value_hyps = []
    value_refs = []
    for p, t in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(p, t)

        # Slot Value WER/CER evaluation
        unique_slots = list(ref_dict.keys())
        for slot in unique_slots:
            for ref_i, ref_v in enumerate(ref_dict[slot]):
                if slot not in hyp_dict:
                    hyp_v = ""
                    value_refs.append(ref_v)
                    value_hyps.append(hyp_v)
                else:
                    min_wer = 100
                    best_hyp_v = ""
                    for hyp_v in hyp_dict[slot]:
                        tmp_wer = wer([hyp_v], [ref_v])
                        if min_wer > tmp_wer:
                            min_wer = tmp_wer
                            best_hyp_v = hyp_v
                    value_refs.append(ref_v)
                    value_hyps.append(best_hyp_v)

    return wer(value_hyps, value_refs)
[ 3572, 99, 3140 ]
def METHOD_NAME(): """Test calling query interface""" respx.get(re.compile(r"https://pulsedive\.com/api/explore\.php.*")).respond( 200, json=_QUERY_RESP ) pd_lookup = PDlookup(pd_key="ACCESS") result = pd_lookup.explore("https://evil.com") check.is_instance(result, pd.DataFrame) check.equal(len(result), 1)
[ 9, 6616, 539 ]
def METHOD_NAME(self) -> bool: return time.time() > self.expires
[ 220, 3426 ]
def METHOD_NAME(self) -> None:
    m = ControlFlow()
    gm = torch._export.export(m, (torch.tensor(True), torch.randn(3, 4))).graph_module

    # No error should be raised
    verifier = ATenDialectVerifier()
    verifier(gm)
[ 9, 10195, 401, 233, 1434 ]
def METHOD_NAME(preprocessed_data_dir):
    print("Preparing for preprocessing data...")

    # Validation set is fold 1
    fold = 1
    validation_fold_file = '../models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/folds/fold1_validation.txt'

    # Make sure the model exists
    model_dir = 'build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1'
    model_path = os.path.join(model_dir, "plans.pkl")
    assert os.path.isfile(model_path), "Cannot find the model file {:}!".format(model_path)
    checkpoint_name = "model_final_checkpoint"

    # Other settings
    fp16 = False
    num_threads_preprocessing = 12
    raw_data_dir = 'build/raw_data/nnUNet_raw_data/Task043_BraTS2019/imagesTr'

    # Open list containing validation images from specific fold (e.g. 1)
    validation_files = []
    with open(validation_fold_file) as f:
        for line in f:
            validation_files.append(line.rstrip())

    # Create output and preprocessed directory
    if not os.path.isdir(preprocessed_data_dir):
        os.makedirs(preprocessed_data_dir)

    # Create list of images locations (i.e. 4 images per case => 4 modalities)
    all_files = subfiles(raw_data_dir, suffix=".nii.gz", join=False, sort=True)
    list_of_lists = [[os.path.join(raw_data_dir, i) for i in all_files
                      if i[:len(j)].startswith(j) and len(i) == (len(j) + 12)]
                     for j in validation_files]

    # Preprocess images, returns filenames list
    # This runs in multiprocess
    print("Actually preprocessing data...")
    preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16,
                                           list_of_lists, validation_files,
                                           preprocessed_data_dir, num_threads_preprocessing)

    print("Saving metadata of the preprocessed data...")
    with open(os.path.join(preprocessed_data_dir, "preprocessed_files.pkl"), "wb") as f:
        pickle.dump(preprocessed_files, f)

    print("Preprocessed data saved to {:}".format(preprocessed_data_dir))
    print("Done!")
[ 666, 102 ]
def METHOD_NAME(self):
    try:
        return {
            step: [self.convert_time(eval(x)) for x in items]
            for step, items in self.data["transcript"].items()
        }
    except Exception as e:
        env.logger.warning(str(e))
        return {}
[ 14961 ]
def METHOD_NAME(self, module: Any, name: Optional[str] = None) -> None:
    if name is None:
        name = str(len(self._modules))
    if name in self._modules:
        raise KeyError('name exists')
    self.add_module(name, module)
[ 238 ]
def METHOD_NAME(self): """Test that admin can view draft.""" # Login as admin self.user = self.login() # Try getting page draft response = self.client.get( reverse("wagtailadmin_pages:view_draft", args=(self.child_page.id,)) ) # User can view self.assertEqual(response.status_code, 200)
[ 9, 255, 1089, 2870 ]
def METHOD_NAME(
    self,
    sample: dict[str, Any],
    show_titles: bool = True,
    suptitle: Optional[str] = None,
) -> plt.Figure:
    """Plot a sample from the dataset.

    Args:
        sample: a sample returned by :meth:`VectorDataset.__getitem__`
        show_titles: flag indicating whether to show titles above each panel
        suptitle: optional string to use as a suptitle

    Returns:
        a matplotlib Figure with the rendered sample

    .. versionchanged:: 0.3
        Method now takes a sample dict, not a Tensor. Additionally, it is
        possible to show subplot titles and/or use a custom suptitle.
    """
    image = sample["mask"].squeeze(0)
    ncols = 1

    showing_prediction = "prediction" in sample
    if showing_prediction:
        pred = sample["prediction"].squeeze(0)
        ncols = 2

    fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4, 4))

    if showing_prediction:
        axs[0].imshow(image)
        axs[0].axis("off")
        axs[1].imshow(pred)
        axs[1].axis("off")
        if show_titles:
            axs[0].set_title("Mask")
            axs[1].set_title("Prediction")
    else:
        axs.imshow(image)
        axs.axis("off")
        if show_titles:
            axs.set_title("Mask")

    if suptitle is not None:
        plt.suptitle(suptitle)
    return fig
[ 1288 ]
def METHOD_NAME():
    # 3DCNN workloads
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 0)
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 0)
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 1)
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 1)

    # bias, relu
    verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_relu=True)
    verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True)
    verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True, add_relu=True)

    # dilation = 2
    verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, 1, dilation=2)

    # batch size
    verify_conv3d_ncdhw(4, 64, 56, 5, 3, 1, 1)

    # weird workloads
    verify_conv3d_ncdhw(2, 2, 2, 2, 2, 2, 2)
    verify_conv3d_ncdhw(3, 3, 3, 3, 3, 3, 3)

    # Asymmetric padding
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 0, 0, 1, 1, 1))
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 2, 1, 2, 1))
    verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, (2, 2, 2, 1, 1, 1), dilation=2)
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 1, 1))
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 0))
    verify_conv3d_ncdhw(1, 32, 32, 1, 3, 1, "VALID")
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, "VALID")

    # DHW kernel layout
    verify_conv3d_ncdhw(1, 32, 56, 16, (3, 5, 7), 2, (1, 2, 3))
    verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 7), 2, (1, 2, 3, 0, 3, 2))
    verify_conv3d_ncdhw(1, 3, 56, 16, (3, 3, 7), 2, (1, 2, 3))
    verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 3), 2, (1, 3, 1))

    # grouped workloads
    verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 0, groups=4)
    verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 0, groups=4)
    verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 1, groups=4)
    verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 1, groups=4)
[ 9, 2143, 5144 ]
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
                project: Optional[pulumi.Input[Optional[str]]] = None,
                zone: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDiskIamPolicyResult]:
    """
    Retrieves the current IAM policy data for disk

    ## example

    ```python
    import pulumi
    import pulumi_gcp as gcp

    policy = gcp.compute.get_disk_iam_policy(project=google_compute_disk["default"]["project"],
        zone=google_compute_disk["default"]["zone"],
        name=google_compute_disk["default"]["name"])
    ```

    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    :param str zone: A reference to the zone where the disk resides. Used to find the parent resource to bind the IAM policy to. If not specified, the value will be parsed from the identifier of the parent resource. If no zone is provided in the parent identifier and no zone is specified, it is taken from the provider configuration.
    """
    ...
[ 19, 113, 1694, 54, 146 ]
def METHOD_NAME(tx: Transaction) -> Union[LegacyTransaction, Bytes]:
    """
    Encode a transaction. Needed because non-legacy transactions aren't RLP.
    """
    if isinstance(tx, LegacyTransaction):
        return tx
    elif isinstance(tx, AccessListTransaction):
        return b"\x01" + rlp.encode(tx)
    elif isinstance(tx, FeeMarketTransaction):
        return b"\x02" + rlp.encode(tx)
    else:
        raise Exception(f"Unable to encode transaction of type {type(tx)}")
[ 421, 1853 ]
def METHOD_NAME():
    # Arrange
    old_settings_dict = settings.read_yaml_file(
        "/code/tests/test_data/V2_8_5/settings.yaml"
    )
    v285 = migrations.CobblerVersion(2, 8, 5)

    # Act
    result = migrations.get_settings_file_version(old_settings_dict)

    # Assert
    assert result == v285
[ 9, 19, 817, 171, 281 ]
def METHOD_NAME(nb_simulations): return 5. + nb_simulations*5.
[ 104, 1835 ]
def METHOD_NAME(x,y): return (IOC_VOID|((x)<<8)|y)
[ 249 ]
def METHOD_NAME(self): """ Reads next request from the stream and processes it. """ in_request = False request = "" for line in self._rd: # Remove trailing newline line = line.decode('UTF-8').rstrip() if line == "#request begin": in_request = True elif line == "#request end": self._process(request) return True else: if in_request: request = request + line return False
[ 356, 377 ]
def METHOD_NAME(version: str):
    # Might want to work out some magic here to auto-identify the version from the commit
    if version is None:
        print("Version required for undo operation (try --help)")
        sys.exit(1)

    # Delete the version (good verification all by itself really)
    subprocess.run(["git", "tag", "-d", "v" + version], check=True)

    # Tag the commit we're about to delete because we could be deleting the wrong thing.
    savename = "version-undo-backup-" + str(int(time.time()))
    subprocess.run(["git", "tag", savename], check=True)

    # *Outright eliminate the commit from the branch!* - Dangerous if we get rid of the wrong commit, hence backup
    subprocess.run(["git", "reset", "--keep", "HEAD^"], check=True)
    print("Done (deleted commit saved as " + savename + ")")
[ 2796, 281 ]
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                slot: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWebAppPublishingCredentialsSlotResult]:
    """
    Description for Gets the Git/FTP publishing credentials of an app.

    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get the publishing credentials for the production slot.
    """
    ...
[ 245, 2412, 991, 14085, 3568, 3572, 146 ]
def METHOD_NAME(filepath, tags=["val_metric_global_model"]):
    data = {}
    for summary in tf.compat.v1.train.summary_iterator(filepath):
        for v in summary.summary.value:
            if v.tag in tags:
                if v.tag in data.keys():
                    data[v.tag].append([summary.step, v.simple_value])
                else:
                    data[v.tag] = [[summary.step, v.simple_value]]
    return data
[ 203, 2168 ]
def METHOD_NAME(self, tag, input_fn, sizes, ninputs, repeats):
[ 22, 1668 ]
def METHOD_NAME(self): pass
[ 531, 481 ]
def METHOD_NAME(self, context):
    self.inputs.new('SvStringsSocket', "X").prop_name = 'x_'
    self.inputs.new('SvStringsSocket', "Y").prop_name = 'y_'
    self.inputs.new('SvStringsSocket', "Z").prop_name = 'z_'
    self.width = 100
    self.outputs.new('SvVerticesSocket', "Vectors")
[ 2153, 176 ]
def METHOD_NAME(self, record):
    if not IS_TTY:
        return super(CliFormatter, self).METHOD_NAME(record)
    record.msg = format_multiline(record.msg)
    record.msg = ' '.join((self._prefix(record), record.msg))
    record.args = tuple(a if isinstance(a, NO_CAST) else safe_unicode(a)
                        for a in record.args)
    return super(CliFormatter, self).METHOD_NAME(record)
[ 275 ]
def METHOD_NAME(structure, num_dopants=5, threshold=0.001, match_oxi_sign=False):
    """
    Get dopant suggestions based on substitution probabilities.

    Args:
        structure (Structure): A pymatgen structure decorated with
            oxidation states.
        num_dopants (int): The number of suggestions to return for
            n- and p-type dopants.
        threshold (float): Probability threshold for substitutions.
        match_oxi_sign (bool): Whether to force the dopant and original species
            to have the same sign of oxidation state. E.g. If the original site
            is in a negative charge state, then only negative dopants will be
            returned.

    Returns:
        (dict): Dopant suggestions, given as a dictionary with keys "n_type" and
        "p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:

        - "probability": The probability of substitution.
        - "dopant_species": The dopant species.
        - "original_species": The substituted species.
    """
    els_have_oxi_states = [hasattr(s, "oxi_state") for s in structure.species]

    if not all(els_have_oxi_states):
        raise ValueError("All sites in structure must have oxidation states to predict dopants.")

    sp = SubstitutionPredictor(threshold=threshold)

    subs = [sp.list_prediction([s]) for s in set(structure.species)]
    subs = [
        {
            "probability": pred["probability"],
            "dopant_species": next(iter(pred["substitutions"])),
            "original_species": next(iter(pred["substitutions"].values())),
        }
        for species_preds in subs
        for pred in species_preds
    ]
    subs.sort(key=lambda x: x["probability"], reverse=True)

    return _get_dopants(subs, num_dopants, match_oxi_sign)
[ 19, 5752, 280, 4282, 3318 ]
def METHOD_NAME(self, value):
[ 0, 4463, 2591 ]
def METHOD_NAME(embed, page, port):
    svg = SVG(SVG_FILE, embed=embed)

    bbox = get_bbox(page, port, svg)

    assert bbox['width'] == 507.203125
    assert int(bbox['height']) == 427
[ 9, 10129, 1577, 1318 ]
def METHOD_NAME(assume_installed=True):
    paths = ['/usr/bin/dart', '/usr/lib/dart/bin/dart']
    for path in paths:
        if os.path.exists(path):
            if not assume_installed:
                print('Assumed not installed, found %s' % path)
                sys.exit(1)
        else:
            if assume_installed:
                print('Assumed installed, but could not find %s' % path)
                sys.exit(1)
[ 9, 3732 ]
def METHOD_NAME(self, request, *, station_id, seq):
    self.request = request
    self.station = Station.objects.get(pk=station_id)
    self.seq = seq
    self._check_permission()
    try:
        self.form = self._get_form_from_request()
    except RedirectToFirstStep:
        kwargs = {"station_id": self.station.id, "seq": 1}
        return HttpResponseRedirect(reverse("telemetry_wizard", kwargs=kwargs))
    if self.request.method == "POST" and self.form.is_valid():
        return self._process_valid_form_post()
    else:
        return self._process_get()
[ 2506 ]
def METHOD_NAME(): run_hdiff(dace.dtypes.DeviceType.CPU)
[ 9, 2265 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    The system metadata relating to this resource.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(self):
    self.response.write(
        '<h2>Number of providers: {}</h2>'.format(len(CONFIG)))
    links(self)
[ 19 ]
def METHOD_NAME(
    request_params: RequestParams,
) -> Dict[str, Dict[int, int]]:
    road_number = request_params.location_info["road1"]
    road_segment = request_params.location_info["road_segment_name"]
    start_time = request_params.start_time
    end_time = request_params.end_time
    cache_key = (road_number, road_segment, start_time, end_time)
    if cache_dict.get(cache_key):
        return cache_dict.get(cache_key)

    query = KilledAndInjuredCountPerAgeGroupWidgetUtils.create_query_for_killed_and_injured_count_per_age_group(
        end_time, road_number, road_segment, start_time
    )

    dict_grouped, has_data = KilledAndInjuredCountPerAgeGroupWidgetUtils.parse_query_data(query)

    if not has_data:
        return {}

    while len(cache_dict) > CACHE_MAX_SIZE:
        cache_dict.popitem(last=False)

    cache_dict[cache_key] = dict_grouped
    return dict_grouped
[ 527, 61, 846, 10728, 29, 2735, 2958 ]
def METHOD_NAME(
    self, email, password, test=None, domain="https://archive.org", **kwargs
):
    self.driver.get('%s/account/login.php' % domain)
    self.driver.find_element_by_id('username').send_keys(email)
    self.driver.find_element_by_id('password').send_keys(password)
    self.driver.find_element_by_name('submit').click()
    if test:
        assert (
            self.ia_is_logged_in()
        ), f"IA Login failed w/ username: {email} and password: {password}"
[ 16424, 273 ]
def METHOD_NAME(self, s):
    l = []
    for elem in self:
        if elem not in s:
            l.append(elem)
    for elem in l:
        self.remove(elem)
    return self
[ 2845, 86 ]
def METHOD_NAME(line): return line.strip().endswith("-grams:")
[ 6382, 7338, 1287 ]
def METHOD_NAME(self, global_sample_index: int) -> Tuple[int, ...]:
    """If you were to lay the tiles out in a grid, the tile layout shape
    would be the shape of the grid.

    Example:
        Sample shape:               (1000, 500)
        Tile shape:                 (10, 10)
        Output tile layout shape:   (100, 50)
    """
    tile_meta = self[global_sample_index]
    tile_shape = tile_meta[1]
    sample_shape = tile_meta[0]

    if len(tile_shape) != len(sample_shape):
        raise ValueError(
            "Tile shape and sample shape must have the same number of dimensions."
        )

    layout = [
        np.ceil(sample_shape_dim / tile_shape_dim)
        for tile_shape_dim, sample_shape_dim in zip(tile_shape, sample_shape)
    ]
    return tuple(int(x) for x in layout)
[ 19, 4161, 571, 555 ]
def METHOD_NAME(self):
    if os.name == 'posix':
        r, w, e = popen2.popen3([self.cmd])
        self.validate_output(self.teststr, self.expected, r, w, e)
    r, w, e = popen2.popen3(self.cmd)
    self.validate_output(self.teststr, self.expected, r, w, e)
[ 9, 13464 ]
def METHOD_NAME(dependency) -> Path:
    version = dependencies[dependency]['version']
    return Path(directory_for_dependency(dependency, version))
[ 19, 2913, 2851 ]