text: stringlengths 15 to 7.82k
ids: sequencelengths 1 to 7
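Each row below pairs a `text` value (a Python snippet whose function name is masked as METHOD_NAME) with an `ids` list. As a minimal sketch of how the rows could be consumed, assuming they are exported as JSON Lines with `text` and `ids` fields (the file name `samples.jsonl` is hypothetical, not part of this dump):

```python
import json

def iter_rows(path: str = "samples.jsonl"):
    """Yield (text, ids) pairs from a hypothetical JSONL export of the rows below."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if not line.strip():
                continue
            row = json.loads(line)
            # "text" holds the masked snippet; "ids" is the accompanying integer list.
            yield row["text"], row["ids"]

if __name__ == "__main__":
    lengths = [len(text) for text, _ids in iter_rows()]
    print(min(lengths), max(lengths))  # expected to fall within the 15 to 7.82k range above
```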
def METHOD_NAME(self, data, key): """decrypt data""" try: plaintext = "" # Mocking decrypted response for tests if self.key == "mock": plaintext = "mock".encode() else: request = azkms_obj(key).decrypt(EncryptionAlgorithm.rsa_oaep_256, data) plaintext = request.plaintext return plaintext.decode() except Exception as e: raise AzureKMSError(e)
[ 443 ]
def METHOD_NAME(self): self.assertEqual( [ "2 cups shredded chicken, (we use rotisserie chicken)", "1/2 teaspoon ground cumin", "1/2 teaspoon ground chili powder", "1/2 teaspoon kosher salt", "1/4 teaspoon garlic powder", "1/4 teaspoon paprika", "2 teaspoons fresh lime juice", "1 cup shredded cheddar or Mexican blend cheese", "20 corn tortillas", "Shredded lettuce", "Diced tomatoes", "Guacamole", "Sour Cream", "Chopped Green Onion", "Crumbled Queso Fresco", "Pico de Gallo", "Salsa", ], self.harvester_class.ingredients(), )
[ 9, 9797 ]
def METHOD_NAME( self, model: nn.Module, path: str, map_location: Any = None, strict: bool = True ) -> None: pass
[ 557, 578 ]
def METHOD_NAME(objs, path): found = False for obj in objs: if 'file.path' in obj and obj['file.path'].lower() == path.lower() and obj['file.type'] == "dir": found = True break assert found, "Dir '{0}' not found".format(path)
[ 220, 1190 ]
def METHOD_NAME(self) -> List[LinterIssue]: """Execute linting. :return: A list of linter issues flagged by this linter. """
[ 22 ]
def METHOD_NAME(value: str) -> str: """Return an error message if VPC id has invalid length.""" if len(value) > 64: return "have length less than or equal to 64" return ""
[ 187, 7755, 147 ]
def METHOD_NAME(comments: Optional[Sequence[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOriginAccessIdentitiesResult: """ Use this data source to get ARNs, ids and S3 canonical user IDs of Amazon CloudFront origin access identities. ## Example Usage ### All origin access identities in the account ```python import pulumi import pulumi_aws as aws example = aws.cloudfront.get_origin_access_identities() ``` ### Origin access identities filtered by comment/name Origin access identities whose comments are `example-comment1`, `example-comment2` ```python import pulumi import pulumi_aws as aws example = aws.cloudfront.get_origin_access_identities(comments=[ "example-comment1", "example-comment2", ]) ``` :param Sequence[str] comments: Filter origin access identities by comment. """ __args__ = dict() __args__['comments'] = comments opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('aws:cloudfront/getOriginAccessIdentities:getOriginAccessIdentities', __args__, opts=opts, typ=GetOriginAccessIdentitiesResult).value return AwaitableGetOriginAccessIdentitiesResult( comments=pulumi.get(__ret__, 'comments'), iam_arns=pulumi.get(__ret__, 'iam_arns'), id=pulumi.get(__ret__, 'id'), ids=pulumi.get(__ret__, 'ids'), s3_canonical_user_ids=pulumi.get(__ret__, 's3_canonical_user_ids'))
[ 19, 1788, 1089, 7949 ]
def METHOD_NAME(self): i = iter_with_sleep() it = TimeoutIterator(i, timeout=0.5) iterResults = [] for x in it: iterResults.append(x) self.assertEqual(iterResults, [1, it.get_sentinel(), 2, 3])
[ 9, 3110, 659, 43, 1751 ]
def METHOD_NAME(self): dbovirtutils = database.OvirtUtils( plugin=self, dbenvkeys=oclcons.Const.CINDERLIB_DB_ENV_KEYS, ) dbovirtutils.detectCommands() config = configfile.ConfigFile([ oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG_DEFAULTS, oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG ]) if config.get('CINDERLIB_DB_PASSWORD'): try: dbenv = {} for e, k in ( (oclcons.CinderlibDBEnv.HOST, 'CINDERLIB_DB_HOST'), (oclcons.CinderlibDBEnv.PORT, 'CINDERLIB_DB_PORT'), (oclcons.CinderlibDBEnv.USER, 'CINDERLIB_DB_USER'), (oclcons.CinderlibDBEnv.PASSWORD, 'CINDERLIB_DB_PASSWORD'), (oclcons.CinderlibDBEnv.DATABASE, 'CINDERLIB_DB_DATABASE'), ): dbenv[e] = config.get(k) for e, k in ( (oclcons.CinderlibDBEnv.SECURED, 'CINDERLIB_DB_SECURED'), ( oclcons.CinderlibDBEnv.SECURED_HOST_VALIDATION, 'CINDERLIB_DB_SECURED_VALIDATION' ) ): dbenv[e] = config.getboolean(k) dbovirtutils.tryDatabaseConnect(dbenv) self.environment.update(dbenv) # current cinderlib engine-setup code leaves the database # empty after creation, so we can't rely on # dbovirtutils.isNewDatabase for checking this (because it # checks if there are tables in the public schema). # Always set to False if we managed to connect. TODO think # of something more robust. Perhaps create our own dummy # table to mark that it's 'populated', or save in postinstall # something saying that it's created. self.environment[ oclcons.CinderlibDBEnv.NEW_DATABASE ] = False self.environment[ oclcons.CinderlibDBEnv.NEED_DBMSUPGRADE ] = dbovirtutils.checkDBMSUpgrade() except RuntimeError: self.logger.debug( 'Existing credential use failed', exc_info=True, ) msg = _( 'Cannot connect to ovirt cinderlib ' 'database using existing ' 'credentials: {user}@{host}:{port}' ).format( host=dbenv[oclcons.CinderlibDBEnv.HOST], port=dbenv[oclcons.CinderlibDBEnv.PORT], database=dbenv[oclcons.CinderlibDBEnv.DATABASE], user=dbenv[oclcons.CinderlibDBEnv.USER], ) if self.environment[ osetupcons.CoreEnv.ACTION ] == osetupcons.Const.ACTION_REMOVE: self.logger.warning(msg) else: raise RuntimeError(msg)
[ 102 ]
def METHOD_NAME( self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor = None, position_encoding: torch.Tensor = None, attention_mask: torch.LongTensor = None, unique_name: List[str] = None, ): loss, hidden_states, prediction = self.predict( x, label, label_mask, position_encoding, attention_mask ).slice(3) logs = Logs() logs.add_hidden_state("hidden_states", hidden_states) logs.add_hidden_state("prediction", prediction) return Output( loss=loss, prediction=prediction, label=label, unique_name=unique_name, logs=logs, )
[ 786, 76 ]
def METHOD_NAME(env, test_name): with get_dut(env, test_name, 'test_stack_overflow') as dut: dut.expect_gme('Unhandled debug exception') dut.expect('Stack canary watchpoint triggered (main)') dut.expect_reg_dump(0) dut.expect_backtrace() dut.expect_elf_sha256() dut.expect_none('Guru Meditation') test_common(dut, test_name)
[ 1501, 1482, 921 ]
def METHOD_NAME(pil_image): gpu_image = ( transforms.Compose( [ transforms.Resize( (blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC, ), transforms.ToTensor(), transforms.Normalize( (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711), ), ] )(pil_image) .unsqueeze(0) .to(device) ) with torch.no_grad(): caption = blip_model.generate( gpu_image, sample=False, num_beams=3, max_length=20, min_length=5 ) return caption[0]
[ 567, 5876 ]
def METHOD_NAME(net, testloader, steps: int = None, device: str = "cpu"): """Validate the network on the entire test set.""" print("Starting evaluation...") net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 net.eval() with torch.no_grad(): for batch_idx, (images, labels) in enumerate(testloader): images, labels = images.to(device), labels.to(device) outputs = net(images) loss += criterion(outputs, labels).item() _, predicted = torch.max(outputs.data, 1) correct += (predicted == labels).sum().item() if steps is not None and batch_idx == steps: break accuracy = correct / len(testloader.dataset) net.to("cpu") # move model back to CPU return loss, accuracy
[ 9 ]
def METHOD_NAME(i): node_dir = tempdir / f'node{i}' node_dir.mkdir(parents=True, exist_ok=True) return node_dir
[ 19, 1716, 1190 ]
def METHOD_NAME(targets, predictions): return {"pearson_corrcoef": 100 * pearsonr(targets, predictions)[0]}
[ 4752, 6804 ]
def METHOD_NAME(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): """A socket pair usable as a self-pipe, for Windows. Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. """ if family == socket.AF_INET: host = '127.0.0.1' elif family == socket.AF_INET6: host = '::1' else: raise ValueError("Only AF_INET and AF_INET6 socket address " "families are supported") if type != socket.SOCK_STREAM: raise ValueError("Only SOCK_STREAM socket type is supported") if proto != 0: raise ValueError("Only protocol zero is supported") # We create a connected TCP socket. Note the trick with setblocking(0) # that prevents us from having to create a thread. lsock = socket.socket(family, type, proto) try: lsock.bind((host, 0)) lsock.listen(1) # On IPv6, ignore flow_info and scope_id addr, port = lsock.getsockname()[:2] csock = socket.socket(family, type, proto) try: csock.setblocking(False) try: csock.connect((addr, port)) except (BlockingIOError, InterruptedError): pass csock.setblocking(True) ssock, _ = lsock.accept() except: csock.close() raise finally: lsock.close() return (ssock, csock)
[ -1 ]
def METHOD_NAME(): assert_('old_func3' in new_func3.__doc__) assert_('new_func3' in new_func3.__doc__)
[ 9, 5709, 667 ]
def METHOD_NAME(): x: Final[Any] = 3 # This should generate an error because x is Final. x += 1 # This should generate an error because x is Final. a = (x := 4) # This should generate an error because x is Final. for x in [1, 2, 3]: pass # This should generate an error because x is Final. with open("Hi") as x: pass try: pass # This should generate an error because x is Final. except ModuleNotFoundError as x: pass # This should generate an error because x is Final. (a, x) = (1, 2)
[ 7640 ]
def METHOD_NAME(self, payload={}): """Launch using related->launch endpoint.""" # get related->launch launch_pg = self.get_related('launch') # launch the workflow_job_template result = launch_pg.post(payload) # return job jobs_pg = self.related.workflow_jobs.get(id=result.workflow_job) if jobs_pg.count != 1: msg = "workflow_job_template launched (id:{}) but job not found in response at {}/workflow_jobs/".format(result.json['workflow_job'], self.url) raise exc.UnexpectedAWXState(msg) return jobs_pg.results[0]
[ 1440 ]
def METHOD_NAME( self, config_dict: ConfigDict, filename: str, ) -> None: self.search_for_deprecated_keyword_usages( config_dict=config_dict, filename=filename, ) self.search_for_unset_required_keywords( config_dict=config_dict, filename=filename )
[ 250, 984 ]
def METHOD_NAME(self): """Test _extract_pipelineparams.""" p1 = PipelineParam( name='param1', op_name='op1', param_type={'customized_type_a': { 'property_a': 'value_a' }}) p2 = PipelineParam(name='param2', param_type='customized_type_b') p3 = PipelineParam( name='param3', value='value3', param_type={'customized_type_c': { 'property_c': 'value_c' }}) stuff_chars = ' between ' payload = str(p1) + stuff_chars + str(p2) + stuff_chars + str(p3) params = _extract_pipelineparams(payload) self.assertListEqual([p1, p2, p3], params) # Expecting the _extract_pipelineparam to dedup the pipelineparams among all the payloads. payload = [ str(p1) + stuff_chars + str(p2), str(p2) + stuff_chars + str(p3) ] params = _extract_pipelineparams(payload) self.assertListEqual([p1, p2, p3], params)
[ 9, 297, -1, 41, 119 ]
def METHOD_NAME(self): for node in self.nodes: self.logs["master_logs" + node.account.hostname] = { "path": self.master_log_path(node), "collect_default": True } self.logs["worker_logs" + node.account.hostname] = { "path": self.slave_log_path(node), "collect_default": True }
[ 176, 1099, 309 ]
def METHOD_NAME(self, character, pos=False, overwrite=False, ins=False):
[ 408, 3874 ]
def METHOD_NAME(): library_json = Path(__file__).parent / 'library.json' parser = argparse.ArgumentParser( description='ACRN Configurator third-party library manager.' ) parser.add_argument('operation', choices=['check', 'install', 'clean']) parser.add_argument('-c', '--config', dest='config', default=library_json) args = parser.parse_args() library_info = json.load(open(args.config, encoding='utf-8')) manager(args.operation, library_info)
[ 57 ]
def METHOD_NAME(output): """Normalize DTrace output for comparison. DTrace keeps a per-CPU buffer, and when showing the fired probes, buffers are concatenated. So if the operating system moves our thread around, the straight result can be "non-causal". So we add timestamps to the probe firing, sort by that field, then strip it from the output""" # When compiling with '--with-pydebug', strip '[# refs]' debug output. output = re.sub(r"\[[0-9]+ refs\]", "", output) try: result = [ row.split("\t") for row in output.splitlines() if row and not row.startswith('#') ] result.sort(key=lambda row: int(row[0])) result = [row[1] for row in result] return "\n".join(result) except (IndexError, ValueError): raise AssertionError( "tracer produced unparseable output:\n{}".format(output) )
[ 1137, 2576, 146 ]
def METHOD_NAME(dataset, indices=None): if indices is None: indices = range(len(dataset)) aspect_ratios = [] for i in indices: img_info = dataset.coco.imgs[dataset.ids[i]] aspect_ratio = float(img_info["width"]) / float(img_info["height"]) aspect_ratios.append(aspect_ratio) return aspect_ratios
[ 226, 1864, 14334, 777, 126 ]
def METHOD_NAME(self, type: str) -> BaseApiResponseX: body = {"type": type} path = "/integrations/authenticate" headers = {"Authorization": "GenieKey " + self.integration_key} return self.post(path=path, headers=headers, data=body)
[ 4797, 1911 ]
def METHOD_NAME(msg0, msg1, t0, t1): cprlat0 = get_cprlat(msg0) cprlat1 = get_cprlat(msg1) cprlon0 = get_cprlon(msg0) cprlon1 = get_cprlon(msg1) return cpr2position(cprlat0, cprlat1, cprlon0, cprlon1, t0, t1)
[ 19, 195 ]
def METHOD_NAME(self, predictor_name, timeout=60): start = time.time() status = None while (time.time() - start) < timeout: resp = self.sql_via_http('show models', RESPONSE_TYPE.TABLE) name_index = [x.lower() for x in resp['column_names']].index('name') status_index = [x.lower() for x in resp['column_names']].index('status') for row in resp['data']: if row[name_index] == predictor_name: status = row[status_index] if status in ['complete', 'error']: break time.sleep(1) return status
[ 4096, 4476 ]
def METHOD_NAME(error): print(f'*WARNING* Examples Revamp not generated: \n\n{error}') exit()
[ 2869 ]
def METHOD_NAME(mod, attention_mechanism, attention_mode, concat, edge_dim): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) edge_type = torch.tensor([0, 2, 1, 2]) edge_attr = torch.randn((4, edge_dim)) if edge_dim else None conv1 = RGATConv( # `num_bases` is not None: in_channels=8, out_channels=16, num_relations=4, num_bases=4, mod=mod, attention_mechanism=attention_mechanism, attention_mode=attention_mode, heads=2, dim=1, concat=concat, edge_dim=edge_dim, ) conv2 = RGATConv( # `num_blocks` is not `None` in_channels=8, out_channels=16, num_relations=4, num_blocks=4, mod=mod, attention_mechanism=attention_mechanism, attention_mode=attention_mode, heads=2, dim=1, concat=concat, edge_dim=edge_dim, ) conv3 = RGATConv( # Both `num_bases` and `num_blocks` are `None`: in_channels=8, out_channels=16, num_relations=4, mod=mod, attention_mechanism=attention_mechanism, attention_mode=attention_mode, heads=2, dim=1, concat=concat, edge_dim=edge_dim, ) conv4 = RGATConv( # `dropout > 0` and `mod` is `None`: in_channels=8, out_channels=16, num_relations=4, mod=None, attention_mechanism=attention_mechanism, attention_mode=attention_mode, heads=2, dim=1, concat=concat, edge_dim=edge_dim, dropout=0.5, ) for conv in [conv1, conv2, conv3, conv4]: assert str(conv) == 'RGATConv(8, 16, heads=2)' out = conv(x, edge_index, edge_type, edge_attr) assert out.size() == (4, 16 * (2 if concat else 1)) out, (adj, alpha) = conv(x, edge_index, edge_type, edge_attr, return_attention_weights=True) assert out.size() == (4, 16 * (2 if concat else 1)) assert adj.size() == edge_index.size() assert alpha.size() == (4, 2)
[ 9, -1, 1306 ]
def METHOD_NAME(seasonnum: int, crop: Optional[int] = None) -> Response: nice_path = os.path.join(str(APP.static_folder), 'images', 'banners', f'{seasonnum}.png') if os.path.exists(nice_path): return send_file(os.path.abspath(nice_path)) cardnames, background = banner_cards(seasonnum) loop = asyncio.new_event_loop() path = loop.run_until_complete(image_fetcher.generate_banner(cardnames, background, crop)) return send_file(os.path.abspath(path))
[ 3726 ]
def METHOD_NAME(self): repo_directory = os.path.join(self.workdir, 'repo') os.mkdir(repo_directory) config = """ [main] something = 1 [external] baseurl = http://example.com/test/ [fedora] baseurl = {} """.format(repo_directory) mounts = self.get_user_bind_mounts_from_config(config) assert len(mounts) == 1 assert mounts[0].srcpath == repo_directory assert mounts[0].bindpath.startswith(self.workdir) assert mounts[0].bindpath.endswith(repo_directory)
[ 9, 4653, 157, 156, 623, 15880 ]
def METHOD_NAME(self): out = ContextGenerator(SCHEMA, mergeimports=True).serialize() with open(CONTEXT_OUTPUT, "w") as stream: stream.write(out) expected = { "BFO": {"@id": "http://purl.obolibrary.org/obo/BFO_", "@prefix": True}, "CL": {"@id": "http://purl.obolibrary.org/obo/CL_", "@prefix": True}, "GO": {"@id": "http://purl.obolibrary.org/obo/GO_", "@prefix": True}, "PR": {"@id": "http://purl.obolibrary.org/obo/PR_", "@prefix": True}, "SIO": {"@id": "http://semanticscience.org/resource/SIO_", "@prefix": True}, "SO": {"@id": "http://purl.obolibrary.org/obo/SO_", "@prefix": True}, "biolink": "https://w3id.org/biolink/", "dbont": "http://dbpedia.org/ontology/", "dce": "http://purl.org/dc/elements/1.1/", "lego": "http://geneontology.org/lego/", "linkml": "https://w3id.org/linkml/", "owl": "http://www.w3.org/2002/07/owl#", "pav": "http://purl.org/pav/", "prefixtest": "https://w3id.org/linkml/tests/prefixtest/", "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "rdfs": "http://www.w3.org/2000/01/rdf-schema#", "sdo": "http://schema.org/", "wd": "https://www.wikidata.org/wiki/", "@vocab": "https://w3id.org/linkml/tests/prefixtest/", "additionalName": {"@id": "sdo:additionalName"}, "id": "@id", "label": {"@id": "rdfs:label"}, "part_of": {"@id": "BFO:0000050"}, "type": {"@id": "rdf:type"}, } with open(CONTEXT_OUTPUT) as stream: obj = json.load(stream)["@context"] fails = 0 for k, v in expected.items(): if k in obj: if v != obj[k]: if not ("@id" in v and "@id" in obj[k] and v["@id"] == obj[k]["@id"]): logging.error(f"{k} = {v} expected {expected[k]}") fails += 1 else: logging.error(f"Missing key: {k}") fails += 1 assert fails == 0 # unexpected - we don't want to import unused prefixes from the default_curi_map assert "FOODON" not in obj assert "OBI" not in obj assert "ENVO" not in obj
[ 9, -1 ]
def METHOD_NAME(cls): """Return True if Git is installed on the system""" if cls._git_available is None: try: cls.run_command('--version') cls._git_available = True except (GitError): cls._git_available = False return cls._git_available
[ 1623, 1272 ]
def METHOD_NAME(client): """ Tests getJP2Image API method. """ filepath = client.download_jp2('2012/01/01', observatory='SOHO', instrument='MDI', measurement='continuum') assert "2011_01_11__22_39_00_000__SOHO_MDI_MDI_continuum.jp2" in filepath os.remove(filepath)
[ 9, 136, 3346 ]
def METHOD_NAME(self, fxt_coco_dataset: Path): pbar_out = io.StringIO() pbar = make_pbar(file=pbar_out) project = self.client.projects.create_from_dataset( spec=models.ProjectWriteRequest(name="project with data"), dataset_path=fxt_coco_dataset, dataset_format="COCO 1.0", pbar=pbar, ) assert project.get_tasks()[0].size == 1 assert "100%" in pbar_out.getvalue().strip("\r").split("\r")[-1] assert self.stdout.getvalue() == ""
[ 9, 1046, 129, 155, 280, 126 ]
def METHOD_NAME(): return { "ads" : {"id", "updated_time"}, "adcreative" : {"id"}, "adsets" : {"id", "updated_time"}, "campaigns" : {"id"}, "ads_insights" : {"campaign_id", "adset_id", "ad_id", "date_start"}, "ads_insights_age_and_gender" : {"campaign_id", "adset_id", "ad_id", "date_start", "age", "gender"}, "ads_insights_country" : {"campaign_id", "adset_id", "ad_id", "date_start", "country"}, "ads_insights_platform_and_device": {"campaign_id", "adset_id", "ad_id", "date_start", "publisher_platform", "platform_position", "impression_device"}, "ads_insights_region" : {"campaign_id", "adset_id", "ad_id", "date_start"}, "ads_insights_dma" : {"campaign_id", "adset_id", "ad_id", "date_start"}, "ads_insights_hourly_advertiser": {"campaign_id", "adset_id", "ad_id", "date_start", "hourly_stats_aggregated_by_advertiser_time_zone"}, #"leads" : {"id"}, }
[ 391, 13223 ]
def METHOD_NAME(self, input_cubes: CubeList) -> Cube: """Check input cubes, then calculate and interpolate a snow fraction cube. Args: input_cubes: Contains cubes of rain and snow, both must be either rates or accumulations. Returns: Cube of snow-fraction. The data within this cube will contain values between 0 and 1. Points where no precipitation is present will be filled using a nearest-neighbour interpolation. The cube meta-data will contain: * Input_cube name "snow_fraction" * Cube units set to (1). Raises: ValueError: if input cubes fail any comparison tests. """ self._get_input_cubes(input_cubes) return self._calculate_snow_fraction()
[ 356 ]
def METHOD_NAME(gateway_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, route_config_name: Optional[pulumi.Input[str]] = None, service_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGatewayRouteConfigResult]: """ Get the Spring Cloud Gateway route configs. :param str gateway_name: The name of Spring Cloud Gateway. :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param str route_config_name: The name of the Spring Cloud Gateway route config. :param str service_name: The name of the Service resource. """ ...
[ 19, 14, 2476, 200, 146 ]
def METHOD_NAME(): version = None version_path = "/proc/driver/nvidia/version" if os.path.isfile(version_path): with open(version_path) as f: for line in f: if line.startswith("NVRM version: NVIDIA"): match = re.search(r"Kernel Module\s+([0-9\.]+)\s+", line) if match: version = match.group(1) break return version
[ 11375, 281 ]
def METHOD_NAME(self): return self.expected_streams()
[ 1196, 24, 9 ]
def METHOD_NAME(i): return struct.unpack("<I", struct.pack(">I", i))[0]
[ 967 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse': """ The system metadata related to this resource. """ return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(array, _idx): for ii in range(len(array)): if not ii == _idx: if 'out_arr' not in dir(): out_arr = array[ii] else: out_arr = np.concatenate((out_arr, array[ii])) return out_arr
[ 877, 1501 ]
def METHOD_NAME(client): user = UserFactory(is_staff=True) totp_device = user.totpdevice_set.create() client.post( reverse("account_login"), {"login": user.username, "password": SUPER_SECURE_TEST_PASSWORD}, ) token = get_token_from_totp_device(totp_device) client.post(reverse_lazy("two-factor-authenticate"), {"otp_token": token}) assert len(mail.outbox) == 1 assert "Security Alert" in mail.outbox[0].subject assert "We noticed a new login to your account." in mail.outbox[0].body assert mail.outbox[0].to == [user.email] mail.outbox.clear() user2 = UserFactory() totp_device = user2.totpdevice_set.create() client.post( reverse("account_login"), {"login": user2.username, "password": SUPER_SECURE_TEST_PASSWORD}, ) token = get_token_from_totp_device(totp_device) client.post(reverse_lazy("two-factor-authenticate"), {"otp_token": token}) assert len(mail.outbox) == 0
[ 9, 487, 1887, 3948, 273, 43, 1045 ]
def METHOD_NAME(s): """Changes all Windows/Mac line endings in s to UNIX line endings.""" return s.replace('\r\n', '\n').replace('\r', '\n')
[ 24, 1226, 534, 4768 ]
def METHOD_NAME(pipe_handle, parent_pid=None): """Run code specified by data received over pipe.""" assert is_forking(sys.argv), "Not forking" if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid ) else: source_process = None new_handle = reduction.duplicate( pipe_handle, source_process=source_process ) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process with os.fdopen(fd, "rb", closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = load(from_parent) spawn.prepare(preparation_data, parent_sentinel) self = load(from_parent) finally: del process.current_process()._inheriting exitcode = self._bootstrap(parent_sentinel) sys.exit(exitcode)
[ 57 ]
def METHOD_NAME( self, ) -> Callable[ [operations_pb2.GetOperationRequest], Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: ...
[ 19, 2206 ]
def METHOD_NAME( payload: models.RateRequest, settings: provider_utils.Settings, ) -> lib.Serializable: packages = lib.to_packages( payload.parcels, max_weight=units.Weight(99, "LB"), ) service = lib.to_services( payload.services, initializer=provider_units.shipping_services_initializer, ).first options = lib.to_shipping_options( payload.options, package_options=packages.options, initializer=provider_units.shipping_options_initializer, ) unit = provider_units.MeasurementUnit.map(packages.weight_unit).value request = nationex.RateRequestType( CustomerId=lib.to_int(settings.customer_id), ExpeditionDate=lib.fdate(options.shipment_date.state, "%Y-%m-%d"), ShipmentType=service.value, SourcePostalCode=payload.shipper.postal_code, DestinationPostalCode=payload.recipient.postal_code, TotalWeight=packages.weight.value, TotalParcels=len(packages), UnitsOfMeasurement=unit, Accessory=nationex.AccessoryType( InsuranceAmount=options.nationex_insurance_amount.state, FrozenProtection=options.nationex_frozen_protection.state, DangerousGoods=options.nationex_dangerous_goods.state, SNR=( options.nationex_snr.state if options.nationex_snr.state is not None else True ), ), Parcels=[ nationex.ParcelType( NCV=( True if ( package.length.IN > 36 or package.width.IN > 36 or package.height.IN > 36 or package.weight.LB > 70 ) else False ), Weight=package.weight.map(provider_units.MeasurementOptions).value, Dimensions=( nationex.DimensionsType( Length=package.length.value, Width=package.width.value, Height=package.height.value, Cubing=lib.to_decimal( (package.length.IN * package.width.IN * package.height.IN) / 1728 ), ) if any( [ package.length.value, package.width.value, package.height.value, ] ) else None ), ) for package in packages ], ) return lib.Serializable(request, lib.to_dict)
[ 1585, 377 ]
def METHOD_NAME(self): commits = self.commits.filter(category=self.category) total_commits = len(self.commits.commits) already_done = total_commits - len(commits) i = 0 while i < len(commits): cur_commit = commits[i] next_commit = commits[i + 1] if i + 1 < len(commits) else None jump_to = self.handle_commit( cur_commit, already_done + i + 1, total_commits, commits ) # Increment counter if jump_to is not None: i = jump_to elif next_commit is None: i = len(commits) else: i = commits.index(next_commit)
[ 8689 ]
def METHOD_NAME(self): """ mysql_database.present """ self._test_database( "testdb1", "testdb1", test_conn=True, character_set="utf8", collate="utf8_general_ci", connection_user=self.user, connection_pass=self.password, connection_charset="utf8", )
[ 9, 2541, 1447 ]
def METHOD_NAME(self): self.testDir = tempfile.mkdtemp() shutil.copy('/etc/hosts', self.testDir + '/INPUT') fileForTransfer = {'LFN': '/INPUT', \ 'PFN': '%s/etc/hosts' % self.testDir, \ 'PNN': None, \ 'StageOutCommand': None} wrapper = StageInMgr(**{ 'command': 'cp', 'option': '', 'phedex-node': 'test-win', 'lfn-prefix': self.testDir}) retval = wrapper(fileForTransfer) print("got the retval %s" % retval) wrapper = DeleteMgr(**{ 'command': 'cp', 'option': '', 'phedex-node': 'test-win', 'lfn-prefix': self.testDir}) wrapper(retval)
[ 9, 34, 11750, 291 ]
def METHOD_NAME(self, backend): root_page = Page.objects.get(id=1) page = root_page.add_child( instance=SimplePage(title="test", slug="test", content="test") ) # Convert page into a generic "Page" object and add it into the index unspecific_page = page.page_ptr backend().reset_mock() index.insert_or_update_object(unspecific_page) # It should be automatically converted back to the specific version backend().add.assert_called_with(page)
[ 9, 4323, 24, 3303, 1174 ]
def METHOD_NAME(request): """Get configurations from the module.""" return request.param
[ 19, 830 ]
def METHOD_NAME(rule_obj, platforms): oval_file, oval_contents = ssg.checks.get_oval_contents(rule_obj, 'shared') current_platforms = ssg.oval.applicable_platforms(oval_file) new_platforms = set(current_platforms) for platform in platforms: parsed_platform = platform.split('~') if not len(parsed_platform) == 2: print("Invalid platform replacement description: %s" % platform, file=sys.stderr) sys.exit(1) match = ssg.rule_yaml.parse_prodtype(parsed_platform[0]) replacement = ssg.rule_yaml.parse_prodtype(parsed_platform[1]) if match.issubset(current_platforms): new_platforms.difference_update(match) new_platforms.update(replacement) print("Current platforms: %s" % ','.join(sorted(current_platforms))) print("New platforms: %s" % ','.join(sorted(new_platforms))) new_contents = ssg.checks.set_applicable_platforms(oval_contents, new_platforms) ssg.utils.write_list_file(oval_file, new_contents)
[ 369, 2120 ]
def METHOD_NAME(self) -> "Optional[HeadersType]": headers = Headers() headers['Access-Control-Allow-Origin'] = request.environ.get('HTTP_ORIGIN') headers['Access-Control-Allow-Headers'] = request.environ.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS') headers['Access-Control-Allow-Methods'] = '*' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Expose-Headers'] = 'X-Rucio-Auth-Token' return headers
[ 19, 2131 ]
def METHOD_NAME(self) -> Sequence[SqlOperation]: return [ # local first operations.CreateTable( storage_set=StorageSetKey.SEARCH_ISSUES, table_name="search_issues_local_new", columns=columns, engine=table_engines.ReplacingMergeTree( order_by="(project_id, toStartOfDay(receive_timestamp), primary_hash, cityHash64(occurrence_id))", version_column="deleted", partition_by="(retention_days, toMonday(receive_timestamp))", sample_by="cityHash64(occurrence_id)", settings={"index_granularity": "8192"}, storage_set=StorageSetKey.SEARCH_ISSUES, ttl="receive_timestamp + toIntervalDay(retention_days)", ), target=OperationTarget.LOCAL, ), operations.AddColumn( storage_set=StorageSetKey.SEARCH_ISSUES, table_name="search_issues_local_new", column=Column( "_tags_hash_map", Array(UInt(64), Modifiers(materialized=TAGS_HASH_MAP_COLUMN)), ), after="tags.value", target=OperationTarget.LOCAL, ), operations.DropTable( storage_set=StorageSetKey.SEARCH_ISSUES, table_name="search_issues_local", target=OperationTarget.LOCAL, ), operations.RenameTable( storage_set=StorageSetKey.SEARCH_ISSUES, old_table_name="search_issues_local_new", new_table_name="search_issues_local", target=OperationTarget.LOCAL, ), # dist second operations.CreateTable( storage_set=StorageSetKey.SEARCH_ISSUES, table_name="search_issues_dist_new", columns=columns, engine=table_engines.Distributed( local_table_name="search_issues_local", sharding_key="cityHash64(occurrence_id)", ), target=OperationTarget.DISTRIBUTED, ), operations.AddColumn( storage_set=StorageSetKey.SEARCH_ISSUES, table_name="search_issues_dist_new", column=Column( "_tags_hash_map", Array(UInt(64), Modifiers(materialized=TAGS_HASH_MAP_COLUMN)), ), after="tags.value", target=OperationTarget.DISTRIBUTED, ), operations.DropTable( storage_set=StorageSetKey.SEARCH_ISSUES, table_name="search_issues_dist", target=OperationTarget.DISTRIBUTED, ), operations.RenameTable( storage_set=StorageSetKey.SEARCH_ISSUES, old_table_name="search_issues_dist_new", new_table_name="search_issues_dist", target=OperationTarget.DISTRIBUTED, ), ]
[ 2368, 829 ]
def METHOD_NAME(fpath): cols_names = [ "S_SUPPKEY", "S_NAME", "S_ADDRESS", "S_NATIONKEY", "S_PHONE", "S_ACCTBAL", "S_COMMENT", ] cols = { "S_SUPPKEY": np.int64, "S_NAME": str, "S_ADDRESS": str, "S_NATIONKEY": np.int64, "S_PHONE": str, "S_ACCTBAL": np.float64, "S_COMMENT": str, } rel = pd.read_csv(fpath, sep="|", header=None, names=cols_names, dtype=cols) return rel
[ 557, 4485 ]
def METHOD_NAME(desc): # First, remove page header/footer desc = header_footer_regex.sub("", desc) # Next, combine lines that are separated by a singular newline desc = re.sub(r"(?<!\n)\n(?!\n)", " ", desc, flags=re.MULTILINE) # Remove leftovers from diagrams p = r"^(?:(?:\b\w+?\b\s*?){1,2}|.)$\n{2}" desc = re.sub(p, "", desc, flags=re.MULTILINE) return desc
[ 356, 1067 ]
def METHOD_NAME(self, *args): pass
[ 0, 8858 ]
def METHOD_NAME(x, n): assert_equal(n % 3, 0) g = np.zeros([n]) # Note: the first line is typoed in some of the references; # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)] g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8 g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16 g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3 return g
[ 474, 1842 ]
def METHOD_NAME(_root, info: ResolveInfo, **kwargs): qs = models.TaxClass.objects.all() qs = filter_connection_queryset(qs, kwargs) return create_connection_slice(qs, info, kwargs, TaxClassCountableConnection)
[ 1014, 6131, 393 ]
def METHOD_NAME(request): user = request.user query = request.GET.get('q', '') context = { 'user': request.user, 'user_is_teacher': True if Course.objects.filter(teachers=user).count() else False, 'query': query, 'user_profiles': search_users(query, user)[1], 'courses': search_courses(query, user)[1], } return render(request, 'search.html', context)
[ 1070, 1174 ]
def METHOD_NAME(sample_shape):
[ 49, 333 ]
def METHOD_NAME(bot: Text, account: int, metric_type: Text, **kwargs): """ Adds custom metrics for an end user. :param bot: bot id :param account: account id :param metric_type: metric_type """ metric = Metering(bot=bot, metric_type=metric_type, account=account) for key, value in kwargs.items(): setattr(metric, key, value) metric.save() return metric.id.__str__()
[ 238, 1097 ]
def METHOD_NAME(train, batch_size, num_epochs): """Reads input data num_epochs times. Args: train: Selects between the training (True) and validation (False) data. batch_size: Number of examples per returned batch. num_epochs: Number of times to read the input data, or 0/None to train forever. Returns: A tuple (images, labels), where: * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS] in the range [-0.5, 0.5]. * labels is an int32 tensor with shape [batch_size] with the true label, a number in the range [0, mnist.NUM_CLASSES). This function creates a one_shot_iterator, meaning that it will only iterate over the dataset once. On the other hand there is no special initialization required. """ if not num_epochs: num_epochs = None filename = os.path.join(FLAGS.train_dir, TRAIN_FILE if train else VALIDATION_FILE) with tf.name_scope("input"): # TFRecordDataset opens a binary file and reads one record at a time. # `filename` could also be a list of filenames, which will be read in order. dataset = tf.data.TFRecordDataset(filename) # The map transformation takes a function and applies it to every element # of the dataset. dataset = dataset.map(decode) dataset = dataset.map(augment) dataset = dataset.map(normalize) # The shuffle transformation uses a finite-sized buffer to shuffle elements # in memory. The parameter is the number of elements in the buffer. For # completely uniform shuffling, set the parameter to be the same as the # number of elements in the dataset. dataset = dataset.shuffle(1000 + 3 * batch_size) dataset = dataset.repeat(num_epochs) dataset = dataset.batch(batch_size) iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) return iterator.get_next()
[ 1461 ]
def METHOD_NAME(self, scripts_data): self.scripts = [] for script_data in scripts_data: script: ScriptPostprocessing = script_data.script_class() script.filename = script_data.path if script.name == "Simple Upscale": continue self.scripts.append(script)
[ 15, 2942 ]
def METHOD_NAME(nodes, edges): labels = len(nodes) node = nodes[randint(0, labels - 1)] query = create_node_pattern(node, "$map") params = {"map": create_node_map(node, randint(100, 1000))} return params, query
[ 129, 1716 ]
def METHOD_NAME(img): return img.max()
[ 232, 976, 99 ]
def METHOD_NAME( self, add_access, clean_up, detach_disk, destroy ): mommy.make( 'Snapshot', volume=self.export ) mommy.make( 'Snapshot', instance=self.instance, volume=self.export, purge_at=datetime.now() ) self.assertTrue(purge_unused_exports()) add_access.assert_not_called() clean_up.assert_not_called() detach_disk.assert_not_called() destroy.assert_not_called()
[ 9, 2286, 34, 3439, 41, 923, 394 ]
def METHOD_NAME(require, temp_dir): require({'privileged_user': True}) isolation = {'rootfs': temp_dir, 'automount': {'language_deps': False}} client.load('empty', isolation=isolation) python_path = f'{temp_dir}/usr' assert findmnt().find(python_path) == -1 assert client.get()['status'] != 200, 'disabled language_deps' assert findmnt().find(python_path) == -1 isolation['automount']['language_deps'] = True client.load('empty', isolation=isolation) assert findmnt().find(python_path) == -1 assert client.get()['status'] == 200, 'enabled language_deps' assert waitformount(python_path), 'language_deps mount' client.conf({"listeners": {}, "applications": {}}) assert waitforunmount(python_path), 'language_deps unmount'
[ 9, 440, 5167, 16054, 654, 2938, 2520 ]
def METHOD_NAME(self): "Test the notification toggling" url = reverse("toggle_notify") request = fake_request(url=url, data={}, user=self.user) response = views.toggle_notify(request=request) self.assertEqual(response.status_code, 302)
[ 9, 959 ]
def METHOD_NAME(self): group = self.create_random_name(prefix='group', length=24) server = self.create_random_name(prefix='server', length=24) with mock.patch('azext_db_up.custom._run_mysql_commands'): with mock.patch('mysql.connector.connect', side_effect=mysql.connector.errors.DatabaseError()): output = self.cmd('mysql up -g {} -s {}'.format(group, server)).get_output_in_json() user, server_name = output['username'].split('@') password, database = output['password'], 'sampledb' self.assertEqual(server, server_name) # test followup iterations of up self.cmd('mysql up', checks=[JMESPathCheck('password', '*****')]) self.cmd('mysql up -p {}'.format(password), checks=[JMESPathCheck('password', password)]) # check that db and server exist self.cmd('mysql db show -n {} -g {} -s {}'.format(database, group, server)) # remove all resources used by up self.cmd('mysql down -y --delete-group') # check group no longer exists with self.assertRaises(SystemExit) as ex: self.cmd('group show -n {}'.format(group)) self.assertEqual(ex.exception.code, 3) # check that show-connection-string matches previous output output_mirror = self.cmd('mysql show-connection-string -p {} -u {} -d {} -s {}'.format( password, user, database, server)).get_output_in_json() self.assertEqual(output, output_mirror)
[ 9, 4001, 233 ]
def METHOD_NAME(self, msg): import gtk window = gtk.Dialog(title="Error") window.connect("destroy", self.closeErrorWindow) window.set_modal(True) text = gtk.Label(msg) button = gtk.Button(stock=gtk.STOCK_CLOSE) button.connect("clicked", self.closeErrorWindow) window.vbox.add(text) window.vbox.add(button) window.show_all()
[ 168, 1092 ]
async def METHOD_NAME(client): with pytest.raises(OverflowError): deserialize_iso(await client.datetime.get_overflow())
[ 9, 19, 1482 ]
def METHOD_NAME(cls): return list(cls)[-1]
[ 13201 ]
def METHOD_NAME(): return run_deriche(dace.dtypes.DeviceType.FPGA)
[ 9, 6642 ]
def METHOD_NAME(self): """ Regression tests for the highstate outputter. Calls a basic state with various flags. Each comparison should be identical when successful. """ simple_ping_sls = """ simple-ping: module.run: - name: test.ping """ with temp_file( "simple-ping.sls", simple_ping_sls, RUNTIME_VARS.TMP_BASEENV_STATE_TREE ): # Test basic highstate output. No frills. expected = [ "minion:", " ID: simple-ping", " Function: module.run", " Name: test.ping", " Result: True", " Comment: Module function test.ping executed", " Changes: ", " ret:", " True", "Summary for minion", "Succeeded: 1 (changed=1)", "Failed: 0", "Total states run: 1", ] state_run = self.run_salt('"minion" state.sls simple-ping') for expected_item in expected: self.assertIn(expected_item, state_run) # Test highstate output while also passing --out=highstate. # This is a regression test for Issue #29796 state_run = self.run_salt('"minion" state.sls simple-ping --out=highstate') for expected_item in expected: self.assertIn(expected_item, state_run) # Test highstate output when passing --static and running a state function. # See Issue #44556. state_run = self.run_salt('"minion" state.sls simple-ping --static') for expected_item in expected: self.assertIn(expected_item, state_run) # Test highstate output when passing --static and --out=highstate. # See Issue #44556. state_run = self.run_salt( '"minion" state.sls simple-ping --static --out=highstate' ) for expected_item in expected: self.assertIn(expected_item, state_run)
[ 9, 146, 3583 ]
def METHOD_NAME(self): return self.env.get_vm(self.params["main_vm"])
[ 123, 57, 944 ]
def METHOD_NAME(tmp_dir, dvc, vars_): with pytest.raises(ResolveError) as exc_info: DataResolver(dvc, tmp_dir.fs_path, {"vars": [vars_, {"bar": "foo"}]}) assert ( str(exc_info.value) == "failed to parse 'vars' in 'dvc.yaml': interpolating is not allowed" )
[ 9, 1659, 4239, 1096 ]
async def METHOD_NAME(self, monkeypatch, pre_checkout_query): async def make_assertion(*_, **kwargs): return kwargs["pre_checkout_query_id"] == pre_checkout_query.id assert check_shortcut_signature( PreCheckoutQuery.answer, Bot.answer_pre_checkout_query, ["pre_checkout_query_id"], [] ) assert await check_shortcut_call( pre_checkout_query.answer, pre_checkout_query.get_bot(), "answer_pre_checkout_query", ) assert await check_defaults_handling( pre_checkout_query.answer, pre_checkout_query.get_bot() ) monkeypatch.setattr( pre_checkout_query.get_bot(), "answer_pre_checkout_query", make_assertion ) assert await pre_checkout_query.answer(ok=True)
[ 9, 3485 ]
def METHOD_NAME(cls): pass
[ 0, 1, 2 ]
def METHOD_NAME(cls, name, path, **kwargs): return cls(name=name, path=tuple(path))
[ 280, 763, 553 ]
def METHOD_NAME(objects, i): """Check if all items have no user data.""" assert objects.history.itemAt(i).userData() is None
[ 9, 654, 12697 ]
def METHOD_NAME( wav_scp, librimix_dir, map_mix2enroll, output_dir, num_spk=2, prefix="enroll_spk" ): # noqa E501: ported from https://github.com/BUTSpeechFIT/speakerbeam/blob/main/egs/libri2mix/local/create_enrollment_csv_fixed.py mixtures = [] with Path(wav_scp).open("r", encoding="utf-8") as f: for line in f: if not line.strip(): continue mixtureID = line.strip().split(maxsplit=1)[0] mixtures.append(mixtureID) utt2path = {} for audio in chain( Path(librimix_dir).rglob("s1/*.wav"), Path(librimix_dir).rglob("s2/*.wav"), Path(librimix_dir).rglob("s3/*.wav"), ): pdir = audio.parent.stem utt2path[pdir + "/" + audio.stem] = str(audio.resolve()) mix2enroll = {} with open(map_mix2enroll) as f: for line in f: mix_id, utt_id, enroll_id = line.strip().split() sid = mix_id.split("_").index(utt_id) + 1 mix2enroll[mix_id, f"s{sid}"] = enroll_id with DatadirWriter(Path(output_dir)) as writer: for mixtureID in mixtures: # 100-121669-0004_3180-138043-0053 for spk in range(num_spk): enroll_id = mix2enroll[mixtureID, f"s{spk + 1}"] writer[f"{prefix}{spk + 1}.scp"][mixtureID] = utt2path[enroll_id]
[ 123, 10091, 5700, 820 ]
async def METHOD_NAME( self, cred_ex_record: V20CredExRecord, cred_offer_message: V20CredOffer ) -> None: """Receive format specific credential offer message."""
[ 375, 6179 ]
def METHOD_NAME(repo: str, branch: str) -> bool: command = f"git rev-parse --verify {branch} &> /dev/null " retcode = subprocess.call(command, cwd=repo, shell=True) return (retcode == 0)
[ 250, 1493, 1985, 125, 3653 ]
def METHOD_NAME(x): return o3.spherical_harmonics(l + 1, x, False)
[ 474 ]
def METHOD_NAME(self): """Check if file is disabled. Whole file is only disabled if the first line contains one line disabler.""" if not self.lines: return False return self.lines[0] == (1, 1)
[ 171, 1295 ]
def METHOD_NAME(network, options): """Takes an iterable of the tokenized ethtool command line arguments and applies them to the network devices""" command = [ETHTOOL_BINARY.cmd] + options rc, _, err = hooking.execCmd(command) if rc != 0: raise EthtoolError('Failed to set ethtool opts (%s) for network %s. ' 'Err: %s' % (' '.join(options), network, err))
[ 0, 13499, 2766 ]
def METHOD_NAME(self): psnr_list, ssim_list, lpips_list = [], [], [] with torch.no_grad(): for (pred, label) in zip(self.preds, self.labels): # norm to 0-1 height, width = label.size(2), label.size(3) pred = pred[:, :, 0:height, 0:width] psnr_list.append(calculate_psnr(label, pred)) ssim_list.append(calculate_ssim(label, pred)) lpips_list.append( calculate_lpips(label, pred, self.loss_fn_alex)) return { MetricKeys.PSNR: np.mean(psnr_list), MetricKeys.SSIM: np.mean(ssim_list), MetricKeys.LPIPS: np.mean(lpips_list) }
[ 1195 ]
def METHOD_NAME(self) -> u.kg: """The normalization for mass.""" ...
[ 2858 ]
def METHOD_NAME(Handle: int, info, _type) -> None: ...
[ 0, 21, 279, 1691 ]
def METHOD_NAME(self): return self.__c_header_files
[ 19, 2629, 572, 1537 ]
def METHOD_NAME(X_input: np.ndarray, F: Callable, D: float, dim: int, N: int, lamada_: float = 1) -> float: """Calculate action by path integral by Wang's method. Quantifying the Waddington landscape and biological paths for development and differentiation. Jin Wang, Kun Zhang, Li Xu, and Erkang Wang, PNAS, 2011 Args: X_input: The initial guess of the least action path. Default is a straight line connecting the starting and end path. F: The reconstructed vector field function. This is assumed to be time-independent. D: The diffusion constant. Note that this can be a space-dependent matrix. dim: The feature numbers of the input data. N: Number of waypoints along the least action path. lamada_: Regularization parameter Returns: The action function calculated by the Hamilton-Jacobian method. """ X_input = X_input.reshape((int(dim), -1)) if len(X_input.shape) == 1 else X_input delta, delta_l = delta_delta_l(X_input) V_m = np.zeros((N, 1)) F_l = np.zeros((N, 1)) E_eff = np.zeros((N, 1)) for i in range(N - 1): F_m = F(X_input[:, i]).reshape((1, -1)) V_m[i] = V(F, D, X_input[:, i]) E_eff[i] = np.sum(F(X_input[:, i]) ** 2) / (4 * D) - V_m[i] F_l[i] = F_m.dot(delta[:, i]) / delta_l[i] P = np.sum((delta_l - np.linalg.norm(X_input[:, N - 1] - X_input[:, 0]) / N) ** 2) S_HJ = np.sum((np.sqrt((E_eff + V_m[:N]) / D) - 1 / (2 * D) * F_l) * delta_l) + lamada_ * P print(S_HJ) return S_HJ
[ 11991, 1006 ]
def METHOD_NAME(z, s_min, s_scale, s_size, s_offset, *args, **kwargs): """Used by hs.map in the ReducedIntensity1D to extrapolate the reduced intensity signal to zero below s_min. Parameters ---------- z : np.array A reduced intensity np.array to be transformed. s_min : float Value of s below which data is extrapolated to zero. scale : float The scattering vector calibration of the reduced intensity array. size : int The size of the reduced intensity signal. (in pixels) *args: Arguments to be passed to map(). **kwargs: Keyword arguments to be passed to map(). """ s_min_num = int((s_min - s_offset) / s_scale) s_min_val = z[s_min_num] extrapolated_vals = np.arange(s_min_num) * s_scale + s_offset extrapolated_vals *= s_min_val / extrapolated_vals[-1] # scale zero to one z[:s_min_num] = extrapolated_vals return z
[ 9786, 12419, 13175, 24, 313 ]
def METHOD_NAME(tmp_path): model = GroupNormModel() nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8]) nncf_config["compression"]["algorithm"] = "filter_pruning" nncf_config["compression"]["params"]["prune_first_conv"] = True nncf_config["compression"]["pruning_init"] = 0.5 onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path) check_bias_and_weight_shape("nncf_module.conv1", onnx_model_proto, [8, 1, 1, 1], [8]) check_bias_and_weight_shape("nncf_module.conv2", onnx_model_proto, [16, 8, 1, 1], [16])
[ 9, 2421, 294, 15571, 578 ]
def METHOD_NAME(app_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetManagedCCFResult]: """ Retrieves the properties of a Managed CCF app. :param str app_name: Name of the Managed CCF :param str resource_group_name: The name of the resource group. The name is case insensitive. """ ...
[ 19, 3627, 16876, 146 ]
def METHOD_NAME( info, what="Height", below=None, above=None, relative_mean=False, relative_median=False, relative_max=False, ): if below is None and above is None: return info keep = np.full(len(info["Peaks"]), True) if relative_max is True: what = info[what] / np.max(info[what]) elif relative_median is True: what = standardize(info[what], robust=True) elif relative_mean is True: what = standardize(info[what]) else: what = info[what] if below is not None: keep[what > below] = False if above is not None: keep[what < above] = False info = _signal_findpeaks_filter(info, keep) return info
[ 900, 10578, 2947 ]