text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(buff): logs = [] line, sep, buff = buff.partition('\n') while sep and line: logs.append(LogLine(line)) line, sep, buff = buff.partition('\n') if not sep: buff = line # we put back the last unterminated line in the buffer return logs, buff
[ 214, 390 ]
def METHOD_NAME(model, data_loader, tmpdir=None, gpu_collect=False): """Test model with multiple gpus. This method tests model with multiple gpus and collects the results under two different modes: gpu and cpu modes. By setting ``gpu_collect=True``, it encodes results to gpu tensors and use gpu communication for results collection. On cpu mode it saves the results on different gpus to ``tmpdir`` and collects them by the rank 0 worker. Args: model (nn.Module): Model to be tested. data_loader (nn.Dataloader): Pytorch data loader. tmpdir (str): Path of directory to save the temporary results from different gpus under cpu mode. gpu_collect (bool): Option to use either gpu or cpu to collect results. Returns: list: The prediction results. """ model.eval() results = [] dataset = data_loader.dataset rank, world_size = get_dist_info() if rank == 0: prog_bar = mmcv.ProgressBar(len(dataset)) time.sleep(2) # This line can prevent deadlock problem in some cases. for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, **data) results.extend(result) if rank == 0: batch_size = len(result) batch_size_all = batch_size * world_size if batch_size_all + prog_bar.completed > len(dataset): batch_size_all = len(dataset) - prog_bar.completed for _ in range(batch_size_all): prog_bar.update() # collect results from all ranks if gpu_collect: results = collect_results_gpu(results, len(dataset)) else: results = collect_results_cpu(results, len(dataset), tmpdir) return results
[ 457, 1667, 9 ]
def METHOD_NAME(encoded_image): image = tf.image.decode_jpeg(encoded_image, channels=3) image = tf.image.convert_image_dtype(image, dtype=tf.float32) if self.do_eval: return self._PreprocessForEval(image) else: return self._PreprocessForTraining(image)
[ 1268, 61, 666, 206 ]
def METHOD_NAME(file: str) -> None: logger.debug(f"Mark [{file}] as executable") st = os.stat(file) os.chmod(file, st.st_mode | stat.S_IEXEC)
[ 1743, 2777 ]
def METHOD_NAME(self, dispatch_table): for ancestor_class in self._loky_pickler_cls.mro(): dt_attribute = getattr(ancestor_class, "dispatch_table", None) if isinstance(dt_attribute, types.MemberDescriptorType): # Ancestor class (typically _pickle.Pickler) has a # member_descriptor for its "dispatch_table" attribute. Use # it to set the dispatch_table as a member instead of a # dynamic attribute in the __dict__ of the instance, # otherwise it will not be taken into account by the C # implementation of the dump method if a subclass defines a # class-level dispatch_table attribute as was done in # cloudpickle 1.6.0: # https://github.com/joblib/loky/pull/260 dt_attribute.__set__(self, dispatch_table) break # On top of member descriptor set, also use setattr such that code # that directly access self.dispatch_table gets a consistent view # of the same table. self.dispatch_table = dispatch_table
[ 0, 2506, 410 ]
def METHOD_NAME(p1, p2, fractions): for fraction in fractions: p = p1.interpolate(p2, fraction) yield p
[ 1783, 157 ]
def METHOD_NAME(self, outgoing: flight.ClientAuthSender, incoming: flight.ClientAuthReader) -> None: """Authenticate with Dremio user credentials. """ basic_auth = flight.BasicAuth(self.username, self.password) outgoing.write(basic_auth.serialize()) self.token = incoming.read()
[ 1805 ]
def METHOD_NAME(self, conf: ConfigTree) -> None: self._conf = conf self.query = """query { workbooks { name projectName projectVizportalUrlId vizportalUrlId upstreamTables { name schema database { name connectionType } } } }""" self._extractor = self._build_extractor() transformers = [] dict_to_model_transformer = DictToModel() dict_to_model_transformer.METHOD_NAME( conf=Scoped.get_scoped_conf(self._conf, dict_to_model_transformer.get_scope()).with_fallback( ConfigFactory.from_dict( {MODEL_CLASS: 'databuilder.models.dashboard.dashboard_table.DashboardTable'}))) transformers.append(dict_to_model_transformer) self._transformer = ChainedTransformer(transformers=transformers)
[ 176 ]
def METHOD_NAME(parent_item: ItemWidgetType, parent_folder: autokey.model.folder.Folder): for folder in parent_folder.folders: item = WidgetItemFactory._build_item(parent_item, folder) WidgetItemFactory.METHOD_NAME(item, folder) for childModelItem in parent_folder.items: WidgetItemFactory._build_item(parent_item, childModelItem)
[ 356, 451 ]
def METHOD_NAME(self, request_email): # email notifications if request_email.recipients: site_display_name = get_setting('site', 'global', 'sitedisplayname') site_url = get_setting('site', 'global', 'siteurl') params = { 'SITE_GLOBAL_SITEDISPLAYNAME': site_display_name, 'SITE_GLOBAL_SITEURL': site_url, 'MODULE_DIRECTORIES_LABEL_PLURAL': get_setting('module', 'directories', 'label_plural'), 'directory': self.to_directory, 'from_directory': self.from_directory, 'message': request_email.message, 'first_name': request_email.sender.first_name, 'last_name': request_email.sender.last_name, 'affiliate_request': self.instance.affiliate_request, } # to to_directory owner params['reply_to'] = request_email.sender.email notification.METHOD_NAME(request_email.recipients, 'affiliate_requested_to_owner', params) # to submitter submitter_email = (request_email.sender.email).strip() params['reply_to'] = request_email.recipients[0] notification.METHOD_NAME([submitter_email], 'affiliate_requested_to_submitter', params)
[ 353, 5814 ]
def METHOD_NAME(self): self.closed = True self.data = ''
[ 1462 ]
def METHOD_NAME(self): # 2b. Making a request to views.NewRegistration with new user request country = Country.objects.get(name='country') # We started to use the email as the username for new registrations newusr = '[email protected]' body = { 'email': newusr, 'username': newusr, 'password': '87654321', 'country': country.pk, 'organizationType': 'OTHR', 'organization': 'Zoo', 'firstname': 'Peter', 'lastname': 'Falk', } headers = {'CONTENT_TYPE': 'application/json'} resp = self.client.post('/register', body, format='json', headers=headers) # json.loads(resp.content): 'status': 'ok' self.assertEqual(resp.status_code, 200) # 3b. Accessing the Pending users table to obtain the user\'s token pending_user = Pending.objects.get(user__username=newusr) # 4b. Using the user token and user username to query views.VerifyEmail body1 = { 'user': newusr, 'token': pending_user.token, } resp = self.client.get('/verify_email', body1, format='json', headers=headers) # resp.content: ...validated your email address and your IFRC Go account is now approved self.assertEqual(resp.status_code, 200) # 5b. Confirming that a user with an official email is activated boarded_user = User.objects.get(username=newusr) self.assertTrue(boarded_user.is_active)
[ 9, 13902, 487 ]
def METHOD_NAME(self): path = os.path.dirname( robjects.packages_utils.get_packagepath('stats') ) stats = robjects.packages.importr('stats', lib_loc=path, on_conflict='warn', suppress_messages=False) assert isinstance(stats, robjects.packages.Package)
[ 9, 512, 577, 41, 8377, 61, -1 ]
def METHOD_NAME(project): org = project.organization rv = {} exclude_fields_key = "sentry:safe_fields" rv["excludeFields"] = org.get_option(exclude_fields_key, []) + project.get_option( exclude_fields_key, [] ) if org.get_option("sentry:require_scrub_data", False) or project.get_option( "sentry:scrub_data", True ): rv["scrubData"] = True if org.get_option("sentry:require_scrub_ip_address", False) or project.get_option( "sentry:scrub_ip_address", False ): rv["scrubIpAddresses"] = True sensitive_fields_key = "sentry:sensitive_fields" rv["sensitiveFields"] = org.get_option(sensitive_fields_key, []) + project.get_option( sensitive_fields_key, [] ) rv["scrubDefaults"] = org.get_option( "sentry:require_scrub_defaults", False ) or project.get_option("sentry:scrub_defaults", True) return rv
[ 19, -1, 817 ]
def METHOD_NAME(self) -> Optional['outputs.CassandraKeyspaceGetPropertiesResponseResource']: return pulumi.get(self, "resource")
[ 191 ]
def METHOD_NAME(mock_secret_provider): with mock.patch( "paasta_tools.secret_providers.vault.get_secret_name_from_ref", autospec=True ) as mock_get_secret_name_from_ref, mock.patch( "paasta_tools.secret_providers.vault.get_plaintext", autospec=False ) as mock_get_plaintext: mock_get_plaintext.return_value = b"SECRETSQUIRREL" mock_env = { "MY_VAR": "SECRET(test-secret)", "ANOTHER_VAR": "SECRET(another-secret)", } mock_get_secret_name_from_ref.return_value = "secret_name" ret = mock_secret_provider.decrypt_environment( environment=mock_env, some="kwarg" ) mock_get_secret_name_from_ref.assert_has_calls( [mock.call("SECRET(test-secret)"), mock.call("SECRET(another-secret)")] ) expected = {"MY_VAR": "SECRETSQUIRREL", "ANOTHER_VAR": "SECRETSQUIRREL"} assert ret == expected
[ 9, 443, 1027 ]
def METHOD_NAME(crispin_client, account_id, message_id): """ Create an email on the remote backend. Generic providers expect us to create a copy of the message in the sent folder. """ remote_save_sent(crispin_client, account_id, message_id)
[ 73, 2876, 487 ]
def METHOD_NAME(self): self.requires("freetype/2.12.1") if Version(self.version) < "1.10": self.requires("lodepng/cci.20200615") else: self.requires("libpng/1.6.39") self.requires("tinyxml2/9.0.0")
[ 5186 ]
async def METHOD_NAME(bot: Bot) -> None: """Load the Egghead Quiz Cog.""" await bot.add_cog(EggheadQuiz())
[ 102 ]
def METHOD_NAME(self, dest, src): dest.data[:] += src.data
[ 411 ]
def METHOD_NAME(string: str) -> bool: return string.islower()
[ 137, 826 ]
def METHOD_NAME(self, resource_uri: str, **kwargs: Any) -> AsyncIterable["_models.MetricDefinition"]: """Lists the metric definitions for the resource. :param resource_uri: The identifier of the resource. Required. :type resource_uri: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either MetricDefinition or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.monitor.v2017_05_01_preview.models.MetricDefinition] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2017-05-01-preview")) cls: ClsType[_models.MetricDefinitionCollection] = kwargs.pop("cls", None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_request( resource_uri=resource_uri, api_version=api_version, template_url=self.METHOD_NAME.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("MetricDefinitionCollection", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
[ 245 ]
def METHOD_NAME(project_dir, env_names, debug=False): # pylint: disable=import-outside-toplevel from platformio import app from platformio.run.cli import cli as cmd_run args = ["--project-dir", project_dir, "--target", "__idedata"] if debug: args.extend(["--target", "__debug"]) for name in env_names: args.extend(["-e", name]) app.set_session_var("pause_telemetry", True) result = CliRunner().invoke(cmd_run, args) app.set_session_var("pause_telemetry", False) if result.exit_code != 0 and not isinstance( result.exception, exception.ReturnErrorCode ): raise result.exception if '"includes":' not in result.output: raise exception.UserSideException(result.output) return _get_cached_build_metadata(project_dir, env_names)
[ 557, 56, 773 ]
def METHOD_NAME(self, obj: Project) -> bool: return ( self.context["migration_status"] == ProjectIdentityMigrationStatus.MIGRATION_COMPLETED.value )
[ 19, 1080, 540, 7949 ]
def METHOD_NAME( readme_content, expected_metadata_configs_dict, expected_default_config_name ): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(readme_content) dataset_card_data = DatasetCard.load(path).data metadata_configs_dict = MetadataConfigs.from_dataset_card_data(dataset_card_data) assert metadata_configs_dict == expected_metadata_configs_dict assert metadata_configs_dict.get_default_config_name() == expected_default_config_name
[ 9, 773, 736, 126, 5427, 365 ]
def METHOD_NAME(self):
[ 1224, 156 ]
def METHOD_NAME(self): # capture output to stdout self.mocked_stdout = NativeStringIO() self.patch(sys, "stdout", self.mocked_stdout) # generate OS specific relative path to buildbot.tac inside basedir self.tac_file_path = os.path.join("testdir", "buildbot.tac")
[ 0, 1 ]
def METHOD_NAME( target_host, agent_binary_downloaded, mssql_exploit_client ) -> Callable[[], Tuple[bool, bool]]: def _inner() -> Tuple[bool, bool]: return mssql_exploit_client.METHOD_NAME( target_host, MSSQLOptions(agent_binary_download_timeout=0.001), CREDENTIALS, DOWNLOAD_COMMAND, LAUNCH_COMMAND, agent_binary_downloaded, set(), ) return _inner
[ 4714, 1806 ]
def METHOD_NAME() -> List[str]: raise NotImplementedError()
[ 19, 3403, 4774 ]
def METHOD_NAME(self): not_my_password = factories.PasswordFactory(user=factories.UserFactory()) self.assertEqual(1, models.Password.objects.all().count()) request = self.client.delete("/api/passwords/%s/" % not_my_password.id) self.assertEqual(404, request.status_code) self.assertEqual(1, models.Password.objects.all().count())
[ 9, 3438, 34, 2395, 2897 ]
def METHOD_NAME( self, conn: sqlite3.Connection, isolation_level: Optional[int] ) -> None: # All transactions are SERIALIZABLE by default in sqlite pass
[ 3142, 24, 0, 5167, 33 ]
def METHOD_NAME(self, enc_min, enc_max, is_symmetric, is_strict): """ Test that the recomputed encoding within libpymo TensorQuantizer matches with the way encodings are recomputed in calculate_delta_offset and compute_min_max_given_delta_offset. """ tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF, libpymo.RoundingMode.ROUND_NEAREST) tensor_quantizer.isEncodingValid = True in_tensor = np.array([-100.0, 100.0]) out_tensor = np.zeros(in_tensor.shape).astype(np.float32) tensor_quantizer.quantizeDequantize(in_tensor, out_tensor, enc_min, enc_max, 3, False) delta, offset = calculate_delta_offset(enc_min, enc_max, 3, is_symmetric, is_strict) new_enc_min, new_enc_max = compute_min_max_given_delta_offset(delta, offset, 3, is_symmetric, is_strict) assert np.allclose(out_tensor[0], new_enc_min, atol=1e-5) assert np.allclose(out_tensor[1], new_enc_max, atol=1e-5)
[ 9, 2300, 49, 7111, 440, 2505, 7728 ]
def METHOD_NAME(source_git_config): assert source_git_config["upstream_project_url"] == "https://example.com/hello.git" assert source_git_config["upstream_ref"] == HELLO_RELEASE assert source_git_config["downstream_package_name"] == "hello" assert source_git_config["specfile_path"] == ".distro/hello.spec" assert source_git_config["patch_generation_ignore_paths"] == [DISTRO_DIR] assert source_git_config["sync_changelog"] is True assert source_git_config["files_to_sync"] == [ { "src": ".distro/", "dest": ".", "delete": True, "filters": [ "protect .git*", "protect sources", f"exclude {SRC_GIT_CONFIG}", "exclude .gitignore", ], } ] assert source_git_config["sources"][0]["path"] == f"hello-{HELLO_RELEASE}.tar.gz"
[ 250, 1458, 1493, 200 ]
def METHOD_NAME(self): """ This test suite tests exceeding cput. """ a = {'Resource_List.cput': 10} j = Job(TEST_USER, a) # we need at least two processes otherwise the kernel # would kill the process first test = [] test += ['#!/bin/bash'] test += ['dd if=/dev/zero of=/dev/null & \
[ 9, 13231, -1 ]
def METHOD_NAME(self) -> ReadOnlyCredentials: self._refresh() with self._lock: return ReadOnlyCredentials(self._access_key, self._secret_key, self._token)
[ 19, 2639, 3568 ]
def METHOD_NAME( tx_to_address: str, tx_data: bytes, tx_type: int, access_list: List[AccessList], call_beacon_root_contract: bool, ) -> Transaction: """ Prepares transaction to call the beacon root precompile caller account. """ to = BEACON_ROOT_CONTRACT_ADDRESS if call_beacon_root_contract else tx_to_address kwargs: Dict = { "ty": tx_type, "nonce": 0, "data": tx_data, "to": to, "value": 0, "gas_limit": 1000000, } if tx_type > 0: kwargs["access_list"] = access_list if tx_type <= 1: kwargs["gas_price"] = 7 else: kwargs["max_fee_per_gas"] = 7 kwargs["max_priority_fee_per_gas"] = 0 if tx_type == 3: kwargs["max_fee_per_blob_gas"] = 1 kwargs["blob_versioned_hashes"] = add_kzg_version([0], BLOB_COMMITMENT_VERSION_KZG) if tx_type > 3: raise Exception(f"Unexpected transaction type: '{tx_type}'. Test requires update.") return Transaction(**kwargs)
[ 2543 ]
def METHOD_NAME(self, statusList, detail=True): # FIXME: this method needs to be fixed accordingly if statusList == 'staged': specName = "ReRecoTest_v%sEmulator" % self.count specUrl =self.specGenerator.createReRecoSpec(specName, "file", self.splitter, assignKwargs={'SiteWhitelist': ['T2_XX_SiteA']}) self.names.append(specName) self.status[specName] = 'staged' #specName = "FakeProductionSpec_%s" % self.count #specUrl =self.specGenerator.createProductionSpec(specName, "file") #specName = "FakeProcessingSpec_%s" % self.count #specUrl =self.specGenerator.createProcessingSpec(specName, "file") self.count += 1 # returns list of list(rquest name, spec url) return [[specName, specUrl],] else: return [
[ 19, 377, 604, 452 ]
def METHOD_NAME(self) -> None: indexes = [ IndexModel([("sender_id", pymongo.ASCENDING), ("event.event", pymongo.ASCENDING)]), IndexModel([("type", pymongo.ASCENDING), ("timestamp", pymongo.ASCENDING)]), IndexModel([("sender_id", pymongo.ASCENDING), ("conversation_id", pymongo.ASCENDING)]), IndexModel([("event.event", pymongo.ASCENDING), ("event.timestamp", pymongo.DESCENDING)]), IndexModel([("event.name", pymongo.ASCENDING), ("event.timestamp", pymongo.DESCENDING)]), IndexModel([("event.timestamp", pymongo.DESCENDING)]) ] self.conversations.create_indexes(indexes)
[ 602, 1894 ]
def METHOD_NAME(self, batch): """Fits train batches""" preds = self.compute_forward(batch, sb.Stage.TRAIN) loss = self.compute_objectives(preds, batch, sb.Stage.TRAIN) loss.backward() if self.check_gradients(loss): self.optimizer.step() self.optimizer.zero_grad() return loss.detach()
[ 90, 2277 ]
def METHOD_NAME(self, preprocessor): """Check that an error is raised when the grade id is blank""" nb = self._read_nb(os.path.join("files", "blank-grade-id.ipynb"), validate=False) with pytest.raises(ValidationError): preprocessor.preprocess(nb, {})
[ 9, 2882, 5560, 147 ]
def METHOD_NAME(self): pred_proba, confidence = self.clf.predict_proba(self.X_test, method='linear', return_confidence=True) assert (pred_proba.min() >= 0) assert (pred_proba.max() <= 1) assert_equal(confidence.shape, self.y_test.shape) assert (confidence.min() >= 0) assert (confidence.max() <= 1)
[ 9, 2726, 2550, 1783, 2727 ]
def METHOD_NAME( self, blackboard_api_client, plugin, grouping_service, course ): blackboard_api_client.group_set_groups.side_effect = ExternalRequestError( response=Mock(status_code=500) ) with pytest.raises(ExternalRequestError): plugin.get_groups_for_instructor( grouping_service, course, sentinel.group_set_id )
[ 9, 19, 861, 43, 6755, 45 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(cert_byes, pk_bytes, password=None, encoding=Encoding.PEM): """Create an SSL Context with the supplied cert/password. :param cert_bytes array of bytes containing the cert encoded using the method supplied in the ``encoding`` parameter :param pk_bytes array of bytes containing the private key encoded using the method supplied in the ``encoding`` parameter :param password array of bytes containing the passphrase to be used with the supplied private key. None if unencrypted. Defaults to None. :param encoding ``cryptography.hazmat.primitives.serialization.Encoding`` details the encoding method used on the ``cert_bytes`` and ``pk_bytes`` parameters. Can be either PEM or DER. Defaults to PEM. """ backend = default_backend() cert = None key = None if encoding == Encoding.PEM: cert = x509.load_pem_x509_certificate(cert_byes, backend) key = load_pem_private_key(pk_bytes, password, backend) elif encoding == Encoding.DER: cert = x509.load_der_x509_certificate(cert_byes, backend) key = load_der_private_key(pk_bytes, password, backend) else: raise ValueError('Invalid encoding provided: Must be PEM or DER') if not (cert and key): raise ValueError('Cert and key could not be parsed from ' 'provided data') check_cert_dates(cert) ssl_context = PyOpenSSLContext(PROTOCOL) ssl_context._ctx.use_certificate(X509.from_cryptography(cert)) ssl_context._ctx.use_privatekey(PKey.from_cryptography_key(key)) return ssl_context
[ 129, 1247, 198 ]
def METHOD_NAME(f, surf, include_ga, row, guesses, g): """return the endpoints (L,R) contained in the frustum f; if only one voxel both endpoints will be the same; if none both will be None f: frustum object surf: surface voxels row: current row guesses: estimates for endpoints g: grid boundaries""" # +x or right endpoint Rend, Lend = None, None check_surf_L, check_surf_R = (None, None), (None, None) stop = False Ri = guesses[1] ogrverts = verts_in(f, (Ri, row[0], row[1]), surf, g) if ogrverts == 0: going_in = True elif 1 <= ogrverts < 8: going_in = False check_surf_R = (True, Ri) else: going_in = False while (0 <= Ri and (g["xlo"] + (Ri) * g["dx"]) < g["xhi"]) and not stop: verts = verts_in(f, (Ri, row[0], row[1]), surf, g) if verts == 0: if not going_in: stop = True continue else: if Ri == guesses[0]: # row is empty between guesses return (None, None) Ri -= 1 continue elif verts == 8: Rend = Ri Ri += 1 continue else: Rend = Ri if going_in: check_surf_R = (True, Ri) break Ri += 1 # the -x or left endpoint stop = False Li = guesses[0] oglverts = verts_in(f, (Li, row[0], row[1]), surf, g) if oglverts == 0: going_in = True elif 1 <= oglverts < 8: going_in = False check_surf_L = (True, Li) else: going_in = False while (0 <= Li and (g["xlo"] + (Li) * g["dx"]) < g["xhi"]) and not stop: verts = verts_in(f, (Li, row[0], row[1]), surf, g) if verts == 0: if not going_in: stop = True continue else: # it's not empty or would have already returned Li += 1 continue elif verts == 8: Lend = Li Li -= 1 continue else: Lend = Li if going_in: check_surf_L = (True, Li) break Li -= 1 # check for extra surface voxels missed if check_surf_R[0] and Lend is not None: r = check_surf_R[1] while r > Lend: verts = verts_in(f, (r, row[0], row[1]), surf, g) if verts == 8: break else: r -= 1 if check_surf_L[0] and Rend is not None: l = check_surf_L[1] while l < Rend: verts = verts_in(f, (l, row[0], row[1]), surf, g) if verts == 8: break else: l += 1 # if keeping non-surface but grid-adjacent voxels: if include_ga: surf.add((Lend, row[0], row[1])) surf.add((Rend, row[0], row[1])) return (Lend, Rend)
[ 416, 1197 ]
def METHOD_NAME(self, other: GEOSGeometry) -> GEOSGeometry: ...
[ 2845 ]
def METHOD_NAME(self, ignite_version): """ Test add, update and remove user """ config = IgniteConfiguration( cluster_state="INACTIVE", auth_enabled=True, version=IgniteVersion(ignite_version), data_storage=DataStorageConfiguration( default=DataRegionConfiguration(persistence_enabled=True)), client_connector_configuration=ClientConnectorConfiguration() ) servers = IgniteService(self.test_context, config=config, num_nodes=self.NUM_NODES - 1) servers.start() ControlUtility(cluster=servers, username=DEFAULT_AUTH_USERNAME, password=DEFAULT_AUTH_PASSWORD).activate() client_cfg = IgniteThinClientConfiguration( addresses=[servers.nodes[0].account.hostname + ":" + str(config.client_connector_configuration.port)], version=IgniteVersion(ignite_version), username=DEFAULT_AUTH_USERNAME, password=DEFAULT_AUTH_PASSWORD) # Add new user check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD, True) self.run_with_creds(client_cfg, ADD_USER, TEST_USERNAME, TEST_PASSWORD) check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD) # Update user password check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD2, True) self.run_with_creds(client_cfg, UPDATE_USER, TEST_USERNAME, TEST_PASSWORD2) check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD, True) check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD2) # Remove user self.run_with_creds(client_cfg, REMOVE_USER, TEST_USERNAME, free=False) check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD2, True)
[ 9, 194, 3467 ]
def METHOD_NAME(exitcodes): """Format a list of exit code with names of the signals if possible""" str_exitcodes = [ f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None ] return "{" + ", ".join(str_exitcodes) + "}"
[ 275, 12251 ]
def METHOD_NAME(path): return normsep(os.path.METHOD_NAME(path))
[ 2680 ]
def METHOD_NAME(self): self.assertEqual( self.gf.namePattern, 'np', 'namePattern not initialised as np') self.assertEqual( self.gf.localDir, '', 'localDir not default initialised as None') g1 = GoogleFile() self.assertEqual( g1.namePattern, '', 'namePattern not default initialised as empty') self.assertEqual( g1.localDir, '', 'localDir not default initialised as None') g2 = GoogleFile(namePattern='np') self.assertEqual( g2.namePattern, 'np', 'namePattern not keyword initialised as np') self.assertEqual( g1.localDir, '', 'localDir not default initialised as None')
[ 9, 176 ]
def METHOD_NAME(self, username): ldap_user = LDAPUser(self, username=username) user = ldap_user.METHOD_NAME() return user
[ 3914, 21 ]
def METHOD_NAME(): boto_session = MagicMock("boto_session") resource_mock = Mock("resource") client_mock = MagicMock("client") boto_attrs = {"region_name": "us-east-1"} boto_session.configure_mock(**boto_attrs) boto_session.resource = Mock(name="resource", return_value=resource_mock) boto_session.client = Mock(name="client", return_value=client_mock) local_session = sagemaker.local.local_session.LocalSession( boto_session=boto_session, s3_endpoint_url=ENDPOINT_URL ) local_session.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME) return local_session
[ 9638, 240, 343, 841 ]
def METHOD_NAME(num_attempts: int, time_scale: float) -> datetime.timedelta: """Computes a delay to the next attempt to poll the Vertex service. This does bounded exponential backoff, starting with $time_scale. If $time_scale == 0, it starts with a small time interval, less than 1 second. Args: num_attempts: The number of times have we polled and found that the desired result was not yet available. time_scale: The shortest polling interval, in seconds, or zero. Zero is treated as a small interval, less than 1 second. Returns: A recommended delay interval, in seconds. """ # The polling schedule is slow initially , and then gets faster until 4 # attempts (after that the sleeping time remains the same). small_interval = 30.0 # Seconds interval = max(time_scale, small_interval) * 0.76 ** min(num_attempts, 4) return datetime.timedelta(seconds=interval)
[ 2510, 1344 ]
async def METHOD_NAME(next_link=None): request = prepare_request(next_link) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response
[ 19, 243 ]
def METHOD_NAME(self) -> str: """ Contents of the Policy as defined by the format. """ return pulumi.get(self, "value")
[ 99 ]
def METHOD_NAME(): output = """ select cast(l.source_dataset as varchar) || '-__-' || cast(l.unique_id as varchar) as concat_id """ output = sqlglot.parse_one(output).sql() sql = "select l.source_dataset || '-__-' || l.unique_id as concat_id" transformed_sql = sql = sqlglot_transform_sql(sql, cast_concat_as_varchar) assert transformed_sql == output sql = """ select cast(l.source_dataset as varchar) || '-__-' || l.unique_id as concat_id """ transformed_sql = sql = sqlglot_transform_sql(sql, cast_concat_as_varchar) assert transformed_sql == output sql = """ select cast(l.source_dataset as varchar) || '-__-' || cast(l.unique_id as varchar) as concat_id """ transformed_sql = sql = sqlglot_transform_sql(sql, cast_concat_as_varchar) assert transformed_sql == output sql = "select source_dataset || '-__-' || unique_id as concat_id" transformed_sql = sql = sqlglot_transform_sql(sql, cast_concat_as_varchar) assert transformed_sql == output.replace("l.", "")
[ 9, 3723, 2008, 947, 16324 ]
def METHOD_NAME(cls, model):
[ 280, 578 ]
def METHOD_NAME(item): columns = [ { "fieldname": "variant_name", "label": _("Variant"), "fieldtype": "Link", "options": "Item", "width": 200, } ] item_doc = frappe.get_doc("Item", item) for entry in item_doc.attributes: columns.append( { "fieldname": frappe.scrub(entry.attribute), "label": entry.attribute, "fieldtype": "Data", "width": 100, } ) additional_columns = [ { "fieldname": "avg_buying_price_list_rate", "label": _("Avg. Buying Price List Rate"), "fieldtype": "Currency", "width": 150, }, { "fieldname": "avg_selling_price_list_rate", "label": _("Avg. Selling Price List Rate"), "fieldtype": "Currency", "width": 150, }, {"fieldname": "current_stock", "label": _("Current Stock"), "fieldtype": "Float", "width": 120}, {"fieldname": "in_production", "label": _("In Production"), "fieldtype": "Float", "width": 150}, { "fieldname": "open_orders", "label": _("Open Sales Orders"), "fieldtype": "Float", "width": 150, }, ] columns.extend(additional_columns) return columns
[ 19, 1951 ]
def METHOD_NAME(self): add_layer = GlobalRandomDroppingPoints(drop_rate=0.5) point_clouds = np.random.random(size=(1, 50, 2)).astype("float32") point_clouds = np.concatenate([point_clouds, point_clouds], axis=0) bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32") inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} outputs = add_layer(inputs) self.assertNotAllClose(inputs, outputs) # The augmented point clouds in the first frame should be the same as # the augmented point clouds in the second frame. self.assertAllClose(outputs[POINT_CLOUDS][0], outputs[POINT_CLOUDS][1])
[ 9, 3303, 3725, 1669, 3793, 61, 1538 ]
def METHOD_NAME(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.METHOD_NAME() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].METHOD_NAME()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
[ 24, 553 ]
def METHOD_NAME(tmp_path, create_model_config): """ Test calibration of a forecast using a rainforests approach where thresholds are specified with json file. """ rainforests_dir = acc.kgo_root() / "apply-rainforests-calibration" kgo_path = rainforests_dir / "basic" / "kgo.nc" forecast_path = ( rainforests_dir / "features" / "20200802T0000Z-PT0024H00M-precipitation_accumulation-PT24H.nc" ) feature_paths = (rainforests_dir / "features").glob("20200802T0000Z-PT00*-PT24H.nc") model_config = create_model_config output_path = tmp_path / "output.nc" json_path = rainforests_dir / "threshold_config" / "thresholds.json" args = [ forecast_path, *feature_paths, "--model-config", model_config, "--output-threshold-config", json_path, "--output", output_path, ] run_cli(args) acc.compare(output_path, kgo_path)
[ 9, 763, 853, 200 ]
def METHOD_NAME(helm_release: HelmRelease) -> FindingSeverity: if helm_release.info.status in ["deployed", "uninstalled"]: return FindingSeverity.INFO return FindingSeverity.HIGH
[ 19, 32 ]
def METHOD_NAME(amplitude, N, dt, model, full_output=False, **kwargs): """ returns the voltage trace of an emitter We implement only the time-domain solution and obtain the frequency spectrum via FFT (with the standard normalization of NuRadioMC). This approach assures that the units are interpreted correctly. In the time domain, the amplitudes are well defined and not details about fourier transform normalizations needs to be known by the user. Parameters ---------- amplitude : float strength of a pulse N : int number of samples in the time domain dt: float time bin width, i.e. the inverse of the sampling rate model: string specifies the signal model * delta_pulse: a simple signal model of a delta pulse emitter * cw : a sinusoidal wave of given frequency * square : a rectangular pulse of given amplituede and width * tone_burst : a short sine wave pulse of given frequency and desired width * idl1 & hvsp1 : these are the waveforms generated in KU lab and stored in hdf5 files * gaussian : represents a gaussian pulse where sigma is defined through the half width at half maximum * ARA02-calPulser : a new normalized voltage signal which depicts the original CalPulser shape used in ARA-02 full_output: bool (default False) if True, can return additional output Returns ------- time trace: 2d array, shape (3, N) the amplitudes for each time bin additional information: dict only available if `full_output` enabled """ half_width = kwargs.get("half_width") emitter_frequency = kwargs.get("emitter_frequency") trace = None additional_output = {} if(amplitude == 0): trace = np.zeros(3, N) if(model == 'delta_pulse'): # this takes delta signal as input voltage trace = np.zeros(N) trace[N // 2] = amplitude elif(model == 'cw'): # generates a sine wave of given frequency time = np.linspace(-(N / 2) * dt, ((N - 1) - N / 2) * dt, N) trace = amplitude * np.sin(2 * np.pi * emitter_frequency * time) elif(model == 'square' or model == 'tone_burst'): # generates a rectangular or tone_burst signal of given width and frequency if(half_width > int(N / 2)): raise NotImplementedError(" half_width {} should be < half of the number of samples N " . format(half_width)) time = np.linspace(-(N / 2) * dt, ((N - 1) - N / 2) * dt, N) voltage = np.zeros(N) for i in range(0, N): if time[i] >= -half_width and time[i] <= half_width: voltage[i] = amplitude if(model == 'square'): trace = voltage else: trace = voltage * np.sin(2 * np.pi * emitter_frequency * time) elif(model == 'gaussian'): # generates gaussian pulse where half_width represents the half width at half maximum time = np.linspace(-(N / 2) * dt, ((N - 1) - N / 2) * dt, N) sigma = half_width / (np.sqrt(2 * np.log(2))) trace = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-1 / 2 * ((time - 500) / sigma) ** 2) trace = amplitude * 1 / np.max(np.abs(trace)) * trace elif(model == 'idl1' or model == 'hvsp1' or model == 'ARA02_calPulser'): # the idl1 & hvsp1 waveforms gemerated in KU Lab stored in hdf5 file path = os.path.dirname(os.path.dirname(__file__)) if(model == 'idl1'): input_file = os.path.join(path, 'data/idl1_data.hdf5') elif(model == 'hvsp1'): input_file = os.path.join(path, 'data/hvsp1_data.hdf5') else: input_file = os.path.join(path, 'data/ARA02_Cal_data.hdf5') read_file = h5py.File(input_file, 'r') time_original = read_file.get('time') voltage_original = read_file.get('voltage') time_new = np.linspace(time_original[0], time_original[len(time_original) - 1], (int((time_original[len(time_original) - 1] - time_original[0]) / dt) + 1)) interpolation = interp1d(time_original, voltage_original, kind='cubic') voltage_new = interpolation(time_new) # if the interpolated waveform has larger sample size than N , it will truncate the data keeping peak amplitude at center if len(voltage_new) > N: peak_amplitude_index = np.where(np.abs(voltage_new) == np.max(np.abs(voltage_new)))[0][0] voltage_new = np.roll(voltage_new, int(len(voltage_new) / 2) - peak_amplitude_index) lower_index = int(len(voltage_new) / 2 - N / 2) trace = voltage_new[lower_index: lower_index + N] # this truncate data making trace lenght of N # for the case with larger N, trace size will be adjusted depending on whether the number (N + len(voltage_new)) is even or odd else: add_zeros = int((N - len(voltage_new)) / 2) adjustment = 0 if ((N + len(voltage_new)) % 2 != 0): adjustment = 1 trace = np.pad(voltage_new, (add_zeros + adjustment, add_zeros), 'constant', constant_values=(0, 0)) trace = amplitude * trace / np.max(np.abs(trace)) # trace now has dimension of amplitude given from event generation file peak_amplitude_index_new = np.where(np.abs(trace) == np.max(np.abs(trace)))[0][0] trace = np.roll(trace, int(N / 2) - peak_amplitude_index_new) # this rolls the array(trace) to keep peak amplitude at center else: raise NotImplementedError("model {} unknown".format(model)) if(full_output): return trace, additional_output else: return trace
[ 19, 104, 2576 ]
def METHOD_NAME(): """ Function to parse comman line arguments. """ parser = argparse.ArgumentParser(description='Create embedding database file.') parser.add_argument('--db', '-db-path', type=str, default='db', help='path for face images') parser.add_argument('--db-filename', type=str, default='embeddings', help='filename to store embeddings') parser.add_argument('--include-path', type=str, default='embeddings', help='path to include folder') args = parser.parse_args() return args
[ 214, 134 ]
def METHOD_NAME(self, reddit): """Make the Reddit instance read-only.""" # Require tests to explicitly disable read_only mode. reddit.METHOD_NAME = True
[ 203, 246 ]
def METHOD_NAME(): ds = sc.Dataset({'a': sc.data.table_xyz(10), 'b': sc.data.table_xyz(10) * 1.123}) ds['x', ds.coords['x'][0]] = ds['x', ds.coords['x'][1]] assert sc.identical(ds['a'][0].data, ds['a'][1].data) assert sc.identical(ds['b'][0].data, ds['b'][1].data)
[ 9, 5719, 126, 99, 1435, 55 ]
def METHOD_NAME(self): """Return time elapsed using the start time and current time.""" return self.current_time - self.start_time
[ 3229 ]
def METHOD_NAME(img, lower=0.5, upper=1.5): e = np.random.uniform(lower, upper) return ImageEnhance.Color(img).enhance(e)
[ 236, 36 ]
def METHOD_NAME(schema_obj): """ Test whether expected columns are present and the requirement level is applied correctly. This should be robust with respect to schema format. """ # mri.MRISpatialEncoding selected for having some level and description addenda rendered_table = tables.make_columns_table( schema_obj, "modality_agnostic.Participants", ).split("\n") assert rendered_table[0].startswith("| **Column name**") assert rendered_table[1].startswith("|----------------") fields = schema_obj.rules.tabular_data.modality_agnostic.Participants.columns assert len(rendered_table) == len(fields) + 3 # header + orientation + add. cols. row for field, render_row in zip(fields, rendered_table[2:-1]): assert render_row.startswith(f"| [{field}](") spec = fields[field] if isinstance(spec, str): level = spec level_addendum = "" description_addendum = "" else: level = spec["level"] level_addendum = spec.get("level_addendum", "").replace("required", "REQUIRED") description_addendum = spec.get("description_addendum", "") assert level.upper() in render_row assert level_addendum.split("\n")[0] in render_row assert description_addendum.split("\n")[0] in render_row
[ 9, 93, 1951, 410 ]
def METHOD_NAME(fortran_reader): ''' Tests for the _copy_full_base_reference utility method. ''' code = ("subroutine my_sub()\n" " use some_mod, only: my_type\n" " type(my_type) :: var, vars(3)\n" " var%region%subgrid(3)%data(:) = 1.0\n" " vars(1)%region%subgrid(3)%data(:) = 1.0\n" " vars(1)%region%subgrid(:)%data(:) = 1.0\n" " vars(:)%region%subgrid(3)%xstop = 1.0\n" "end subroutine my_sub\n") psyir = fortran_reader.psyir_from_source(code) assignments = psyir.walk(Assignment) # var%region%subgrid(3)%data(:) assign = assignments[0] arg = _copy_full_base_reference(assign.lhs.member.member.member) assert arg.member.member.member.name == "data" assert isinstance(arg.member.member.member, Member) assert not isinstance(arg.member.member.member, ArrayMember) # vars(1)%region%subgrid(3)%data(:) assign = assignments[1] arg = _copy_full_base_reference(assign.lhs.member.member.member) assert arg.member.member.member.name == "data" assert isinstance(arg.member.member.member, Member) assert not isinstance(arg.member.member.member, ArrayMember) # vars(1)%region%subgrid(:)%data(:) assign = assignments[2] # For the first colon arg = _copy_full_base_reference(assign.lhs.member.member) assert arg.member.member.name == "subgrid" assert isinstance(arg.member.member, Member) assert not isinstance(arg.member.member, (ArrayMember, StructureMember)) # For the second colon arg = _copy_full_base_reference(assign.lhs.member.member.member) assert arg.member.member.member.name == "data" assert isinstance(arg.member.member.member, Member) assert not isinstance(arg.member.member.member, ArrayMember) # vars(:)%region%subgrid(3)%xstop assign = assignments[3] arg = _copy_full_base_reference(assign.lhs) assert arg.symbol.name == "vars" assert isinstance(arg, Reference) assert not isinstance(arg, ArrayReference)
[ 9, 16982, 17693, 718 ]
def METHOD_NAME(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(): ret = { } # Magic to generate unique ids for s, v in checkMetadataTable.items(): if s == "None": continue splits = s.split("-") main_id = int(splits[0], 16) sub_id = 0 if len(splits) > 1: sub_id = splits[1] if sub_id.isnumeric(): sub_id = (int(sub_id) + 1) * 1000 else: sub_id = 1000 name = f"{v.name} ({v.area})" ret[name] = BASE_ID + main_id + sub_id return ret
[ 19, 1081, 24, 147 ]
def METHOD_NAME(data): return (len(data) == 2 and ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
[ 137, 9148, 637 ]
def METHOD_NAME(self): env = self.server.base_environ.copy() env['SERVER_PROTOCOL'] = self.request_version env['SERVER_SOFTWARE'] = self.server_version env['REQUEST_METHOD'] = self.command if '?' in self.path: path,query = self.path.split('?',1) else: path,query = self.path,'' env['PATH_INFO'] = urllib.parse.unquote(path, 'iso-8859-1') env['QUERY_STRING'] = query host = self.address_string() if host != self.client_address[0]: env['REMOTE_HOST'] = host env['REMOTE_ADDR'] = self.client_address[0] if self.headers.get('content-type') is None: env['CONTENT_TYPE'] = self.headers.get_content_type() else: env['CONTENT_TYPE'] = self.headers['content-type'] length = self.headers.get('content-length') if length: env['CONTENT_LENGTH'] = length for k, v in self.headers.items(): k=k.replace('-','_').upper(); v=v.strip() if k in env: continue # skip content length, type,etc. if 'HTTP_'+k in env: env['HTTP_'+k] += ','+v # comma-separate multiple headers else: env['HTTP_'+k] = v return env
[ 19, 4686 ]
def METHOD_NAME(self, func_name, arg1, arg2):
[ 947, 808, 559 ]
def METHOD_NAME(): transformations = enumerate_transformations() for index, transformation in enumerate(transformations): print(f'{index} {transformation}') print(f'There are {len(transformations)} transformations.')
[ 29 ]
def METHOD_NAME(): """ Fetch the SENTRY_CONF value, either from the click context if available, or SENTRY_CONF environment variable. """ try: ctx = click.get_current_context() return ctx.obj["config"] except (RuntimeError, KeyError, TypeError): try: return os.environ["SENTRY_CONF"] except KeyError: return "~/.sentry"
[ 19, 1063, 2546 ]
def METHOD_NAME( contract_order: contractOrder, config: AlgoConfig ) -> contractOrder: contract_order.algo_to_use = config.market_algo return contract_order
[ 783, 686, 6080 ]
def METHOD_NAME(self): self.course_financial_mode.save() self.request.user = None assert not FinancialAssistanceTool().is_enabled(self.request, self.course.id)
[ 9, 3081, 130, 2999, 1646, 21, 130 ]
def METHOD_NAME(self): """string: the version of this running JBoss progress.""" return self._parsed.METHOD_NAME
[ 281 ]
def METHOD_NAME(self):
[ 9, 808, 493 ]
def METHOD_NAME( data, kernel, stride, padding, dilation, count_include_pad, oshape, odtype="float16" ): """avg_pool2d compute""" if odtype != "float16": raise RuntimeError(f"Unsupported output dtype '{odtype}'") kh, kw = kernel rh = te.reduce_axis((0, kh), name="rh") rw = te.reduce_axis((0, kw), name="rw") sh, sw = stride dh, dw = dilation dilated_kh = (kh - 1) * dh + 1 dilated_kw = (kw - 1) * dw + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( get_const_tuple(padding), (dilated_kh, dilated_kw) ) # DOPAD if pad_top != 0 or pad_down != 0 or pad_left != 0 or pad_right != 0: pad_before = (0, 0, pad_top, pad_left) pad_after = (0, 0, pad_down, pad_right) data_pad = pad(data, pad_before, pad_after, name="data_pad") else: # By definition when True, zero-padding will be included in the averaging calculation # This is equivalent to PoolArea = (kh * kw) count_include_pad = True data_pad = data Sum = te.compute( oshape, lambda b, c, h, w: te.sum( data_pad[b, c, h * sh + dh * rh, w * sw + dw * rw].astype("float32"), axis=[rh, rw] ), name="pool_sum", ) if not count_include_pad: # Compute PoolArea using unpadded input tensor _, _, oh, ow = oshape _, _, ih, iw = data.shape PoolArea = te.compute( (oh, ow), lambda i, j: compute_PoolArea(i, j, ih, iw, kh, kw, sh, sw, dh, dw, pad_top, pad_left), name="pool_area", ) InvArea = te.compute( (oh, ow), lambda i, j: tir.if_then_else( tir.all(PoolArea[i, j] > 0), (float(1) / PoolArea[i, j]), 0 ), name="inverse_area", ) Avg = te.compute( oshape, lambda b, c, h, w: (Sum[b, c, h, w] * InvArea[h, w]).astype(odtype), name="pool_avg", ) else: InvArea = float(1) / (kh * kw) Avg = te.compute( oshape, lambda b, c, h, w: (Sum[b, c, h, w] * InvArea).astype(odtype), name="pool_avg" ) return Avg
[ 1654, 6768, 5234 ]
def METHOD_NAME(filename): # Read file. with open (filename, 'rb') as f: lines = f.readlines() fcontents = ''.join(lines) # Build line break index. line_starts = [0] for line in lines: line_starts.append(line_starts[-1] + len(line)) #print line_starts # Search for all comments. pattern = re.compile( r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE) for match in re.finditer(pattern, fcontents): line_start = bisect.bisect(line_starts, match.start(0)) line_end = bisect.bisect(line_starts, match.end(0) - 1) column_start = match.start(0) - line_starts[line_start - 1] column_end = match.end(0) - line_starts[line_end - 1] yield (SourceLocation(filename, line_start, column_start + 1, match.start(0)), SourceLocation(filename, line_end, column_end + 1, match.end(0)), match.group(0))
[ 702, 3528 ]
def METHOD_NAME(plugin_name: str, local_plugin_dir: str): module_name = plugin_name.replace("-", "_") module = _load_compat_x_prefix(plugin_name, module_name, local_plugin_dir) if module is None: sys.path = [local_plugin_dir] + sys.path logger.debug( f"Loading plugin module {module_name!r} with sys.path {sys.path!r}" ) try: module = importlib.import_module(module_name) finally: sys.path.pop(0) return module
[ 557, 125 ]
def METHOD_NAME(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> None: usage = self._check_usage(usage) # remove from engine super(MetricsLambda, self).METHOD_NAME(engine, usage) self.engine = None
[ 646 ]
def METHOD_NAME(self, execute_task): task = execute_task('test_multi_torrent_is_root_dir') assert ( len(task.accepted) == 1 ), 'Should have accepted multi_file_with_diff because its size is within threshold' assert task.accepted[0]['path'] == 'torrent_match_test_dir'
[ 9, 457, 3564, 137, 1563, 1190 ]
def METHOD_NAME( product, attribute_value_generator ): """Ensure multiple values in proper order are assigned.""" old_assignment = product.attributes.first() assert old_assignment is not None, "The product doesn't have attribute-values" assert old_assignment.values.count() == 1 attribute = old_assignment.attribute attribute_value_generator( attribute=attribute, slug="attr-value2", ) values = attribute.values.all() # Assign new values new_assignment = associate_attribute_values_to_instance( product, attribute, values[1], values[0] ) # Ensure the new assignment was created and ordered correctly assert new_assignment.pk == old_assignment.pk assert new_assignment.values.count() == 2 assert list( new_assignment.productvalueassignment.values_list("value_id", "sort_order") ) == [(values[1].pk, 0), (values[0].pk, 1)]
[ 9, 6888, 309, 24, 1188, 89, 107 ]
def METHOD_NAME(cls, database_id: int) -> dict[str, Any]: database: Any = cls.find_by_id(database_id) datasets = database.tables dataset_ids = [dataset.id for dataset in datasets] charts = ( db.session.query(Slice) .filter( Slice.datasource_id.in_(dataset_ids), Slice.datasource_type == DatasourceType.TABLE, ) .all() ) chart_ids = [chart.id for chart in charts] dashboards = ( ( db.session.query(Dashboard) .join(Dashboard.slices) .filter(Slice.id.in_(chart_ids)) ) .distinct() .all() ) sqllab_tab_states = ( db.session.query(TabState).filter(TabState.database_id == database_id).all() ) return { "charts": charts, "dashboards": dashboards, "sqllab_tab_states": sqllab_tab_states, }
[ 19, 252, 635 ]
def METHOD_NAME(self): self.assertEqual( Article._get_objects_for_user(self.superuser).count(), Article.objects.count() ) self.assertEqual( LongArticle._get_objects_for_user(self.superuser).count(), LongArticle.objects.count() )
[ 9, 19, 279, 43, 5733 ]
def METHOD_NAME( mocked_notify, site_settings, customer_user, channel_PLN ): old_email = "[email protected]" notifications.send_user_change_email_notification( old_email, customer_user, get_plugins_manager(), channel_slug=channel_PLN.slug ) expected_payload = { "user": get_default_user_payload(customer_user), "recipient_email": old_email, "channel_slug": channel_PLN.slug, "old_email": old_email, "new_email": customer_user.email, **get_site_context_payload(site_settings.site), } mocked_notify.assert_called_once_with( UserNotifyEvent.ACCOUNT_CHANGE_EMAIL_CONFIRM, payload=expected_payload, channel_slug=channel_PLN.slug, )
[ 9, 353, 487, 1180, 857 ]
def METHOD_NAME(session, generator, eval_names, eval_func, iterations): """ Evaluates the graph's performance by running data through the network and calling an evaluation function to generate the performance metric. :param session: The tensorflow session that contains the graph :param generator: The data generator providing the network with batch data :param eval_names: The names providing the nodes on which the network's performance should be judged :param eval_func: The customized function to evaluate the performance of the network :param iterations: The number of iterations (batches) to run through the network :return: """ # Ensure any uninitialized variables are initialized initialize_uninitialized_vars(session) # Get the first batch and ue it to create the tensor map t_map = _create_map_of_input_tensors(generator, session) eval_outputs = [] for name in eval_names: op = session.graph.get_operation_by_name(name) eval_outputs.append(op.outputs[0]) # Run the graph and verify the data is being updated properly for each iteration avg_metric = 0 log.info("Evaluating graph for %i iterations", iterations) for _, batch in zip(range(iterations), generator): # Setup the feed dictionary feed_dict = {} for name, data in batch.items(): feed_dict[t_map[name]] = data output_data = session.run(eval_outputs, feed_dict=feed_dict) avg_metric += eval_func(list(zip(eval_names, output_data))) log.info("Completed graph evaluation for %i iterations", iterations) return avg_metric / iterations
[ 1195, 303 ]
def METHOD_NAME(self, project): # test command vars_dict = { "test_run_schema": project.test_schema, } run_dbt(["seed", "--vars", yaml.safe_dump(vars_dict)]) results = run_dbt(["run", "--vars", yaml.safe_dump(vars_dict)]) assert len(results) == 4
[ 9, 9999, 450 ]
def METHOD_NAME(*args): return numpy.METHOD_NAME(datetime.datetime(*args))
[ 10332 ]
def METHOD_NAME(n_dim, order): assert n_dim > 0 assert order > 1 x = [[0.0, 1.0]] * (n_dim - 1) x.append(list(np.arange(0, order) / (order - 1.0))) x_data = _quadrature_combine(x) return x_data
[ 129, 4431, 753, 246, 5112 ]
def METHOD_NAME(val: float) -> str: """writes a Nastran formatted 11.4 float""" v2 = '%11.4E' % val if v2 in (' 0.0000E+00', '-0.0000E+00'): v2 = ' 0.0' return v2
[ 77, 1819, -1 ]
def METHOD_NAME(self) -> str: """ The provider-assigned unique ID for this managed resource. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self): """ Verifies that going to the courseware with a required, but non-existing survey, does not redirect """ resp = self.client.get( reverse( 'course_survey', kwargs={'course_id': str(self.course_without_survey.id)} ) ) self.assertRedirects( resp, course_home_url(self.course_without_survey.id), fetch_redirect_response=False, )
[ 9, 7528, 1281, 41, 654, 1122, 1281 ]
def METHOD_NAME(prefix): return prefix[:8] == olefile.MAGIC
[ 1437 ]
def METHOD_NAME(self, num_elements, tag1="record_latency", tag2="record_latency_2"):
[ 56, 126, 107, 114 ]
def METHOD_NAME(self, tmpdir): with setuptools.sandbox.DirectorySandbox(str(tmpdir)): self._file_writer(os.devnull)
[ 9, -1 ]