text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(*args, **kwargs): return True
[ 1051, 250 ]
def METHOD_NAME(self, artist): raise NotImplementedError
[ 19, 51 ]
def METHOD_NAME(exception):
    """Filter allowing retries on file I/O errors and service error."""
    return isinstance(exception, IOError) or \
        (HttpError is not None and isinstance(exception, HttpError))
[ 2052, 69, 249, 168, 61, 163, 168 ]
def METHOD_NAME(
    func: PrimFunc,
    features: Dict[str, np.ndarray],
    target: Target,
    dev: Device,
    remote: Optional[RPCSession],
) -> Tuple[float, float, str]:
    """
    Estimate the maximum number of FLOP/s this target/device combo is capable
    of reaching by running a test program. This is a generic function that
    should be overridden for each target.

    Parameters
    ----------
    func : PrimFunc
        Function to estimate peak flops for. Used to check if a specific kind
        of intrinsic or dtype could be used with this function.
    features : Dict[str, np.ndarray]
        Features extracted from `func`. Used to check if a specific kind of
        intrinsic or dtype could be used with this function.
    target : Target
        Target to run on. This should be as specific to the actual hardware as
        possible to make sure that LLVM generates the best vector code.
    dev : Device
        Device to run on.
    remote : Optional[RPCSession]
        Remote session used to upload artifacts for runtime evaluation. Must be
        the same session used to create `dev`.

    Returns
    -------
    flops : float
        Estimated number of flops used by `func`.
    peak_flops : float
        Approximate sustained FLOP/s of this target/device combo assuming
        vectorized FMA instructions. Each FMA operation counts as two FLOPs.
    name : str
        Dtype/intrinsic used by `func` to achieve peak flops.
    """
    raise NotImplementedError()
[ 918, 4626, 230 ]
def METHOD_NAME(self, x): self.__buf.write(struct.pack('>L', x))
[ 1699, 11068 ]
def METHOD_NAME(text):
    """Convert a string containing an SI value to an integer or return an
    integer if that is what was passed in."""
    if isinstance(text, int):
        return text
    if not isinstance(text, str):
        raise ValueError("Source value must be string or integer")
    matches = si_regex.search(text.lower(), 0)
    if matches is None:
        raise ValueError("Invalid SI value '" + text + "'")
    number = int(matches.group(1)) if matches.group(2) is None \
        else float(matches.group(1))
    unit = matches.group(3)
    multiplier = 1 if unit is None else si_multipliers.get(unit.lower(), '')
    return number * multiplier
[ 4844, 947, 106 ]
def METHOD_NAME():
    org1 = Organization.objects.create(name='org1')
    org2 = Organization.objects.create(name='org2')
    prj1 = Project.objects.create(name='prj1')
    prj2 = Project.objects.create(name='prj2')

    assert org1.admin_role.is_ancestor_of(prj1.admin_role) is False
    assert org1.admin_role.is_ancestor_of(prj2.admin_role) is False
    assert org2.admin_role.is_ancestor_of(prj1.admin_role) is False
    assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False

    prj1.organization = org1
    prj1.save()
    assert org1.admin_role.is_ancestor_of(prj1.admin_role)
    assert org1.admin_role.is_ancestor_of(prj2.admin_role) is False
    assert org2.admin_role.is_ancestor_of(prj1.admin_role) is False
    assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False

    prj2.organization = org1
    prj2.save()
    assert org1.admin_role.is_ancestor_of(prj1.admin_role)
    assert org1.admin_role.is_ancestor_of(prj2.admin_role)
    assert org2.admin_role.is_ancestor_of(prj1.admin_role) is False
    assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False

    prj1.organization = org2
    prj1.save()
    assert org1.admin_role.is_ancestor_of(prj1.admin_role) is False
    assert org1.admin_role.is_ancestor_of(prj2.admin_role)
    assert org2.admin_role.is_ancestor_of(prj1.admin_role)
    assert org2.admin_role.is_ancestor_of(prj2.admin_role) is False

    prj2.organization = org2
    prj2.save()
    assert org1.admin_role.is_ancestor_of(prj1.admin_role) is False
    assert org1.admin_role.is_ancestor_of(prj2.admin_role) is False
    assert org2.admin_role.is_ancestor_of(prj1.admin_role)
    assert org2.admin_role.is_ancestor_of(prj2.admin_role)
[ 9, 803, 6003 ]
def METHOD_NAME(self):
    batch = self.state.batch
    self.assertIsNotNone(batch)
    self.state.write_batch(batch)
    self.assertEqual(self.state.total_coin_supply, 0)
[ 9, 756, 551, 3168 ]
def METHOD_NAME() -> Dict[str, int]:
    from .constants import is_macos
    from .fast_data_types import (
        GLFW_MOD_ALT, GLFW_MOD_CAPS_LOCK, GLFW_MOD_CONTROL, GLFW_MOD_HYPER,
        GLFW_MOD_META, GLFW_MOD_NUM_LOCK, GLFW_MOD_SHIFT, GLFW_MOD_SUPER,
    )
    return {
        'ctrl': GLFW_MOD_CONTROL,
        'shift': GLFW_MOD_SHIFT,
        ('opt' if is_macos else 'alt'): GLFW_MOD_ALT,
        ('cmd' if is_macos else 'super'): GLFW_MOD_SUPER,
        'hyper': GLFW_MOD_HYPER,
        'meta': GLFW_MOD_META,
        'caps_lock': GLFW_MOD_CAPS_LOCK,
        'num_lock': GLFW_MOD_NUM_LOCK,
    }
[ -1 ]
def METHOD_NAME(self):
    """Function decorated with `deprecate` should raise a warning."""
    @deprecate
    def foo(a):
        return 2*a

    @deprecate("use baz instead", version="0.2.0")
    def bar(a):
        return 4*a

    with warnings.catch_warnings(record=True) as w:
        self.assertEqual(foo(1), 2,
                         "Decorated function does not return original "
                         "return value")
        self.assertTrue(len(w) > 0, "No warning raised!")
        self.assertEqual(w[0].category, DeprecationWarning,
                         "Raised warning is not a DeprecationWarning")

    with warnings.catch_warnings(record=True) as w:
        self.assertEqual(bar(1), 4,
                         "Decorated function does not return original "
                         "return value")
        expected_message = "use baz instead. It is not guaranteed to be in " \
                           "service in vers. 0.2.0"
        self.assertTrue(
            w[0].message.args[0].endswith(expected_message),
            "Warning message does not reflect decorator arguments.")

    @deprecate_soon
    def baz(a):
        return 3*a

    with warnings.catch_warnings(record=True) as w:
        self.assertEqual(baz(1), 3,
                         "Decorated function does not return original "
                         "return value")
        self.assertEqual(w[0].category, PendingDeprecationWarning,
                         "Raised warning is not a PendingDeprecationWarning")
[ 9, 5709 ]
def METHOD_NAME(_context, from_manager): return from_manager
[ 362, 280, 362, 722 ]
def METHOD_NAME(defer, worker):
    from .param import Param

    defer("sum_task", a=5, b=7)
    defer("sum_task_param", p1=Param(3), p2=Param(4))
    defer("increment_task", a=3)
    stdout, stderr = worker()
    print(stdout, stderr)
    assert stdout.splitlines() == ["Launching a worker on all queues", "12", "7", "4"]
    assert stderr.startswith("DEBUG:procrastinate.")

    defer("product_task", a=5, b=4)
    stdout, stderr = worker("default")
    print(stdout, stderr)
    assert "20" not in stdout

    stdout, stderr = worker("product_queue")
    print(stdout, stderr)
    assert stdout.splitlines() == ["Launching a worker on product_queue", "20"]

    defer("two_fails")
    stdout, stderr = worker()
    print(stdout, stderr)
    assert "Print something to stdout" in stdout
    assert stderr.count("Exception: This should fail") == 2

    defer("multiple_exception_failures")
    stdout, stderr = worker()
    print(stdout, stderr)
    assert (
        stdout
        == """Launching a worker on all queues
[ 9, 4203 ]
def METHOD_NAME(computation: BaseComputation, gas_per_byte: int) -> None:
    """
    Exponentiation
    """
    base, exponent = computation.stack_pop_ints(2)

    bit_size = exponent.bit_length()
    byte_size = ceil8(bit_size) // 8

    if exponent == 0:
        result = 1
    elif base == 0:
        result = 0
    else:
        result = pow(base, exponent, constants.UINT_256_CEILING)

    computation.consume_gas(
        gas_per_byte * byte_size,
        reason="EXP: exponent bytes",
    )

    computation.stack_push_int(result)
[ 2962 ]
def METHOD_NAME(self) -> Graph:
    assert self._graph is not None, "Please trace the graph by calling forward function of current tracer."
    return self._graph
[ 303 ]
def METHOD_NAME(rhr: models.RentalHistoryRequest) -> str:
    rh_link = slack.hyperlink(
        text="rent history",
        href=absolute_reverse("admin:rh_rentalhistoryrequest_change", args=[rhr.pk]),
    )
    if rhr.user:
        user_text = slack.hyperlink(text=rhr.user.best_first_name, href=rhr.user.admin_url)
    else:
        user_text = slack.escape(rhr.first_name)
    return f"{user_text} has requested {rh_link}!"
[ 19, 729, 959, 526 ]
def METHOD_NAME(self) -> str:
    """
    Specifies the name of the resource.
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self, phrase): return phrase is None or not invalid_phrase_re.search(phrase)
[ 1205, 4576 ]
def METHOD_NAME(self):
    self.args.label_smoothing = 0.1
    crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
    loss, _, logging_output = crit(self.model, self.sample, reduce=True)
    unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
    self.assertAlmostEqual(loss, unreduced_loss.sum())
[ 9, 6603 ]
async def METHOD_NAME(coresys, sys_machine, sys_supervisor):
    """Test arch for tinker."""
    sys_machine.return_value = "tinker"
    sys_supervisor.arch = "armv7"
    await coresys.arch.load()

    assert coresys.arch.default == "armv7"
    assert coresys.arch.supported == ["armv7", "armhf"]
[ 9, -1, 2837 ]
def METHOD_NAME(self):
    cli = Cli()
    assert cli.get_servicename() == 'system'
[ 9, 19, 11671, 112 ]
def METHOD_NAME(self):
    self.fifo.write(b"ABCD")
    self.fifo.write(b"EF")
    self.assertEqual(self.fifo.read(1), b"A")
    self.assertEqual(self.fifo.read(1), b"B")
    self.assertEqual(self.fifo.read(2), b"CD")
    self.assertEqual(self.fifo.read(), b"EF")
[ 9, 619 ]
def METHOD_NAME(self, query: str, documents: List[Document], top_k: Optional[int] = None): pass
[ 2103 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_by_sku_request(
            sku=sku,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list_by_sku.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(self):
    self.assertEqual(moose.element(self.mcell).className, 'Neuron')
    self.assertEqual(moose.element(self.mcell).name, self.ncell.name)
[ 9, 129, 118, 8298 ]
def METHOD_NAME():
    """
    get a list of graphids that have html export templates available.
    """
    valid_graphs = []
    existing_graphids = GraphModel.objects.filter(isresource=True).values_list("graphid", flat=True)
    for existing_graphid in existing_graphids:
        if HtmlWriter.has_html_template(existing_graphid):
            valid_graphs.append(str(existing_graphid))
    return valid_graphs
[ 19, -1, 41, 294, 671 ]
def METHOD_NAME(epoch):
    """ eval model on each epoch in testset """
    global best_acc
    global trainloader
    global testloader
    global net
    global criterion
    global optimizer

    logger.debug("Eval on epoch: %d", epoch)

    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            acc = 100.0 * correct / total

            logger.debug(
                "Loss: %.3f | Acc: %.3f%% (%d/%d)",
                test_loss / (batch_idx + 1),
                100.0 * correct / total,
                correct,
                total,
            )

    acc = 100.0 * correct / total
    if acc > best_acc:
        best_acc = acc
    return acc, best_acc
[ 9 ]
def METHOD_NAME(cfg, outs):
    """Schedule for binary_dense.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of bitserial dense operator,
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for bitserial_dense.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])

    def _schedule(cfg, s, data_vec, weight_vec, output, unipolar):
        z, k, _, y, x = s[weight_vec].op.axis
        s[weight_vec].parallel(z)
        s[weight_vec].vectorize(x)

        x, y = s[output].op.axis
        wb, db, k = s[output].op.reduce_axis
        _, DB, _ = get_const_tuple(data_vec.shape)
        _, _, WB, _, _ = get_const_tuple(weight_vec.shape)

        yo, yi = cfg["tile_y"].apply(s, output, y)
        xo, xi = cfg["tile_x"].apply(s, output, x)
        ko, ki = cfg["tile_k"].apply(s, output, k)

        cfg["reorder_0"].apply(s, output, [yo, xo, ko, xi, wb, db, yi, ki])

        fused = s[output].fuse(xo, yo)
        s[output].parallel(fused)

        nfactor = cfg["tile_y"].size[-1]
        kfactor = cfg["tile_k"].size[-1]
        if nfactor % 8 == 0:
            pc = _intrin_popcount(nfactor, kfactor, WB, DB, unipolar)
            s[output].tensorize(wb, pc)

        return s

    def traverse(op):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.ComputeOp):
                    traverse(tensor.op)
        elif op.tag == "bitserial_dense" or "bitserial_dense_unipolar":
            output = op.output(0)
            weight_vec = op.input_tensors[0]

            data_vec = op.input_tensors[1]
            data = data_vec.op.input_tensors[0]
            if "QuantizeInput" in data.op.name:
                data = data.op.input_tensors[0]
            unipolar = output.op.tag == "bitserial_dense_unipolar"
            _schedule(cfg, s, data_vec, weight_vec, output, unipolar)
        else:
            raise RuntimeError(f"Unsupported operator: {op.tag}")

    traverse(outs[0].op)
    return s
[ 507, 6464, 3829 ]
def METHOD_NAME(
    change_type_processors: list[ChangeTypeProcessor],
    bundle_changes: list[BundleFileChange],
    approver_resolver: ApproverResolver,
) -> list[tuple[BundleFileChange, ChangeTypeContext]]:
    change_type_contexts: list[tuple[BundleFileChange, ChangeTypeContext]] = []
    processors_with_implicit_ownership = [
        ctp for ctp in change_type_processors if ctp.implicit_ownership
    ]
    for ctp in processors_with_implicit_ownership:
        for bc in bundle_changes:
            for ownership in ctp.find_context_file_refs(
                change=FileChange(
                    file_ref=bc.fileref,
                    old=bc.old,
                    new=bc.new,
                ),
                expansion_trail=set(),
            ):
                for io in ctp.implicit_ownership:
                    if isinstance(io, ChangeTypeImplicitOwnershipJsonPathProviderV1):
                        if ownership.context_file_ref != bc.fileref:
                            logging.warning(
                                f"{io.provider} provider based implicit ownership is not supported for ownership context files that are not the changed file."
                            )
                            continue
                        implicit_owner_refs = (
                            find_approvers_with_implicit_ownership_jsonpath_selector(
                                bc=bc,
                                implicit_ownership=io,
                            )
                        )
                    else:
                        raise NotImplementedError(
                            f"unsupported implicit ownership provider: {io}"
                        )
                    implicit_approvers = list(
                        filter(
                            None,
                            [
                                approver_resolver.lookup_approver_by_path(owner_path)
                                for owner_path in implicit_owner_refs
                            ],
                        )
                    )
                    if implicit_approvers:
                        change_type_contexts.append(
                            (
                                bc,
                                ChangeTypeContext(
                                    change_type_processor=ctp,
                                    context=f"implicit ownership - (via {ownership.change_type.name})",
                                    origin=ownership.change_type.name,
                                    approvers=implicit_approvers,
                                    context_file=ownership.context_file_ref,
                                ),
                            )
                        )
    return change_type_contexts
[ 194, 44, 6295, 43, 2273, 3982 ]
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.ResourceSku"]:
    """Lists all of the available skus of the Microsoft.AppPlatform provider.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ResourceSku or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_09_01_preview.models.ResourceSku]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2022-09-01-preview"] = kwargs.pop(
        "api_version", _params.pop("api-version", "2022-09-01-preview")
    )
    cls: ClsType[_models.ResourceSkuCollection] = kwargs.pop("cls", None)

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.METHOD_NAME.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        deserialized = self._deserialize("ResourceSkuCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)  # type: ignore
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)
[ 245 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_header_param(
            "Content-Type", "application/json",
        ),
    }
    return parameters
[ 572, 386 ]
def METHOD_NAME(self, rollback_registry):
    registry = rollback_registry
    query = registry.System.Cache.query("id", "method").limit(2)
    caches = query.all()

    def to_dict(cache):
        return {
            "id": cache.id,
            "method": cache.method,
        }

    dictall = query.dictall()
    for i in range(2):
        assert to_dict(caches[i]) in dictall
[ 9, -1, 69, 3368, 105 ]
def METHOD_NAME(self):
    expected_query = torch.tensor(
        [[[[-1.0235, 0.0409], [0.4008, 1.3077], [0.5396, 2.0698]]]]
    )
    expected_key = torch.tensor(
        [[[[0.5053, -0.4965], [-0.3730, -0.9473], [-0.7019, -0.1935]]]]
    )
    expected_val = torch.tensor(
        [[[[-0.9940, 0.5403], [0.5924, -0.7619], [0.7504, -1.0892]]]]
    )
    sample_t = self.sample.transpose(0, 1)
    query, key, val = self.MHA.forward_qkv(sample_t, sample_t, sample_t)
    self.assertTrue(
        np.allclose(
            expected_query.cpu().detach().numpy(),
            query.cpu().detach().numpy(),
            atol=1e-4,
        )
    )
    self.assertTrue(
        np.allclose(
            expected_key.cpu().detach().numpy(),
            key.cpu().detach().numpy(),
            atol=1e-4,
        )
    )
    self.assertTrue(
        np.allclose(
            expected_val.cpu().detach().numpy(),
            val.cpu().detach().numpy(),
            atol=1e-4,
        )
    )
[ 9, 76, 7838 ]
def METHOD_NAME(
    self,
    request,  # type: HttpRequest
    **kwargs: Any
) -> HttpResponse:
    """Runs the network request through the client's chained policies.

    >>> from azure.core.rest import HttpRequest
    >>> request = HttpRequest("GET", "https://www.example.org/")
    <HttpRequest [GET], url: 'https://www.example.org/'>
    >>> response = client._send_request(request)
    <HttpResponse: 200 OK>

    For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

    :param request: The network request you want to make. Required.
    :type request: ~azure.core.rest.HttpRequest
    :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
    :return: The response of your network call. Does not do error handling on your response.
    :rtype: ~azure.core.rest.HttpResponse
    """

    request_copy = deepcopy(request)
    request_copy.url = self._client.format_url(request_copy.url)
    return self._client.send_request(request_copy, **kwargs)
[ 353, 377 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_header_param(
            "Accept", "application/json",
        ),
    }
    return parameters
[ 572, 386 ]
def METHOD_NAME(): assert uniprot_client.is_human('P00533')
[ 9, 137, 2047 ]
def METHOD_NAME(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())
[ 24, 3 ]
def METHOD_NAME(self, url: str, download_path: Path, role: str | None = None) -> Path:
    """Download a given URL, caching it. If it has already been downloaded,
    return the value that has been cached.

    This is a utility method used to obtain assets used by the installation
    process. The cached filename will be the filename portion of the URL,
    appended to the download path.

    :param url: The URL to download
    :param download_path: The path to the download cache folder. This path
        will be created if it doesn't exist.
    :param role: A string describing the role played by the file being
        downloaded; used to construct log and error messages. Should be able
        to fit into the sentence "Error downloading {role}".
    :returns: The filename of the downloaded (or cached) file.
    """
    download_path.mkdir(parents=True, exist_ok=True)
    filename: Path = None
    try:
        response = self.tools.requests.get(url, stream=True)
        if response.status_code == 404:
            raise MissingNetworkResourceError(url=url)
        elif response.status_code != 200:
            raise BadNetworkResourceError(url=url, status_code=response.status_code)

        # The initial URL might (read: will) go through URL redirects, so
        # we need the *final* response. We look at either the `Content-Disposition`
        # header, or the final URL, to extract the cache filename.
        cache_full_name = urlparse(response.url).path
        header_value = response.headers.get("Content-Disposition")
        if header_value:
            # Neither requests nor httplib provides a way to parse RFC6266 headers.
            # The cgi module *did* have a way to parse these headers, but
            # it was deprecated as part of PEP594. PEP594 recommends
            # using the email.message module to parse these headers as they
            # are near identical format.
            # See also:
            # * https://tools.ietf.org/html/rfc6266
            # * https://peps.python.org/pep-0594/#cgi
            msg = Message()
            msg["Content-Disposition"] = header_value
            filename = msg.get_filename()
            if filename:
                cache_full_name = filename
        cache_name = cache_full_name.split("/")[-1]
        filename = download_path / cache_name

        if filename.exists():
            self.tools.logger.info(f"{cache_name} already downloaded")
        else:
            self.tools.logger.info(f"Downloading {cache_name}...")
            self._fetch_and_write_content(response, filename)
    except requests_exceptions.ConnectionError as e:
        if role:
            description = role
        else:
            description = filename.name if filename else url
        raise NetworkFailure(f"download {description}") from e

    return filename
[ 171 ]
def METHOD_NAME():
    """
    Test inequality constraints applied to input variables. The sum of the
    input variables must be less than or equal to -5.0, and the second output
    variable must have a value of at least 70.0
    """
    network = load_network()

    # Input inequality constraint
    inputVars = network.inputVars[0][0]
    weights = np.ones(inputVars.shape)/inputVars.size
    averageInputValue = -5.0
    network.addInequality(inputVars, weights, averageInputValue)

    # Add lower bound on second output variable
    outputVars = network.outputVars[0].flatten()
    outputVar = outputVars[1]
    minOutputValue = 70.0
    network.setLowerBound(outputVar, minOutputValue)

    # Call to Marabou solver
    exitCode, vals, _ = network.solve(options = OPT, verbose = False)
    assert np.dot([vals[inVar] for inVar in inputVars], weights) <= averageInputValue
    assert vals[outputVar] >= minOutputValue
[ 9, 6073, 362 ]
def METHOD_NAME(err: BeancountError) -> SerialisedError:
    """Get a serialisable error from a Beancount error."""
    source = copy(err.source)
    if source is not None:
        source.pop("__tolerances__", None)
    return SerialisedError(err.__class__.__name__, source, err.message)
[ 280, -1, 168 ]
def METHOD_NAME(self):
    pyfunc = unpack_arbitrary
    with self.assertRaises(errors.TypingError) as raises:
        compile_isolated(pyfunc, (types.int32,), flags=no_pyobj_flags)
    self.assertIn("failed to unpack int32", str(raises.exception))
[ 9, 532, 789 ]
def METHOD_NAME(self, session_respond_500, app_spec, deployment):
    conf = Configuration(["--extension-hook-url", URL_PARAM])
    extension_hook_caller = ExtensionHookCaller(conf, session_respond_500)
    obj = copy.deepcopy(deployment)
    with pytest.raises(HTTPError):
        extension_hook_caller.apply(obj, app_spec)
[ 9, 241, 442, 1646, 4616, 8140 ]
def METHOD_NAME():
    return {
        "home": {
            "passwd": "password",
            "sudo": "password",
            "host": "12.34.56.78",
            "port": 23,
            "user": "gtmanfred",
            "minion_opts": {"http_port": 80},
        },
        "salt.gtmanfred.com": {
            "passwd": "password",
            "sudo": "password",
            "host": "127.0.0.1",
            "port": 22,
            "user": "gtmanfred",
            "minion_opts": {"http_port": 80},
        },
    }
[ 391, 672, 465, 1413 ]
def METHOD_NAME(self, name: str | None = None, **subsegment_kwargs) -> SubsegmentContextManager: ...
[ 623, 13090 ]
def METHOD_NAME(input):
    if java:
        beginMsg = re.compile("^<REQUEST domainType=\"MARKET_PRICE\" .*")
        endMsg = re.compile("^</REQUEST>$")
        updateMsg = re.compile("^<UPDATE domainType=\"MARKET_PRICE\" .*")
    else:
        beginMsg = re.compile("^<requestMsg domainType=\"RSSL_DMT_MARKET_PRICE\" .*")
        endMsg = re.compile("^</requestMsg>$")
        updateMsg = re.compile("^<updateMsg domainType=\"RSSL_DMT_MARKET_PRICE\" .*")
    eventMsg = re.compile("^event: .*")

    streamPos = input.tell()
    line = input.readline()
    while (line):
        if updateMsg.match(line):
            return None
        if eventMsg.match(line):
            input.seek(streamPos)
            return None
        if beginMsg.match(line):
            msg = line
            line = input.readline()
            while(line):
                msg += line
                if endMsg.match(line):
                    return msg
                line = input.readline()
        streamPos = input.tell()
        line = input.readline()
    return None
[ 416, 243, 377 ]
def METHOD_NAME(self): self.arm_mock_dashboard_server('right')
[ 9, 3208, 248, 3029, 163, 2786 ]
def METHOD_NAME(self):
    r"""
    The hook lengths of the elements of the d-complete poset.

    See [KY2019]_ for the definition of hook lengths for d-complete posets.

    TESTS::

        sage: from sage.combinat.posets.d_complete import DCompletePoset
        sage: P = DCompletePoset(DiGraph({0: [1, 2], 1: [3], 2: [3], 3: []}))
        sage: P._hooks
        {0: 1, 1: 2, 2: 2, 3: 3}
        sage: from sage.combinat.posets.poset_examples import Posets
        sage: P = DCompletePoset(Posets.YoungDiagramPoset(Partition([3,2,1]))._hasse_diagram.reverse())  # optional - sage.combinat
        sage: P._hooks                                                                                   # optional - sage.combinat
        {0: 5, 1: 3, 2: 1, 3: 3, 4: 1, 5: 1}
    """
    hooks = {}

    min_diamond = {}  # Maps max of double-tailed diamond to min of double-tailed diamond
    max_diamond = {}  # Maps min of double-tailed diamond to max of double-tailed diamond

    H = self._hasse_diagram

    diamonds, _ = H.diamonds()  # Tuples of four elements that are diamonds

    diamond_index = {}  # Map max elmt of double tailed diamond to index of diamond

    # Find all the double-tailed diamonds and map the mins and maxes
    for index, d in enumerate(diamonds):
        min_diamond[d[3]] = d[0]
        max_diamond[d[0]] = d[3]
        diamond_index[d[3]] = index

        min_elmt = d[0]
        max_elmt = d[3]

        while True:
            potential_min = H.neighbors_in(min_elmt)
            potential_max = H.neighbors_out(max_elmt)

            # Check if any of these make a longer double tailed diamond
            found_diamond = False
            for (mn, mx) in [(i, j) for i in potential_min for j in potential_max]:
                if len(H.neighbors_in(mx)) != 1:
                    continue
                if len(H.all_paths(mn, mx)) == 2:
                    # Success
                    min_elmt = mn
                    max_elmt = mx
                    min_diamond[mx] = mn
                    max_diamond[mn] = mx
                    diamond_index[mx] = index
                    found_diamond = True
                    break
            if not found_diamond:
                break

    # Compute the hooks
    queue = deque(H.sources())
    enqueued = set()
    while queue:
        elmt = queue.popleft()
        if elmt not in diamond_index:
            hooks[elmt] = H.order_ideal_cardinality([elmt])
        else:
            diamond = diamonds[diamond_index[elmt]]
            side1 = diamond[1]
            side2 = diamond[2]
            hooks[elmt] = hooks[side1] + hooks[side2] - hooks[min_diamond[elmt]]
        enqueued.add(elmt)

        for c in H.neighbors_out(elmt):
            if c not in enqueued:
                queue.append(c)
                enqueued.add(c)

    poset_hooks = {self._vertex_to_element(key): ZZ(value) for (key, value) in hooks.items()}
    return poset_hooks
[ 4602 ]
async def METHOD_NAME(request: web.Request): return [{"x": 3, "y": "3"}] * 3
[ 19, 245 ]
def METHOD_NAME():
    c = rinterface.globalenv.find('c')
    d = dict([(u'哈哈', 1)])
    res = c(**d)
    assert u'哈哈' == res.do_slot('names')[0]
[ 9, 3690, 1545, 156 ]
def METHOD_NAME():
    vars_error = 'VERIFICATION FAILED: Vars field not found'
    fake_playbook = [{'name': "test playbook"}]

    with raises(PlaybookVerificationError) as error:
        verify(fake_playbook, skipVerify=False)
    assert vars_error in str(error.value)
[ 9, 1659, 130, 622, 168 ]
def METHOD_NAME(url, **kw): return request("PATCH", url, **kw)
[ 1575 ]
def METHOD_NAME(self) -> Response:
    """
    Get a list with all of the tables in MySQL
    """
    query = f"""
        SELECT table_schema, table_name, table_type
        FROM {self.database}.INFORMATION_SCHEMA.TABLES
        WHERE TABLE_TYPE in ('BASE TABLE', 'VIEW');
    """
    result = self.native_query(query)
    return result
[ 19, 2253 ]
def METHOD_NAME(
    self,
    account_id: str,
    resource_ids: Optional[List[str]],
    resource_name: Optional[str],
    limit: int,
    next_token: Optional[str],
    backend_region: Optional[str] = None,
    resource_region: Optional[str] = None,
    aggregator: Any = None,
) -> Tuple[List[Dict[str, Any]], Optional[str]]:
    # For the Account Public Access Block, they are the same for all regions. The resource ID is the AWS account ID
    # There is no resource name -- it should be a blank string "" if provided.

    # The resource name can only ever be None or an empty string:
    if resource_name is not None and resource_name != "":
        return [], None

    pab = None
    regions = [region for region in Session().get_available_regions("config")]

    # If a resource ID was passed in, then filter accordingly:
    if resource_ids:
        for resource_id in resource_ids:
            if account_id == resource_id:
                pab = self.backends[account_id]["global"].public_access_block
                break

    # Otherwise, just grab the one from the backend:
    if not resource_ids:
        pab = self.backends[account_id]["global"].public_access_block

    # If it's not present, then return nothing
    if not pab:
        return [], None

    # Filter on regions (and paginate on them as well):
    if backend_region:
        pab_list = [backend_region]
    elif resource_region:
        # Invalid region?
        if resource_region not in regions:
            return [], None
        pab_list = [resource_region]
    # Aggregated query where no regions were supplied so return them all:
    else:
        pab_list = regions

    # Pagination logic:
    sorted_regions = sorted(pab_list)
    new_token = None

    # Get the start:
    if not next_token:
        start = 0
    else:
        # Tokens for this moto feature is just the region-name:
        # For OTHER non-global resource types, it's the region concatenated with the resource ID.
        if next_token not in sorted_regions:
            raise InvalidNextTokenException()
        start = sorted_regions.index(next_token)

    # Get the list of items to collect:
    pab_list = sorted_regions[start : (start + limit)]

    if len(sorted_regions) > (start + limit):
        new_token = sorted_regions[start + limit]

    return (
        [
            {
                "type": "AWS::S3::AccountPublicAccessBlock",
                "id": account_id,
                "region": region,
            }
            for region in pab_list
        ],
        new_token,
    )
[ 245, 200, 549, 1614 ]
def METHOD_NAME(self): return ["id", "summary", "needinfos", "last_comment"]
[ 1951 ]
def METHOD_NAME(self, items_count, new_identities, sh_db, connector_name):
    identities_count = len(new_identities)
    SortingHat.add_identities(sh_db, new_identities, connector_name)
    return identities_count
[ 557, 2278, 7949 ]
def METHOD_NAME() -> None:
    # make sure the running config is empty
    Config.running_config.data.clear()
    config = Config("dummy", "dummy")
    KubernetesCollectorPlugin.add_config(config)
    config.init_default_config()
    k8s: K8sConfig = config.k8s
    assert k8s.configs == []
    assert k8s.config_files == []
    assert k8s.collect == []
    assert k8s.no_collect == []
    assert k8s.pool_size == num_default_threads()
    assert k8s.fork_process is False
[ 9, 35, 200 ]
def METHOD_NAME(utterances, averaging_mode, weights):
    self_bleu = []

    for i in range(len(utterances)):
        hypo = utterances[i]
        rest = utterances[:i] + utterances[i+1:]

        self_bleu.append(sentence_bleu(rest, hypo, weights,
                                       no_length_penalty=True, averaging_mode=averaging_mode))

    return self_bleu
[ 19, 927, 8612 ]
async def METHOD_NAME(self, credential_definition_id) -> str:
    """Create a credential offer for the given credential definition id.

    Args:
        credential_definition_id: The credential definition to create an offer for

    Returns:
        The created credential offer

    """
[ 129, 2540, 6179 ]
def METHOD_NAME(
    monkeypatch, arch, does_dir_exist, expected_path, expected_result
):
    def _mock_is_dir(path):
        assert path == expected_path
        return does_dir_exist

    monkeypatch.setattr(deb.os.path, "isdir", _mock_is_dir)

    assert deb._is_chroot_available(arch) == expected_result
[ 9, 137, 11707, 1272 ]
def METHOD_NAME():
    assert add(4) == 5
    assert Add(4) == 5
[ 9, 559, 947, 735, 41, 235, 2001 ]
def METHOD_NAME(self, resource, limit): return resource, limit
[ 129 ]
def METHOD_NAME():
    syntax_output = vimsupport.CaptureVimCommand( 'syntax list' )
    return _KeywordsFromSyntaxListOutput( syntax_output )
[ 1207, 2537, 43, 1056, 2376 ]
def METHOD_NAME(
    cls, v: Optional[DatahubClientConfig], values: Dict[str, Any], **kwargs: Any
) -> Optional[DatahubClientConfig]:
    if v is None and "sink" in values and hasattr(values["sink"], "type"):
        sink_type = values["sink"].type
        if sink_type == "datahub-rest":
            sink_config = values["sink"].config
            v = DatahubClientConfig.parse_obj_allow_extras(sink_config)
    return v
[ 6100, 58, 427, 1080, 3183, 1079, 947 ]
def METHOD_NAME(session, module_org):
    """When an existing architecture is deleted, a corresponding audit entry
    appears in the application

    :id: 30f2dc85-f6be-410a-9ed5-b2ea00278f49

    :expectedresults: Audit entry for deleted architecture contains valid data

    :CaseAutomation: Automated

    :CaseLevel: Component

    :CaseImportance: Medium
    """
    architecture = entities.Architecture().create()
    architecture.delete()
    with session:
        values = session.audit.search('type=architecture and action=destroy')
        assert values['action_type'] == 'destroy'
        assert values['resource_type'] == 'ARCHITECTURE'
        assert values['resource_name'] == architecture.name
        assert len(values['action_summary']) == 1
        assert values['action_summary'][0]['column0'] == 'Name'
        assert values['action_summary'][0]['column1'] == architecture.name
[ 9, 2302, 34, 417 ]
def METHOD_NAME():
    cache = LRUCache(3)
    assert not cache
    cache["foo"] = "bar"
    assert cache
[ 9, 174, 596, 863 ]
def METHOD_NAME(self): return self._params["event_col"]
[ 417, 105 ]
def METHOD_NAME(self):
    actual_signature = PaymentMethod.update_signature()

    expected_signature = [
        "billing_address_id",
        "cardholder_name",
        "cvv",
        "device_data",
        "expiration_date",
        "expiration_month",
        "expiration_year",
        "number",
        "payment_method_nonce",
        "token",
        "venmo_sdk_payment_method_code",
        "device_session_id",
        "fraud_merchant_id",
        {
            "options": [
                "make_default",
                "skip_advanced_fraud_checking",
                "us_bank_account_verification_method",
                "venmo_sdk_session",
                "verification_account_type",
                "verification_amount",
                "verification_merchant_account_id",
                "verify_card",
                {
                    "adyen": [
                        "overwrite_brand",
                        "selected_brand"
                    ]
                }
            ]
        },
        {
            "billing_address": Address.update_signature() + [{"options": ["update_existing"]}]
        },
        {
            "three_d_secure_pass_thru": [
                "cavv",
                "ds_transaction_id",
                "eci_flag",
                "three_d_secure_version",
                "xid"
            ]
        },
    ]

    self.assertEqual(expected_signature, actual_signature)
[ 9, 86, 1334 ]
def METHOD_NAME(self):
    self.fh = StringIO()
    self.workbook = Workbook()
    self.workbook._set_filehandle(self.fh)
[ 0, 1 ]
def METHOD_NAME(self, signing_key_path):
    return {
        "api_url": "http://account.example.com/api",
        "redirect_url": "http://account.example.com/redirect",
        "sign_key": signing_key_path,
    }
[ 598, 4559, 200 ]
def METHOD_NAME(self):
    n_gen_texture.n_create_blocks(self.n_data)
    n_nitrishape = self.n_data.roots[0].children[0]
    n_gen_material.n_attach_material_prop(n_nitrishape)  # add nimaterialprop
    n_gen_texture.n_create_store_normal_data(n_nitrishape)  # store normal data as NiBinaryExtraData
    n_gen_texture.n_create_texture_property(n_nitrishape)  # add nitexturingprop
    n_textureprop = n_nitrishape.properties[0]
    n_gen_diffusemap.n_create_diffuse_map(n_textureprop, self.diffuse_texture_path)  # add nitexturesource diffuse
    return self.n_data
[ 293, 129, 365 ]
def METHOD_NAME(self): pass
[ 1798, 157 ]
def METHOD_NAME(self):
    super().METHOD_NAME()
    self.user = UserFactory(password='test')
[ 0, 1 ]
def METHOD_NAME(request):  # ballotItemOptionsRetrieve
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    search_string = request.GET.get('search_string', '')
    state_code = request.GET.get('state_code', '')
    results = ballot_item_options_retrieve_for_api(google_civic_election_id, search_string, state_code)
    response = HttpResponse(json.dumps(results['json_data']), content_type='application/json')
    return response
[ 2155, 1024, 1881, 404, 1179 ]
def METHOD_NAME(data, selection):
    """
    Use either Orange's create_annotated_table (for at most 1 selected class)
    or create_groups_table (for more selected classes)

    :param data: Orange data table
    :param selection: classes for selected indices (0 for unselected)
    :return: Orange data table with an added column
    """
    if len(selection) and np.max(selection) > 1:
        return create_groups_table(data, selection)
    else:
        return create_annotated_table(data, np.flatnonzero(selection))
[ 861, 894, 9109, 410 ]
def METHOD_NAME(target, source, env):
    assert len(source) == 1

    filepath = source[0].srcnode().abspath

    slicc = SLICC(filepath, protocol_base.abspath, verbose=True)
    slicc.process()
    slicc.writeCodeFiles(output_dir.abspath, slicc_includes)
    if env['CONF']['SLICC_HTML']:
        slicc.writeHTMLFiles(html_dir.abspath)
[ -1, 1006 ]
def METHOD_NAME():
    imp = Import(ImportSplit(".foo", "bar", None))
    assert imp.fullname == ".foo.bar"
    assert imp.import_as == "bar"
    assert imp.split == ImportSplit(".foo", "bar", None)
    assert str(imp) == "from .foo import bar"
[ 9, 512, 280, 265, 1170 ]
def METHOD_NAME(self, mock_historical):
    """Test nothing returned for any sensor."""
    mock_historical.return_value = (LocationSeries(), [])
    test_sensors = [SensorConfig("i", "j", "k", 3),
                    SensorConfig("a", "b", "c", 1),
                    SensorConfig("x", "y", "z", 2)]
    test_ground_truth = [LocationSeries("ca", "state")]
    assert historical_sensors(None, None, test_sensors, test_ground_truth) == {}
[ 9, 6841, 6977, 654, 365 ]
def METHOD_NAME(parent, info, ffi, ffic, Event, Param):
    # log.debug("Plugin.ProcessViewerEvent({0}, {1})".format(Event, Param))
    return 0
[ 356, 1574, 417 ]
def METHOD_NAME(lineno, comment):
    """Return the multiline comment at lineno split into a list of
    comment line numbers and the accompanying comment line"""
    return [
        (lineno + index, line)
        for index, line in enumerate(comment.splitlines())
    ]
[ 265, 1591 ]
def METHOD_NAME(code, path_target=None, sdk="macosx", min_os_version=None):
    """Compile metal with CLI tool from env.

    Parameters
    ----------
    code : str
        The metal code.

    path_target : str, optional
        Output file.

    sdk : str, optional
        The target platform SDK.

    Return
    ------
    metallib : bytearray
        The bytearray of the metallib
    """
    temp = utils.tempdir()
    temp_code = temp.relpath("my_lib.metal")
    temp_ir = temp.relpath("my_lib.air")
    temp_target = temp.relpath("my_lib.metallib")

    with open(temp_code, "w") as out_file:
        out_file.write(code)
    file_target = path_target if path_target else temp_target

    # See:
    # - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview  # pylint: disable=line-too-long
    #
    #   xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air
    #   xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib
    min_target = __get_min_os_version_cmd(sdk, min_os_version)
    if sdk == "macosx":
        language_version = "-std=macos-metal2.3"
    elif sdk in ("iphoneos", "iphonesimulator"):
        language_version = "-std=ios-metal2.3"
    else:
        raise RuntimeError(f"Unsupported sdk: {sdk}")
    cmd1 = ["xcrun", "-sdk", sdk, "metal", language_version, min_target, "-O3"]
    cmd1 += ["-c", temp_code, "-o", temp_ir]
    cmd2 = ["xcrun", "-sdk", sdk, "metallib"]
    cmd2 += [temp_ir, "-o", file_target]
    proc = subprocess.Popen(
        " ".join(cmd1) + ";" + " ".join(cmd2),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        sys.stderr.write("Compilation error:\n")
        sys.stderr.write(py_str(out))
        sys.stderr.flush()
        libbin = None
    else:
        libbin = bytearray(open(file_target, "rb").read())
    return libbin
[ 296, 9079 ]
def METHOD_NAME(
    table: IndexerTable,
    seen_ints: Set[int],
    new_last_seen_time: Optional[datetime.datetime] = None,
) -> int:
    if new_last_seen_time is None:
        new_last_seen_time = timezone.now()

    # TODO: filter out ints that we've handled recently in memcache to reduce DB load
    # we may not need a cache, we should see as we dial up the accept rate
    return int(
        table.objects.filter(
            id__in=seen_ints, last_seen__lt=(timezone.now() - timedelta(hours=12))
        ).update(last_seen=new_last_seen_time)
    )
[ 86, 5754, 679, 3959 ]
def METHOD_NAME(cls):
    super().METHOD_NAME()
    cls.press = helpers.create_press()
    cls.journal_one, cls.journal_two = helpers.create_journals()
    install.update_settings(management_command=False)
[ 0, 1, 2 ]
def METHOD_NAME(shape, bits, out_dtype):
    min_val = 0
    max_val = 1 << bits
    return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
[ 567, 833, 2212 ]
def METHOD_NAME(self, session):
    data = self.deserialize_http_content(session)
    self.ctx.set_var(
        "instance",
        data,
        schema_builder=self._build_schema_on_200
    )
[ 69, 1072 ]
def METHOD_NAME(self, msg, vtimeout):
    boto.log.info('Task[%s] - running:%s' % (self.name, self.command))
    log_fp = StringIO()
    process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    nsecs = 5
    current_timeout = vtimeout
    while process.poll() is None:
        boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout))
        if nsecs >= current_timeout:
            current_timeout += vtimeout
            boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout))
            if msg:
                msg.change_visibility(current_timeout)
        time.sleep(5)
        nsecs += 5
    t = process.communicate()
    log_fp.write(t[0])
    log_fp.write(t[1])
    boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue()))
    self.last_executed = self.now
    self.last_status = process.returncode
    self.last_output = log_fp.getvalue()[0:1023]
[ 22 ]
def METHOD_NAME(self, path):
    self._metadata = {}
    with open(path, 'rb') as f:
        doc = etree.parse(f)
        srch = doc.find('/corplist')
        if srch is None:
            raise CorparchError(f'Failed to process {path} - /corplist element not found')
        return self.parse_node(srch, None), self._metadata
[ 214, 399, 151 ]
def METHOD_NAME(m):
    # 'm' is the name of the method to override
    def method(self, already_in_loop = False):
        # WARNING: recursive function, so 'already_in_loop' necessary for
        # us to know if we are recursing (True) or not (False)
        NB_DB_CONN_TRY = 2
        # FIXME: NOT YET IMPLEMENTED, but do not implement this using a
        # simple time.sleep(), it would lock the main loop !!!
        #TIME_BTW_CONN_TRY = 0.1  # in seconds, floats are authorized  # pyflakes.ignore
        try:
            # do a libmysql client call : attempt to perform the old call (_old_<method>)
            old_m = getattr(self, '_old_%s' % m)
            ret = old_m()
            # success, will send the result back
            return ret  # send the result back
        except DBAPIError, e:  # failure, catch libmysql client error
            if already_in_loop:
                # just raise the exception, they will take care of it
                raise e
            # see http://dev.mysql.com/doc/refman/5.1/en/error-messages-client.html
            # we try to handle only situation where a reconnection worth a try
            if e.orig.args[0] == 2013:
                # Lost connection to MySQL server during query error, but we do not raise the exception (will attempt again)
                logging.getLogger().warn("Lost connection to MySQL server during query")
            elif e.orig.args[0] == 2006:
                # MySQL server has gone away, but we do not raise the exception (will attempt again)
                logging.getLogger().warn("MySQL server connection has gone away")
            elif e.orig.args[0] == 2002:
                # Can't contact SQL server, give up
                logging.getLogger().error("MySQL server is unreachable by socket while doing query")
                raise e
            elif e.orig.args[0] == 2003:
                # Can't contact SQL server, give up
                logging.getLogger().error("MySQL server is unreachable by network while doing query")
                raise e
            else:
                # Other SQL error, give-up
                logging.getLogger().error("Unknown MySQL error while doing query")
                raise e
        # handle cases where reco can be attempted again
        # this is where things became tricky:
        # we call ourself (new_m) with already_in_loop = True
        # we also silently drop the potentially raised exception
        for i in range(0, NB_DB_CONN_TRY):
            try:
                new_m = getattr(self, m)
                ret = new_m(True)
                return ret
            except Exception, e:
                pass
        # the loop was unsuccessful, finally raise the original exception
        raise e
    return method
[ 129, 103 ]
def METHOD_NAME(year: int, export_flattened_df_filepath: str = None) -> None:
    """
    Here we clean and format the ERFS-FPR data to make it OpenFisca-like.
    """
    # Step 01: OpenFisca-style formatting
    #
    # - Format the various variables
    # - Merge the individual / household tables
    #
    # Note: this is where the assumptions are made explicit, step 1
    log.info('\n [[[ Year {} - Step 1 / 5 ]]] \n'.format(year))
    preprocessing.build_merged_dataframes(year = year)

    # Step 02: to compute housing allowances, a match against another survey (ENL) is needed
    #
    # openfisca_survey_collection = SurveyCollection(name = 'openfisca')
    # stata_directory = openfisca_survey_collection.config.get('data', 'stata_directory')
    # stata_file = os.path.join(stata_directory, 'log_men_ERFS.dta')
    # imputation_loyer.merge_imputation_loyer(stata_file = stata_file, year = year)
    log.info('\n [[[ Year {} - Step 2 / 5 SKIPPED ]]] \n'.format(year))
    menage.build_variables_menage(year = year)

    # Step 03: start with the individual-level variables
    log.info('\n [[[ Year {} - Step 3 / 5 ]]] \n'.format(year))
    variables_individuelles.build_variables_individuelles(year = year)

    # Step 04: build families and fiscal households from individuals and households
    #
    # - Combine individuals/households so families (social units) can be built
    # - Assumptions are made to build the families
    # - Fiscal households are built from the families
    # - Assumptions are made to build the fiscal households
    log.info('\n [[[ Year {} - Step 4 / 5 ]]] \n'.format(year))
    famille.build_famille(year = year)

    # Awful! Everything used to be injected into a single DataFrame!
    # Really ugly!
    #
    # We now create one dataframe per entity per period.
    # They are stored in an h5 file
    log.info('\n [[[ Year {} - Step 5 / 5 ]]] \n'.format(year))
    final.create_input_data_frame(year = year, export_flattened_df_filepath = export_flattened_df_filepath)
[ 56 ]
def METHOD_NAME(self): return mavutil.mavlink.MAV_MISSION_TYPE_RALLY
[ 10598, 11600, 44 ]
def METHOD_NAME(self):
    """
    The set_language view can be used to change the session language.

    The user is redirected to user redirect if the "next" argument is
    invalid.
    """
    lang_code = self._get_inactive_language_code()
    response = self.client.post(reverse("set_language"), self.set_post_data(lang_code),
                                content_type='application/json')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        response.content.decode("utf-8"),
        translate_url(reverse("base"), lang_code),
    )
    self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)

    next_url = "/not/a/real/url"
    lang_code = None
    response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url),
                                content_type='application/json')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        response.content.decode("utf-8"),
        translate_url(reverse("base"), "en"),
    )
    self.assertFalse(LANGUAGE_SESSION_KEY in self.client.session)
[ 9, 13763, 1051, 243, 532 ]
def METHOD_NAME(self, data):
    m = re.match("(.*)\t(.*)\n", to_unicode(data))
    if m is None:
        raise Exception("did not match")
    status, message = m.groups()
    if status == "ok":
        return message
    else:
        raise CapError(message)
[ 356, 17 ]
def METHOD_NAME(event):
    del event["ingestionTime"]
    event["timestamp"] = to_iso_timestr(to_utc_datetime(event["timestamp"]))
    return LogEvent.from_dict(event)
[ 197, 390, 417 ]
def METHOD_NAME(self, preds, target, ref_metric, fs, extended):
    """Test dtype support of the metric on GPU."""
    self.run_precision_test_gpu(
        preds=preds,
        target=target,
        metric_module=ShortTimeObjectiveIntelligibility,
        metric_functional=partial(short_time_objective_intelligibility, fs=fs, extended=extended),
        metric_args={"fs": fs, "extended": extended},
    )
[ 9, 13095, 627, 1667 ]
def METHOD_NAME(self): return "GET"
[ 103 ]
def METHOD_NAME(edges_kev):
    """Calculate bin centers from bin edges.

    Args:
      edges_kev: an iterable representing bin edge values

    Returns:
      np.array of length (len(edges_kev) - 1), representing bin center
      values with the same units as the input
    """
    edges_kev = np.array(edges_kev)
    centers_kev = (edges_kev[:-1] + edges_kev[1:]) / 2
    return centers_kev
[ 762, 2683, 280, 491 ]
def METHOD_NAME() -> None: do_test('{3}{W/P}{W/P}', ['3', 'W/P', 'W/P'])
[ 9, -1, 13343 ]
async def METHOD_NAME(
    _: _DownloadPythonBuildStandaloneBinaryRequest,
    platform: Platform,
    tar_binary: TarBinary,
    python_bootstrap: PythonBootstrapSubsystem,
    bash: BashBinary,
) -> _PythonBuildStandaloneBinary:
    url, fingerprint, bytelen = python_bootstrap.internal_python_build_standalone_info[
        platform.value
    ]

    filename = url.rsplit("/", 1)[-1]
    python_archive = await Get(
        Digest,
        DownloadFile(
            url,
            FileDigest(
                fingerprint=fingerprint,
                serialized_bytes_length=bytelen,
            ),
        ),
    )

    download_result = await Get(
        ProcessResult,
        Process(
            argv=[tar_binary.path, "-xvf", filename],
            input_digest=python_archive,
            env={"PATH": os.pathsep.join(SEARCH_PATHS)},
            description="Extract Pants' execution Python",
            level=LogLevel.DEBUG,
            output_directories=("python",),
        ),
    )

    installation_root = f"{PythonBuildStandaloneBinary._SYMLINK_DIRNAME}/{download_result.output_digest.fingerprint}"

    # NB: This is similar to what we do for every Python provider. We should refactor these into
    # some shared code to centralize the behavior.
    installation_script = dedent(
        f"""\
        if [ ! -f "{installation_root}/DONE" ]; then
            cp -r python "{installation_root}"
            touch "{installation_root}/DONE"
        fi
        """
    )

    env_vars = await Get(EnvironmentVars, EnvironmentVarsRequest(["PATH"]))

    await Get(
        ProcessResult,
        Process(
            [bash.path, "-c", installation_script],
            level=LogLevel.DEBUG,
            input_digest=download_result.output_digest,
            description="Install Python for Pants usage",
            env={"PATH": env_vars.get("PATH", "")},
            append_only_caches=PythonBuildStandaloneBinary.APPEND_ONLY_CACHES,
            # Don't cache, we want this to always be run so that we can assume for the rest of the
            # session the named_cache destination for this Python is valid, as the Python ecosystem
            # mainly assumes absolute paths for Python interpreters.
            cache_scope=ProcessCacheScope.PER_SESSION,
        ),
    )

    return _PythonBuildStandaloneBinary(f"{installation_root}/bin/python3")
[ 136, 440, 808 ]
def METHOD_NAME(notifications: int, machines: List[str]) -> None:
    _log.debug('updateNotifications(%s,%s)', notifications, machines)
    try:
        if config.app_window:
            aw = config.app_window
            # we fetch notifications if notifications are enabled within the Artisan settings, there are some unqualified notifications, or
            # our machine name is in the list of machines indicating that there is a qualified notification for us
            if aw.notificationsflag and (notifications > 0 or aw.qmc.roastertype_setup in machines):
                # should happen with less delay (0.7s) than the stock.update() (2.5s) triggered controller.connect() to avoid duplicate fetching on startup
                QTimer.singleShot(700, retrieveNotifications)
    except Exception as e:  # pylint: disable=broad-except
        _log.exception(e)
[ 86, 609 ]
def METHOD_NAME(self):
    with open(self.data_json, 'rb') as f:
        dataset_json = json.load(f)
    modalities = dataset_json["modality"]
    modalities = {int(k): modalities[k] for k in modalities.keys()}
    return modalities
[ 19, -1 ]
def METHOD_NAME(distributed_context_multi_node_nccl):
    device = idist.device()
    _test_distrib_integration(device)
    _test_distrib_accumulator_device(device)
[ 9, 4340, 5745, 12511, 1667 ]
def METHOD_NAME():
    filename = "tests/xdsl_opt/empty_program.mlir"
    opt = xDSLOptMain(args=[filename])

    f = StringIO("")
    with redirect_stdout(f):
        opt.run()

    with open(filename) as file:
        expected = file.read()
        assert f.getvalue().strip() == expected.strip()
[ 9, 35, 735 ]