text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME(self, text, classes, **attrs) -> str:
    if classes:
        attrs["class"] = classes
    return self._make_tag("span", attrs, text)
[ 1244 ]
def METHOD_NAME(self):
    raises = self.assertRaises
    db = DB(self._storage)
    conn = db.open()
    root = conn.root()
    root.obj = obj1 = MinPO(1)
    txn = transaction.get()
    txn.note('root -> obj')
    txn.commit()
    root.obj.obj = obj2 = MinPO(2)
    txn = transaction.get()
    txn.note('root -> obj -> obj')
    txn.commit()
    del root.obj
    txn = transaction.get()
    txn.note('root -X->')
    txn.commit()
    # Now copy the transactions to the destination
    self._dst.copyTransactionsFrom(self._storage)
    # Now pack the destination.
    snooze()
    self._dst.pack(time.time(), referencesf)
    # And check to see that the root object exists, but not the other
    # objects.
    data, serial = load_current(self._dst, root._p_oid)
    raises(KeyError, load_current, self._dst, obj1._p_oid)
    raises(KeyError, load_current, self._dst, obj2._p_oid)
[ 9, 1699, 41, 6752, 69, 3836, 1887 ]
def METHOD_NAME(self):
    outlier = np.ones(np.shape(self.x_test_iris[:3])) * (np.max(self.x_test_iris.flatten()) * 10.0)

    # uncertainty should increase as we go deeper into data
    self.assertTrue(
        np.mean(
            self.classifier.predict_uncertainty(outlier)
            > self.classifier.predict_uncertainty(self.x_test_iris[:3])
        )
        == 1.0
    )
[ 9, 2103, 8538 ]
def METHOD_NAME(self, node: workermanage.WorkerController) -> None:
    """Assign a work unit to a node."""
    assert self.workqueue

    assigned_to_node = self.assigned_work.setdefault(node, default=collections.OrderedDict())

    scope, work_unit = None, None

    # check if there are any long-running tests already pending
    long_pending = self._is_long_pending(assigned_to_node)
    if long_pending:
        # try to find a work unit with no long-running test if there is already a long-running
        # test pending
        scope = self._get_short_scope()
        if scope:
            work_unit = self.workqueue.pop(scope)
    else:
        # Try to find a work unit with long-running test if there is no long-running test
        # pending. We want to schedule long-running tests as early as possible
        scope = self._get_long_scope()
        if scope:
            work_unit = self.workqueue.pop(scope)

    # grab the first unit of work if none was grabbed above
    if work_unit is None:
        scope, work_unit = self.workqueue.popitem(last=False)

    # keep track of the assigned work
    assigned_to_node[scope] = work_unit

    # ask the node to execute the workload
    worker_collection = self.registered_collections[node]
    nodeids_indexes = [
        worker_collection.index(nodeid)
        for nodeid, completed in work_unit.items()
        if not completed
    ]
    node.send_runtest_some(nodeids_indexes)
[ 1283, 3160, 805 ]
def METHOD_NAME(line, line_num):
    '''
    parses lines of the form "# <title>", "## <sub title>", etc.
    if line is not a header, a regular string is returned.
    headers must contain exactly one space between the '#' and title,
    and may not contain trailing spaces. tabs are not friends.
    '''
    num_headers = 0
    for c in line:
        if c == '#':
            num_headers += 1
        else:
            break
    if num_headers == 0:
        return 0, line
    line = line[num_headers:]
    if line == "":
        raise MalformedHeaderError(line, line_num)
    if line[0] != " ":
        raise MalformedHeaderError(line, line_num)
    line = line[1:]
    if line.startswith(" ") or line.endswith(" "):
        raise MalformedHeaderError(line, line_num)
    return num_headers, line
[ 214, 534 ]
def METHOD_NAME(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
[ 6582, 386, 447 ]
METHOD_NAME(self):
[ 22 ]
f METHOD_NAME(self):
[ 243 ]
def METHOD_NAME(self, scanner_id: int) -> None:
    '''
    Delete and unlink the scanner.

    Args:
        scanner_id (int): Id of the scanner to delete

    Example:

        >>> nessus.scanners.delete(1)
    '''
    self._delete(f'{scanner_id}')
[ 34 ]
def METHOD_NAME(tmp_path, grid, data):
    filepath = tmp_path / "gridprop.grdecl"
    prop = data.draw(st.sampled_from(grid.get_xyz_corners()))
    prop.to_file(filepath, fformat="grdecl")
    prop_from_file = xtgeo.gridproperty_from_file(
        filepath, name=prop.name, fformat="grdecl", grid=grid
    )
    assert_allclose(prop.get_npvalues1d(), prop_from_file.get_npvalues1d(), atol=1e-3)
[ 9, 4302, 24, 280, 171, 137, 2989 ]
def METHOD_NAME(self): """Test the standard phase estimation circuit.""" with self.subTest("U=S, psi=|1>"): unitary = QuantumCircuit(1) unitary.s(0) eigenstate = QuantumCircuit(1) eigenstate.x(0) # eigenvalue is 1j = exp(2j pi 0.25) thus phi = 0.25 = 0.010 = '010' # using three digits as 3 evaluation qubits are used phase_as_binary = "0100" pec = PhaseEstimation(4, unitary) self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary) with self.subTest("U=SZ, psi=|11>"): unitary = QuantumCircuit(2) unitary.z(0) unitary.s(1) eigenstate = QuantumCircuit(2) eigenstate.x([0, 1]) # eigenvalue is -1j = exp(2j pi 0.75) thus phi = 0.75 = 0.110 = '110' # using three digits as 3 evaluation qubits are used phase_as_binary = "110" pec = PhaseEstimation(3, unitary) self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary) with self.subTest("a 3-q unitary"): # β”Œβ”€β”€β”€β” # q_0: ─ X β”œβ”€β”€β– β”€β”€β”€β”€β– β”€β”€β”€β”€β”€β”€β”€ # β”œβ”€β”€β”€β”€ β”‚ β”‚ # q_1: ─ X β”œβ”€β”€β– β”€β”€β”€β”€β– β”€β”€β”€β”€β”€β”€β”€ # β”œβ”€β”€β”€β”€β”Œβ”€β”€β”€β”β”Œβ”€β”΄β”€β”β”Œβ”€β”€β”€β” # q_2: ─ X β”œβ”€ H β”œβ”€ X β”œβ”€ H β”œ # β””β”€β”€β”€β”˜β””β”€β”€β”€β”˜β””β”€β”€β”€β”˜β””β”€β”€β”€β”˜ unitary = QuantumCircuit(3) unitary.x([0, 1, 2]) unitary.cz(0, 1) unitary.h(2) unitary.ccx(0, 1, 2) unitary.h(2) # β”Œβ”€β”€β”€β” # q_0: ─ H β”œβ”€β”€β– β”€β”€β”€β”€β– β”€β”€ # β””β”€β”€β”€β”˜β”Œβ”€β”΄β”€β” β”‚ # q_1: ────── X β”œβ”€β”€β”Όβ”€β”€ # β””β”€β”€β”€β”˜β”Œβ”€β”΄β”€β” # q_2: ─────────── X β”œ # β””β”€β”€β”€β”˜ eigenstate = QuantumCircuit(3) eigenstate.h(0) eigenstate.cx(0, 1) eigenstate.cx(0, 2) # the unitary acts as identity on the eigenstate, thus the phase is 0 phase_as_binary = "00" pec = PhaseEstimation(2, unitary) self.assertPhaseEstimationIsCorrect(pec, eigenstate, phase_as_binary)
[ 9, 3200, 9846 ]
def METHOD_NAME(self):
    self.custom_field_str = CustomField.objects.create(
        name='test str',
        type=CustomFieldTypes.STRING,
        default_value='xyz'
    )
[ 0, 1 ]
def METHOD_NAME(
    subject_pex,  # type: str
    subcommand,  # type: str
    *args  # type: str
):
    # type: (...) -> Text
    return subprocess.check_output(
        args=[sys.executable, "-m", "pex.tools", subject_pex, subcommand] + list(args)
    ).decode("utf-8")
[ 22, 1056, 12165, 3081 ]
def METHOD_NAME(self, value):
    try:
        return function(self, value)
    except struct.error as e:
        raise ConversionError(e.args[0])
[ 1571 ]
def METHOD_NAME(self):
    # have to finalize 'plat_name' before 'bdist_base'
    if self.plat_name is None:
        if self.skip_build:
            self.plat_name = get_platform()
        else:
            self.plat_name = self.get_finalized_command('build').plat_name

    # 'bdist_base' -- parent of per-built-distribution-format
    # temporary directories (eg. we'll probably have
    # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
    if self.bdist_base is None:
        build_base = self.get_finalized_command('build').build_base
        self.bdist_base = os.path.join(build_base, 'bdist.' + self.plat_name)

    self.ensure_string_list('formats')
    if self.formats is None:
        try:
            self.formats = [self.default_format[os.name]]
        except KeyError:
            raise DistutilsPlatformError(
                "don't know how to create built distributions "
                "on platform %s" % os.name
            )

    if self.dist_dir is None:
        self.dist_dir = "dist"
[ 977, 1881 ]
def METHOD_NAME() -> list[Any]:
    # Create varying inner models with different sizes and fields (not actually realistic)
    models = []
    for i in range(INNER_DATA_MODEL_COUNT):
        fields = {}
        for j in range(i):
            type_ = TYPES[j % len(TYPES)]
            type_default = TYPES_DEFAULTS[type_]
            if j % 4 == 0:
                type_ = List[type_]
                type_default = []
            default = ... if j % 2 == 0 else type_default
            fields[f'f{j}'] = (type_, default)
        models.append(create_model(f'M1{i}', **fields))

    # Crate varying outer models where some fields use the inner models (not really realistic)
    models_with_nested = []
    for i in range(OUTER_DATA_MODEL_COUNT):
        fields = {}
        for j in range(i):
            type_ = models[j % len(models)] if j % 2 == 0 else TYPES[j % len(TYPES)]
            if j % 4 == 0:
                type_ = List[type_]
            fields[f'f{j}'] = (type_, ...)
        models_with_nested.append(create_model(f'M2{i}', **fields))

    return [*models, *models_with_nested]
[ 129, 365, 379 ]
def METHOD_NAME(self):
    # TODO: deprecated option, to remove in few months
    del self.info.options.ssl
[ 360, 147 ]
def METHOD_NAME(cls, part, source_params, creator, cr_workdir, oe_builddir,
                bootimg_dir, kernel_dir, rootfs_dir, native_sysroot):
    """
    Called after the partition is created. It is useful to add post
    operations e.g. security signing the partition.
    """
    logger.debug("SourcePlugin: do_post_partition: part: %s", part)
[ 74, 72, 2312 ]
def METHOD_NAME(cls): """Return a dict representation containing all required information for templates.""" ret = {x.name: x.value for x in cls.values()} ret['list'] = cls.list() return ret
[ 671, 198 ]
def METHOD_NAME(x, y, fatal=True):
    passed = (x == y)
    if not passed:
        print(str(x), ' != ', str(y))
    assertCommon(passed, fatal)
[ 638, 926 ]
def METHOD_NAME():
    return 0
    return rng.randint(0, 100)
[ 8621 ]
def METHOD_NAME(self):
    return """\
        b
            Sets the padding form the bottom (in px).
        l
            Sets the padding form the left (in px).
        r
            Sets the padding form the right (in px).
        t
            Sets the padding form the top (in px).
        """
[ 1302, 1303 ]
def METHOD_NAME(self) -> bool: ...
[ 31 ]
def METHOD_NAME(
    self,
    cmd: str = "copy running-configuration startup-configuration",
    confirm: bool = False,
    confirm_response: str = "",
) -> str:
    """Saves Config"""
    return super().METHOD_NAME(
        cmd=cmd, confirm=confirm, confirm_response=confirm_response
    )
[ 73, 200 ]
def METHOD_NAME(self):
    self.song_loaded = False
    with self._progress_value_lock:
        self._progress_value_count = 0
[ 656 ]
def METHOD_NAME(self):
    self.api.authorize_account('production', self.application_key_id, self.master_key)
[ 4797, 598 ]
def METHOD_NAME(self) -> Optional[ScriptRequest]:
    """Called by the ScriptRunner when it's at a yield point.

    If we have no request, return None.

    If we have a RERUN request, return the request and set our internal
    state to CONTINUE.

    If we have a STOP request, return the request and remain stopped.
    """
    if self._state == ScriptRequestType.CONTINUE:
        # We avoid taking a lock in the common case. If a STOP or RERUN
        # request is received between the `if` and `return`, it will be
        # handled at the next `on_scriptrunner_yield`, or when
        # `on_scriptrunner_ready` is called.
        return None

    with self._lock:
        if self._state == ScriptRequestType.RERUN:
            self._state = ScriptRequestType.CONTINUE
            return ScriptRequest(ScriptRequestType.RERUN, self._rerun_data)

        assert self._state == ScriptRequestType.STOP
        return ScriptRequest(ScriptRequestType.STOP)
[ 69, -1, 764 ]
def METHOD_NAME(self):
    cpu_sample = {'user': 1.0, 'sys': 2.0}
    first_sample = fake.HostSample(1.0, {0: cpu_sample, 1: cpu_sample})
    # CPU one suddenly came online and the second sample is still missing
    last_sample = fake.HostSample(2.0, {0: cpu_sample})

    with MonkeyPatchScope([(numa, 'topology', self._fakeNumaTopology)]):
        result = hoststats._get_cpu_core_stats(first_sample, last_sample)

    self.assertEqual(len(result), 1)
    self.assertEqual(result['0'], self._core_zero_stats)
[ 9, 2423, 577, 69, 1038, 679, 734 ]
def METHOD_NAME(self, is_cae: bool = False) -> Optional[TokenCache]:
    # If no cache options were provided, the default cache will be used. This credential accepts the
    # user's default cache regardless of whether it's encrypted. It doesn't create a new cache. If the
    # default cache exists, the user must have created it earlier. If it's unencrypted, the user must
    # have allowed that.
    cache_options = self._cache_persistence_options or TokenCachePersistenceOptions(allow_unencrypted_storage=True)

    if platform.system() not in {"Darwin", "Linux", "Windows"}:
        raise CredentialUnavailableError(message="Shared token cache is not supported on this platform.")

    if not self._cache and not is_cae:
        try:
            self._cache = _load_persistent_cache(cache_options, is_cae)
        except Exception:  # pylint:disable=broad-except
            return None

    if not self._cae_cache and is_cae:
        try:
            self._cae_cache = _load_persistent_cache(cache_options, is_cae)
        except Exception:  # pylint:disable=broad-except
            return None

    return self._cae_cache if is_cae else self._cache
[ 15, 596 ]
def METHOD_NAME(operations, threshold):
    total_ms = 0
    for op in operations:
        assert len(op.qubits) <= 2
        if len(op.qubits) == 2:
            assert isinstance(op, cirq.GateOperation)
            assert isinstance(op.gate, cirq.XXPowGate)
            total_ms += abs(op.gate.exponent)
    assert total_ms <= threshold
[ 638, 3665, 3144, 2234 ]
def METHOD_NAME(next_link=None):
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    if not next_link:
        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
    else:
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
[ 123, 377 ]
METHOD_NAME(self):
[ 187 ]
def METHOD_NAME(self, data):
    return strip_empty_values(data)
[ 1360, 35 ]
def METHOD_NAME(x):
    check_type((int, str), x, "x")
[ 44, 7598 ]
def METHOD_NAME(
    self,
    config_commands: Union[str, Sequence[str], Iterator[str], TextIO, None] = None,
    exit_config_mode: bool = False,
    **kwargs: Any,
) -> str:
    """Remain in configuration mode."""
    return super().METHOD_NAME(
        config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
    )
[ 353, 200, 0 ]
def METHOD_NAME(self, name): """Gets processor from sequencer # Arguments name: String indicating the process name """ for processor in self.processors: if processor.name == name: return processor
[ 19, 2422 ]
def METHOD_NAME(self):
    headers = {
        "X-GoOG-CHANNEL-ID": "myid",
        "X-Goog-MESSAGE-NUMBER": "1",
        "X-Goog-rESOURCE-STATE": "sync",
        "X-Goog-reSOURCE-URI": "http://example.com/",
        "X-Goog-resOURCE-ID": "http://example.com/resource_1",
    }

    ch = channel.Channel(
        "web_hook",
        "myid",
        "mytoken",
        "http://example.org/callback",
        expiration=0,
        params={"extra": "info"},
        resource_id="the_resource_id",
        resource_uri="http://example.com/resource_1",
    )

    # Good test case.
    n = channel.notification_from_headers(ch, headers)
    self.assertEqual("http://example.com/resource_1", n.resource_id)
    self.assertEqual("http://example.com/", n.resource_uri)
    self.assertEqual("sync", n.state)
    self.assertEqual(1, n.message_number)

    # Detect id mismatch.
    ch.id = "different_id"
    try:
        n = channel.notification_from_headers(ch, headers)
        self.fail("Should have raised exception")
    except errors.InvalidNotificationError:
        pass

    # Set the id back to a correct value.
    ch.id = "myid"
[ 9, 857, 280, 2131 ]
def METHOD_NAME(self, node, /):
    return expr.Binary(node.op, node.left.accept(self), node.right.accept(self), node.type)
[ 716, 808 ]
def METHOD_NAME(params, x):
    return params['p0'] * jnp.log(x)
[ -1 ]
def METHOD_NAME(integration_runtime_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationRuntimeConnectionInfoResult:
    """
    Get connection info for an integration runtime
    Azure REST API version: 2021-06-01.

    :param str integration_runtime_name: Integration runtime name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    __args__ = dict()
    __args__['integrationRuntimeName'] = integration_runtime_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:synapse:getIntegrationRuntimeConnectionInfo', __args__, opts=opts, typ=GetIntegrationRuntimeConnectionInfoResult).value

    return AwaitableGetIntegrationRuntimeConnectionInfoResult(
        host_service_uri=pulumi.get(__ret__, 'host_service_uri'),
        identity_cert_thumbprint=pulumi.get(__ret__, 'identity_cert_thumbprint'),
        is_identity_cert_exprired=pulumi.get(__ret__, 'is_identity_cert_exprired'),
        public_key=pulumi.get(__ret__, 'public_key'),
        service_token=pulumi.get(__ret__, 'service_token'),
        version=pulumi.get(__ret__, 'version'))
[ 19, 1911, 1888, 550, 100 ]
def METHOD_NAME(self, view, prefix, locations):
    settings = sublime.load_settings('CSS.sublime-settings')
    if settings.get('disable_default_completions'):
        return None

    selector = settings.get('default_completions_selector', '')
    if isinstance(selector, list):
        selector = ''.join(selector)

    pt = locations[0]
    if not match_selector(view, pt, selector):
        return None

    if match_selector(view, pt, "meta.property-value.css meta.function-call.arguments"):
        items = self.complete_function_argument(view, prefix, pt)
    elif view.match_selector(pt - 1, "meta.property-value.css, punctuation.separator.key-value"):
        items = self.complete_property_value(view, prefix, pt)
    elif view.match_selector(pt - 1, "meta.property-name.css, meta.property-list.css - meta.selector"):
        items = self.complete_property_name(view, prefix, pt)
    else:
        # TODO: provide selectors, at-rules
        items = None

    if items:
        return sublime.CompletionList(items, sublime.INHIBIT_WORD_COMPLETIONS)
    return None
[ 69, 539, 2044 ]
def METHOD_NAME():
    filter_shape = [filter_c, filter_m, filter_h, filter_w]
    # data_format = 'CMHW'
    return np.random.random(filter_shape).astype(np.float32)
[ 567, 527 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    Gets or sets the ETAG for optimistic concurrency control.
    """
    return pulumi.get(self, "etag")
[ 431 ]
def METHOD_NAME(qobj):
    """
    Return a new Qobj with the gauge fixed for the global phase.  Explicitly,
    we set the first non-zero element to be purely real-positive.
    """
    flat = qobj.tidyup(1e-14).full().flat.copy()
    for phase in flat:
        if phase != 0:
            # Fix the gauge for any global phase.
            flat = flat * np.exp(-1j * np.angle(phase))
            break
    return qutip.Qobj(flat.reshape(qobj.shape), dims=qobj.dims)
[ 188, 285, 3200 ]
def METHOD_NAME(self, form):
    """
    Redirect to Stripe Checkout for users to buy a subscription.

    Users can buy a new subscription if the current one
    has been deleted after they canceled it.
    """
    stripe_subscription = self.get_object()
    if (
        not stripe_subscription
        or stripe_subscription.status != SubscriptionStatus.canceled
    ):
        raise Http404()

    stripe_price = get_object_or_404(djstripe.Price, id=form.cleaned_data["plan"])

    url = self.request.build_absolute_uri(self.get_success_url())
    organization = self.get_organization()

    # pylint: disable=broad-except
    try:
        stripe_customer = get_or_create_stripe_customer(organization)
        checkout_session = stripe.checkout.Session.create(
            customer=stripe_customer.id,
            payment_method_types=['card'],
            line_items=[
                {
                    "price": stripe_price.id,
                    "quantity": 1,
                }
            ],
            mode='subscription',
            success_url=url + '?upgraded=true',
            cancel_url=url,
        )
        return HttpResponseRedirect(checkout_session.url)
    except Exception:
        log.exception(
            'Error while creating a Stripe checkout session.',
            organization_slug=organization.slug,
            price=stripe_price.id,
        )
        messages.error(
            self.request,
            _('There was an error connecting to Stripe, please try again in a few minutes.'),
        )
        return HttpResponseRedirect(self.get_success_url())
[ 1736, 24, 2170 ]
def METHOD_NAME(self): """Controller operation context.""" return self.response_builder
[ 198 ]
def METHOD_NAME(self):
    return [n.nutrient_name_unit for n in self._nutrients]
[ 9106, 83 ]
async def METHOD_NAME(self) -> None:
    await self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(self, value):
    try:
        return float(value)
    except:
        return float(0)
[ 19, 1819, 99 ]
def METHOD_NAME(self): """Tests when neither name nor source is specified""" # Storage cannot be specified without name or source - invalid spec with pytest.raises(exceptions.StorageSpecError) as e: storage_lib.Storage() assert 'Storage source or storage name must be specified' in str(e)
[ 9, -1, 61, 11184 ]
def METHOD_NAME(self, balance_hp=None, emotion_reduce=None, func=None, call_submarine_at_boss=None,
                save_get_items=None, expected_end=None, fleet_index=1):
    self.battle_status_click_interval = 7 if save_get_items else 0
    super().METHOD_NAME(balance_hp=False, expected_end='no_searching',
                        auto_mode='hide_in_bottom_left', save_get_items=False)
[ 14629 ]
def METHOD_NAME(graph):
    """
    Sets the zoom level to fit selected nodes.
    """
    graph.METHOD_NAME()
[ 90, 24, 3115 ]
def METHOD_NAME(key: _Key | None) -> bool: ...
[ 137, 2872 ]
async def METHOD_NAME() -> None: """Going home should always land at position zero.""" async with InputTester().run_test() as pilot: for input in pilot.app.query(Input): input.action_home() assert input.cursor_position == 0
[ 9, 362, 624 ]
def METHOD_NAME(self, emu, op, eip):
    if self.arch is None:
        self.arch = op.iflags & envi.ARCH_MASK

    if self.arch == envi.ARCH_I386:
        if op.opcode == INS_OUT:
            emu.stopEmu()
            raise v_exc.BadOutInstruction(op.va)
        if op.opcode == INS_TRAP:
            reg = emu.getRegister(envi.archs.i386.REG_EAX)
            if reg == 1:
                emu.stopEmu()
                self.vw.addNoReturnVa(eip)

    if self.arch == envi.ARCH_AMD64:
        if op.opcode == INS_OUT:
            emu.stopEmu()
            raise v_exc.BadOutInstruction(op.va)

    if op in self.badops:
        emu.stopEmu()
        raise v_exc.BadOpBytes(op.va)

    if op.iflags & envi.IF_RET:
        self.hasret = True
        emu.stopEmu()

    self.lastop = op

    loc = self.vw.getLocation(eip)
    if loc is not None:
        va, size, ltype, linfo = loc
        if ltype != vivisect.LOC_OP:
            emu.stopEmu()
            raise Exception("HIT LOCTYPE %d AT 0x%.8x" % (ltype, va))

    cnt = self.mndist.get(op.mnem, 0)
    self.mndist[op.mnem] = cnt + 1
    self.insn_count += 1

    if self.vw.isNoReturnVa(eip):
        self.hasret = True
        emu.stopEmu()
        # FIXME do we need a way to terminate emulation here?
[ 14080 ]
def METHOD_NAME(self):
    element = VectorElement("CG", "triangle", 1)
    v = TestFunction(element)
    u = TrialFunction(element)
    f = Coefficient(element)
    i, j, k, l = indices(4)

    a = v[i]
    self.assertSameIndices(a, (i,))
    a = outer(v, u)[i, j]
    self.assertSameIndices(a, (i, j))
    a = outer(v, u)[i, i]
    self.assertSameIndices(a, ())
    self.assertIsInstance(a, IndexSum)
[ 9, 3460 ]
def METHOD_NAME(self): """A test for the boxing logic on unknown type """ Dummy = self.Dummy @njit def foo(x): return Dummy(x) with self.assertRaises(TypeError) as raises: foo(123) self.assertIn("cannot convert native Dummy to Python object", str(raises.exception))
[ 9, 16567 ]
def METHOD_NAME(self):
    assert isinstance(self.benchmark_schema, str)
    if self.benchmark_schema.startswith('<'):
        try:
            ElementTree.fromstring(self.benchmark_schema)
        except ElementTree.ParseError as e:
            assert False, "The 'benchmark_schema' value contains invalid XML: " + str(e)
    else:
        try:
            json.loads(self.benchmark_schema)
        except json.decoder.JSONDecodeError as e:
            assert False, "The 'benchmark_schema' value contains invalid JSON: " + str(e)
[ 638, 1205, 1668, 135 ]
def METHOD_NAME(self) -> None:
    self.model = DioptasModel()
    self.model.calibration_model.is_calibrated = True
    self.model.calibration_model.pattern_geometry.wavelength = 0.31E-10
    self.model.calibration_model.integrate_1d = MagicMock(
        return_value=(self.model.calibration_model.tth,
                      self.model.calibration_model.int))

    self.widget = IntegrationWidget()
    self.controller = PhaseInPatternController(self.widget, self.model)
    self.pattern_controller = PatternController(self.widget, self.model)

    self.model.pattern_model.load_pattern(os.path.join(data_path, 'pattern_001.xy'))
[ 0, 1 ]
def METHOD_NAME(self, item):
    return {self._item_attr: item}
[ 335 ]
def METHOD_NAME(self) -> str: """ ARM resource id of the guest configuration assignment. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(trial_id: int, timing_enabled: bool, gpu_enabled: bool) -> None:
    def validate_labels(labels: Sequence[Dict[str, Any]]) -> None:
        # Check some labels against the expected labels. Return the missing labels.
        expected = {
            "cpu_util_simple": PROFILER_METRIC_TYPE_SYSTEM,
            "dataloader_next": PROFILER_METRIC_TYPE_TIMING,
            "disk_iops": PROFILER_METRIC_TYPE_SYSTEM,
            "disk_throughput_read": PROFILER_METRIC_TYPE_SYSTEM,
            "disk_throughput_write": PROFILER_METRIC_TYPE_SYSTEM,
            "free_memory": PROFILER_METRIC_TYPE_SYSTEM,
            "from_device": PROFILER_METRIC_TYPE_TIMING,
            "net_throughput_recv": PROFILER_METRIC_TYPE_SYSTEM,
            "net_throughput_sent": PROFILER_METRIC_TYPE_SYSTEM,
            "reduce_metrics": PROFILER_METRIC_TYPE_TIMING,
            "step_lr_schedulers": PROFILER_METRIC_TYPE_TIMING,
            "to_device": PROFILER_METRIC_TYPE_TIMING,
            "train_batch": PROFILER_METRIC_TYPE_TIMING,
        }

        if gpu_enabled:
            expected.update(
                {
                    "gpu_free_memory": PROFILER_METRIC_TYPE_SYSTEM,
                    "gpu_util": PROFILER_METRIC_TYPE_SYSTEM,
                }
            )

        if not timing_enabled:
            expected = {k: v for k, v in expected.items() if v != PROFILER_METRIC_TYPE_TIMING}

        for label in labels:
            metric_name = label["name"]
            metric_type = label["metricType"]
            if expected.get(metric_name, None) == metric_type:
                del expected[metric_name]

        if len(expected) > 0:
            pytest.fail(
                f"expected completed experiment to have all labels but some are missing: {expected}"
            )

    with api.get(
        conf.make_master_url(),
        "api/v1/trials/{}/profiler/available_series".format(trial_id),
        stream=True,
    ) as r:
        for line in r.iter_lines():
            labels = json.loads(line)["result"]["labels"]
            validate_labels(labels)
            # Just check 1 iter.
            return
[ 377, 5176, 1341, 415 ]
def METHOD_NAME(self): """ <foo> tags should be removed, even when nested. """ soup = self.get_soup("<b><foo>bar</foo></b>", "html5lib") tag = soup.b self.whitelister.clean_tag_node(tag, tag) self.assertEqual(str(tag), "<b>bar</b>")
[ 9, 1356, 82, 1716, 15975, 612, 12025 ]
def METHOD_NAME(self, blocking=True):
    '''
    Move stage to factory default home location.

    Note: Thorlabs API allows resetting stages home but this not
    implemented as it isnt' advised
    '''
    response = self.query_device("ho")
    if blocking:
        self._block_until_stopped()
    return self._decode_position_response(response)
[ 132, 624 ]
def METHOD_NAME(self): return f"AU - { self._get_full_last_name() }\n"
[ 280, 679 ]
def METHOD_NAME(self):
    data = [
        ('2', 2),
        ('5', 5),
        ('8', 8),
        ('A', 1),
        ('10', 10),
        ('J', 10),
        ('Q', 10),
        ('K', 10)]
    for variant, (card, value) in enumerate(data, 1):
        with self.subTest(f'variation #{variant}', input=card, output=value):
            error_msg = f'Expected {value} as the value of {card}.'
            self.assertEqual(value_of_card(card), value, msg=error_msg)
[ 9, 99, 47, 5427 ]
def METHOD_NAME(self): """Tests trident roi head predict.""" if not torch.cuda.is_available(): # RoI pooling only support in GPU return unittest.skip('test requires GPU and torch+cuda') roi_head = MODELS.build(self.roi_head_cfg) roi_head = roi_head.cuda() s = 256 feats = [] for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)): feats.append( torch.rand(1, 256, s // (2**(i + 2)), s // (2**(i + 2))).to(device='cuda')) image_shapes = [(3, s, s)] batch_data_samples = demo_mm_inputs( batch_size=1, image_shapes=image_shapes, num_items=[0], num_classes=4, with_mask=True, device='cuda')['data_samples'] proposals_list = demo_mm_proposals( image_shapes=image_shapes, num_proposals=100, device='cuda') roi_head.predict(feats, proposals_list, batch_data_samples)
[ 9, 361, 5451, 65, 373, 2103 ]
def METHOD_NAME(self):
    T = nx.bfs_tree(self.G, source=0)
    assert sorted(T.nodes()) == sorted(self.G.nodes())
    assert sorted(T.edges()) == [(0, 1), (1, 2), (1, 3), (2, 4)]
[ 9, 9551, 151 ]
async def METHOD_NAME(process):
    try:
        with anyio.fail_after(SHUTDOWN_TIMEOUT):
            await process.wait()
    except TimeoutError:
        # try twice in case process.wait() hangs
        with anyio.fail_after(SHUTDOWN_TIMEOUT):
            await process.wait()
[ 1209, 158 ]
def METHOD_NAME(self):
    errorbars = ErrorBars(
        [(0, 0, 0.1, 0.2, 1), (0, 1, 0.2, 0.4, 4), (0, 2, 0.6, 1.2, 8)],
        vdims=['y', 'perr', 'nerr', 'line_width']).opts(line_width='line_width')
    plot = bokeh_renderer.get_plot(errorbars)
    cds = plot.handles['cds']
    glyph = plot.handles['glyph']
    self.assertEqual(cds.data['line_width'], np.array([1, 4, 8]))
    self.assertEqual(property_to_dict(glyph.line_width), {'field': 'line_width'})
[ 9, 10018, 534, 2327, 441 ]
def METHOD_NAME(self):
    pass
[ 72, 710 ]
def METHOD_NAME(): """ True when the ConEmu Windows console is used. """ return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
[ 137, -1, 1113 ]
def METHOD_NAME(self):
    ret = app_theme.lighten("#000000", 50)
    self.is_valid_hex_color(ret)
    self.assertEqual("#7f7f7f", ret)
[ 9, 8287, 5879, 6974 ]
def METHOD_NAME(self, _root):
    error_message = r"The model file name must be a Vensim \(\.mdl\),"\
        r" a Xmile \(\.xmile, \.xml, \.stmx\) or a PySD \(\.py\) "\
        r"model file\.\.\.$"
    with pytest.raises(ValueError, match=error_message):
        runner(_root.joinpath("more-tests/not_vensim/test_not_vensim.txt"))
[ 9, 256, 1205, 578 ]
def METHOD_NAME():
    for i, pdb in enumerate(pdbs):
        preamble = "bad_cis_peptide_%02d" % i
        f=open("%s.pdb" % preamble, "w")
        f.write(pdb)
        f.close()
        for param, results in params.items():
            f=open("%s.params" % preamble, "w")
            f.write(param)
            f.close()
            cmd = "phenix.geometry_minimization %(preamble)s.pdb %(preamble)s.params" % locals()
            print(cmd)
            ero = easy_run.fully_buffered(command=cmd)
            out = StringIO()
            ero.show_stdout(out=out)
            with open("%(preamble)s_minimized.geo" % locals(), "r") as f:
                lines = f.read()
            geo_spec=geo_specs[i]
            print(geo_spec % results[0])
            if lines.find(geo_spec % results[0])==1:
                if lines.find(geo_spec % abs(results[0]))==1:
                    assert 0, ".geo specification not found"
[ 6075, 4185, 3255 ]
def METHOD_NAME(): """ Define and process the command line interface to the varmat compatibility summarize script. """ parser = FullErrorMsgParser( description="Process results of varmat compatibility test.", formatter_class=ArgumentDefaultsHelpFormatter, ) parser.add_argument( "results_file", type=str, default=[], help="File with results of varmat compatibility test.", ) parser.add_argument( "--functions", nargs="+", type=str, default=[], help="Function names to summarize. By default summarize everything.", ) parser.add_argument( "--which", default = "compatible", choices = ["compatible", "incompatible", "irrelevant"], help = "Print by compatibility." ) parser.add_argument( "--fully", default = False, help = "When printing compatible or incompatible function names, print those that are fully compatible or incompatible, ignoring irrelevant functions. When printing irrelevant functions, print only those with no compatible or incompatible versions.", action = "store_true" ) parser.add_argument( "--names", default=False, help="Print function names, not signatures.", action="store_true", ) args = parser.parse_args() with open(args.results_file, "r") as f: results = json.load(f) names_to_print = process_results( results, args.functions, print_which = args.which, print_fully = args.fully, print_names = args.names ) for name in sorted(names_to_print): print(name.strip())
[ 356, 615, 335 ]
def METHOD_NAME(self) -> str:
    self._completeIfNotSet(self._body)
    return self._body.value
[ 2829 ]
def METHOD_NAME(self):
    parsed_args = mock.Mock()
    # Make the value a file with JSON located inside.
    parsed_args.cli_input_json = 'file://' + self.temp_file
    call_parameters = {}

    self.argument.add_to_call_parameters(
        service_operation=None,
        call_parameters=call_parameters,
        parsed_args=parsed_args,
        parsed_globals=None
    )

    self.assertEqual(call_parameters, {'A': 'foo', 'B': 'bar'})
[ 9, 238, 24, 128, 386, 41, 171 ]
def METHOD_NAME(stderr):
    new_result = None
    if (
        b"NotCompressibleException" in stderr
        or b"CantPackException" in stderr
        or b"AlreadyPackedException" in stderr
    ):
        stderr = b""
        new_result = 0
    return new_result, stderr
[ 527, 15179, 168 ]
def METHOD_NAME(self):
    N_ICC_SIDE_LENGTH = 10
    DIPOLE_DISTANCE = 5.0
    DIPOLE_CHARGE = 10.0

    part_slice_lower, normals_lower, areas_lower = self.add_icc_particles(
        N_ICC_SIDE_LENGTH, -0.0001, 0.)
    part_slice_upper, normals_upper, areas_upper = self.add_icc_particles(
        N_ICC_SIDE_LENGTH, 0.0001, BOX_L)

    assert (part_slice_upper.id[-1] - part_slice_lower.id[0] +
            1) == 2 * N_ICC_SIDE_LENGTH**2, "ICC particles not continuous"

    normals = np.vstack((normals_lower, -normals_upper))
    areas = np.hstack((areas_lower, areas_upper))
    epsilons = np.full_like(areas, 1e8)
    sigmas = np.zeros_like(areas)

    icc = espressomd.electrostatic_extensions.ICC(
        n_icc=2 * N_ICC_SIDE_LENGTH**2,
        normals=normals,
        areas=areas,
        epsilons=epsilons,
        sigmas=sigmas,
        convergence=1e-6,
        max_iterations=100,
        first_id=part_slice_lower.id[0],
        eps_out=1.,
        relaxation=0.75,
        ext_field=[0, 0, 0])

    # Dipole in the center of the simulation box
    BOX_L_HALF = BOX_L / 2
    self.system.part.add(
        pos=[BOX_L_HALF, BOX_L_HALF, BOX_L_HALF - DIPOLE_DISTANCE / 2],
        q=DIPOLE_CHARGE, fix=[True, True, True])
    self.system.part.add(
        pos=[BOX_L_HALF, BOX_L_HALF, BOX_L_HALF + DIPOLE_DISTANCE / 2],
        q=-DIPOLE_CHARGE, fix=[True, True, True])

    p3m = espressomd.electrostatics.P3M(
        prefactor=1., mesh=32, cao=7, accuracy=1e-5)
    p3m.charge_neutrality_tolerance = 1e-11

    self.system.electrostatics.solver = p3m
    self.system.electrostatics.extension = icc
    self.system.integrator.run(0)

    charge_lower = sum(part_slice_lower.q)
    charge_upper = sum(part_slice_upper.q)

    testcharge_dipole = DIPOLE_CHARGE * DIPOLE_DISTANCE
    induced_dipole = 0.5 * (abs(charge_lower) + abs(charge_upper)) * BOX_L

    self.assertAlmostEqual(1, induced_dipole / testcharge_dipole, places=4)
[ 9, 1630, 112 ]
def METHOD_NAME(self):
    pass
[ 72, 710 ]
def METHOD_NAME(self) -> None:
    helper_dir = os.path.join(get_main_dir(), "chipsec", "helper")
    helpers = [os.path.basename(f) for f in os.listdir(helper_dir)
               if os.path.isdir(os.path.join(helper_dir, f)) and not os.path.basename(f).startswith("__")]

    for helper in helpers:
        helper_path = ''
        try:
            helper_path = f'chipsec.helper.{helper}.{helper}helper'
            hlpr = importlib.import_module(helper_path)
            self.avail_helpers[f'{helper}helper'] = hlpr
        except ImportError as msg:
            logger().log_debug(f"Unable to load helper: {helper}")
[ 557, 4272 ]
def METHOD_NAME(self):
    data = self._get_data_items()
    return data.METHOD_NAME()
[ 219 ]
def METHOD_NAME(schema, link_prefix=None):
    """ Load json schema stored in the .schema dir """
    if link_prefix:
        update_links(link_prefix, schema)
    return schema
[ 557, 763, 135 ]
def METHOD_NAME(flag, test_val, default_val):
    """Verifies that the given flag is affected by the corresponding env var."""
    env_var = 'GTEST_' + flag.upper()

    SetEnvVar(env_var, test_val)
    AssertEq(test_val, GetFlag(flag))

    SetEnvVar(env_var, None)
    AssertEq(default_val, GetFlag(flag))
[ 9, 584 ]
async def METHOD_NAME():
    # [START run_indexer_async]
    result = await indexers_client.METHOD_NAME("async-sample-indexer")
    print("Ran the Indexer 'async-sample-indexer'")
    return result
    # [END run_indexer_async]
[ 22, 895 ]
def METHOD_NAME(
    self,
    state,
    auth_data,
    urlopen,
    expect_success=True,
    customer_domain="",
    has_2fa=False,
    **kwargs,
):
    headers = {"Content-Type": "application/json"}
    urlopen.return_value = MockResponse(headers, json.dumps(auth_data))

    query = urlencode({"code": "1234", "state": state})

    resp = self.client.get(f"{self.sso_path}?{query}", **kwargs)
    if expect_success:
        if has_2fa:
            assert resp["Location"] == "/auth/2fa/"
            with mock.patch(
                "sentry.auth.authenticators.TotpInterface.validate_otp", return_value=True
            ):
                assert resp.status_code == 302
                resp = self.client.post(reverse("sentry-2fa-dialog"), {"otp": "something"})
                assert resp.status_code == 302
                assert resp["Location"].startswith("http://testserver/auth/sso/?")
                resp = self.client.get(resp["Location"])

        assert resp.status_code == 302
        assert resp["Location"] == f"{customer_domain}/auth/login/"
        resp = self.client.get(resp["Location"], follow=True)
        assert resp.status_code == 200
        assert resp.redirect_chain == [("/organizations/baz/issues/", 302)]
        assert resp.context["user"].id == self.user.id

        assert urlopen.called
        data = urlopen.call_args[1]["data"]

        assert data == {
            "grant_type": "authorization_code",
            "code": "1234",
            "redirect_uri": "http://testserver/auth/sso/",
            "client_id": "my_client_id",
            "client_secret": "my_client_secret",
        }
    return resp
[ 7155, 1076 ]
def METHOD_NAME(self):
    has_one = self.builder_for_features('foo')
    has_the_other = self.builder_for_features('bar')
    has_both_early = self.builder_for_features('foo', 'bar', 'baz')
    has_both_late = self.builder_for_features('foo', 'bar', 'quux')
    lacks_one = self.builder_for_features('bar')
    has_the_other = self.builder_for_features('foo')

    # There are two builders featuring 'foo' and 'bar', but
    # the one that also features 'quux' was registered later.
    assert self.registry.lookup('foo', 'bar') == has_both_late

    # There is only one builder featuring 'foo', 'bar', and 'baz'.
    assert self.registry.lookup('foo', 'bar', 'baz') == has_both_early
[ 9, 1906, 9452, 759, 3628, 348, 10565 ]
def METHOD_NAME(msg: str, col: Colour_Types, end: str = "\n") -> None:
    """Print a string, in a specified ansi colour."""
    print(f"{col!r}{msg}{Ansi.RESET!r}", end=end)
[ -1 ]
def METHOD_NAME(request):
    """
    :param request:
    :return:
    """
    latest_id = int(request.query_params.get("latest", 0))
    page_size = int(request.query_params.get("pageSize", 20))
    if page_size > 100:
        page_size = 100
    rule_id = request.data.get("name", _("Temporary search"))
    rule_msg = request.data.get("msg")
    rule_level = request.data.get("level")
    rule_sources = request.data.get("sources")
    rule_sinks = request.data.get("sinks")
    rule_propagators = request.data.get("propagators")
    sink_set = set(rule_sinks) if rule_sinks else set()
    source_set = set(rule_sources) if rule_sources else set()
    propagator_set = set(rule_propagators) if rule_propagators else set()
    return (
        latest_id,
        page_size,
        rule_id,
        rule_msg,
        rule_level,
        source_set,
        sink_set,
        propagator_set,
    )
[ 214, 1070, 405 ]
def METHOD_NAME(self):
    # we only send data that was requested to actually be sent to prevent exploitation
    # of the form as much as possible
    accepted_fields = self.accepted_fields

    form = self.request.form
    values = []
    for field in form.keys():
        if field.strip() not in accepted_fields:
            continue
        values.append("{}: {}".format(field, form[field]))

    return "\n".join(values)
[ 377, 100 ]
def METHOD_NAME(aggregate):
    """Helper function waiting for URL queue."""
    while True:
        try:
            aggregate.urlqueue.join(timeout=30)
            break
        except urlqueue.Timeout:
            # Cleanup threads every 30 seconds
            aggregate.remove_stopped_threads()
            if not any(aggregate.get_check_threads()):
                break
[ 250, 274 ]
def METHOD_NAME(self, fail_silently: bool = ...) -> Any: ...
[ 19, 550 ]
async def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    _stream = False
    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=_stream, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(self): """Return the features supported by this vacuum cleaner.""" support = ( VacuumEntityFeature.STATE | VacuumEntityFeature.STATUS | VacuumEntityFeature.SEND_COMMAND ) if self._fan_dps: support |= VacuumEntityFeature.FAN_SPEED if self._power_dps: support |= VacuumEntityFeature.TURN_ON | VacuumEntityFeature.TURN_OFF if self._locate_dps: support |= VacuumEntityFeature.LOCATE cmd_dps = self._command_dps or self._status_dps cmd_support = cmd_dps.values(self._device) if SERVICE_RETURN_TO_BASE in cmd_support: support |= VacuumEntityFeature.RETURN_HOME if SERVICE_CLEAN_SPOT in cmd_support: support |= VacuumEntityFeature.CLEAN_SPOT if SERVICE_STOP in cmd_support: support |= VacuumEntityFeature.STOP if self._activate_dps: support |= VacuumEntityFeature.START | VacuumEntityFeature.PAUSE else: if "start" in cmd_support: support |= VacuumEntityFeature.START if "pause" in cmd_support: support |= VacuumEntityFeature.PAUSE return support
[ 616, 2247 ]
def METHOD_NAME(self):
    dtype = number.min_numerical_convertible_type("+.5")
    self.assertTrue(numpy.issubdtype(dtype, numpy.floating))
[ 9, 2302, 3397 ]
def METHOD_NAME(aggregator, tags):
    metrics = set(NODE_METRICS_MAP.values())

    # Summaries
    for metric in ('go.gc.duration.seconds',):
        metrics.remove(metric)
        metrics.update({'{}.count'.format(metric), '{}.quantile'.format(metric), '{}.sum'.format(metric)})

    for metric in sorted(metrics):
        metric = 'aws.msk.{}'.format(metric)

        for tag in tags:
            aggregator.assert_metric_has_tag(metric, tag)
[ 638, 1716, 1097, 3116 ]
def METHOD_NAME(self, values):
    """ Convert list of file paths to list of BioPython object based on the files.
    Only certain formats of sequences and MSA allow such a conversion.
    Other files will stay in string representation.
    """
    if not _BioAvailable:
        return values

    resultList = []
    for path in values:
        resultList.append(self._createObjectBasedOnFile(path))
    return resultList
[ 144, 245, 24, 3111, 245 ]
def METHOD_NAME(self):
    instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
    t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)
    # the box center must fall into the cropped region
    self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)
[ 9, 370, 712 ]
f METHOD_NAME(self):
[ 1575 ]