text (string, lengths 15 to 7.82k) | ids (list, lengths 1 to 7)
---|---|
def METHOD_NAME(self, write_cursor: DBCursor, margin_trade: MarginPosition) -> None:
self._margin_trades.append(margin_trade)
self.maybe_flush_all(write_cursor) | [
238,
9193,
4304
]
|
def METHOD_NAME(self, token):
if self._proactive_refresh:
interval = timedelta(minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES)
else:
interval = timedelta(minutes=self._ON_DEMAND_REFRESHING_INTERVAL_MINUTES)
return (token.expires_on - get_current_utc_as_int()) < interval.total_seconds() | [
137,
466,
3193,
6400
]
|
def METHOD_NAME(mocker):
env = {}
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
# crt_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
orig_exist = os.path.exists
def exists(path):
return True if path in token_path else orig_exist(path)
def magic(path, *args, **kwargs):
if path == token_path:
return io.StringIO("token")
mocker.patch("wandb.util.open", magic, create=True)
mocker.patch("wandb.util.os.path.exists", exists)
env["KUBERNETES_SERVICE_HOST"] = "k8s"
env["KUBERNETES_PORT_443_TCP_PORT"] = "123"
env["HOSTNAME"] = "test"
return env | [
248,
3761
]
|
def METHOD_NAME(self, arch):
if Path(self.python_exe).exists():
# no need to build, but we must set hostpython for our Context
self.ctx.hostpython = self.python_exe
return False
return True | [
427,
56
]
|
def METHOD_NAME(self, x):
return F.leaky_relu(x, 2e-1) | [
-1
]
|
def METHOD_NAME(arn: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetArnResult]:
"""
Parses an ARN into its constituent parts.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
db_instance = aws.get_arn(arn="arn:aws:rds:eu-west-1:123456789012:db:mysql-db")
```
:param str arn: ARN to parse.
"""
... | [
19,
1059,
146
]
|
def METHOD_NAME(form: KeywordsFormArgs) -> str:
if manatee_is_custom_cnc():
key = (f'{form.corpname}:{form.usesubcorp}:{form.ref_corpname}:{form.ref_usesubcorp}:{form.wlattr}:{form.wlpat}:'
f'{form.include_nonwords}:{form.wltype}:{form.wlnums}:{form.wlminfreq}:{form.wlmaxfreq}:{form.score_type}')
else:
key = (f'{form.corpname}:{form.usesubcorp}:{form.ref_corpname}:{form.ref_usesubcorp}:{form.wlattr}:{form.wlpat}:'
f'{form.include_nonwords}:{form.wltype}:{form.wlnums}:{form.wlminfreq}:{form.wlmaxfreq}')
result_id = hashlib.sha1(key.encode('utf-8')).hexdigest()
return os.path.join(settings.get('corpora', 'freqs_cache_dir'), f'kwords_{result_id}.jsonl') | [
129,
596,
157
]
|
def METHOD_NAME():
result = interface.call("person_detection")
if result is not None:
print(result.tobytes()) | [
3198,
1349,
381
]
|
def METHOD_NAME():
'''Tests that feature= works for optional features'''
t = BoostBuild.Tester()
t.write('Jamroot.jam', '''
import feature : feature ;
import toolset : flags ;
feature f1 : 1 2 : optional ;
make output.txt : : @run ;
flags run OPTIONS <f1> ;
actions run { echo b $(OPTIONS) > $(<) }
''')
t.run_build_system(['f1='])
t.expect_content("bin/*/output.txt", "b")
t.cleanup() | [
9,
665
]
|
def METHOD_NAME(self, ispan, ospan):
"""Function passed to `CallbackBlock`, which
checks data before scrunch"""
self.assertLessEqual(ispan.nframe, self.gulp_nframe)
self.assertEqual(ospan.nframe, ispan.nframe)
self.assertEqual(ispan.data.shape, (ispan.nframe,1,2))
self.assertEqual(ospan.data.shape, (ospan.nframe,1,2)) | [
250,
365,
1553
]
|
async def METHOD_NAME():
with pytest.raises(AssertionError) as exc_info:
await validate_signature(verifier=None, metadata={}, records=[], timestamp=42)
assert exc_info.value.args[0] == "Missing signature" | [
9,
1038,
1334
]
|
async def METHOD_NAME(
limit: int = LimitBody(),
offset: int = Body(0, ge=0),
variables: Optional[filters.VariableFilter] = None,
sort: sorting.VariableSort = Body(sorting.VariableSort.NAME_ASC),
db: PrefectDBInterface = Depends(provide_database_interface), | [
203,
2045
]
|
def METHOD_NAME(self):
self._test_posting_duplicate_source_system_username(self.sources_url, self.rest_client)
self.assertEqual(SourceSystem.objects.count(), 2)
self.assertEqual(User.objects.count(), 3)
source1_duplicate_username = SourceSystem.objects.get(name=self.source1_duplicate_username_dict["name"])
self.assertEqual(source1_duplicate_username.name, self.source1_duplicate_username_dict["name"])
self.assertEqual(source1_duplicate_username.type.pk, self.source1_duplicate_username_dict["type"])
duplicate_username = self.source1_duplicate_username_dict["username"]
generated_username = source1_duplicate_username.user.username
self.assertNotEqual(generated_username, duplicate_username)
self.assertTrue(generated_username.startswith(duplicate_username)) | [
9,
6361,
1119,
1458,
112,
2072,
24
]
|
def METHOD_NAME(cls: Any) -> Any:
"""Register an action, used as a decorator.
:param cls: The class to register
:returns: The class after registration
"""
package, _, action = cls.__module__.rpartition(".")
pkg_info = _ACTIONS.setdefault(package, {})
pkg_info[action] = ActionT(name=action, cls=cls, kegex=re.compile(cls.KEGEX))
return cls | [
372
]
|
def METHOD_NAME():
pass | [
1502
]
|
def METHOD_NAME():
"""
Read in the electron affinities and ionization energies from file
"""
electron_affinities = {}
ionization_energies = {}
for el,l in data.items():
electron_affinities[el] = data[el][0]
ionization_energies[el] = data[el][1]
return electron_affinities, ionization_energies | [
203,
13414,
11154
]
|
def METHOD_NAME(self):
self._test_after_dynamo(
"cuda", "relu_accuracy_error_TESTING_ONLY", "AccuracyError"
) | [
9,
1887,
7397,
2590,
5100,
168
]
|
def METHOD_NAME(self):
"""Test refresh identities"""
result = self._test_refresh_identities()
# ... ? | [
9,
1920,
7949
]
|
def METHOD_NAME():
content = {
"example": {
"nested": {"path": {"internal": "uh oh"}, "path/internal": "found it!"},
},
"reference": {"$ref": "#/example/nested/path/internal"},
}
config = resolver.preprocess_manifest(content)
assert config["example"]["nested"]["path"]["internal"] == "uh oh"
assert config["example"]["nested"]["path/internal"] == "found it!"
assert config["reference"] == "found it!" | [
9,
2026,
1766
]
|
def METHOD_NAME(self):
# Send a tx from which to conflict outputs later
txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.generate(self.nodes[0], 1)
# Disconnect node1 from others to reorg its chain later
self.disconnect_nodes(0, 1)
self.disconnect_nodes(1, 2)
self.connect_nodes(0, 2)
# Send a tx to be unconfirmed later
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
tx = self.nodes[0].gettransaction(txid)
self.generate(self.nodes[0], 4, sync_fun=self.no_op)
tx_before_reorg = self.nodes[0].gettransaction(txid)
assert_equal(tx_before_reorg["confirmations"], 4)
# Disconnect node0 from node2 to broadcast a conflict on their respective chains
self.disconnect_nodes(0, 2)
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
inputs.append({"txid": txid_conflict_from, "vout": nA})
outputs_1 = {}
outputs_2 = {}
# Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. Both spend from txid_conflict_from
outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")
outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")
conflicted = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_1))
conflicting = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_2))
conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])
self.generate(self.nodes[2], 9, sync_fun=self.no_op)
# Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted
self.connect_nodes(0, 2)
self.sync_blocks([self.nodes[0], self.nodes[2]])
conflicted = self.nodes[0].gettransaction(conflicted_txid)
conflicting = self.nodes[0].gettransaction(conflicting_txid)
assert_equal(conflicted["confirmations"], -9)
assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])
# Node0 wallet is shutdown
self.restart_node(0)
# The block chain re-orgs and the tx is included in a different block
self.generate(self.nodes[1], 9, sync_fun=self.no_op)
self.nodes[1].sendrawtransaction(tx["hex"])
self.generate(self.nodes[1], 1, sync_fun=self.no_op)
self.nodes[1].sendrawtransaction(conflicted["hex"])
self.generate(self.nodes[1], 1, sync_fun=self.no_op)
# Node0 wallet file is loaded on longest sync'ed node1
self.stop_node(1)
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].chain_path, self.default_wallet_name, self.wallet_data_filename))
self.start_node(1)
tx_after_reorg = self.nodes[1].gettransaction(txid)
# Check that normal confirmed tx is confirmed again but with different blockhash
assert_equal(tx_after_reorg["confirmations"], 2)
assert tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"]
conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid)
# Check that conflicted tx is confirmed again with blockhash different than previously conflicting tx
assert_equal(conflicted_after_reorg["confirmations"], 1)
assert conflicting["blockhash"] != conflicted_after_reorg["blockhash"] | [
22,
9
]
|
def METHOD_NAME(
self, mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
) -> tvm.IRModule:
"""Partition the relay graph in parts supported and unsupported by the
target hardware accelerator.
Parameters
----------
mod : tvm.IRModule
The relay module to be partitioned.
params: Optional[Dict[str, tvm.runtime.NDArray]]
Returns
-------
out : tvm.IRModule
The partitioned relay module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
pass_sequence = []
pass_sequence.extend(
[p[1] for p in self._relay_passes if p[0] == PassPhase.PRE_PARTITIONING]
)
pass_sequence.append(relay.transform.MergeComposite(self._pattern_table()))
pass_sequence.append(relay.transform.AnnotateTarget(self.target_name))
if self.merge_compiler_regions:
pass_sequence.append(relay.transform.MergeCompilerRegions())
pass_sequence.append(relay.transform.PartitionGraph())
pass_sequence.extend(
[p[1] for p in self._relay_passes if p[0] == PassPhase.POST_PARTITIONING_0]
)
sequential_passes = tvm.transform.Sequential(pass_sequence)
mod = sequential_passes(mod)
# Defunctionalize the partitioned functions to allow lowering
for gvar, func in mod.functions.items():
mod.update_func(gvar, relay.transform.Defunctionalization(func, mod))
post_partition_passes_1 = tvm.transform.Sequential(
[p[1] for p in self._relay_passes if p[0] == PassPhase.POST_PARTITIONING_1]
)
mod = post_partition_passes_1(mod)
return mod | [
2312
]
|
def METHOD_NAME(self):
command = super().METHOD_NAME()
# Default to sfdx defaultdevhubusername
if "username" not in self.options:
self._set_default_username()
command += " -u {}".format(self.options.get("username"))
command += " -d {}".format(self.tempdir)
return command | [
19,
462
]
|
def METHOD_NAME(self):
self._storage.tpc_abort(TransactionMetaData()) | [
9,
2869,
130,
13696
]
|
def METHOD_NAME(self, data):
"""Set the contents of this node.
:param data: the list to set
"""
if not isinstance(data, list):
raise TypeError('Must supply list type')
self.base.attributes.set(self._LIST_KEY, data.copy()) | [
0,
245
]
|
def METHOD_NAME(self):
encryption_config1 = self._make_one(self.KMS_KEY_NAME)
encryption_config2 = self._make_one(self.KMS_KEY_NAME)
set_one = {encryption_config1, encryption_config2}
set_two = {encryption_config1, encryption_config2}
self.assertEqual(set_one, set_two) | [
9,
1161,
0,
1392
]
|
def METHOD_NAME(tensor):
return torch.sum(tensor != 0) | [
75
]
|
def METHOD_NAME(self):
vals = ["foo", -1, 256]
for val in vals:
with self.assertRaises(RuntimeError):
self.exec_turo_cmd('turo_simple_array_u8 '
'{}'.format(val)) | [
9,
12827,
53,
877,
10745,
180
]
|
def METHOD_NAME(reader_tm5: ReadGridded):
assert reader_tm5.data_dir == path_tm5
assert reader_tm5._vars_2d == ["abs550aer", "od550aer"]
assert reader_tm5._vars_3d == [] | [
9,
203,
6367,
365,
1190
]
|
async def METHOD_NAME(version_server: VersionCheckManager):
# This test ensures that there is no Application crash in the case of failed new version request.
# see: https://github.com/Tribler/tribler/issues/5816
# first check that the `_check_urls` returns expected value if there is no errors
with patch.object(VersionCheckManager, '_raw_request_new_version', AsyncMock(return_value={'name': 'mock'})):
actual = await version_server._check_urls()
assert actual == {'name': 'mock'}
# second check that the `_check_urls` returns None if there is an error
with patch.object(VersionCheckManager, '_raw_request_new_version', AsyncMock(side_effect=SSLError)):
actual = await version_server._check_urls()
assert not actual | [
9,
250,
2248,
168
]
|
def METHOD_NAME(f: F) -> F:
def callback(ctx, param, value):
if not value or ctx.resilient_parsing:
# turn off warnings altogether
warnings.simplefilter("ignore")
return
warnings.simplefilter("default")
state = ctx.ensure_object(CommandState)
state.debug = True
_setup_logging(level="DEBUG")
return click.option(
"--debug",
is_flag=True,
hidden=True,
expose_value=False,
callback=callback,
is_eager=True,
)(f) | [
290,
1335
]
|
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
return cls._args_schema | [
56,
134,
135
]
|
def METHOD_NAME():
char = minimal(characters(min_codepoint=48, max_codepoint=48), lambda _: True)
assert char == "0" | [
9,
416,
206
]
|
def METHOD_NAME():
"""Command for listing dataset files."""
return Command().command(list_dataset_files).with_database().require_migration() | [
245,
1537,
462
]
|
def METHOD_NAME(index: int, wallet_id: int) -> DerivationRecord:
return DerivationRecord(
uint32(index),
bytes32(token_bytes(32)),
AugSchemeMPL.key_gen(token_bytes(32)).get_g1(),
WalletType.STANDARD_WALLET,
uint32(wallet_id),
False,
) | [
19,
784,
148
]
|
def METHOD_NAME():
"""Tests that we can put and get a bytes object on Datagram."""
dg = core.Datagram(b'abc\x00')
dg.append_data(b'\xff123')
assert bytes(dg) == b'abc\x00\xff123'
dgi = core.DatagramIterator(dg)
assert dgi.get_remaining_bytes() == b'abc\x00\xff123' | [
9,
6975,
321
]
|
def METHOD_NAME(self, geoip_db):
result = geoip_db.lookup("2a02:ffc0::")
assert result["region_code"] == "GI"
assert result["region_name"] == "Gibraltar"
assert result["radius"] == geoip_db.radius("GI", Location(100))[0] | [
9,
1899
]
|
def METHOD_NAME(entry: Collection, customize: Dict[str, Customize]):
c = customize.get(entry.type)
if c is None:
return True
else:
return c.show | [
527,
559
]
|
def METHOD_NAME(self, indices):
self.dataset.METHOD_NAME(indices) | [
518
]
|
def METHOD_NAME(self):
with captured_stdout():
# magictoken.ex_typed_dict_from_cpython.begin
import numpy as np
from numba import njit
from numba.core import types
from numba.typed import Dict
# The Dict.empty() constructs a typed dictionary.
# The key and value types must be explicitly declared.
d = Dict.empty(
key_type=types.unicode_type,
value_type=types.float64[:],
)
# The typed-dict can be used from the interpreter.
d['posx'] = np.asarray([1, 0.5, 2], dtype='f8')
d['posy'] = np.asarray([1.5, 3.5, 2], dtype='f8')
d['velx'] = np.asarray([0.5, 0, 0.7], dtype='f8')
d['vely'] = np.asarray([0.2, -0.2, 0.1], dtype='f8')
# Here's a function that expects a typed-dict as the argument
@njit
def move(d):
# inplace operations on the arrays
d['posx'] += d['velx']
d['posy'] += d['vely']
print('posx: ', d['posx']) # Out: posx: [1. 0.5 2. ]
print('posy: ', d['posy']) # Out: posy: [1.5 3.5 2. ]
# Call move(d) to inplace update the arrays in the typed-dict.
move(d)
print('posx: ', d['posx']) # Out: posx: [1.5 0.5 2.7]
print('posy: ', d['posy']) # Out: posy: [1.7 3.3 2.1]
# magictoken.ex_typed_dict_from_cpython.end
# Test
np.testing.assert_array_equal(d['posx'], [1.5, 0.5, 2.7])
np.testing.assert_array_equal(d['posy'], [1.7, 3.3, 2.1]) | [
9,
2258,
3499,
553,
280,
13563
]
|
def METHOD_NAME(testdir: Testdir) -> None:
"""For backwards compat #8192"""
p1 = testdir.makefile("", "")
assert "test_testdir_makefile" in str(p1) | [
9,
15850,
5484,
1661,
35,
144,
1670
]
|
def METHOD_NAME(self) -> str:
"""
The connection status with the Security Partner Provider.
"""
return pulumi.get(self, "connection_status") | [
550,
452
]
|
def METHOD_NAME(self):
l = "bar"
self.pg.createlayer(l)
self.pg.layers[l][0][0] = 0
self.assertNotEqual(101, self.pg.layers[l][0][0])
self.pg.setcellvalue(l, [0, 0], 101)
self.assertEqual(101, self.pg.getcellvalue(l, [0, 0])) | [
9,
-1
]
|
def METHOD_NAME(capacity, disable=True):
"""Context manager disabling an OpenGL capacity.
This is not checking the current state of the capacity.
:param capacity: The OpenGL capacity enum to disable/enable
:param bool disable:
True (default) to disable during context, False to enable
"""
return enabled(capacity, not disable) | [
1295
]
|
def METHOD_NAME(fast_microvm, record_property, metrics):
"""Check boot time of microVM with a network device."""
vm = fast_microvm
vm.jailer.extra_args.update({"boot-timer": None})
_configure_and_run_vm(vm, network=True)
boottime_us = _get_microvm_boottime(vm)
print(f"Boot time with network configured is: {boottime_us} us")
record_property(
"boottime_with_network", f"{boottime_us} us < {MAX_BOOT_TIME_US} us"
)
metrics.set_dimensions(DIMENSIONS)
metrics.put_metric("boot_time_with_net", boottime_us, unit="Microseconds")
assert (
boottime_us < MAX_BOOT_TIME_US
), f"boot time {boottime_us} cannot be greater than: {MAX_BOOT_TIME_US} us" | [
9,
18197,
41,
1228
]
|
def METHOD_NAME(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0 | [
1632,
3014,
2735,
307
]
|
async def METHOD_NAME(systemState):
await wrapped(systemState)
async def resolve():
sdk = ScryptedStatic()
sdk.api = api
sdk.remote = remote
sdk.systemManager = SystemManager(api, remote.systemState)
sdk.deviceManager = DeviceManager(
remote.nativeIds, sdk.systemManager
)
sdk.mediaManager = MediaManager(await api.getMediaManager())
ret.set_result(sdk)
asyncio.run_coroutine_threadsafe(resolve(), transport.loop) | [
2437,
0,
112,
551
]
|
def METHOD_NAME(self): | [
9,
8938,
549,
200
]
|
def METHOD_NAME(self, kp: int = None, ki: int = None, kd: int = None, integral_range: int = None, integral_rate: int = None, feed_forward: int = None) -> Tuple[int, int, int, int, int, int]:
"""
Gets or sets the PID values for position and speed control.
If no arguments are given, this will return the current values.
Args:
kp (int): Proportional position (or integral speed) control constant.
ki (int): Integral position control constant.
kd (int): Derivative position (or proportional speed) control constant.
integral_range (int): Region around the target angle (degrees) or distance (millimeters), in which integral control errors are accumulated.
integral_rate (int): Maximum rate at which the error integral is allowed to grow. Rotational (degrees/second) or Linear (millimeters/second).
feed_forward (int): This adds a feed forward signal to the PID feedback signal, in the direction of the speed reference. This value is expressed as a percentage (0 - 100) of the absolute maximum duty cycle.
Returns:
kp, ki, kd, integral range, integral rate, and feed forward (if no arguments are provided), None otherwise.
"""
if kp is None and ki is None and kd is None and integral_range is None and integral_rate is None and feed_forward is None:
return (0, 0, 0, 0, 0, 0)
else:
return None | [
2243
]
|
def METHOD_NAME(registry: CollectorRegistry = REGISTRY, verbose: bool = False):
"""
Collects timeseries samples from a prometheus metric collector registry,
adds a common timestamp, and encodes them to protobuf
Arguments:
registry: a prometheus CollectorRegistry instance
verbose: whether to optimize for bandwidth and ignore metric name/help
Returns:
a prometheus MetricFamily protobuf stream
"""
timestamp_ms = int(time.time() * 1000)
for metric_family in registry.collect():
if metric_family.type in ('counter', 'gauge'):
family_proto = encode_counter_gauge(metric_family, timestamp_ms)
elif metric_family.type == 'summary':
family_proto = encode_summary(metric_family, timestamp_ms)
elif metric_family.type == 'histogram':
family_proto = encode_histogram(metric_family, timestamp_ms)
family_proto.name = metric_family.name
if verbose:
family_proto.help = metric_family.documentation
yield family_proto | [
19,
1097
]
|
def METHOD_NAME(self):
super().METHOD_NAME()
self.network = 'testnet'
self.manager = self.create_peer(self.network, unlock_wallet=True)
self.public_keys = [
bytes.fromhex('0250bf5890c9c6e9b4ab7f70375d31b827d45d0b7b4e3ba1918bcbe71b412c11d7'),
bytes.fromhex('02d83dd1e9e0ac7976704eedab43fe0b79309166a47d70ec3ce8bbb08b8414db46'),
bytes.fromhex('02358c539fa7474bf12f774749d0e1b5a9bc6e50920464818ebdb0043b143ae2ba')
]
self.private_keys = [
'3081de304906092a864886f70d01050d303c301b06092a864886f70d01050c300e04089abeae5e8a8f75d302020800301d060960'
'864801650304012a0410abbde27221fd302280c13fca7887c85e048190c41403f39b1e9bbc5b6b7c3be4729c054fae9506dc0f83'
'61adcff0ea393f0bb3ca9f992fc2eea83d532691bc9a570ed7fb9e939e6d1787881af40b19fb467f06595229e29b5a6268d831f0'
'287530c7935d154deac61dd4ced988166f9c98054912935b607e2fb332e11c95b30ea4686eb0bda7dd57ed1eeb25b07cea9669dd'
'e5210528a00653159626a5baa61cdee7f4',
'3081de304906092a864886f70d01050d303c301b06092a864886f70d01050c300e040817ca6c6c47ade0de02020800301d060960'
'864801650304012a041003746599b1d7dde5b875e4d8e2c4c157048190a25ccabb17e603260f8a1407bdca24904b6ae0aa9ae225'
'd87552e5a9aa62d98b35b2c6c78f33cb051f3a3932387b4cea6f49e94f14ee856d0b630d77c1299ad7207b0be727d338cf92a3ff'
'fe232aff59764240aff84e079a5f6fb3355048ac15703290a005a9a033fdcb7fcf582a5ddf6fd7b7c1193bd7912cd275a88a8a68'
'23b6c3ed291b4a3f4724875a3ae058054c',
'3081de304906092a864886f70d01050d303c301b06092a864886f70d01050c300e0408089f48fbf59fa92902020800301d060960'
'864801650304012a041072f553e860b77654fd5fb80e5891e7c90481900fde272b88f9a70e7220b2d5adeda1ed29667527caedc2'
'385be7f9e0d63defdde20557e90726e102f879eaf2233cceca8d4af239d5b2a159467255446f001c99b69e570bb176b95248fc21'
'cb752d463b494c2195411639989086336a530d1f4eae91493faf89368f439991baa947ebeca00be7f5099ed69606dc78a4cc384d'
'41542350a9054c5fa1295305dfc37e5989'
]
self.redeem_script = generate_multisig_redeem_script(2, self.public_keys)
self.multisig_address_b58 = generate_multisig_address(self.redeem_script)
self.multisig_address = decode_address(self.multisig_address_b58)
self.address = decode_address(self.manager.wallet.get_unused_address())
self.outside_address = decode_address(self.get_address(0)) | [
0,
1
]
|
def METHOD_NAME(self):
model_id = 'damo/speech_dfsmn_aec_psm_16k'
input = {
'nearend_mic': os.path.join(os.getcwd(), NEAREND_MIC_FILE),
'farend_speech': os.path.join(os.getcwd(), FAREND_SPEECH_FILE)
}
aec = pipeline(Tasks.acoustic_echo_cancellation, model=model_id)
output_path = os.path.abspath('output.wav')
aec(input, output_path=output_path)
print(f'Processed audio saved to {output_path}') | [
9,
-1
]
|
def METHOD_NAME(self, message):
while True:
message_id = '%s-%s' % (
message.get('target', '-'),
uuid.uuid4().hex,
)
if message_id not in self.id_cache:
self.id_cache[message_id] = time.time()
break
message['id'] = message_id
return self.transport.send(pickle.dumps(message)) | [
2648
]
|
def METHOD_NAME(self, key):
return MockedBigtableKVStorage.Row(self, key) | [
203,
843
]
|
def METHOD_NAME(obj, store=None):
# coerces pandas nullable dtypes; does nothing if obj is not pandas
obj = _coerce_df_dtypes(obj)
return obj | [
197,
2989
]
|
def METHOD_NAME(self, task_id, description, task_type):
"""Assert that the dataset with the given data is in the DB
The query is done by task_id and description, and to avoid caching,
we query from a brand new session.
"""
with SessionGen() as session:
db_datasets = session.query(Dataset)\
.filter(Dataset.task_id == task_id)\
.filter(Dataset.description == description).all()
self.assertEqual(len(db_datasets), 1)
d = db_datasets[0]
self.assertEqual(d.description, description)
self.assertEqual(d.task_type, task_type) | [
638,
126,
623,
1267
]
|
def METHOD_NAME(img, fraction=2, bilinear=False):
if bilinear:
img = BilinImage(img.width, img.height, data=img.data)
else:
img = NNImage(img.width, img.height, data=img.data)
out = Image(img.width, img.height, data=img.data[:])
maxr = img.height / (fraction + 1)
for y in range(int(img.height / 2 - maxr), int(img.height / 2 + maxr)):
for x in range(int(img.width / 2 - maxr), int(img.width / 2 + maxr)):
dx, dy = x - img.width / 2, y - img.height / 2
a = atan2(dy, dx)
r = sqrt(dx ** 2 + dy ** 2)
if r < maxr:
nr = r * r / maxr
nx, ny = nr * cos(a), nr * sin(a)
out[x,y] = min(int(img[nx + img.width / 2, ny + img.height / 2]), 255)
else:
out[x,y] = img[x,y]
return out | [
14128
]
|
def METHOD_NAME(self, query: ast.Select) -> pd.DataFrame:
"""Pulls data from the Confluence "get_all_spaces" API endpoint
Parameters
----------
query : ast.Select
Given SQL SELECT query
Returns
-------
pd.DataFrame
confluence "get_all_spaces" matching the query
Raises
------
ValueError
If the query contains an unsupported condition
"""
conditions = extract_comparison_conditions(query.where)
if query.limit:
total_results = query.limit.value
else:
total_results = 50
spaces_kwargs = {}
order_by_conditions = {}
if query.order_by and len(query.order_by) > 0:
order_by_conditions["columns"] = []
order_by_conditions["ascending"] = []
for an_order in query.order_by:
if an_order.field.parts[0] != "":
continue
if an_order.field.parts[1] in self.get_columns():
order_by_conditions["columns"].append(an_order.field.parts[1])
if an_order.direction == "ASC":
order_by_conditions["ascending"].append(True)
else:
order_by_conditions["ascending"].append(False)
else:
raise ValueError(
f"Order by unknown column {an_order.field.parts[1]}"
)
for a_where in conditions:
if a_where[1] == "type":
if a_where[0] != "=":
raise ValueError("Unsupported where operation for type")
if a_where[2] not in ["personal", "global"]:
raise ValueError(
f"Unsupported where argument for state {a_where[2]}"
)
spaces_kwargs["type"] = a_where[2]
else:
raise ValueError(f"Unsupported where argument {a_where[1]}")
confluence_spaces_records = self.handler.connect().get_all_spaces(start=0,limit=total_results)
confluence_spaces_df = pd.json_normalize(confluence_spaces_records["results"])
confluence_spaces_df = confluence_spaces_df[self.get_columns()]
if "type" in spaces_kwargs:
confluence_spaces_df = confluence_spaces_df[confluence_spaces_df.type == spaces_kwargs["type"]]
selected_columns = []
for target in query.targets:
if isinstance(target, ast.Star):
selected_columns = self.get_columns()
break
elif isinstance(target, ast.Identifier):
selected_columns.append(target.parts[-1])
else:
raise ValueError(f"Unknown query target {type(target)}")
if len(confluence_spaces_df) == 0:
confluence_spaces_df = pd.DataFrame([], columns=selected_columns)
else:
confluence_spaces_df.columns = self.get_columns()
for col in set(confluence_spaces_df.columns).difference(set(selected_columns)):
confluence_spaces_df = confluence_spaces_df.drop(col, axis=1)
if len(order_by_conditions.get("columns", [])) > 0:
confluence_spaces_df = confluence_spaces_df.sort_values(
by=order_by_conditions["columns"],
ascending=order_by_conditions["ascending"],
)
return confluence_spaces_df | [
1472
]
|
def METHOD_NAME(dataset, instance_data):
tracks = {}
label_cat = dataset.categories()[dm.AnnotationType.label]
for item in dataset:
# NOTE: MOT frames start from 1
# job has an offset, for task offset is 0
frame_number = int(item.id) - 1 + instance_data.start
frame_number = instance_data.abs_frame_id(frame_number)
for ann in item.annotations:
if ann.type != dm.AnnotationType.bbox:
continue
occluded = ann.attributes.pop('occluded', False) is True
track_id = ann.attributes.pop('track_id', None)
attributes = [
instance_data.Attribute(name=n, value=str(v))
for n, v in ann.attributes.items()
]
if track_id is None:
# Extension. Import regular boxes:
instance_data.add_shape(instance_data.LabeledShape(
type='rectangle',
label=label_cat.items[ann.label].name,
points=ann.points,
occluded=occluded,
z_order=ann.z_order,
group=0,
frame=frame_number,
attributes=attributes,
source='manual',
))
continue
shape = instance_data.TrackedShape(
type='rectangle',
points=ann.points,
occluded=occluded,
outside=False,
keyframe=True,
z_order=ann.z_order,
frame=frame_number,
attributes=attributes,
source='manual',
)
# build trajectories as lists of shapes in track dict
if track_id not in tracks:
tracks[track_id] = instance_data.Track(
label_cat.items[ann.label].name, 0, 'manual', [])
tracks[track_id].shapes.append(shape)
for track in tracks.values():
# MOT annotations do not require frames to be ordered
track.shapes.sort(key=lambda t: t.frame)
# insert outside=True in skips between the frames track is visible
prev_shape_idx = 0
prev_shape = track.shapes[0]
for shape in track.shapes[1:]:
has_skip = instance_data.frame_step < shape.frame - prev_shape.frame
if has_skip and not prev_shape.outside:
prev_shape = prev_shape._replace(outside=True,
frame=prev_shape.frame + instance_data.frame_step)
prev_shape_idx += 1
track.shapes.insert(prev_shape_idx, prev_shape)
prev_shape = shape
prev_shape_idx += 1
# Append a shape with outside=True to finish the track
last_shape = track.shapes[-1]
if last_shape.frame + instance_data.frame_step <= \
int(instance_data.meta[instance_data.META_FIELD]['stop_frame']):
track.shapes.append(last_shape._replace(outside=True,
frame=last_shape.frame + instance_data.frame_step)
)
instance_data.add_track(track) | [
512,
24,
758
]
|
def METHOD_NAME(dump_path, result_dict):
cmd = "lsof -w %s/* |awk '/libvirt_i/{print $2}'" % dump_path
start_time = time.time()
while (time.time() - start_time) < 30:
ret = process.run(cmd, shell=True, ignore_status=True)
status, iohelper_pid = ret.exit_status, ret.stdout_text.strip()
if status:
time.sleep(0.1)
continue
if not len(iohelper_pid):
continue
else:
logging.info('pid: %s', iohelper_pid)
result_dict['pid'] = iohelper_pid
break
# Get file open flags containing bypass cache information.
with open('/proc/%s/fdinfo/1' % iohelper_pid, 'r') as fdinfo:
flags = 0
for line in fdinfo.readlines():
if line.startswith('flags:'):
flags = int(line.split()[1], 8)
logging.debug('file open flag is: %o', flags)
result_dict['flags'] = flags
with open('/proc/%s/cmdline' % iohelper_pid) as cmdinfo:
cmdline = cmdinfo.readline()
logging.debug(cmdline.split()) | [
19,
1106
]
|
def METHOD_NAME(self) -> bool:
"""
Determines whether the setting is enabled or disabled.
"""
return pulumi.get(self, "is_enabled") | [
137,
1111
]
|
def METHOD_NAME(metric_name):
"""Given a metric name, return the corresponding metric function."""
return _EVAL_METRICS[metric_name] | [
19,
1341
]
|
def METHOD_NAME(test_path):
TEST_STR = b"Hello World"
with open_new_securely_permissioned_file(test_path, "wb") as f:
f.write(TEST_STR)
with open(test_path, "rb") as f:
assert f.read() == TEST_STR | [
9,
1452,
80,
12365,
12366,
171,
77
]
|
def METHOD_NAME(self, threshold=30):
seg_num = math.ceil((self.end - self.start) / threshold)
if seg_num == 1:
return [self.segs]
avg = (self.end - self.start) / seg_num
return_seg = []
start_time = self.start
cache_seg = []
for seg in self.segs:
cache_time = seg[1] - start_time
if cache_time > avg:
return_seg.append(cache_seg)
start_time = seg[0]
cache_seg = [seg]
else:
cache_seg.append(seg)
return_seg.append(cache_seg)
return return_seg | [
265
]
|
def METHOD_NAME(
organization_name: str,
user_id: int,
slug: str,
) -> OrganizationAndMemberCreationResult:
org = create_organization_with_outbox_message(
create_options={"name": organization_name, "slug": slug}
)
team = org.team_set.create(name=org.name)
om = OrganizationMember.objects.create(
user_id=user_id, organization=org, role=roles.get_top_dog().id
)
OrganizationMemberTeam.objects.create(team=team, organizationmember=om, is_active=True)
return OrganizationAndMemberCreationResult(organization=org, org_member=om, team=team) | [
129,
1044,
61,
1823,
43,
8699
]
|
def METHOD_NAME(api):
responses.add(responses.GET,
f'{RE_BASE}/users/1',
json={
'id': 1,
'surname': 'surname',
'name': 'name',
'email': '[email protected]',
'lockedOut': True,
'department': 'AD',
'biography': 'some biography',
'active': True,
'picture': [1, 2],
'roles': [1, 2],
'identifier': 'some identifier',
'provider': 'tenable',
'eulaVersion': 1
}
)
resp = api.users.details('1')
assert isinstance(resp, dict)
assert resp['id'] == 1
assert resp['name'] == 'name'
assert resp['surname'] == 'surname' | [
9,
3467,
2051
]
|
def METHOD_NAME():
spec_at, cor_z = perform_attenuation()
ref = np.load(REFERENCE_RAYS_FILE)
assert_allclose(ref["spec_at"], spec_at["data"], rtol=1e-2, atol=1e-3)
assert_allclose(ref["cor_z"], cor_z["data"].data, rtol=1e-2, atol=1e-3) | [
9,
9299
]
|
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2017-10-01",
required=True,
),
}
return parameters | [
539,
386
]
|
def METHOD_NAME():
return (_sample_size,) | [
734,
2904
]
|
def METHOD_NAME(self):
return self.rc.METHOD_NAME | [
4333
]
|
def METHOD_NAME(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_fall_out(preds, target, top_k=self.top_k) | [
1341
]
|
def METHOD_NAME(self, ev, axis=None, fromSignal=False):
"""
Handles user input from a drag of the mouse. Propagates to any stacked views.
Parameters
----------
ev: QEvent
The event that was generated
axis: int
Zero if the event happened on the x axis, one for any y axis, and None for no associated axis
fromSignal: bool
True if this event was generated from a signal rather than a user event. Used to ensure we only propagate
the event once.
"""
if axis != ViewBox.YAxis and not fromSignal:
# This event happened within the view box area itself or the x axis so propagate to any stacked view boxes
self.sigMouseDragged.emit(self, ev, axis)
if ev.isFinish() and self.state['mouseMode'] == ViewBox.RectMode and axis is None:
self.sigMouseDraggedDone.emit() # Indicates the end of a mouse drag event
super(MultiAxisViewBox, self).METHOD_NAME(ev, axis) | [
2571,
2572,
417
]
|
def METHOD_NAME(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x) | [
76
]
|
def METHOD_NAME(self): | [
9,
129,
13769,
16809,
-1,
59,
671
]
|
def METHOD_NAME(self):
logger.info("trying to connect %s, %s", self.url.url, self.url.headers)
try:
self.ws = yield websocket_connect(self.url, ping_interval=constants.WEBSOCKET_PING_INTERVAL)
except Exception as e:
logger.exception("connection error, %s" % e)
self.msg_handler.close()
else:
self.post_connected()
self.run() | [
707
]
|
def METHOD_NAME(self): | [
9,
5366,
2277
]
|
def METHOD_NAME(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1) | [
9,
2726,
2550,
2969
]
|
def METHOD_NAME(self, date: list[str]) -> datetime:
"""Format the times provided by the api to datetime forms."""
try:
return datetime.strptime(date, "%Y-%m-%dT%H:%M:%S%z")
except ValueError:
# there is a possibility of an event not having a time, just a day
# to catch this, we try again without time information
return datetime.strptime(date, "%Y-%m-%d").replace(tzinfo=UTC) | [
214,
104,
24,
884
]
|
def METHOD_NAME(self, context, layout):
layout.prop(self, "formula1", text="")
layout.prop(self, "formula2", text="")
layout.prop(self, "formula3", text="")
layout.label(text="Output:")
layout.prop(self, "output_mode", expand=True) | [
1100,
1409
]
|
def METHOD_NAME(self):
class X:
def x(): pass
e = self.assertRaises(TypeError,
lambda: self.local.call(X().x))
self.assertEqual(e.args[0], self.klass.method_msg) | [
9,
89,
103,
1950
]
|
def METHOD_NAME():
"""Test anomaly detection with 1D input array with missing values."""
X = np.array(
[
np.nan,
-5.72257076,
-4.91555882,
-8.3456977,
-5.57087531,
0.0,
6.50605589,
5.42526004,
5.45336814,
5.435548,
5.10996217,
]
)
y_expected = np.array([0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0])
scaler = MinMaxScaler()
X = scaler.fit_transform(X.reshape(-1, 1))
model = STRAY(k=3)
fitted_model = model.fit(X)
y_actual = fitted_model.y_
assert np.allclose(y_actual, y_expected) | [
9,
1170,
227,
863,
41,
5182
]
|
def METHOD_NAME(self, X):
result = self._wrapped_model.METHOD_NAME(X)
try:
if hasattr(self, "column_names") and len(self.column_names) == len(
self._wrapped_model.cols_to_keep_final_
):
self.column_names = [
self.column_names[i]
for i in self._wrapped_model.cols_to_keep_final_
]
if hasattr(self, "column_dtypes") and len(self.column_dtypes) == len(
self._wrapped_model.cols_to_keep_final_
):
self.column_dtypes = [
self.column_dtypes[i]
for i in self._wrapped_model.cols_to_keep_final_
]
except Exception: # nosec
pass
return result | [
1053
]
|
def METHOD_NAME():
bbx_rect = Bbox.from_slices(np.s_[1:10,1:10,1:10])
bbx_plane = Bbox.from_slices(np.s_[1:10,10:1,1:10])
assert bbx_rect == Bbox((1,1,1), (10,10,10))
assert bbx_plane == Bbox((1,10,1), (10, 10, 10))
try:
bbx_plane = Bbox.from_slices(np.s_[1:10,10:1:-1,1:10])
assert False
except ValueError:
pass
bbx_plane = Bbox.from_slices(np.s_[1:10,10:1:1,1:10])
assert bbx_plane == Bbox((1,10,1), (10, 10, 10)) | [
9,
2739,
9830
]
|
def METHOD_NAME(self):
"""Test function fdr.
Compare to the p.adjust R function.
"""
# FDR BH
# ------
reject, pval_corr = fdr(pvals)
assert_array_equal(reject, [False, False, True, False, False])
assert_array_almost_equal(pval_corr, [0.52, 0.175, 0.0005, 0.075, 0.175])
# With NaN values
_, pval_corr = fdr(pvals2_NA)
assert_array_almost_equal(pval_corr, [0.52, np.nan, 0.28, 0.40, 0.28])
# With 2D arrays
_, pval_corr = fdr(pvals_2d)
pval_corr = np.round(pval_corr.ravel(), 3)
assert_array_almost_equal(
pval_corr, [0.52, 0.21, 0.001, 0.135, 0.21, 0.52, np.nan, 0.21, 0.386, 0.21]
)
# FDR BY
# ------
reject, pval_corr = fdr(pvals, method="fdr_by")
assert_array_equal(reject, [False, False, True, False, False])
assert_array_almost_equal(
pval_corr, [1.0, 0.399583333, 0.001141667, 0.171250000, 0.399583333]
)
# With NaN values
_, pval_corr = fdr(pvals2_NA, method="fdr_by")
assert_array_almost_equal(pval_corr, [1.0, np.nan, 0.5833333, 0.8333333, 0.5833333])
# With 2D arrays
_, pval_corr = fdr(pvals_2d, method="fdr_by")
pval_corr = np.round(pval_corr.ravel(), 3)
assert_array_almost_equal(
pval_corr, [1.0, 0.594, 0.003, 0.382, 0.594, 1.0, np.nan, 0.594, 1.0, 0.594]
) | [
9,
5480
]
|
def METHOD_NAME(self):
"""The `DType` of values in this ragged tensor."""
return self.values.METHOD_NAME | [
1249
]
|
def METHOD_NAME(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist) | [
92,
245
]
|
def METHOD_NAME(self):
with pytest.raises(ValueError):
EnumField(choices=[]) | [
9,
35,
998
]
|
def METHOD_NAME(self, mimetype: str) -> Type[Response]:
"""Return the most appropriate Response class for the given mimetype"""
if mimetype is None:
return Response
if mimetype in self.classes:
return self.classes[mimetype]
basetype = f"{mimetype.split('/')[0]}/*"
return self.classes.get(basetype, Response) | [
280,
5030
]
|
def METHOD_NAME(mapping):
return Table(mapping.table_name, metadata, extend_existing=True, *[c.sql_column for c in mapping.columns]) | [
19,
410
]
|
def METHOD_NAME(self):
return os.METHOD_NAME(self.abspath(), SIG_VAR) | [
-1
]
|
def METHOD_NAME(self, my_task, force=False):
# Look at the tree to find all places where this task is used.
tasks = my_task.workflow.get_tasks_from_spec_name(self.name)
# Look up which tasks have parents completed.
completed_inputs = set([ task.parent.task_spec for task in tasks if task.parent.state == TaskState.COMPLETED ])
# Find waiting tasks
# Exclude tasks whose specs have already been completed
# A spec only has to complete once, even if on multiple paths
waiting_tasks = []
for task in tasks:
if task.parent._has_state(TaskState.DEFINITE_MASK) and task.parent.task_spec not in completed_inputs:
waiting_tasks.append(task.parent)
if force:
# If force is true, complete the task
complete = True
elif len(waiting_tasks) > 0:
# If we have waiting tasks, we're obviously not done
complete = False
else:
# Handle the case where there are paths from active tasks that must go through waiting inputs
waiting_inputs = [i for i in self.inputs if i not in completed_inputs]
sources = [t.task_spec for t in my_task.workflow.get_tasks(TaskState.READY | TaskState.WAITING)]
# This will go back through a task spec's ancestors and return the source, if applicable
def check(spec):
for parent in spec.inputs:
return parent if parent in sources else check(parent)
# If we can get to a completed input from this task, we don't have to wait for it
for spec in completed_inputs:
source = check(spec)
if source is not None:
sources.remove(source)
# Now check the rest of the waiting inputs and see if they can be reached from any of the remaining tasks
unfinished_paths = []
for spec in waiting_inputs:
if check(spec) is not None:
unfinished_paths.append(spec)
break
complete = len(unfinished_paths) == 0
return complete, waiting_tasks | [
250,
853,
1410
]
|
def METHOD_NAME(
apiclient, category_factory, phase_factory
):
phase = phase_factory(phase_content=phases.IssuePhase())
module = phase.module
category = category_factory(module=module)
initiator = module.project.organisation.initiators.first()
url = reverse("questions-list", kwargs={"module_pk": module.pk})
data = {"text": "I have a question", "category": category.pk}
assert apiclient.login(username=initiator.email, password="password")
with freeze_post_phase(phase):
response = apiclient.post(url, data, format="json")
assert response.status_code == 403 | [
9,
6924,
2286,
129,
13755,
4217,
72
]
|
def METHOD_NAME():
parser = make_parser()
args = options.parse_args_and_arch(parser)
dataset = load_dataset_task(args)
random_dataset = randomly_sample_subset(dataset)
short_dataset = get_short_data_subset(dataset)
long_dataset = get_long_data_subset(dataset)
if args.dataset_save_token:
args.dataset_save_token = f"_{args.dataset_save_token}_"
if args.dataset_save_dir:
save_dataset_npy(
random_dataset,
f"{args.dataset_save_dir}/random_dataset{args.dataset_save_token}w_ids.npy",
)
save_dataset_npy(
short_dataset,
f"{args.dataset_save_dir}/short_dataset{args.dataset_save_token}w_ids.npy",
)
save_dataset_npy(
long_dataset,
f"{args.dataset_save_dir}/long_dataset{args.dataset_save_token}w_ids.npy",
) | [
615,
57
]
|
def METHOD_NAME(
self, config_command: str = "config", pattern: str = "", re_flags: int = 0
) -> str:
return super().METHOD_NAME(
config_command=config_command, pattern=pattern, re_flags=re_flags
) | [
200,
854
]
|
def METHOD_NAME(self):
"""Separate arrays of X and Y coordinate values
Example:
>>> x, y = Point(0, 0).xy
>>> list(x)
[0.0]
>>> list(y)
[0.0]
"""
return self.coords.METHOD_NAME | [
695
]
|
def METHOD_NAME(self, owner=None):
"""
returns the first available module (starting from the end of the list)
:param owner: (string): name of the module that is reserving the
resource; leave None if the gui shouldn't be disabled. If no
available module is left, returns None.
To make sure the module will be freed afterwards, use the context
manager construct:
with pyrpl.mod_mag.pop('owner') as mod:
mod.do_something()
# module automatically freed at this point
"""
n = len(self.all_modules)
for index in range(n):
index = n - index - 1 # count backwards to reserve last module 1st
if not index in self._reserved_modules:
module = self.all_modules[index]
if module.owner is None:
module.owner = owner # this changes the module's visibility
return module
raise InsufficientResourceError('No more ' + self.name + ' left.') | [
760
]
|
def METHOD_NAME(core_manager):
core_manager.core_running = True
core_manager.shutting_down = True
core_manager.check_core_api_port()
assert not core_manager.process_manager.current_process.get_core_process.called | [
9,
250,
1542,
58,
237,
15905,
481
]
|
def METHOD_NAME(self, mock_get):
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
location = self.toy_course_key.make_usage_key('course', 'course')
course = self.store.get_item(location)
assert len(course.textbooks) > 0 | [
9,
6560,
-1,
5725
]
|
def METHOD_NAME(self):
"""Return the time since the start of current test to
the present.
Return `0.0` if the test is not running."""
if self._running:
return time() - self._time_current
return 0.0 | [
1024,
1888
]
|
def METHOD_NAME(self):
"""
Verify that `get_failed_enrollment_and_user_ids` method returns correct user ids.
* user id must have a paid enrollment
* user id must have a failed grade
"""
for course in self.course_overviews:
for enrollment_and_user_ids in self.command.get_failed_enrollment_and_user_ids(course):
for enrollment_id, user_id in enrollment_and_user_ids:
# user id must have a paid enrollment
assert CourseEnrollment.objects.filter(
id=enrollment_id,
course_id=course.id,
user_id=user_id,
mode__in=PAID_ENROLLMENT_MODES,
is_active=True
).exists()
# user id must have a failed grade
assert PersistentCourseGrade.objects.filter(
passed_timestamp__isnull=True,
course_id=course.id,
user_id=user_id,
).exists() | [
9,
19,
1122,
1423,
21,
308
]
|
def METHOD_NAME(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id") | [
147
]
|