text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self, instance):
current_event = self.memoized_events[instance.current_event_num]
instance.next_event_due += timedelta(minutes=current_event.minutes_to_wait) | [
0,
243,
417,
6202,
2722
] |
def METHOD_NAME(self, decorations):
"""
Add text decorations on a CodeEditor instance.
Don't add duplicated decorations, and order decorations according
to draw_order and the size of the selection.
Args:
decorations (sourcecode.api.TextDecoration) (could be a list)
Returns:
int: Amount of decorations added.
"""
current_decorations = self._decorations["misc"]
added = 0
if isinstance(decorations, list):
not_repeated = set(decorations) - set(current_decorations)
current_decorations.extend(list(not_repeated))
self._decorations["misc"] = current_decorations
added = len(not_repeated)
elif decorations not in current_decorations:
self._decorations["misc"].append(decorations)
added = 1
if added > 0:
self.update()
return added | [
238
] |
async def METHOD_NAME(self, mp_request: MPRequest) -> Any:
with logging_context(req_id=mp_request.req_id):
if mp_request.type == MPRequestType.SendTransaction:
tx_request = cast(MPTxRequest, mp_request)
return await self._mempool.schedule_mp_tx_request(tx_request)
elif mp_request.type == MPRequestType.GetPendingTxNonce:
pending_nonce_req = cast(MPPendingTxNonceRequest, mp_request)
return self._mempool.get_pending_tx_nonce(pending_nonce_req.sender)
elif mp_request.type == MPRequestType.GetMempoolTxNonce:
mempool_nonce_req = cast(MPMempoolTxNonceRequest, mp_request)
return self._mempool.get_last_tx_nonce(mempool_nonce_req.sender)
elif mp_request.type == MPRequestType.GetTxByHash:
pending_tx_by_hash_req = cast(MPPendingTxByHashRequest, mp_request)
return self._mempool.get_pending_tx_by_hash(pending_tx_by_hash_req.tx_hash)
elif mp_request.type == MPRequestType.GetTxBySenderNonce:
req = cast(MPPendingTxBySenderNonceRequest, mp_request)
return self._mempool.get_pending_tx_by_sender_nonce(req.sender, req.tx_nonce)
elif mp_request.type == MPRequestType.GetGasPrice:
return self._mempool.get_gas_price()
elif mp_request.type == MPRequestType.GetElfParamDict:
return self._mempool.get_elf_param_dict()
elif mp_request.type == MPRequestType.TxPoolContent:
return self._mempool.get_content()
LOG.error(f"Failed to process mp_request, unknown type: {mp_request.type}") | [
356,
7820,
377
] |
def METHOD_NAME(batch_size, segment="train"):
"""
Get a DataLoader instance for dataset of sudoku. Every iteration of the dataloader returns
a DGLGraph instance, the ndata of the graph contains:
'q': question, e.g. the sudoku puzzle to be solved, the position is to be filled with number from 1-9
if the value in the position is 0
'a': answer, the ground truth of the sudoku puzzle
'row': row index for each position in the grid
'col': column index for each position in the grid
:param batch_size: Batch size for the dataloader
:param segment: The segment of the dataset, must be in ['train', 'valid', 'test']
:return: A pytorch DataLoader instance
"""
data = _get_sudoku_dataset(segment)
q, a = zip(*data)
dataset = ListDataset(q, a)
if segment == "train":
data_sampler = RandomSampler(dataset)
else:
data_sampler = SequentialSampler(dataset)
basic_graph = _basic_sudoku_graph()
sudoku_indices = np.arange(0, 81)
rows = sudoku_indices // 9
cols = sudoku_indices % 9
def collate_fn(batch):
graph_list = []
for q, a in batch:
q = torch.tensor(q, dtype=torch.long)
a = torch.tensor(a, dtype=torch.long)
graph = copy(basic_graph)
graph.ndata["q"] = q # q means question
graph.ndata["a"] = a # a means answer
graph.ndata["row"] = torch.tensor(rows, dtype=torch.long)
graph.ndata["col"] = torch.tensor(cols, dtype=torch.long)
graph_list.append(graph)
batch_graph = dgl.batch(graph_list)
return batch_graph
dataloader = DataLoader(
dataset, batch_size, sampler=data_sampler, collate_fn=collate_fn
)
return dataloader | [
7713,
568
] |
def METHOD_NAME(self, func, route, methods):
"""To enable multiple dashboards to run in the same notebook we need to
prevent them from using the same method names (in addition to using
dedicated ports). We rename the function for that purpose and
manually add the URL rule instead of using the route decorator.
"""
func.__name__ = func.__name__ + str(id(self))
self._service.app.METHOD_NAME(
route,
endpoint=func.__name__,
view_func=func,
methods=methods) | [
238,
274,
446
] |
def METHOD_NAME(cause, rule, parent):
if rule.tag == "EntityRule":
collect_entities(cause, rule.attribute) | [
1444,
5399,
-1
] |
def METHOD_NAME(a: int, b: int):
return a ** b | [
667,
962,
962
] |
def METHOD_NAME(self, p):
return 0.0 | [
6619
] |
def METHOD_NAME(rest_client, auth_token):
""" SCOPE (REST): send a POST to create a new account and scope """
acntusr = account_name_generator()
data = {'type': 'USER', 'email': 'rucio.email.com'}
response = rest_client.post('/accounts/' + acntusr, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 201
scopeusr = scope_name_generator()
response = rest_client.post('/accounts/%s/scopes/%s' % (acntusr, scopeusr), headers=headers(auth(auth_token)))
assert response.status_code == 201 | [
9,
913,
1434
] |
def METHOD_NAME():
trainer.state.running_reward = 0.05 * trainer.state.ep_reward + (1 - 0.05) * trainer.state.running_reward
finish_episode(policy, optimizer, args.gamma) | [
86,
578
] |
def METHOD_NAME(self):
self.wallet = MiniWallet(self.nodes[0])
self.check_tx_relay()
self.checkpermission(
# default permissions (no specific permissions)
["-whitelist=127.0.0.1"],
# Make sure the default values in the command line documentation match the ones here
["relay", "noban", "mempool", "download"])
self.checkpermission(
# no permission (even with forcerelay)
["[email protected]", "-whitelistforcerelay=1"],
[])
self.checkpermission(
# relay permission removed (no specific permissions)
["-whitelist=127.0.0.1", "-whitelistrelay=0"],
["noban", "mempool", "download"])
self.checkpermission(
# forcerelay and relay permission added
# Legacy parameter interaction which set whitelistrelay to true
# if whitelistforcerelay is true
["-whitelist=127.0.0.1", "-whitelistforcerelay"],
["forcerelay", "relay", "noban", "mempool", "download"])
# Let's make sure permissions are merged correctly
# For this, we need to use whitebind instead of bind
# by modifying the configuration file.
ip_port = "127.0.0.1:{}".format(p2p_port(1))
self.nodes[1].replace_in_config([("bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)])
self.checkpermission(
["[email protected]"],
# Check parameter interaction forcerelay should activate relay
["noban", "bloomfilter", "forcerelay", "relay", "download"])
self.nodes[1].replace_in_config([("whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")])
self.checkpermission(
# legacy whitelistrelay should be ignored
["-whitelist=noban,[email protected]", "-whitelistrelay"],
["noban", "mempool", "download"])
self.checkpermission(
# legacy whitelistforcerelay should be ignored
["-whitelist=noban,[email protected]", "-whitelistforcerelay"],
["noban", "mempool", "download"])
self.checkpermission(
# missing mempool permission to be considered legacy whitelisted
["[email protected]"],
["noban", "download"])
self.checkpermission(
# all permission added
["[email protected]"],
["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download", "addr"])
self.stop_node(1)
self.nodes[1].assert_start_raises_init_error(["[email protected]"], "Invalid P2P permission", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["[email protected]:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["[email protected]/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["[email protected]", "-bind=127.0.0.1", "-listen=0"], "Cannot set -bind or -whitebind together with -listen=0", match=ErrorMatch.PARTIAL_REGEX) | [
22,
9
] |
def METHOD_NAME(self):
return self.create_organization() | [
1044
] |
def METHOD_NAME(self):
self.assertEqual(4.17, self.harvester_class.ratings()) | [
9,
4622
] |
def METHOD_NAME(self, stream):
if not yaml:
raise DataError('Using YAML variable files requires PyYAML module '
'to be installed. Typically you can install it '
'by running `pip install pyyaml`.')
if yaml.__version__.split('.')[0] == '3':
return yaml.load(stream)
return yaml.full_load(stream) | [
557,
406
] |
def METHOD_NAME(
n_folds: int,
train_size: float = 0.7,
val_size: float = 0.2,
test_size: float = 0.1,
seed=42,
):
assert isclose(
train_size + val_size + test_size, 1.0
), "sizes need to add up to 1"
random_state = np.random.RandomState(seed)
folds = []
for _ in range(n_folds):
train_split, test_split = train_test_split(
DATASETS_FILTERED,
train_size=train_size,
random_state=random_state.randint(low=0, high=10000),
)
rest = 1 - train_size
val_split, test_split = train_test_split(
test_split,
test_size=test_size / rest,
random_state=random_state.randint(low=0, high=10000),
)
folds.append((train_split, val_split, test_split))
assert not any(
[
set(train_split) & (set(val_split)),
set(train_split) & set(test_split),
set(val_split) & set(test_split),
]
), "Splits should not intersect!"
return folds | [
734,
4146
] |
def METHOD_NAME(self, X, y):
"""Fits the sampler to the data.
Args:
X (pd.DataFrame): Input features.
y (pd.Series): Target.
Returns:
self
Raises:
ValueError: If y is None.
"""
if y is None:
raise ValueError("y cannot be None")
X_ww = infer_feature_types(X)
y_ww = infer_feature_types(y)
self._initialize_sampler(X_ww, y_ww)
return self | [
90
] |
def METHOD_NAME(self, validated_data):
user = validated_data.get("user") or self.context["request"].user
organization = self.context["request"].auth.organization
self_or_admin = user.self_or_admin(user_to_check=self.context["request"].user, organization=organization)
if not self_or_admin:
raise Forbidden()
instance = UserNotificationPolicy.objects.METHOD_NAME(**validated_data)
return instance | [
129
] |
def METHOD_NAME(session, ctx):
# Look through Session state to see if we want to emit a DELETE for
# orphans
orphans_found = (
any(
isinstance(obj, parent_class) and
sa.orm.attributes.get_history(obj, attr.key).deleted
for obj in session.dirty
) or
any(
isinstance(obj, parent_class)
for obj in session.deleted
)
)
if orphans_found:
# Emit a DELETE for all orphans
(
session.query(target_class)
.filter(
~getattr(target_class, backref).any()
)
.delete(synchronize_session=False)
) | [
34,
9679,
4130
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-02-01-preview",
required=True,
),
}
return parameters | [
539,
386
] |
def METHOD_NAME(self, docs: DocumentArray, **kwargs):
docs.append(Document(text='added'))
return docs | [
724
] |
def METHOD_NAME(self): | [
1243,
7671
] |
def METHOD_NAME(self):
return self._width | [
2327
] |
def METHOD_NAME(self, obj): ... | [
19,
1354,
252,
527
] |
def METHOD_NAME(self):
static_attachment = StaticQueueSelectorAttachment(
queue_selector = RouterQueueSelector(
key = "test_key", label_operator = LabelOperator.EQUAL, value = "test_value"
)
)
serialized_json = _serialize_to_json(static_attachment, "StaticQueueSelectorAttachment")
deserialized_json = _deserialize_from_json("QueueSelectorAttachment", serialized_json)
self.assertEqual(_serialize_to_json(static_attachment, "StaticQueueSelectorAttachment"), _serialize_to_json(deserialized_json, "StaticQueueSelectorAttachment")) | [
9,
628,
651,
5169,
70,
137,
5913
] |
def METHOD_NAME(self):
"""Test PositionInfo with custom converters"""
converters = [
('Coords', lambda x, y: (int(x), int(y))),
('Radius', lambda x, y: numpy.sqrt(x * x + y * y)),
('Angle', lambda x, y: numpy.degrees(numpy.arctan2(y, x)))
]
positionWidget = tools.PositionInfo(plot=self.plot,
converters=converters)
self._test(positionWidget, ('Coords', 'Radius', 'Angle')) | [
9,
343,
9799
] |
def METHOD_NAME(self, collection_day, weeks, start_date):
collection_day = time.strptime(collection_day, "%A").tm_wday
days = (collection_day - datetime.now().date().weekday() + 7) % 7
next_collect = datetime.now().date() + timedelta(days=days)
days = abs(next_collect-datetime.strptime(start_date, "%Y-%m-%d").date()).days
if ((days//7)%weeks):
next_collect = next_collect + timedelta(days=7)
next_dates = []
next_dates.append(next_collect)
for i in range (1, int(4/weeks)):
next_collect = next_collect + timedelta(days=(weeks*7))
next_dates.append(next_collect)
return next_dates | [
19,
5321
] |
def METHOD_NAME(k, arr, n):
permutation = ""
for i in range(0, n):
permutation = permutation + k[arr[i] - 1]
return permutation | [
2755
] |
def METHOD_NAME(self, state):
"""
Loop lifting analysis and transformation
"""
loop_flags = state.flags.copy()
outer_flags = state.flags.copy()
# Do not recursively loop lift
outer_flags.enable_looplift = False
loop_flags.enable_looplift = False
if not state.flags.enable_pyobject_looplift:
loop_flags.enable_pyobject = False
loop_flags.enable_ssa = False
main, loops = transforms.loop_lifting(state.func_ir,
typingctx=state.typingctx,
targetctx=state.targetctx,
locals=state.locals,
flags=loop_flags)
if loops:
# Some loops were extracted
if config.DEBUG_FRONTEND or config.DEBUG:
for loop in loops:
print("Lifting loop", loop.get_source_location())
from numba.core.compiler import compile_ir
cres = compile_ir(state.typingctx, state.targetctx, main,
state.args, state.return_type,
outer_flags, state.locals,
lifted=tuple(loops), lifted_from=None,
is_lifted_loop=True)
return cres | [
1382,
-1
] |
async def METHOD_NAME(
self, get_chia_simulator: Tuple[FullNodeSimulator, Path, Dict[str, Any], str, int, Keychain]
) -> None:
simulator, root_path, config, mnemonic, fingerprint, keychain = get_chia_simulator
ph_1: bytes32 = get_puzzle_hash_from_key(keychain=keychain, fingerprint=fingerprint, key_id=1)
ph_2: bytes32 = get_puzzle_hash_from_key(keychain=keychain, fingerprint=fingerprint, key_id=2)
dummy_hash: bytes32 = std_hash(b"test")
num_blocks = 2
# connect to rpc
rpc_port = config["full_node"]["rpc_port"]
simulator_rpc_client = await SimulatorFullNodeRpcClient.create(
config["self_hostname"], uint16(rpc_port), root_path, config
)
# test auto_farm logic
assert await simulator_rpc_client.get_auto_farming()
await time_out_assert(10, simulator_rpc_client.set_auto_farming, False, False)
await simulator.autofarm_transaction(dummy_hash) # this should do nothing
await asyncio.sleep(3) # wait for block to be processed
assert len(await simulator.get_all_full_blocks()) == 0
# now check if auto_farm is working
await time_out_assert(10, simulator_rpc_client.set_auto_farming, True, True)
for i in range(num_blocks):
await simulator.autofarm_transaction(dummy_hash)
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, 2)
# check if reward was sent to correct target
await time_out_assert(10, get_num_coins_for_ph, 2, simulator_rpc_client, ph_1)
# test both block RPC's
await simulator_rpc_client.farm_block(ph_2)
new_height = await simulator_rpc_client.farm_block(ph_2, guarantee_tx_block=True)
# check if farming reward was received correctly & if block was created
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, new_height)
await time_out_assert(10, get_num_coins_for_ph, 2, simulator_rpc_client, ph_2)
# test balance rpc
ph_amount = await simulator_rpc_client.get_all_puzzle_hashes()
assert ph_amount[ph_2][0] == 2000000000000
assert ph_amount[ph_2][1] == 2
# test all coins rpc.
coin_records = await simulator_rpc_client.get_all_coins()
ph_2_total = 0
ph_1_total = 0
for cr in coin_records:
if cr.coin.puzzle_hash == ph_2:
ph_2_total += cr.coin.amount
elif cr.coin.puzzle_hash == ph_1:
ph_1_total += cr.coin.amount
assert ph_2_total == 2000000000000 and ph_1_total == 4000000000000
# block rpc tests.
# test reorg
old_blocks = await simulator_rpc_client.get_all_blocks()
assert len(old_blocks) == 5
# Sometimes in CI reorg_blocks takes a long time and the RPC times out
# We can ignore this timeout as long as the subsequent tests pass
try:
await simulator_rpc_client.reorg_blocks(2) # fork point 2 blocks, now height is 5
except asyncio.TimeoutError:
pass # ignore this error and hope the reorg is going ahead
# wait up to 5 mins
await time_out_assert(300, simulator.full_node.blockchain.get_peak_height, 5)
# now validate that the blocks don't match
assert (await simulator.get_all_full_blocks())[0:4] != old_blocks
# test block deletion
await simulator_rpc_client.revert_blocks(3) # height 5 to 2
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, 2)
await time_out_assert(10, get_num_coins_for_ph, 2, simulator_rpc_client, ph_1)
# close up
simulator_rpc_client.close()
await simulator_rpc_client.await_closed() | [
9,
447,
3656
] |
def METHOD_NAME(self, **kwargs):
# Executed in the main process
self.threadLock.acquire()
self.parent.send(kwargs)
while True:
res = self.parent.recv()
if res != '[Finish]':
yield res
else:
break
self.threadLock.release() | [
919,
3337
] |
def METHOD_NAME(messages):
page_header = """\
Paparazzi Messages {#paparazzi_messages}
==================
These are the common messages.
Also see http://wiki.paparazziuav.org/wiki/Telemetry and http://wiki.paparazziuav.org/wiki/Messages_Format
[TOC]
"""
s = textwrap.dedent(page_header)
for msg_class in messages.findall("./msg_class"):
s += print_msg_class(msg_class)
s += "\n"
return s | [
1107,
1174
] |
def METHOD_NAME(self):
self.request = self.request_factory.get('/courses/edX/101/')
self.request.user = self.user
self.process_request()
self.assertContextSetTo({}) | [
9,
532,
1122,
147
] |
async def METHOD_NAME(rest_api, tmp_path):
"""
Test whether the API returns the last 100 logs when no max_lines parameter is provided
"""
module = 'gui'
default_num_logs_returned = 100
num_logs_to_write = 200
# Log directory
log_dir = tmp_path / 'logs'
create_dummy_logs(log_dir, process=module, log_message=log_dir, num_logs=num_logs_to_write)
json_response = await do_request(rest_api, f'debug/log?process={module}&max_lines=', expected_code=200)
logs = json_response['content'].strip().split("\n")
assert len(logs) == default_num_logs_returned | [
9,
290,
3730,
235,
181,
1099
] |
def METHOD_NAME(self, number_splits: int) -> Iterator[Dict]: # pragma: no cover
"""
Prechunk method to perform chunking by the key field
"""
q = dict(self.query)
keys = self.dielectric.newer_in(self.materials, criteria=q, exhaustive=True)
N = ceil(len(keys) / number_splits)
for split in grouper(keys, N):
yield {"query": {self.materials.key: {"$in": list(split)}}} | [
15658
] |
def METHOD_NAME(self, instance):
if self.super_ratio:
return instance.ordinal % self.super_ratio
return instance.ordinal | [
19,
11712
] |
def METHOD_NAME(self, orm):
# Adding model 'Flowbit'
db.create_table('rules_flowbit', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('set', self.gf('django.db.models.fields.BooleanField')(default=False)),
('isset', self.gf('django.db.models.fields.BooleanField')(default=False)),
('enable', self.gf('django.db.models.fields.BooleanField')(default=True)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rules.Source'])),
))
db.send_create_signal('rules', ['Flowbit'])
# Adding M2M table for field flowbits on 'Rule'
m2m_table_name = db.shorten_name('rules_rule_flowbits')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('rule', models.ForeignKey(orm['rules.rule'], null=False)),
('flowbit', models.ForeignKey(orm['rules.flowbit'], null=False))
))
db.create_unique(m2m_table_name, ['rule_id', 'flowbit_id']) | [
2368
] |
def METHOD_NAME(self):
"""Returns a copy of eigenvectors array."""
return self._model.METHOD_NAME()[:, self._indices].copy() | [
19,
877
] |
def METHOD_NAME(self):
root = self.root
for _ in range(10):
root.add_widget(self.cls())
self.assertEqual(len(root.children), 10)
root.clear_widgets(root.children)
self.assertEqual(root.children, []) | [
9,
537,
1551,
2189
] |
def METHOD_NAME(
self,
limit: int | None = DEFAULT_LIMIT_READ,
include_global: bool = False,
) -> SpaceList:
"""`List spaces <https://developer.cognite.com/api#tag/Spaces/operation/listSpacesV3>`_
Args:
limit (int | None): Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
include_global (bool): Whether to include global spaces. Defaults to False.
Returns:
SpaceList: List of requested spaces
Examples:
List spaces and filter on max start time::
>>> from cognite.client import CogniteClient
>>> c = CogniteClient()
>>> space_list = c.data_modeling.spaces.list(limit=5)
Iterate over spaces::
>>> from cognite.client import CogniteClient
>>> c = CogniteClient()
>>> for space in c.data_modeling.spaces:
... space # do something with the space
Iterate over chunks of spaces to reduce memory load::
>>> from cognite.client import CogniteClient
>>> c = CogniteClient()
>>> for space_list in c.data_modeling.spaces(chunk_size=2500):
... space_list # do something with the spaces
"""
return self._list(
list_cls=SpaceList,
resource_cls=Space,
method="GET",
limit=limit,
other_params={"includeGlobal": include_global},
) | [
245
] |
def METHOD_NAME(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME():
a = 0
while not a == 1000:
c = random.uniform(1.0, 100000.0)
a += 1
init = {'validator': "float",
'data': c}
plugin = run_plugin(StringValidatorAction, init, {})
result = plugin.output
assert result.port == 'valid'
assert result.value == {} | [
9,
1819
] |
def METHOD_NAME(self, x):
return insert_image(x, backdoor_path=self.backdoor_path, random=True, size=(100, 100)) | [
1326,
717,
1327
] |
def METHOD_NAME(con, queue):
with con:
cur = con.cursor()
cmd = "CREATE TABLE {}(id INTEGER PRIMARY KEY, name TEXT UNIQUE)".format(queue)
log.debug("SQL Query: %s", cmd)
cur.execute(cmd)
return True | [
129,
410
] |
def METHOD_NAME(self, statement): | [
750,
925
] |
def METHOD_NAME(b, t, p, j):
return (
b.mixed_permeate[t].get_material_flow_terms(p, j)
== b.area * b.flux_mass_phase_comp_avg[t, p, j]
) | [
2338,
4285,
1900
] |
def METHOD_NAME(cls):
timestamp = datetime.now().isoformat()
cls._output_root = mkdtemp(
prefix="gunpowder_{}_{}_".format(cls.__name__, timestamp)
) | [
0,
1,
2
] |
def METHOD_NAME(self):
return (
self.subsequent_operation_id.partner_id
or self.source_document_id.partner_id
) | [
11881,
7618
] |
def METHOD_NAME(self):
"""Simple round-trip through app with infile and options, result passed to stdout."""
cmdline = MafftCommandline(mafft_exe)
cmdline.set_parameter("input", self.infile1)
cmdline.set_parameter("maxiterate", 100)
cmdline.set_parameter("--localpair", True)
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
stdoutdata, stderrdata = cmdline()
self.assertTrue(stdoutdata.startswith(">gi|1348912|gb|G26680|G26680"))
self.assertNotIn("$#=0", stderrdata) | [
9,
10004,
41,
1881
] |
def METHOD_NAME():
return WorkLoadStatistic(component='backend') | [
2454,
2652
] |
def METHOD_NAME(self):
pmol = mol.copy()
pmol.cart = True
nao = pmol.nao_nr()
numpy.random.seed(1)
mo = numpy.random.random((nao,4))
eri = ao2mo.kernel(pmol, mo)
self.assertAlmostEqual(lib.fp(eri), -977.99841341828437, 9)
eri = ao2mo.kernel(mol, mo, intor='int2e_cart')
self.assertAlmostEqual(lib.fp(eri), -977.99841341828437, 9) | [
9,
10316,
41,
4077,
9697
] |
def METHOD_NAME(parent, text_name=None, text_text=None):
text = QtWidgets.QLabel(parent)
if text_name is not None:
text.setObjectName(text_name)
if text_text is not None:
text.setText(text_text)
return text | [
129,
526
] |
def METHOD_NAME(self, date_str, relative_base=None):
"Parse date string passed in date filter of query to datetime object"
# clean date string to handle future date parsing by date parser
future_strings = ["later", "from now", "from today"]
prefer_dates_from = {True: "future", False: "past"}[any([True for fstr in future_strings if fstr in date_str])]
clean_date_str = re.sub("|".join(future_strings), "", date_str)
# parse date passed in query date filter
parsed_date = dtparse.METHOD_NAME(
clean_date_str,
settings={
"RELATIVE_BASE": relative_base or datetime.now(),
"PREFER_DAY_OF_MONTH": "first",
"PREFER_DATES_FROM": prefer_dates_from,
},
)
if parsed_date is None:
return None
return self.date_to_daterange(parsed_date, date_str) | [
214
] |
def METHOD_NAME(key, IV, implList=None):
"""Create a new 3DES object.
:type key: str
:param key: A 24 byte string.
:type IV: str
:param IV: An 8 byte string
:rtype: tlslite.utils.TripleDES
:returns: A 3DES object.
"""
if implList is None:
implList = ["openssl", "pycrypto", "python"]
for impl in implList:
if impl == "openssl" and cryptomath.m2cryptoLoaded:
return openssl_tripledes.new(key, 2, IV)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return pycrypto_tripledes.new(key, 2, IV)
elif impl == "python":
return python_tripledes.new(key, IV)
raise NotImplementedError() | [
129,
7064,
12838
] |
def METHOD_NAME(self, action):
CheckAuthenticator(self.request)
if not self.available():
self.status = _(
"text_not_allowed_manage_server",
default="You are not allowed to manage the Zope server.",
)
return
try:
user = '"%s"' % getSecurityManager().getUser().getUserName()
except Exception:
user = "unknown user"
logger.info("Shutdown requested by %s" % user)
if LIFETIME:
shutdown(0)
else:
raise
# TODO: returning html has no effect in button handlers
self.request.response.setHeader("X-Theme-Disabled", "True")
return """<html><head></head><body>{}</body></html>""".format(
_("plone_shutdown", default="Zope is shutting down.")
) | [
276,
158,
1006
] |
def METHOD_NAME(self) -> None:
"""This function checks for duplicate value generators."""
all_python_files = self.get_all_python_files()
all_value_generators = []
for file_name in all_python_files:
python_module = importlib.import_module(file_name)
for name, clazz in inspect.getmembers(
python_module, predicate=inspect.isclass):
all_base_classes = [base_class.__name__ for base_class in
(inspect.getmro(clazz))]
# Check that it is a subclass of 'BaseValueGenerator'.
if 'BaseValueGenerator' in all_base_classes:
all_value_generators.append(name)
expected_value_generators = ['BaseValueGenerator', 'Copier',
'RandomSelector']
self.assertEqual(
sorted(all_value_generators), sorted(expected_value_generators)) | [
9,
99,
1443,
83
] |
def METHOD_NAME(stream_slice: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Produces the date range parameters from input stream_slice
"""
date_range = stream_slice["dateRange"]
return {
"dateRange": f"(start:(year:{date_range['start.year']},month:{date_range['start.month']},day:{date_range['start.day']}),"
f"end:(year:{date_range['end.year']},month:{date_range['end.month']},day:{date_range['end.day']}))",
# Chunk of fields
"fields": stream_slice["fields"],
} | [
86,
3762,
434
] |
def METHOD_NAME(self):
with pytest.raises(ValueError):
tio.RescaleIntensity(out_min_max='wrong') | [
9,
909,
1737,
1835,
232,
44
] |
def METHOD_NAME(self):
self.acknowledge_lock.acquire()
acknowledge = self.acknowledge
self.acknowledge_lock.release()
return acknowledge | [
19,
7197
] |
def METHOD_NAME(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(self, name: str, value: Union[int, float], tags: Optional[Tags] = None) -> None:
self.__backend.METHOD_NAME(key=self.__merge_name(name), value=value, tags=self.__merge_tags(tags)) | [
2936
] |
def METHOD_NAME(filename, port1, port2, protocol_version):
with open(filename, 'w') as f:
f.write("port %d\n" % (port2))
f.write("allow_anonymous true\n")
f.write("\n")
f.write("connection bridge_sample\n")
f.write("address 127.0.0.1:%d\n" % (port1))
f.write("topic bridge/# both 2\n")
f.write("notifications false\n")
f.write("restart_timeout 5\n")
f.write("bridge_protocol_version %s\n" % (protocol_version)) | [
77,
200
] |
def METHOD_NAME(self):
for k in self._torrent_decoded:
key = k.replace(" ", "_").lower()
setattr(self, key, self._torrent_decoded[k])
self._calc_info_hash() | [
214,
3564
] |
def METHOD_NAME(src):
"""
Checks whether the given directory is a git working copy.
:param src: A directory. May or may not exist.
:return: True iff the given directory is a git working copy.
"""
return os.path.exists(src) and os.path.exists(os.path.join(src, ".git")) | [
137,
6907,
215
] |
def METHOD_NAME(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest() | [
4179,
280,
171
] |
def METHOD_NAME(self, request):
"""Init form request."""
self.request = request
self.offset = self.get_offset(request)
self.limit = self.get_limit(request) | [
176,
280,
377
] |
def METHOD_NAME(dictionary, source_directory, target_directory):
check_dictionary_type(dictionary, "subdirectories_to_copy", list)
subdirectories_to_copy = dictionary["subdirectories_to_copy"]
# loop over the subdirectories
for subdirectory in subdirectories_to_copy:
parent = subdirectory["source_directory_parent"] if "source_directory_parent" in subdirectory else None
source_subdir = find_subdirectory(source_directory, subdirectory["source_directory"], parent)
target_subdir = find_subdirectory(target_directory, subdirectory["target_directory"])
copy_files(source_subdir, target_subdir, subdirectory["files"], True)
print(BIG_SECTION) | [
214,
897
] |
def METHOD_NAME(self):
labels = self.labelsChannel.get_value()
labels = labels.split()
retlist = []
for label in labels:
# label, pos = label.split(":")
# retlist.append(int(pos))
pos = str(label.replace(":", " "))
retlist.append(pos)
logging.getLogger("HWR").debug("Zoom positions list: %s" % repr(retlist))
new_retlist = []
for n, e in enumerate(retlist):
name = e.split()
new_retlist.append("%s %s" % (n + 1, name[0]))
logging.getLogger("HWR").debug("Zoom positions list: %s" % repr(new_retlist))
# retlist = ["z1 1","z2 2"]
# logging.getLogger("HWR").debug("Zoom positions list: %s" % repr(retlist))
return new_retlist | [
19,
4446,
2758,
245
] |
def METHOD_NAME(line):
ser = REG_COMMIT_UUID.search(line)
if not ser:
return
LAST_COMMIT_UUID = ser.group()
if REG_COMMIT_WAKE.search(line):
ADDAX_JOBDICT_COMMIT[LAST_COMMIT_UUID] = {
'stat' : 'R',
'wake' : parse_timestamp(line),
'done' : UNIXTIME,
}
elif ((LAST_COMMIT_UUID in ADDAX_JOBDICT_COMMIT) and REG_COMMIT_DONE.search(line)):
ADDAX_JOBDICT_COMMIT[LAST_COMMIT_UUID]['stat'] = 'D'
ADDAX_JOBDICT_COMMIT[LAST_COMMIT_UUID]['done'] = parse_timestamp(line) | [
214,
77,
758
] |
def METHOD_NAME(self) -> str:
"""Parse and return where client is redirected after payment has been registered
Can be overriden in subclass if the provider does not support
the added extra query parameters in the return URL redirect.
"""
return '' if not self.request else self.request.GET.get(self.ui_return_url_param_name, '') | [
297,
882,
1413,
274
] |
def METHOD_NAME(self, reference_date: datetime.date) -> Set[pendulum.Date]:
"""
Given a reference date to calculate the offsets relative to, return
this date stencil as a set of dates.
Parameters
----------
reference_date : date
Date to calculate offsets relative to.
Returns
-------
set of pendulum.Date
Set of dates represented by the stencil
"""
date_pairs = self.as_date_pairs(reference_date=reference_date)
# Have to subtract a day from pair[1] here because pendulum.Period.range returns a range inclusive of both limits
dates = set().union(
*[pendulum.period(pair[0], pair[1].subtract(days=1)) for pair in date_pairs]
)
return dates | [
947,
0,
47,
306
] |
def METHOD_NAME(self,hp_id): | [
19,
6972,
43,
1754,
147
] |
def METHOD_NAME(self, seq_end):
i = 0
while i < len(self.cached_seqs):
if self.cached_seqs[i].seq_idx >= seq_end:
break
i += 1
del self.cached_seqs[:i] | [
950,
2228,
5355,
596
] |
async def METHOD_NAME(
subject: helpers.ModuleListener, mock_callback: AsyncMock
) -> None:
"""It should call the call back with the correct modules to load."""
message = models.Message(status="dump", connections=[])
await subject.handle_message(message=message)
mock_callback.assert_called_once_with([], []) | [
9,
276,
277,
278,
35
] |
def METHOD_NAME(
gift_card: GiftCard,
old_gift_card: GiftCard,
user: Optional[User],
app: Optional[App],
):
balance_data = {
"currency": gift_card.currency,
"initial_balance": gift_card.initial_balance_amount,
"current_balance": gift_card.current_balance_amount,
"old_currency": gift_card.currency,
"old_initial_balance": old_gift_card.initial_balance_amount,
"old_current_balance": old_gift_card.current_balance_amount,
}
return GiftCardEvent.objects.create(
gift_card=gift_card,
user=user,
app=app,
type=GiftCardEvents.BALANCE_RESET,
parameters={"balance": balance_data},
) | [
4755,
5427,
3101,
656,
417
] |
def METHOD_NAME(fn, dry_run = False):
all_results = read_eval_data(fn)
ls = ['-', '--', ':']
for indexes in sorted(all_results):
output_fn = '%s_%s.eps' % (fn.split('.')[0], indexes)
print('Saving EPS: '+output_fn)
if dry_run:
continue
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
for (model, linestyle) in zip(sorted(all_results[indexes]), ls):
all_results[indexes][model].sort(key = itemgetter(0))
x = [float(ele[0]) for ele in all_results[indexes][model]]
y = [float(ele[1]) for ele in all_results[indexes][model]]
ax.METHOD_NAME(x, y, linestyle=linestyle, marker='o', ms=5, label=model)
ax.grid(True)
ax.set_title(indexes)
ax.set_xlabel(r'$\beta$')
ax.set_ylabel('ERR20')
ax.legend()
plt.savefig(output_fn, bbox_inches='tight', format='eps') | [
1288
] |
def METHOD_NAME(config_base, config_extra):
return "\n".join([config_base, config_extra]) | [
200,
3
] |
def METHOD_NAME(self, train_data, device, args):
model = self.model
model.to(device)
model.METHOD_NAME()
test_data = None
try:
test_data = self.test_data
except:
pass
criterion = torch.nn.BCEWithLogitsLoss(reduction="none")
if args.client_optimizer == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
max_test_score = 0
best_model_params = {}
for epoch in range(args.epochs):
for mol_idxs, (forest, feature_matrix, label, mask) in enumerate(
train_data
):
# Pass on molecules that have no labels
if torch.all(mask == 0).item():
continue
optimizer.zero_grad()
forest = [level.to(device=device, dtype=torch.long, non_blocking=True) for level in forest]
feature_matrix = feature_matrix.to(
device=device, dtype=torch.float32, non_blocking=True
)
label = label.to(device=device, dtype=torch.float32, non_blocking=True)
mask = mask.to(device=device, dtype=torch.float32, non_blocking=True)
logits = model(forest, feature_matrix)
loss = criterion(logits, label) * mask
loss = loss.sum() / mask.sum()
loss.backward()
optimizer.step()
if ((mol_idxs + 1) % args.frequency_of_the_test == 0) or (
mol_idxs == len(train_data) - 1
):
if test_data is not None:
test_score, _ = self.test(self.test_data, device, args)
print(
"Epoch = {}, Iter = {}/{}: Test Score = {}".format(
epoch, mol_idxs + 1, len(train_data), test_score
)
)
if test_score > max_test_score:
max_test_score = test_score
best_model_params = {
k: v.cpu() for k, v in model.state_dict().items()
}
print("Current best = {}".format(max_test_score))
return max_test_score, best_model_params | [
849
] |
def METHOD_NAME(self):
pass | [
656
] |
def METHOD_NAME(args):
cmd_str = ' '.join(quote(x) for x in args)
log("running: %s" % cmd_str)
# https://github.com/AcademySoftwareFoundation/rez/pull/659
use_shell = ("Windows" in platform.system())
p = Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=use_shell,
text=True
)
stdout, stderr = p.communicate()
return stdout, stderr, p.returncode | [
22,
462
] |
def METHOD_NAME(f, hook, line, arg):
# type: (FunctionType, HookType, int, Any) -> FunctionType
"""Eject a hook from a function.
The hook is identified by its line number and the argument passed to the
hook.
"""
abstract_code = Bytecode.from_code(f.__code__)
_eject_hook(abstract_code, hook, line, arg)
return _function_with_new_code(f, abstract_code) | [
4601,
1021
] |
def METHOD_NAME(dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&"):
"Register the state Hydro expects to use and evolve."
return "void" | [
372,
551
] |
def METHOD_NAME(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.METHOD_NAME(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output | [
76
] |
def METHOD_NAME(self) -> str:
invitations = self.backend.METHOD_NAME()
return json.dumps(
{"Invitations": [invitation.to_dict() for invitation in invitations]}
) | [
245,
2621
] |
def METHOD_NAME():
test_data = load_tests("monitors", "policies.conf")
run_tests("monitors_policies", *test_data) | [
9,
10,
4152
] |
def METHOD_NAME(patch_base_class, requests_mock):
stream = NotionStream(config=MagicMock())
requests_mock.get("https://dummy", json={"results": [{"a": 123}, {"b": "xx"}]})
resp = requests.get("https://dummy")
inputs = {"response": resp, "stream_state": MagicMock()}
expected_parsed_object = [{"a": 123}, {"b": "xx"}]
assert list(stream.parse_response(**inputs)) == expected_parsed_object | [
9,
214,
17
] |
def METHOD_NAME(
resource_group_name: str, monitor_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}/vmIngestionDetails",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"monitorName": _SERIALIZER.url("monitor_name", monitor_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) | [
56,
2051,
377
] |
def METHOD_NAME(self) -> str:
return pulumi.get(self, "user_storage_key") | [
21,
948,
59
] |
def METHOD_NAME(self):
return encode_hashid(self.pk) | [
17789
] |
def METHOD_NAME(self):
self.zone = self.route53.create_hosted_zone(self.base_domain,
private_zone=True,
vpc_id=self.test_vpc.id,
vpc_region='us-east-1') | [
9,
129,
547,
2456
] |
def METHOD_NAME(hparams, hparams_path):
"""Override hparams values with existing standard hparams config."""
if hparams_path and tf.gfile.Exists(hparams_path):
print_out("# Loading standard hparams from %s" % hparams_path)
with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_path, "rb")) as f:
hparams.parse_json(f.read())
return hparams | [
2946,
214,
2356,
8866
] |
def METHOD_NAME(course_block):
""" Returns all sections that have highlights in a course """
return list(filter(_section_has_highlights, course_block.get_children())) | [
19,
1446,
41,
11531
] |
def METHOD_NAME(args):
args.embed_dim = getattr(args, "embed_dim", 1600)
args.num_attention_heads = getattr(args, "num_attention_heads", 25)
args.num_layers = getattr(args, "num_layers", 48)
default_architecture(args) | [
5191,
7683,
15158
] |
def METHOD_NAME():
# The last assignment to SPAMDOPTIONS takes effect
content = 'SPAMDOPTIONS="--ssl-version tlsv1"\nSPAMDOPTIONS="--ssl-version sslv3"\n'
value = spamassassinconfigread_spamd._parse_ssl_version(content)
assert value == 'sslv3' | [
9,
214,
1247,
281,
679,
776,
8411
] |
def METHOD_NAME(self, obj):
try:
acl = obj.acl
except AttributeError:
return False
return acl.get("can_approve") and obj.has_unapproved_posts | [
19,
220,
17864,
10177
] |
def METHOD_NAME(self, parameters: Optional[Dict[str, Any]] = None) -> Optional[Union[bool, np.bool_]]:
"""Materializes a BoolArtifact into a boolean.
Returns:
A boolean representing whether the check passed or not.
Raises:
InvalidRequestError:
An error occurred because of an issue with the user's code or inputs.
InternalServerError:
An unexpected error occurred in the server.
"""
if self._is_content_deleted():
return None
self._dag.must_get_artifact(self._artifact_id)
if self._from_flow_run:
if self._get_content() is None:
raise ArtifactNeverComputedException(
"This artifact was part of an existing flow run but was never computed successfully!",
)
elif parameters is not None:
raise NotImplementedError(
"Parameterizing historical artifacts is not currently supported."
)
content = self._get_content()
if parameters is not None or content is None:
previewed_artifact = artifact_utils.preview_artifact(
self._dag, self._artifact_id, parameters
)
content = previewed_artifact._get_content()
# If the artifact was previously generated lazily, materialize the contents.
if parameters is None and self._get_content() is None:
self._set_content(content)
assert isinstance(content, bool) or isinstance(content, np.bool_)
return content | [
19
] |
def METHOD_NAME(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj | [
772,
99
] |
def METHOD_NAME(self, mock_finalize, mock_verify, mock_sign, mock_compose):
mock_compose.return_value = {"credential": "composed"}
mock_sign.return_value = json.dumps({"credential": "composed-and-signed"})
result = CredentialIssuer(issuance_uuid=self.issuance_line.uuid).issue()
mock_compose.assert_called_once()
mock_sign.assert_called_once_with({"credential": "composed"})
mock_verify.assert_called_once_with(json.dumps({"credential": "composed-and-signed"}))
mock_finalize.assert_called_once()
self.assertEqual(result, json.loads(json.dumps({"credential": "composed-and-signed"}))) | [
9,
946,
771
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2018-09-01-preview",
required=True,
),
}
return parameters | [
539,
386
] |
def METHOD_NAME(self):
roiA = [(10, 10, 10), (20, 20, 20)]
roiB = [(15, 26, 27), (16, 30, 30)]
intersection = getIntersection(roiA, roiB, assertIntersect=False)
assert intersection is None, "Expected None because {} doesn't intersect with {}".format(roiA, roiB) | [
9,
654,
638,
256,
3801
] |
def METHOD_NAME(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).recovery_points | [
1300,
182,
7943,
2325
] |
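
The rows above pair a masked Python snippet (`text`, with the target method name replaced by `METHOD_NAME`) with a short list of integer `ids`. Below is a minimal sketch of iterating over such a two-column dataset with the Hugging Face `datasets` library; the dataset identifier is a placeholder, and the meaning of the `ids` column is an assumption (it is not documented in this preview).

```python
# Minimal sketch: load a two-column (text, ids) dataset and inspect a few rows.
# "your-namespace/method-name-dataset" is a placeholder identifier, not the real one.
from datasets import load_dataset

ds = load_dataset("your-namespace/method-name-dataset", split="train")

for row in ds.select(range(3)):
    snippet = row["text"]   # Python source with the method name masked as METHOD_NAME
    ids = row["ids"]        # list of 1-7 integers associated with the snippet
    print(len(snippet), ids)
```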