text (string, lengths 15 to 7.82k) | ids (sequence, lengths 1 to 7) |
---|---|
def METHOD_NAME(self, modid: Optional[str]) -> Optional[Dict[str, Any]]:
if modid:
mod_data = self.manager.arkman.getModData(modid)
assert mod_data
title = mod_data['title'] or mod_data['name']
return dict(mod=dict(id=modid, tag=mod_data['name'], title=title))
return None | [
19,
709,
365
] |
def METHOD_NAME(self, node_name, bitmap_name):
bitmap = block_dirty_bitmap.get_bitmap_by_name(self.main_vm,
node_name,
bitmap_name)
# check if bitmap exists
if bitmap is None:
self.test.fail('Failed to get bitmap')
# check if bitmap is persistent
if not bitmap['persistent']:
self.test.fail('Bitmap should be persistent') | [
250,
7039
] |
def METHOD_NAME(
cls,
additional_dir: Workspace,
workspace: Workspace,
task: Task,
):
all_states = dict(
Config={}, # placeholder
SpecHead=task.predictor.state_dict(),
Transformer=task.upstream.state_dict(),
Upstream_Config=dict(
transformer=_transformer_config,
audio=_audio_config,
task=dict(sequence_length=0),
),
)
torch.save(
all_states, str(additional_dir.parent.resolve()) + "/all_states.ckpt"
) | [
73,
2900
] |
def METHOD_NAME(self):
self._workbook = Workbook()
self._sheets = {}
self._columns = defaultdict(list)
self._current_index = defaultdict(lambda: 1)
self._generated_sheet_name_dict = {} | [
656,
6151
] |
def METHOD_NAME(self, u, q=None):
"""
TODO:
Introduce a `power` parameter
"""
return self.integral(u, celltype=True, q=q) | [
118,
2489
] |
async def METHOD_NAME(c, s, a, b):
assert set(s.workers) == {a.worker_address, b.worker_address}
await c.retire_workers(workers=[a.worker_address], close_workers=True)
assert set(s.workers) == {b.worker_address}
start = time()
while a.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5 | [
9,
4435,
5930
] |
def METHOD_NAME(metadata_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_machine_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHybridIdentityMetadatumResult:
"""
Implements HybridIdentityMetadata GET method.
Azure REST API version: 2022-07-15-preview.
:param str metadata_name: Name of the HybridIdentityMetadata.
:param str resource_group_name: The Resource Group Name.
:param str virtual_machine_name: Name of the vm.
"""
__args__ = dict()
__args__['metadataName'] = metadata_name
__args__['resourceGroupName'] = resource_group_name
__args__['virtualMachineName'] = virtual_machine_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:connectedvmwarevsphere:getHybridIdentityMetadatum', __args__, opts=opts, typ=GetHybridIdentityMetadatumResult).value
return AwaitableGetHybridIdentityMetadatumResult(
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
public_key=pulumi.get(__ret__, 'public_key'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'),
vm_id=pulumi.get(__ret__, 'vm_id')) | [
19,
2895,
2989,
5264
] |
def METHOD_NAME(self, action: pe_actions.Action) -> None:
"""React to a ProtocolEngine action."""
# TODO(jbl 2022-07-06) handle_action stub should be completely removed
pass | [
276,
1006
] |
async def METHOD_NAME(
session_factory: DbSessionFactory,
aggregate_fixtures: Tuple[AggregateDb, Sequence[AggregateElementDb]],
):
aggregate, elements = aggregate_fixtures
_test_refresh_aggregate(
session_factory=session_factory,
aggregate=None,
expected_aggregate=aggregate,
elements=elements,
) | [
9,
1920,
3428,
408
] |
def METHOD_NAME(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
["-txnotokens=0", "-amkheight=1", "-txindex=1"],
["-txnotokens=0", "-amkheight=1", "-txindex=1"],
] | [
0,
9,
434
] |
def METHOD_NAME(self, t: float, current_power: float) -> None: | [
407,
1911
] |
def METHOD_NAME(self):
self.set_focus_order(list(range(1, 7))) | [
852,
264,
604,
14119
] |
def METHOD_NAME(self, *args, **kwargs):
"""
encapsulate all call to kraken in a circuit breaker, this way we don't lose time calling dead instance
"""
try:
return self.breaker.call(self._send_and_receive, *args, **kwargs)
except pybreaker.CircuitBreakerError:
raise DeadSocketException(self.name, self._zmq_socket) | [
353,
61,
375
] |
def METHOD_NAME(self):
# This test comes from https://forums.aws.amazon.com/thread.jspa?messageID=374936
body = {
'__type': 'com.amazon.coral.validate#ValidationException',
'message': 'The attempted filter operation is not supported '
'for the provided filter argument count'}
jre = JSONResponseError('400', 'Bad Request', body=body)
self.assertEqual(jre.status, '400')
self.assertEqual(jre.reason, 'Bad Request')
self.assertEqual(jre.error_message, body['message'])
self.assertEqual(jre.error_message, jre.message)
self.assertEqual(jre.code, 'ValidationException')
self.assertEqual(jre.code, jre.error_code) | [
9,
277,
763,
17,
168
] |
def METHOD_NAME(self):
conn_params = self.conn_dict.copy()
conn_params["allow_multapses"] = False
N = 10
# test that multapses must be permitted
nest.ResetKernel()
pop = nest.Create("iaf_psc_alpha", N)
with self.assertRaises(nest.kernel.NESTError):
nest.Connect(pop, pop, conn_params)
# test that multapses can only arise from symmetric
# connectivity
conn_params["p"] = 1.0 - 1.0 / N
conn_params["allow_multapses"] = True
nest.ResetKernel()
pop = nest.Create("iaf_psc_alpha", N)
nest.Connect(pop, pop, conn_params)
conn_dict = collections.defaultdict(int)
conn = nest.GetConnections()
for s_t_key in zip(conn.sources(), conn.targets()):
conn_dict[s_t_key] += 1
self.assertTrue(conn_dict[s_t_key] <= 2) | [
9,
3113
] |
def METHOD_NAME(self) -> 'outputs.FluidRelayEndpointsResponse':
"""
The Fluid Relay Service endpoints for this server.
"""
return pulumi.get(self, "fluid_relay_endpoints") | [
10082,
4208,
1197
] |
def METHOD_NAME(self, transactions=None, tag=None, date=None):
form = get_simple_wrapped_form(uuid.uuid4().hex, metadata=TestFormMetadata(domain=self.domain))
report = StockReportHelper.make_from_form(
form,
date or datetime.utcnow(),
tag or REPORT_TYPE_BALANCE,
transactions or [],
)
return report, form | [
129,
339
] |
def METHOD_NAME(self, size, safe=False):
"""
Allocate a new memory of `size` bytes and returns a MemInfo object
that tracks the allocation. When there is no more reference to the
MemInfo object, the underlying memory will be deallocated.
If `safe` flag is True, the memory is allocated using the `safe` scheme.
This is used for debugging and testing purposes.
See `NRT_MemInfo_alloc_safe()` in "nrt.h" for details.
"""
self._init_guard()
if size < 0:
msg = f"Cannot allocate a negative number of bytes: {size}."
raise ValueError(msg)
if safe:
mi = _nrt.meminfo_alloc_safe(size)
else:
mi = _nrt.METHOD_NAME(size)
if mi == 0: # alloc failed or size was 0 and alloc returned NULL.
msg = f"Requested allocation of {size} bytes failed."
raise MemoryError(msg)
return MemInfo(mi) | [
6184,
5262
] |
def METHOD_NAME():
osType = platform.system()
containerLocalHostAddress = containerLocalHostAddressOrg # NOTE: see the file: `OpenUxAS/docker/00c_README_OS_Differences.md`
if osType =='Linux':
pass
elif osType =='Darwin':
containerLocalHostAddress = '192.168.65.2'
networkCfg = ''
elif osType =='Windows':
containerLocalHostAddress = '10.0.75.1'
networkCfg = ''
BuildDockerComposeFile(dockerComposeFilename,containerLocalHostAddress)
print('\n** close any running containers **')
cmd = 'docker-compose -f {} kill'.format(dockerComposeFilename)
print('{}\n'.format(cmd))
call(cmd,shell=True)
print('\n** start new containers **')
cmd = 'docker-compose -f {} up'.format(dockerComposeFilename)
print('{}\n'.format(cmd))
call(cmd,shell=True) | [
57
] |
def METHOD_NAME(provisioning_service_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIotDpsResourceResult]:
"""
Get the metadata of the provisioning service without SAS keys.
Azure REST API version: 2022-12-12.
:param str provisioning_service_name: Name of the provisioning service to retrieve.
:param str resource_group_name: Resource group name.
"""
... | [
19,
4254,
6431,
191,
146
] |
def METHOD_NAME(self, filename):
with openfile(filename, encoding="utf-8") as fp:
return email.message_from_file(fp, policy=self.policy) | [
-1
] |
def METHOD_NAME(sql):
connection = mssql_dbapi_connection_from_url(os.environ["DATABASE_URL"])
cursor = connection.cursor()
cursor.execute(sql)
keys = [x[0] for x in cursor.description]
# Convert all values to str as that's what will end in the CSV
return [dict(zip(keys, map(str, row))) for row in cursor] | [
1621,
24,
2673
] |
def METHOD_NAME(courselike_source_key, block_key, dest_parent: BlockKey):
"""
Return a new reproducible block ID for a given root, source block, and destination parent.
When recursively copying a block structure, we need to generate new block IDs for the
blocks. We don't want to use the exact same IDs as we might copy blocks multiple times.
However, we do want to be able to reproduce the same IDs when copying the same block
so that if we ever need to re-copy the block from its source (that is, to update it with
upstream changes) we don't affect any data tied to the ID, such as grades.
This is used by the copy_from_template function of the modulestore, and can be used by
pluggable django apps that need to copy blocks from one course to another in an
idempotent way. In the future, this should be created into a proper API function
in the spirit of OEP-49.
"""
hashable_source_id = courselike_source_key.for_version(None)
# Compute a new block ID. This new block ID must be consistent when this
# method is called with the same (source_key, dest_structure) pair
unique_data = "{}:{}:{}".format(
str(hashable_source_id).encode("utf-8"),
block_key.id,
dest_parent.id,
)
new_block_id = hashlib.sha1(unique_data.encode('utf-8')).hexdigest()[:20]
return BlockKey(block_key.type, new_block_id) | [
1684,
59
] |
def METHOD_NAME(self, shares, expected):
""" 異常値を与えてエラーが出るかTest """
with pytest.raises(expected):
Share.recons(shares) | [
9,
17266,
-1
] |
def METHOD_NAME(g, ego, hop_number):
# get ego-networks for sampled nodes
sub_nodes, sub_edge_index, _, _ = k_hop_subgraph(ego, hop_number, g.edge_index)
def re_index(source):
mapping = dict(zip(sub_nodes.numpy(), range(sub_nodes.shape[0])))
return mapping[source]
edge_index_u = [*map(re_index, sub_edge_index[0][:].numpy())]
edge_index_v = [*map(re_index, sub_edge_index[1][:].numpy())]
egonet = Data(edge_index=torch.tensor([edge_index_u, edge_index_v]), x=g.x[sub_nodes], y=g.y[sub_nodes])
return egonet | [
24,
12050,
819
] |
def METHOD_NAME(sched_name, class_name):
k = (sched_name, class_name)
try:
rv = self.objectids[k]
except KeyError:
rv = self.objectids[k] = self.next_objectid
self.next_objectid += 1
return defer.succeed(rv) | [
19,
279,
147
] |
def METHOD_NAME(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
# Delete by anonumous user
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 401
# Delete by logged user
client.login(username=user1.username, password=user1.username)
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 204
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
# Delete not existent entry
response = client.json.delete(reverse("user-storage-detail", args=["foo"]))
assert response.status_code == 404
client.login(username=user2.username, password=user2.username)
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404 | [
9,
34,
948,
475
] |
def METHOD_NAME(self):
"""
Returns an Elasticsearch DSL search object or an iterator.
.. note::
Calling ``list(search)`` over an DSL search object is the same as
calling ``search.execute().hits``. This is why an DSL search object
is compatible with DRF's paginator.
"""
projects = {
project.slug: version.slug
for project, version in self._get_projects_to_search()
}
# Check to avoid searching all projects in case it's empty.
if not projects:
log.info("Unable to find a version to search")
return []
query = self._get_search_query()
queryset = PageSearch(
query=query,
projects=projects,
aggregate_results=False,
use_advanced_query=self._use_advanced_query(),
)
return queryset | [
19,
2386
] |
def METHOD_NAME(CP) -> CANParser:
msg_n = len(DELPHI_ESR_RADAR_MSGS)
messages = list(zip(DELPHI_ESR_RADAR_MSGS, [20] * msg_n, strict=True))
return CANParser(RADAR.DELPHI_ESR, messages, CanBus(CP).radar) | [
129,
10404,
9277,
9380,
1046,
1319
] |
def METHOD_NAME(output: Sequence[Mapping[str, Any]], loss_key: str = CommonKeys.LOSS) -> Any:
return output[0][loss_key] | [
19,
1572,
280,
146
] |
def METHOD_NAME(conn, rpc_key):
"""Handle a single connection that has just been accept'd()."""
def send(data):
conn.sendall(data)
return len(data)
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
return
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:"
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
_LOG.warning("mismatch key from %s", addr)
return
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
server = _ffi_api.CreateEventDrivenServer(send, "microtvm-rpc-debugger", key)
def _readall(n):
buf = bytearray()
while len(buf) < n:
x = conn.recv(n - len(buf))
if not x:
raise ConnectionClosedError()
buf = buf + x
return buf
while True:
packet_length_bytes = _readall(8)
packet_length = struct.unpack("<q", packet_length_bytes)[0]
if not packet_length:
break
status = server(packet_length_bytes, 3)
if status == 0:
break
packet_body = _readall(packet_length)
status = server(packet_body, 3) | [
276,
4579
] |
def METHOD_NAME(ds):
for d in ds:
try:
os.unlink(d)
except:
print("Could not remove ", d) | [
188,
1537
] |
def METHOD_NAME(self, bucket_name): | [
372,
235,
39,
581
] |
def METHOD_NAME():
assert isinstance(cirq.X, cirq.XPowGate)
assert isinstance(cirq.Y, cirq.YPowGate)
assert isinstance(cirq.Z, cirq.ZPowGate)
assert not isinstance(cirq.X, cirq.YPowGate)
assert not isinstance(cirq.X, cirq.ZPowGate)
assert not isinstance(cirq.Y, cirq.XPowGate)
assert not isinstance(cirq.Y, cirq.ZPowGate)
assert not isinstance(cirq.Z, cirq.XPowGate)
assert not isinstance(cirq.Z, cirq.YPowGate) | [
9,
5553
] |
def METHOD_NAME(self):
value = ma.arange(10).reshape(2, 5)
expected = self._masked(value)
self.assertEqual(expected, hexdigest(value)) | [
9,
2062,
877,
3013
] |
def METHOD_NAME(self, field, options):
return django.forms.EmailField(**options) | [
129,
487,
101
] |
def METHOD_NAME(cls, action: Any, resource: Any) -> Dict[str, Any]:
"""Return the DeadLetterQueue Policy to be added to the LambdaRole
:returns: Policy for the DeadLetterQueue
:rtype: Dict
"""
return {
"PolicyName": "DeadLetterQueuePolicy",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [{"Action": action, "Resource": resource, "Effect": "Allow"}],
},
} | [
1565,
3371,
651,
54
] |
def METHOD_NAME(
ctx: ProtocolContext,
pipette: InstrumentContext,
tip_volume: int,
all_channels: bool = True,
) -> Dict[int, List[Well]]:
"""Get tips."""
if pipette.channels == 1:
return {0: get_tips_for_single(ctx, tip_volume)}
elif pipette.channels == 8:
if all_channels:
return {0: get_tips_for_all_channels_on_multi(ctx)}
else:
return {
channel: get_tips_for_individual_channel_on_multi(
ctx, channel, tip_volume, int(pipette.max_volume)
)
for channel in range(pipette.channels)
}
elif pipette.channels == 96:
if all_channels:
return {0: get_tips_for_96_channel(ctx)}
else:
raise NotImplementedError(
"no support for individual channel testing on the 96ch pipette"
)
else:
raise ValueError(
f"unexpected state when getting tips: "
f"pipette.channels={pipette.channels}, "
f"all_channels={all_channels}"
) | [
19,
7964
] |
def METHOD_NAME(
appName, #: String,
srcDataSchema, #: Seq[StructField],
srcData, #: Seq[Row],
columnNames,#: Map[String, String] // standard column name to name of column in the dataset
srcPairsData = None # Seq[Row]
):
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
df = spark.createDataFrame(spark.sparkContext.parallelize(srcData), StructType(srcDataSchema))
if srcPairsData:
pairsDataSchema = StructType(
[
StructField("groupId", LongType(), False),
StructField("winnerId", IntegerType(), False),
StructField("loserId", IntegerType(), False)
]
)
pairsDf = spark.createDataFrame(spark.sparkContext.parallelize(srcPairsData), pairsDataSchema)
pool = catboost_spark.Pool(df, pairsDf)
else:
pool = catboost_spark.Pool(df)
if ("features" in columnNames):
pool = pool.setFeaturesCol(columnNames["features"])
if ("groupId" in columnNames):
pool = pool.setGroupIdCol(columnNames["groupId"])
if ("sampleId" in columnNames):
pool = pool.setSampleIdCol(columnNames["sampleId"])
if ("subgroupId" in columnNames):
pool = pool.setSubgroupIdCol(columnNames["subgroupId"])
if ("weight" in columnNames):
pool = pool.setWeightCol(columnNames["weight"])
if ("groupWeight" in columnNames):
pool = pool.setGroupWeightCol(columnNames["groupWeight"])
return pool | [
129,
772,
1567
] |
def METHOD_NAME(): pass | [
3712
] |
def METHOD_NAME(cls, parser: argparse.ArgumentParser):
# NOTE(kamo): Use '_' instead of '-' to avoid confusion
group = parser.add_argument_group(description="Task related")
# NOTE(kamo): add_arguments(..., required=True) can't be used
# to provide --print_config mode. Instead of it, do as
required = parser.get_default("required")
# required += ["token_list"]
group.add_argument(
"--token_list",
type=str_or_none,
default=None,
help="A text mapping int-id to token",
)
group.add_argument(
"--init",
type=lambda x: str_or_none(x.lower()),
default=None,
help="The initialization method",
choices=[
"chainer",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
None,
],
)
group.add_argument(
"--model_conf",
action=NestedDictAction,
default=get_default_kwargs(LanguageModel),
help="The keyword arguments for model class.",
)
group = parser.add_argument_group(description="Preprocess related")
group.add_argument(
"--use_preprocessor",
type=str2bool,
default=True,
help="Apply preprocessing to data or not",
)
group.add_argument(
"--token_type",
type=str,
default="bpe",
choices=["bpe", "char", "word"],
help="",
)
group.add_argument(
"--bpemodel",
type=str_or_none,
default=None,
help="The model file fo sentencepiece",
)
parser.add_argument(
"--non_linguistic_symbols",
type=str_or_none,
help="non_linguistic_symbols file path",
)
parser.add_argument(
"--cleaner",
type=str_or_none,
choices=[None, "tacotron", "jaconv", "vietnamese"],
default=None,
help="Apply text cleaning",
)
parser.add_argument(
"--g2p",
type=str_or_none,
choices=g2p_choices,
default=None,
help="Specify g2p method if --token_type=phn",
)
for class_choices in cls.class_choices_list:
class_choices.add_arguments(group)
return parser | [
238,
758,
134
] |
def METHOD_NAME(self):
try:
FormVariableFactory.create(prefill_plugin="demo", prefill_attribute="demo")
except (ValidationError, IntegrityError) as e:
raise self.failureException("Failed valid input") from e | [
9,
1205,
5385,
2793,
200
] |
def METHOD_NAME():
return get_hosts('pgbouncer') | [
19,
508
] |
def METHOD_NAME(self, handle):
"""
Reset a ticket timer, which moves it to the end of the ticket list and
resets its execution time. This is a very fast operation.
"""
utils.lib.zloop_ticket_reset(self._p, handle._p) | [
3769,
656
] |
def METHOD_NAME(self):
return all(o.METHOD_NAME for o in self.optimizers.values()) | [
1466,
2301,
434
] |
def METHOD_NAME(app):
# regression test for #976
app.builder.build(['index_entries'])
pot = (app.outdir / 'index_entries.pot').read_text(encoding='utf8')
msg_ids = list(filter(None, map(msgid_getter, pot.splitlines())))
assert msg_ids == [
"i18n with index entries",
"index target section",
"this is :index:`Newsletter` target paragraph.",
"various index entries",
"That's all.",
"Mailing List",
"Newsletter",
"Recipients List",
"First",
"Second",
"Third",
"Entry",
"See",
] | [
9,
6316,
724,
109
] |
def METHOD_NAME(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
if isinstance(features, np.ndarray):
return features
else:
return features.todense(), sparse_to_tuple(features) | [
666,
2247
] |
def METHOD_NAME(self): | [
9,
356,
69,
463,
41,
880,
2258
] |
def METHOD_NAME(self):
with test_domain(name="B", skip_full_delete=True):
self.helper.get_or_create_invitation("B")
self._assertLogs([
(self.domain, RegistryAuditLog.ACTION_INVITATION_ADDED),
("B", RegistryAuditLog.ACTION_INVITATION_ADDED),
]) | [
9,
129,
4568,
663
] |
def METHOD_NAME(integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
session_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountSessionResult:
"""
Gets an integration account session.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
:param str session_name: The integration account session name.
"""
__args__ = dict()
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
__args__['sessionName'] = session_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:logic/v20190501:getIntegrationAccountSession', __args__, opts=opts, typ=GetIntegrationAccountSessionResult).value
return AwaitableGetIntegrationAccountSessionResult(
changed_time=pulumi.get(__ret__, 'changed_time'),
content=pulumi.get(__ret__, 'content'),
created_time=pulumi.get(__ret__, 'created_time'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type')) | [
19,
1911,
598,
240
] |
def METHOD_NAME(self):
"""Instance depends on the API version:
* 2019-07-01: :class:`ApplicationsOperations<azure.mgmt.resource.managedapplications.v2019_07_01.aio.operations.ApplicationsOperations>`
"""
api_version = self._get_api_version('applications')
if api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import ApplicationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'applications'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | [
179
] |
def METHOD_NAME():
config = pretend.stub(
add_wsgi_middleware=pretend.call_recorder(lambda *a, **kw: None),
add_directive=pretend.call_recorder(lambda name, callable: None),
)
static.includeme(config)
assert config.add_wsgi_middleware.calls == [
pretend.call(static._create_whitenoise, config)
]
assert config.add_directive.calls == [
pretend.call("whitenoise_serve_static", static.whitenoise_serve_static),
pretend.call("whitenoise_add_files", static.whitenoise_add_files),
pretend.call("whitenoise_add_manifest", static.whitenoise_add_manifest),
] | [
9,
9995
] |
def METHOD_NAME(self, filename: str) -> list[Breakpoint]: ... | [
19,
171,
7101
] |
def METHOD_NAME(self):
response = self.client.get(reverse(profile,
kwargs={'username': 'nonuser'}))
self.assertEqual(response.status_code, 404) | [
9,
2121,
217,
21,
870,
130,
1985
] |
def METHOD_NAME(prod_root_1):
file_name = "common_file"
root_1_file = str(prod_root_1 / file_name)
expected = [{root_1_file: root_1_file}]
ret = file_roots.read(file_name, saltenv="prod")
assert ret == expected | [
9,
203,
1518
] |
def METHOD_NAME(self):
"""Reset the index and refill new and new_edges
From time to time TaskGraph wants to remove invisible nodes and reset
all of its indices. This helps.
"""
self.new = []
self.new_edges = []
self.visible_updates = []
self.state_updates = []
self.visible_edge_updates = []
self.index = {}
self.next_index = 0
self.index_edge = {}
self.next_edge_index = 0
for key in self.x:
self.index[key] = self.next_index
self.next_index += 1
self.new.append(key)
for dep in self.scheduler.tasks[key].dependencies:
edge = (dep.key, key)
self.index_edge[edge] = self.next_edge_index
self.next_edge_index += 1
self.new_edges.append(edge) | [
656,
724
] |
def METHOD_NAME(ts):
t = ts.tt_jd(api.T0)
eph = api.load('de421.bsp')
boston = eph['earth'] + Topos('42.3583 N', '71.0636 W')
b = boston.at(t)
star = api.Star(ra_hours=[1.0, 2.0], dec_degrees=[+3.0, +4.0])
p = b.observe(star)
assert p.position.au.shape == (3, 2)
assert p.velocity.au_per_d.shape == (3, 2)
assert p.t.shape == (2,)
assert (p.t.tt == api.T0).all()
a = p.apparent()
a1 = b.observe(api.Star(ra_hours=1.0, dec_degrees=+3.0)).apparent()
a2 = b.observe(api.Star(ra_hours=2.0, dec_degrees=+4.0)).apparent()
assert (a1.position.au == a.position.au[:,0]).all()
assert (a2.position.au == a.position.au[:,1]).all() | [
9,
4889,
798,
280,
10696
] |
def METHOD_NAME(self):
self.cmd = dnf.cli.commands.repoquery.RepoQueryCommand(
tests.support.CliStub(tests.support.BaseCliStub()))
tests.support.command_configure(self.cmd, ['--source'])
pkg = dnf.cli.commands.repoquery.PackageWrapper(PkgStub())
self.assertEqual(self.cmd.build_format_fn(self.cmd.opts, pkg),
EXPECTED_SOURCERPM_FORMAT) | [
9,
100
] |
def METHOD_NAME(self):
if no_groups():
return
self.check_args("/Groups/Abstract/1920.240463", [
"nonsolvable",
"10 subgroups in one conjugacy class",
"240.190", # socle
"960.5735", # max sub
"960.5692", # max quo
"rgb(20,82,204)", # color in image
])
self.check_args("/Groups/Abstract/1536.123", [
r"C_3 \times ((C_2\times C_8) . (C_4\times C_8))", # latex
"216", # number of 2-dimensional complex characters
"j^{3}", # presentation
"metabelian", # boolean quantities
])
self.check_args("/Groups/Abstract/ab/2.2.3.4.5.6.7.8.9.10", [
"7257600", # order
"2520", # exponent
r"C_{2}^{3} \times C_{6} \times C_{60} \times C_{2520}", # latex
r"2^{40} \cdot 3^{10} \cdot 5^{2} \cdot 7", # order of automorphism group
"1990656", # number of elements of order 2520
r"C_2\times C_{12}", # Frattini
])
self.check_args("/Groups/Abstract/ab/2_50", [ # large elementary abelian 2-group
"4432676798593", # factor of aut_order
])
self.check_args("/Groups/Abstract/ab/3000", [ # large cyclic group
r"C_2^3\times C_{100}", # automorphism group structure
]) | [
9,
1824,
254
] |
def METHOD_NAME(self, run_input):
return transpile(run_input, backend=self._device) | [
6263
] |
def METHOD_NAME(outFile, failedQueryIds):
if len(failedQueryIds) == 0:
return
distOutFileContentAsLines = []
with open(outFile, "r") as f:
distOutFileContentAsLines = f.readlines()
with open(outFile, "w") as f:
clear = False
nextIdx = 0
nextQueryIdToDelete = failedQueryIds[nextIdx]
queryIdPattern = "queryId: ([0-9]+)"
for line in distOutFileContentAsLines:
matched = re.search(queryIdPattern, line)
# founded line which contains query id
if matched:
# query id matches with the next failed query's id
if nextQueryIdToDelete == matched[1]:
# clear lines until we find succesfull query
clear = True
nextIdx += 1
if nextIdx < len(failedQueryIds):
nextQueryIdToDelete = failedQueryIds[nextIdx]
else:
# we found successfull query
clear = False
if not clear:
f.write(line)
return | [
188,
1423,
539,
146,
280,
171
] |
def METHOD_NAME(self, x):
"""
INPUT:
- ``x`` -- an element of the ambient space for ``self``
Retracts ``x`` from the ambient space to ``self``, as per
:meth:`Sets.Subquotients.ParentMethods.retract()
<sage.categories.sets_cat.Sets.Subquotients.ParentMethods.retract>`.
EXAMPLES::
sage: C = FiniteEnumeratedSets().IsomorphicObjects().example(); C
The image by some isomorphism of An example of a finite enumerated set: {1,2,3}
sage: C.retract(3)
9
"""
return x ** 2 | [
5094
] |
def METHOD_NAME(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) | [
19,
200
] |
def METHOD_NAME(self, *args):
self.insert_called_with = args
self.delegate.METHOD_NAME(*args) | [
408
] |
def METHOD_NAME():
args = Mock()
args.job_conda_env = TEST_JOB_CONDA_ENV
args.client_python_version = TEST_PYTHON_VERSION
return args | [
248,
335
] |
async def METHOD_NAME(self, mock_processor):
"""Test that process_update calls do_process_update."""
update = Update(1)
async def coroutine():
pass
await mock_processor.process_update(update, coroutine())
# This flag is set in the mock processor in do_process_update, telling us that
# do_process_update was called.
assert mock_processor.test_flag | [
9,
356,
86
] |
def METHOD_NAME(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(6060)) | [
9,
324,
236,
3420,
2199,
1170
] |
def METHOD_NAME(self):
self.assertEqual(list(util.find_escaped_pattern_fields(r'a.b.c.d')), [])
self.assertEqual(list(util.find_escaped_pattern_fields(r'a\*.b.c.d')), [0])
self.assertEqual(list(util.find_escaped_pattern_fields(r'a.b.\[c].d')), [2]) | [
9,
416,
6409,
652,
342
] |
def METHOD_NAME(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
return {key: [(timestamp, {}) for timestamp in series] for key in keys} | [
19,
759,
11524,
4045
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self):
"""removes the test directory"""
if os.path.exists(self.testdir):
for dirpath, subdirs, filenames in os.walk(self.testdir, topdown=False):
for name in filenames:
os.remove(os.path.join(dirpath, name))
for name in subdirs:
os.rmdir(os.path.join(dirpath, name))
if os.path.exists(self.testdir):
os.rmdir(self.testdir)
assert not os.path.exists(self.testdir) | [
-1
] |
def METHOD_NAME(task): | [
77,
9,
171
] |
def METHOD_NAME():
module = parse('if x: f.\nelse: g(')
if_stmt = module.children[0]
assert if_stmt.type == 'if_stmt'
if_, test, colon, f = if_stmt.children
assert f.type == 'error_node'
assert f.children[0].value == 'f'
assert f.children[1].value == '.'
assert module.children[1].type == 'newline'
assert module.children[1].value == '\n'
assert module.children[2].type == 'error_leaf'
assert module.children[2].value == 'else'
assert module.children[3].type == 'error_leaf'
assert module.children[3].value == ':'
in_else_stmt = module.children[4]
assert in_else_stmt.type == 'error_node'
assert in_else_stmt.children[0].value == 'g'
assert in_else_stmt.children[1].value == '(' | [
9,
217,
7535
] |
def METHOD_NAME(self):
return True | [
220,
2685,
672
] |
def METHOD_NAME(mini, full):
""" Disable test """
assert mini[0]['enabled'] is False
assert full[0]['enabled'] is False
assert full[1]['enabled'] is False | [
9,
193
] |
def METHOD_NAME(app: Sphinx) -> dict[str, bool]:
app.connect("env-before-read-docs", on_env_before_read_docs)
app.connect("missing-reference", on_missing_reference)
app.connect("warn-missing-reference", on_warn_missing_reference)
app.add_config_value("ignore_missing_refs", default={}, rebuild=False)
return {"parallel_read_safe": True, "parallel_write_safe": True} | [
102
] |
async def METHOD_NAME(item_id: int):
item_db = await Item.objects.get(pk=item_id)
return {"deleted_rows": await item_db.delete()} | [
34,
1024
] |
def METHOD_NAME(request):
if request.META.get('REQUEST_METHOD') == 'POST':
return request.POST.get('_format')
elif request.META.get('REQUEST_METHOD') == 'GET':
return request.GET.get('_format')
else:
return None | [
19,
275,
49
] |
def METHOD_NAME(self, text):
"""Append new text and scroll output to bottom.
We can't use Qt's way to append stuff because that inserts weird
newlines.
"""
self.moveCursor(QTextCursor.MoveOperation.End)
self.insertPlainText(text)
scrollbar = self.verticalScrollBar()
assert scrollbar is not None
scrollbar.setValue(scrollbar.maximum()) | [
1459,
526
] |
def METHOD_NAME(f1):
return f1.replace('xrange', 'range') | [
369,
7929
] |
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result | [
24,
553
] |
def METHOD_NAME(
x: tvm.nd.array,
sorted: int,
return_index: int,
return_inverse: int,
return_counts: int,
) -> tvm.nd.array:
"""Returns the unique elements of the input tensor.
Uses numpy.unique to compute unique elements.
"""
import builtins
# TODO(prakalp): add support for returning a tuple when return_inverse or return_counts is True
if bool(return_index) or bool(return_inverse) or bool(return_counts):
raise NotImplementedError("missing support return_inverse or return_counts set to true")
x_numpy = x.numpy()
# TODO(prakalp): use torch.unique instead of numpy when torch is installed in ci.
output_sorted_numpy, indices = np.unique(x_numpy, return_index=True)
if sorted:
return tvm.nd.array(output_sorted_numpy)
output_numpy = [x_numpy.flatten()[index] for index in builtins.sorted(indices, reverse=True)]
return tvm.nd.array(output_numpy) | [
2028,
2768
] |
def METHOD_NAME(recipient, service, key_type, allow_safelisted_recipients=True):
is_simulated = False
if recipient in current_app.config["SIMULATED_EMAIL_ADDRESSES"] or recipient in current_app.config["SIMULATED_SMS_NUMBERS"]:
is_simulated = True
members = safelisted_members(service, key_type, is_simulated, allow_safelisted_recipients)
if members is None:
return True
return allowed_to_send_to(recipient, members) | [
549,
2474,
24,
353,
24
] |
def METHOD_NAME(
request, conference_slug, proposal_slug, proposal_comment_id
):
return proposal_comment_vote(
request, conference_slug, proposal_slug, proposal_comment_id, False
) | [
4229,
1591,
481,
9811
] |
def METHOD_NAME(agent):
testerchain = agent.blockchain
# Internal
assert 'NuCypher' == agent.contract.functions.name().call()
assert 18 == agent.contract.functions.decimals().call()
assert 'NU' == agent.contract.functions.symbol().call()
# Cannot transfer any ETH to token contract
with pytest.raises((TransactionFailed, ValueError)):
origin = testerchain.client.coinbase
payload = {'from': origin, 'to': agent.contract_address, 'value': 1}
tx = testerchain.client.send_transaction(payload)
testerchain.wait_for_receipt(tx)
assert len(agent.contract_address) == 42
assert agent.contract.address == agent.contract_address
assert agent.contract_name == NucypherTokenAgent.contract_name
assert not agent._proxy_name # not upgradeable | [
9,
466,
748
] |
def METHOD_NAME(self, x):
out = x
for i in range(0, len(self.body)):
out = self.body[i](out)
out = self.upsampler(out)
# add the nearest upsampled image, so that the network learns the residual
base = F.interpolate(x, scale_factor=self.scale, mode="nearest")
out += base
return out | [
76
] |
def METHOD_NAME(cls):
""" Return iteration params with default values to start iteration from scratch """
return dict(_stop_iter=False, _start_index=0, _order=None, _n_iters=0, _n_epochs=0, _random_state=None) | [
19,
235,
84,
434
] |
def METHOD_NAME() -> Dict[NodeId, UInt32Field]:
"""Current settings from default ot3 config."""
return {
NodeId.gantry_x: UInt32Field(7208),
NodeId.gantry_y: UInt32Field(14548),
NodeId.pipette_left: UInt32Field(21843),
NodeId.pipette_right: UInt32Field(29126),
} | [
2351,
1056,
817,
623,
813
] |
def METHOD_NAME(self): | [
9,
8779,
47,
2486,
626,
2486,
106
] |
def METHOD_NAME(setup):
ParameterizedExchange.delete().execute()
ActivityParameter.delete().execute()
DatabaseParameter.delete().execute()
assert not DatabaseParameter.select().count()
write_lci_csv("example") | [
9,
5300,
294,
654,
483
] |
def METHOD_NAME(value: str) -> Tuple[str, str, str]:
"""str2triple_str.
Examples:
>>> str2triple_str('abc,def ,ghi')
('abc', 'def', 'ghi')
"""
value = remove_parenthesis(value)
a, b, c = value.split(",")
# Workaround for configargparse issues:
# If the list values are given from yaml file,
# the value givent to type() is shaped as python-list,
# e.g. ['a', 'b', 'c'],
# so we need to remove quotes from it.
return remove_quotes(a), remove_quotes(b), remove_quotes(c) | [
-1,
3
] |
def METHOD_NAME(*args: str) -> Iterator[_InteractiveCommandProcess]:
"""
Runs a Determined CLI command in a subprocess. On exit, it kills the
corresponding Determined task if possible before closing the subprocess.
Example usage:
with util.interactive_command("notebook", "start") as notebook:
for line in notebook.stdout:
if "Jupyter Notebook is running" in line:
break
"""
with subprocess.Popen(
("det", "-m", conf.make_master_url()) + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env={"PYTHONUNBUFFERED": "1", **os.environ},
) as p:
cmd = _InteractiveCommandProcess(p, detach="--detach" in args)
if cmd.task_id is None:
raise AssertionError(
"Task ID for '{}' could not be found. "
"If it is still active, this command may persist "
"in the Determined test deployment...".format(args)
)
try:
yield cmd
finally:
subprocess.check_call(
["det", "-m", conf.make_master_url(), str(args[0]), "kill", cmd.task_id]
)
p.kill() | [
1204,
462
] |
def METHOD_NAME(self, location: str = None) -> str: ... | [
56,
4653,
354
] |
def METHOD_NAME():
yield "/tmp/somedir" | [
1278,
5944,
2851
] |
def METHOD_NAME(
data_type: Callable,
split: bool = True,
seed: int = 999) -> List[Union[numpy.array, pandas.DataFrame]]:
"""
Loads test data from the sklearn Iris dataset in a given format,
either in a single or multiple batches.
Args:
data_type: Datatype of the iris test dataset.
split: Split the dataset in different batches or return single batch.
seed: Random state for splitting the train and test set.
"""
dataset = load_iris()
_, x_test, _, _ = train_test_split(
dataset['data'], dataset['target'], test_size=.2, random_state=seed)
if split:
return [(index, data_type(sample.reshape(1, -1))) for index,
sample in enumerate(x_test)]
return [(0, data_type(x_test))] | [
557,
435,
8245,
9,
365
] |
def METHOD_NAME():
target_catalog = copy_test_file("catalog1.yml", appdirs.user_data_dir(appname="intake", appauthor="intake"))
yield target_catalog
# Remove the file, but not the directory (because there might be other
# files already there)
os.remove(target_catalog) | [
21,
2824
] |
def METHOD_NAME(self, METHOD_NAME):
assert isinstance(METHOD_NAME, bool)
self._include_uuid = METHOD_NAME
return self | [
1872,
4977
] |
def METHOD_NAME(
cls,
content_data: bytes,
data_pointer: u32,
) -> "Sir0Serializable":
return cls(content_data, data_pointer) | [
8304,
1534
] |
def METHOD_NAME(warp_tile_m=16, m=64, n=32, l=96, batch=2):
A = te.placeholder((batch, n, l), name="A", dtype="float16")
B = te.placeholder((batch, l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(batch, n, m), lambda b, i, j: te.sum((A[b, i, k] * B[b, k, j]).astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
z, y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 2
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(z, yo, xo, tz, ty, tx, yi, xi)
s[C].bind(z, te.thread_axis("blockIdx.z"))
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
zo, yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, zo, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[2], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[1], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[2], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[1], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(batch, n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(batch, l, m)).astype(B.dtype)
c_np = np.zeros((batch, n, m), dtype=np.float32)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((batch, n, m), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print(
"batch gemm m=%d n=%d k=%d batch=%d: %f ms"
% (m, n, l, batch, evaluator(a, b, c).mean * 1e3)
)
for bs in range(batch):
c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :])
np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3) | [
768,
1542,
2277,
1496
] |
def METHOD_NAME(self, data):
if 'data' not in data:
return {'data': []}
results = getattr(self, 'results', None)
if results is not None:
data['data'][:] = data['data'][:results]
# Horrible hack to support AKAs.
data['data'] = [x for x in data['data'] if x[0] and x[1]]
if data and data['data'] and len(data['data'][0]) == 4 and isinstance(data['data'][0], tuple):
for idx, datum in enumerate(data['data']):
if not isinstance(datum, tuple):
continue
if not datum[0] and datum[1]:
continue
if datum[2] is not None:
akas = [aka[1:-1] for aka in datum[2]] # remove the quotes
datum[1]['akas'] = akas
if datum[3] is not None:
datum[1][self.img_type] = datum[3]
data['data'][idx] = (datum[0], datum[1])
return data | [
1710,
365
] |
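Each row above pairs a `text` cell (a code snippet of 15 to 7.82k characters, per the header) with an `ids` cell (a sequence of 1 to 7 integers). A minimal sketch for iterating such rows, assuming the table is also available as a JSON Lines export with those two fields (the file name below is illustrative, not part of the original data):

import json

def iter_rows(path="text_ids_rows.jsonl"):  # hypothetical export path
    """Yield (text, ids) pairs from a JSONL export of the text/ids table."""
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            row = json.loads(line)
            yield row["text"], row["ids"]  # field names taken from the header row

# Example usage: count how many snippets contain an assert statement
# n_asserts = sum("assert" in text for text, _ in iter_rows())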