text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7) |
---|---|
def METHOD_NAME(self) -> None:
fatfs = wl_fatfsgen.WLFATFS()
fatfs.plain_fatfs.create_file('TESTFILE')
fatfs.init_wl()
fatfs.wl_write_filesystem(CFG['output_file'])
with open(CFG['output_file'], 'rb') as fs_file:
file_system = fs_file.read()
self.assertEqual(file_system[0x3000:0x300c], b'TESTFILE \x20') # check entry name and type
self.assertEqual(file_system[0x2000:0x2006], b'\xf8\xff\xff\xff\x0f\x00') # check fat | [
9,
35,
171,
9153,
13270
] |
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
) | [
69,
1072
] |
def METHOD_NAME(datadir,
run_list,
processor_list,
verbose=True,
output_dir=None,
output_file_string="t2",
num_threads=1,
overwrite=True):
""" Wrapper function for RunDSP.
If run_list is > 1 run, can try processing each run on a separate thread
with num_threads > 1. Careful, it's a lot to load in RAM ...
TODO: try out the multiprocessing in Tier 0 as well
"""
# if processor_list is None:
# processor_list = get_default_processor_list()
t1_args = []
for run in run_list:
filepath = os.path.join(datadir, "t1_run{}.h5".format(run))
if not overwrite:
outfilepath = os.path.join(
output_dir, output_file_string + "_run{}.h5".format(run))
if os.path.isfile(outfilepath):
print("Skipping run {} because t2 file already created...".
format(run))
continue
if num_threads == 1:
RunDSP(
filepath,
processor_list,
verbose=verbose,
output_dir=output_dir,
output_file_string=output_file_string)
else:
t1_args.append([filepath, processor_list])
keywords = {"verbose": verbose, "output_dir": output_dir}
if num_threads > 1:
# careful, it's a lot to load in RAM...
max_proc = cpu_count()
num_threads = num_threads if num_threads < max_proc else max_proc
print("Running RunDSP with {} threads ...".format(num_threads))
p = Pool(num_threads)
# p.starmap( partial(ProcessRaw, **keywords), t0_args)
p.starmap(partial(RunDSP, **keywords), t1_args) | [
356,
10291,
1170
] |
def METHOD_NAME(self, config: Config):
config_options = self._make_config()
config.register(config_options)
if config.configured_section("pamauthenticator"):
self.provider = config.pamauthenticator_provider
self.service = config.pamauthenticator_service
self.encoding = config.pamauthenticator_encoding
self.check_account = config.pamauthenticator_check_account
self.admin_groups = config.pamauthenticator_admin_groups
self.maintainer_groups = config.pamauthenticator_maintainer_groups
self.member_groups = config.pamauthenticator_member_groups
self.is_enabled = True
else:
self.is_enabled = False
super().METHOD_NAME(config) | [
111
] |
def METHOD_NAME(name, can_create_projects=True):
"""Create an organisation with the given name."""
org = Organisation.objects.create(
name=name, long_name=name, can_create_projects=can_create_projects
)
return org | [
129,
4074
] |
def METHOD_NAME(self, datapoint: Any, **kwargs):
# Featurize str data
if isinstance(datapoint, (str, np.str_)):
return self._featurize_string(datapoint)
# Featurize mol data
else:
return self._featurize_mol(datapoint) | [
11246
] |
def METHOD_NAME(self, session: Session) -> None: ... | [
0,
240
] |
def METHOD_NAME(self):
f = open(str(self.process.script_path), "w")
f.write("import time\n")
f.write("time.sleep(100)\n")
f.close()
self.process.activate()
assert not self.process.poll()
self.process.cleanup() | [
9,
643,
356
] |
def METHOD_NAME(result):
"""
Chew the results a bit and show a nice summary
Args: raw trex results
Returns: A list of samples with the following format:
[
    {
        "time_delta": 0.1,
        "packets": {
            "port0": {
                "tx_delta": 12345,
                "rx_delta": 12334
            },
            "port1": {
                "tx_delta": 12345,
                "rx_delta": 12334
            }
        }
    }
]
"""
prev_time = result["start_time"]
prev_tx_val = {}
prev_rx_val = {}
digested_results=[]
for res in result["data"]:
sample={}
time_delta = res["timestamp"] - prev_time
sample["time_delta"]=time_delta
packets={}
for port in res["measurement"]:
if port == "global" or port == "total" or port == "flow_stats" or port == "latency":
continue
tx_delta = res["measurement"][port]["opackets"] - (prev_tx_val.get(port) or 0)
rx_delta = res["measurement"][port]["ipackets"] - (prev_rx_val.get(port) or 0)
packets[port] = {
"tx_delta": tx_delta,
"rx_delta": rx_delta
}
prev_tx_val[port] = res["measurement"][port]["opackets"]
prev_rx_val[port] = res["measurement"][port]["ipackets"]
sample["packets"]=packets
digested_results.append(sample)
prev_time = res["timestamp"]
return digested_results | [
5347
] |
def METHOD_NAME(image1, image2):
"""
Compares the two images, pixel by pixel, and returns a new image containing
the lighter values. ::
out = max(image1, image2)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im)) | [
14081
] |
def METHOD_NAME(vae, data, labels):
# display a 2D plot of the digit classes in the latent space
z_mean, _, _ = vae.encoder.predict(data, verbose=0)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show() | [
1288,
636,
1827
] |
def METHOD_NAME(s: str) -> datetime.datetime:
struct = feedparser.datetimes._parse_date(s)
return ts2dt(int(timegm(struct))) | [
214,
11098
] |
def METHOD_NAME(self):
pids = self.pids
all_pushes = self.pushlog
return [Push(pid, all_pushes[str(pid)]) for pid in pids] | [
93,
17371
] |
def METHOD_NAME(tree1, tree2, branch_names, scan_var='floats_dumpTauVariables_pt_DUMP.obj', index_var='ull_dumpTauVariables_EventNumber_DUMP.obj'):
tree2.BuildIndex(index_var)
diff_events = []
for entry_1 in tree1:
ind = int(getattr(tree1, index_var))
tree2.GetEntryWithIndex(ind)
var1 = getattr(tree1, scan_var)
var2 = getattr(tree2, scan_var)
if tree1.evt != tree2.evt:
continue
if round(var1, 6) != round(var2, 6):
diff_events.append(ind)
print('Event', ind)
for branch in branch_names:
v1 = getattr(tree1, branch)
v2 = getattr(tree2, branch)
if round(v1, 6) != round(v2, 6) and v1 > -99.:
print('{b:>43}: {v1:>8.4f}, {v2:>8.4f}'.format(b=branch, v1=v1, v2=v2))
print()
print('Found', len(diff_events), 'events with differences in', scan_var)
print(diff_events) | [
793,
43,
2443
] |
def METHOD_NAME(self, new_root_ref: Incomplete | None = None) -> None: ... | [
77,
10951,
61,
10952
] |
def METHOD_NAME(user, usage_key):
"""
Create a bookmark.
Arguments:
user (User): The user of the bookmark.
usage_key (UsageKey): The usage_key of the bookmark.
Returns:
Dict.
Raises:
ItemNotFoundError: If no block exists for the usage_key.
BookmarksLimitReachedError: if try to create new bookmark when max limit of bookmarks already reached
"""
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
data = {
'user': user,
'usage_key': usage_key
}
if usage_key.course_key.run is None:
raise ItemNotFoundError
if not can_create_more(data):
raise BookmarksLimitReachedError
bookmark, created = Bookmark.create(data)
if created:
_track_event('edx.bookmark.added', bookmark)
return BookmarkSerializer(bookmark, context={'fields': DEFAULT_FIELDS + OPTIONAL_FIELDS}).data | [
129,
11283
] |
def METHOD_NAME(mol, *args):
if mol.nelectron == 1:
if not mol.symmetry or mol.groupname == 'C1':
return rohf.HF1e(mol)
else:
return hf_symm.HF1e(mol, *args)
elif not mol.symmetry or mol.groupname == 'C1':
return rohf.METHOD_NAME(mol, *args)
else:
return hf_symm.METHOD_NAME(mol, *args) | [
10064
] |
def METHOD_NAME(self) -> AuthorizationApplication:
self._completeIfNotSet(self._app)
return self._app.value | [
991
] |
def METHOD_NAME(self, inline_query_result_cached_gif):
assert inline_query_result_cached_gif.type == self.type_
assert inline_query_result_cached_gif.id == self.id_
assert inline_query_result_cached_gif.gif_file_id == self.gif_file_id
assert inline_query_result_cached_gif.title == self.title
assert inline_query_result_cached_gif.caption == self.caption
assert inline_query_result_cached_gif.parse_mode == self.parse_mode
assert inline_query_result_cached_gif.caption_entities == tuple(self.caption_entities)
assert (
inline_query_result_cached_gif.input_message_content.to_dict()
== self.input_message_content.to_dict()
)
assert inline_query_result_cached_gif.reply_markup.to_dict() == self.reply_markup.to_dict() | [
9,
391,
199
] |
def METHOD_NAME(data, color="lightgreen"):
"""
Highlights the minimum in a Series or DataFrame.
"""
attr = f"background-color: {color}"
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_min = data == data.min()
return [attr if v else "" for v in is_min]
else:  # from .apply(axis=None)
is_min = data == data.min().min()
return pd.DataFrame(
np.where(is_min, attr, ""), index=data.index, columns=data.columns
) | [
8186,
1835
] |
def METHOD_NAME(self):
model = Model.from_pretrained(self.model_id)
self.tokenizer = BertTokenizer(
os.path.join(model.model_dir, ModelFile.VOCAB_FILE))
db = Database(
tokenizer=self.tokenizer,
table_file_path=[
os.path.join(model.model_dir, 'databases', fname)
for fname in os.listdir(
os.path.join(model.model_dir, 'databases'))
],
syn_dict_file_path=os.path.join(model.model_dir, 'synonym.txt'),
is_use_sqlite=True)
preprocessor = TableQuestionAnsweringPreprocessor(
model_dir=model.model_dir, db=db)
pipelines = [
pipeline(
Tasks.table_question_answering,
model=model,
preprocessor=preprocessor,
db=db)
]
tableqa_tracking_and_print_results_without_history(pipelines) | [
9,
22,
41,
578,
280,
14897,
41
] |
def METHOD_NAME(session_mocker):
target = 'nucypher.characters.lawful.Alice._check_grant_requirements'
session_mocker.patch(target, return_value=MOCK_IP_ADDRESS) | [
193,
250,
3211,
5186
] |
def METHOD_NAME(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(self):
self.pal[0] = "#aaaaaa"
assert colors.to_hex(self.pal["C1"][0]) == "#aaaaaa"
self.pal["C1"] = "#000000"
assert colors.to_hex(self.pal["C1"][0]) == "#000000" | [
9,
0,
19,
1024
] |
def METHOD_NAME(name, lbn, target, action, profile="default", tgt_type="glob"):
"""
Wrapper function for the stop/disable/activate functions
"""
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
action_map = {
"worker_stop": "STP",
"worker_disable": "DIS",
"worker_activate": "ACT",
}
# Check what needs to be done
status = _worker_status(target, name, action_map[action], profile, tgt_type)
if not status["result"]:
ret["result"] = False
ret["comment"] = "no servers answered the published command modjk.worker_status"
return ret
if status["errors"]:
ret["result"] = False
ret[
"comment"
] = "the following balancers could not find the worker {}: {}".format(
name, status["errors"]
)
return ret
if not status["wrong_state"]:
ret[
"comment"
] = "the worker is in the desired activation state on all the balancers"
return ret
else:
ret["comment"] = "the action {} will be sent to the balancers {}".format(
action, status["wrong_state"]
)
ret["changes"] = {action: status["wrong_state"]}
if __opts__["test"]:
ret["result"] = None
return ret
# Send the action command to target
response = _send_command(action, name, lbn, target, profile, tgt_type)
ret["comment"] = response["msg"]
ret["result"] = response["code"]
return ret | [
13553
] |
def METHOD_NAME(self, value):
self.specialOptions["Module"] = value
self.module = value
return S_OK() | [
0,
298
] |
def METHOD_NAME(opts):
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
common_options(parser, opts)
opts.make_parser(parser, "console_layout")
opts.make_parser(parser, "console_layout_headers")
group = parser.add_argument_group(
"Filters", "See help in mitmproxy for filter expression syntax."
)
opts.make_parser(group, "intercept", metavar="FILTER")
opts.make_parser(group, "view_filter", metavar="FILTER")
return parser | [
575
] |
def METHOD_NAME():
return dataset_ops.Dataset.from_tensor_slices({
"labels": [1., .5, 1., 0.],
"predictions": [1., .75, .25, 0.]}).repeat() | [
1399,
126,
667
] |
def METHOD_NAME(self, GLViewWidget):
self.GLViewWidget = GLViewWidget | [
0,
289,
1179,
706
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(expression):
expr_sps = []
for a in expression.expr.atoms():
if isinstance(a, pysb.core.Observable):
sps = a.species
expr_sps += sps
return set(expr_sps) | [
1728,
280,
1120
] |
def METHOD_NAME(data_connector_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGCPDataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getGCPDataConnector', __args__, opts=opts, typ=GetGCPDataConnectorResult).value
return AwaitableGetGCPDataConnectorResult(
auth=pulumi.get(__ret__, 'auth'),
connector_definition_name=pulumi.get(__ret__, 'connector_definition_name'),
dcr_config=pulumi.get(__ret__, 'dcr_config'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
request=pulumi.get(__ret__, 'request'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type')) | [
19,
4212,
365,
4059
] |
def METHOD_NAME(self, obj):
ct = ContentType.objects.get_for_model(obj)
try:
return OverallRating.objects.filter(object_id=obj.pk, content_type=ct).all().count()
except OverallRating.DoesNotExist:
return 0 | [
123,
181,
4622
] |
def METHOD_NAME(self):
self.updateMRMLFromGUI()
# Trigger preview update
if self.getPreviewNode():
self.delayedAutoUpdateTimer.start() | [
86,
4089,
511,
280,
2139
] |
def METHOD_NAME(self, is_verbose: int):
sudo.log_info("Python Example Policy Plugin "
"version: {}".format(VERSION))
if is_verbose:
sudo.log_info("Python interpreter version:", sys.version) | [
697,
281
] |
def METHOD_NAME(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BUILD_TESTING"] = False
self._cmake.definitions["JPEGXL_STATIC"] = not self.options.shared
self._cmake.definitions["JPEGXL_ENABLE_BENCHMARK"] = False
self._cmake.definitions["JPEGXL_ENABLE_EXAMPLES"] = False
self._cmake.definitions["JPEGXL_ENABLE_MANPAGES"] = False
self._cmake.definitions["JPEGXL_ENABLE_SJPEG"] = False
self._cmake.definitions["JPEGXL_ENABLE_OPENEXR"] = False
self._cmake.definitions["JPEGXL_ENABLE_SKCMS"] = False
self._cmake.definitions["JPEGXL_ENABLE_TCMALLOC"] = False
if tools.cross_building(self):
self._cmake.definitions["CMAKE_SYSTEM_PROCESSOR"] = \
str(self.settings.arch)
self._cmake.configure()
return self._cmake | [
111,
334
] |
def METHOD_NAME(self, oToi, action):
lTokens = oToi.get_tokens()
iLine = oToi.get_line_number()
lReturn = []
value = set_check_value(action)
for iToken, oToken in enumerate(lTokens):
if rules_utils.token_exists_in_token_type_list(oToken, self.analysis_options):
if utils.is_token_at_end_of_line(iToken, lTokens) == value:
iViolation_line_number = iLine + rules_utils.number_of_carriage_returns(lTokens[0:iToken])
sSolution = 'jcl-fix this'
iEndIndex = utils.find_next_non_whitespace_token(iToken + 1, lTokens) - 1
oMyToi = oToi.extract_tokens(iToken + 1, iEndIndex)
oViolation = violation.New(iViolation_line_number, oMyToi, sSolution)
dAction = {}
dAction['action'] = action
oViolation.set_action(dAction)
lReturn.append(oViolation)
return lReturn | [
250
] |
def METHOD_NAME(minimal_swagger_dict):
del minimal_swagger_dict['definitions']
spec = Spec.from_dict(minimal_swagger_dict)
assert 0 == len(spec.definitions) | [
9,
2706,
130,
2541
] |
def METHOD_NAME(self):
response = milestones_helpers.get_course_milestones_fulfillment_paths(str(self.course.id), self.user)
assert response is None | [
9,
19,
1122,
8059,
14626,
3336,
610
] |
def METHOD_NAME(self, request):
"""Ensure the request is valid.
The protected resource calls the introspection endpoint using
an HTTP POST request with parameters sent as
"application/x-www-form-urlencoded".
token REQUIRED. The string value of the token.
token_type_hint OPTIONAL.
A hint about the type of the token submitted for
introspection. The protected resource MAY pass this parameter to
help the authorization server optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it
is able to detect the token type automatically.
* access_token: An Access Token as defined in [`RFC6749`],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [`RFC6749`],
`section 1.5`_
The introspection endpoint MAY accept other OPTIONAL
parameters to provide further context to the query. For
instance, an authorization server may desire to know the IP
address of the client accessing the protected resource to
determine if the correct client is likely to be presenting the
token. The definition of this or any other parameters are
outside the scope of this specification, to be defined by
service documentation or extensions to this specification.
.. _`section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
.. _`RFC6749`: http://tools.ietf.org/html/rfc6749
"""
self._raise_on_bad_method(request)
self._raise_on_bad_post_request(request)
self._raise_on_missing_token(request)
self._raise_on_invalid_client(request)
self._raise_on_unsupported_token(request) | [
187,
11172,
377
] |
def METHOD_NAME(self):
"""
Retrieves the presence of the device
Returns:
bool: True if device is present, False if not
"""
attr_path = self.__thermal_temp_attr
return os.path.isfile(attr_path) | [
19,
4061
] |
def METHOD_NAME():
random_alias = "".join((choice(string.ascii_letters) for _ in range(100)))
alias_dict = {random_alias: {}}
index = Index("i", using="alias")
index.aliases(**alias_dict)
assert index._aliases == index.to_dict()["aliases"] == alias_dict | [
9,
2334,
2475,
280,
24,
553
] |
def METHOD_NAME(items):
# Ensure test_cli.py and test_black.py and test_inspect.py run first before any asyncio code kicks in
move_to_front(items, "test_cli")
move_to_front(items, "test_black")
move_to_front(items, "test_inspect_cli")
move_to_front(items, "test_serve_with_get")
move_to_front(items, "test_serve_with_get_exit_code_for_error")
move_to_front(items, "test_inspect_cli_writes_to_file")
move_to_front(items, "test_spatialite_error_if_attempt_to_open_spatialite")
move_to_front(items, "test_package")
move_to_front(items, "test_package_with_port") | [
2595,
1098,
7287
] |
def METHOD_NAME(graph, node_ids, samples, edge_types):
""" Heter-EgoGraph
____ n1
/----n2
etype1 /---- n3
--------/-----n4
/ \-----n5
/
/
start_n
\ /----n6
\ etype2 /---- n7
--------/-----n8
\-----n9
TODO @Yelrose: Speed up and standardize to pgl.distributed.sampling
"""
# All Nodes
all_new_nodes = [node_ids]
# Node Index
all_new_nodes_index = [np.zeros_like(node_ids, dtype="int64")]
# The Ego Index for each graph
all_new_nodes_ego_index = [np.arange(0, len(node_ids), dtype="int64")]
unique_nodes = set(node_ids)
ego_graph_list = [
EgoInfo(
node_id=[n], edges=[], edges_type=[], edges_weight=[])
for n in node_ids
]
for sample in samples:
cur_node_ids = all_new_nodes[-1]
cur_node_ego_index = all_new_nodes_ego_index[-1]
cur_node_index = all_new_nodes_index[-1]
nxt_node_ids = []
nxt_node_ego_index = []
nxt_node_index = []
for edge_type_id, edge_type in enumerate(edge_types):
cur_succs = graph.sample_successor(
cur_node_ids, max_degree=sample, edge_type=edge_type)
for succs, ego_index, parent_index in zip(
cur_succs, cur_node_ego_index, cur_node_index):
if len(succs) == 0:
succs = [0]
ego = ego_graph_list[ego_index]
for succ in succs:
unique_nodes.add(succ)
succ_index = len(ego.node_id)
ego.node_id.append(succ)
nxt_node_ids.append(succ)
nxt_node_ego_index.append(ego_index)
nxt_node_index.append(succ_index)
if succ == 0:
ego.edges.append((succ_index, succ_index))
ego.edges_type.append(edge_type_id)
ego.edges_weight.append(1.0 / len(succs))
else:
ego.edges.append((succ_index, parent_index))
ego.edges_type.append(edge_type_id)
ego.edges_weight.append(1.0 / len(succs))
all_new_nodes.append(nxt_node_ids)
all_new_nodes_index.append(nxt_node_index)
all_new_nodes_ego_index.append(nxt_node_ego_index)
for ego in ego_graph_list:
pg = pgl.Graph(
num_nodes=len(ego.node_id),
edges=ego.edges,
edge_feat={
"edge_type": np.array(
ego.edges_type, dtype="int64"),
"edge_weight": np.array(
ego.edges_weight, dtype="float32"),
},
node_feat={"node_id": np.array(
ego.node_id, dtype="int64"), })
ego.graph = pg
return ego_graph_list, list(unique_nodes) | [
12050,
303,
734
] |
def METHOD_NAME(self, telescope_type):
table = self.loader.read_telescope_events([telescope_type])
self.log.info("Events read from input: %d", len(table))
if len(table) == 0:
raise TooFewEvents(
f"Input file does not contain any events for telescope type {telescope_type}"
)
mask = self.regressor.quality_query.get_table_mask(table)
table = table[mask]
self.log.info("Events after applying quality query: %d", len(table))
if len(table) == 0:
raise TooFewEvents(
f"No events after quality query for telescope type {telescope_type}"
)
table = self.regressor.feature_generator(table, subarray=self.loader.subarray)
feature_names = self.regressor.features + [self.regressor.target]
table = table[feature_names]
valid = check_valid_rows(table)
if np.any(~valid):
self.log.warning("Dropping non-predictable events.")
table = table[valid]
n_events = self.n_events.tel[telescope_type]
if n_events is not None:
if n_events > len(table):
self.log.warning(
"Number of events in table (%d) is less than requested number of events %d",
len(table),
n_events,
)
else:
self.log.info("Sampling %d events", n_events)
idx = self.rng.choice(len(table), n_events, replace=False)
idx.sort()
table = table[idx]
return table | [
203,
410
] |
def METHOD_NAME(
m: size,
n: size,
tile: [i32][m, n] @ AMX_TILE,
):
assert m <= 16
assert n <= 16
for i in seq(0, m):
for j in seq(0, n):
tile[i, j] = 0.0 | [
313,
7507
] |
def METHOD_NAME(self):
audit_info = AWS_Audit_Info(
session_config=None,
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
region_name=AWS_REGION,
),
audited_account=AWS_ACCOUNT_NUMBER,
audited_account_arn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root",
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=AWS_REGION,
credentials=None,
assumed_role_info=None,
audited_regions=None,
organizations_metadata=None,
audit_resources=None,
mfa_enabled=False,
audit_metadata=Audit_Metadata(
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
),
)
return audit_info | [
0,
4331,
1422,
100
] |
def METHOD_NAME(evt):
d.remove() | [
212
] |
def METHOD_NAME():
application.METHOD_NAME() | [
22
] |
def METHOD_NAME(self):
"""test form label"""
# get object
form = OsimportnameForm()
# compare
self.assertEqual(form.fields['osimportname_importer'].label, 'Importer (*)') | [
9,
12256,
2358,
1029,
636
] |
def METHOD_NAME(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_INPUT_TABLE}',
dest=constants.CASSANDRA_TO_BQ_INPUT_TABLE,
required=False,
help='Cassandra to BQ Input table name'
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_INPUT_HOST}',
dest=constants.CASSANDRA_TO_BQ_INPUT_HOST,
required=True,
help='Input hostname for Cassandra cluster'
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_BIGQUERY_LOCATION}',
dest=constants.CASSANDRA_TO_BQ_BIGQUERY_LOCATION,
required=True,
help='Target table in BQ Format: <dataset>.<table-name>'
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_WRITE_MODE}',
dest=constants.CASSANDRA_TO_BQ_WRITE_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_TEMP_LOCATION}',
dest=constants.CASSANDRA_TO_BQ_TEMP_LOCATION,
required=True,
help='Cloud Storage location for staging, Format: <bucket-name>'
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_QUERY}',
dest=constants.CASSANDRA_TO_BQ_QUERY,
required=False,
help='Optional query for selective exports'
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_CATALOG}',
dest=constants.CASSANDRA_TO_BQ_CATALOG,
required=False,
default="casscon",
help='To provide a name for connection between Cassandra and BQ'
)
parser.add_argument(
f'--{constants.CASSANDRA_TO_BQ_INPUT_KEYSPACE}',
dest=constants.CASSANDRA_TO_BQ_INPUT_KEYSPACE,
required=False,
help='Keyspace Name of Cassandra Table'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
if (not getattr(known_args, constants.CASSANDRA_TO_BQ_QUERY)
and (not getattr(known_args, constants.CASSANDRA_TO_BQ_INPUT_KEYSPACE)
or not getattr(known_args, constants.CASSANDRA_TO_BQ_INPUT_TABLE))):
sys.exit("ArgumentParser Error: Either of cassandratobq.input.keyspace and cassandratobq.input.table "
+ "OR cassandratobq.input.query needs to be provided as argument to read data from Cassandra")
elif (getattr(known_args, constants.CASSANDRA_TO_BQ_QUERY)
and (getattr(known_args, constants.CASSANDRA_TO_BQ_INPUT_KEYSPACE)
or getattr(known_args, constants.CASSANDRA_TO_BQ_INPUT_TABLE))):
sys.exit("ArgumentParser Error: Both cassandratobq.input.keyspace and cassandratobq.input.table "
+ "AND cassandratobq.input.query cannot be provided as arguments at the same time.")
return vars(known_args) | [
214,
335
] |
def METHOD_NAME(self):
app = self.state.document.settings.env.app
existing_link = self.options.get('link')
domain = getattr(app.config, 'grid_item_link_domain', None)
if self.has_content:
self.content.replace('|gallery-endpoint|', domain)
if existing_link and domain:
new_link = existing_link.replace('|gallery-endpoint|', domain)
self.options['link'] = new_link
return list(orig_grid_run(self)) | [
1265,
753,
22
] |
def METHOD_NAME(self, req): | [
356,
377
] |
def METHOD_NAME(sample_peaks_df: pd.DataFrame, params: RecalParams, cache_path: Optional[Path]):
models = []
stages = [
('initial', sample_peaks_df),
]
for tf_name, *tf_args in params.transforms:
logger.info(f'Fitting {tf_name} model')
assert tf_name in TRANSFORM, f'Unrecognized transform "{tf_name}"'
tf = TRANSFORM[tf_name](params, *tf_args)
loaded_cache = False
if cache_path is not None:
try:
tf.load_cache(f'{cache_path}/{tf_name}')
loaded_cache = True
logger.debug(f'{tf_name} loaded from cache')
except (IOError, EOFError):
logger.debug(f'{tf_name} not cached')
if not loaded_cache:
tf.fit(sample_peaks_df)
sample_peaks_df = tf.predict(sample_peaks_df)
models.append(tf)
stages.append((tf_name, sample_peaks_df))
# Hacky progress report
eval = EvalPeaksCollector(sample_peaks_df, params)
for stage_name, stage_df in stages:
eval.collect_peaks(stage_df, stage_name)
logger.debug(
pd.DataFrame(
{stage_name: eval.get_stats(stage_name).abs().mean() for stage_name, stage_df in stages}
)
)
eval.reset()
return models, eval | [
56,
1148
] |
def METHOD_NAME(
alembic_config: Config, conn: Connection, run_id: Optional[str] = None, rev: str = "head"
) -> None:
alembic_config.attributes["connection"] = conn
alembic_config.attributes["run_id"] = run_id
upgrade(alembic_config, rev) | [
22,
8171,
738
] |
def METHOD_NAME(self):
time_to_freeze = timezone.now()
with freeze_time(time_to_freeze) as frozen_time:
assert isinstance(frozen_time, FrozenDateTimeFactory)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
conditions=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
filters=[],
actionMatch="any",
filterMatch="all",
frequency=10,
endpoint=None,
)
result = parse_datetime(resp["endpoint"])
endpoint = time_to_freeze.replace(tzinfo=result.tzinfo)
assert result == endpoint
frozen_time.tick(1)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
conditions=[
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
filters=[],
actionMatch="any",
filterMatch="all",
frequency=10,
endpoint=endpoint,
)
assert parse_datetime(resp["endpoint"]) == endpoint | [
9,
841
] |
def METHOD_NAME(self, I):
"""
@brief 增加一个区域积分对象
"""
self.dintegrators.append(I) | [
238,
1674,
9991
] |
def METHOD_NAME(model, data_loader, label_map):
"""
Given a prediction dataset, it gives the prediction results.
Args:
model(obj:`paddle.nn.Layer`): A model to classify texts.
data_loader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.
label_map(obj:`dict`): The label id (key) to label str (value) map.
"""
model.eval()
results = []
for batch in tqdm(data_loader):
input_ids, token_type_ids = batch["input_ids"], batch["token_type_ids"]
logits = model(input_ids, token_type_ids)
probs = F.softmax(logits, axis=1)
idx = paddle.argmax(probs, axis=1).numpy()
idx = idx.tolist()
labels = [label_map[i] for i in idx]
results.extend(labels)
return results | [
2103
] |
def METHOD_NAME(self, pcoll: beam.pvalue.PCollection) -> beam.pvalue.PValue: | [
2450
] |
def METHOD_NAME(self): | [
9,
4789,
6608
] |
def METHOD_NAME():
if not is_enabled():
return False
with open(METADATA_FILE) as f:
metadata_json = json.load(f)
db_config = metadata_json.get("DataBrokerConfiguration")
return bool(
db_config is not None
and db_config.get("publishedServices") is not None
and len(db_config.get("publishedServices")) > 0
) | [
137,
4726,
991
] |
def METHOD_NAME():
banner_msg = app.config.get('BANNER_MSG', None)
banner_msg = sanitize_html(banner_msg)
return dict(banner_msg=banner_msg) | [
0,
3726,
169
] |
def METHOD_NAME():
actions.auto_insert(" < ") | [
544,
837,
488,
489
] |
def METHOD_NAME(self):
return self._prop_one | [
1302,
206
] |
def METHOD_NAME(self):
def load_local_metric(metric_name, *args, **kwargs):
return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)
with patch("datasets.load_metric") as mock_load_metric:
mock_load_metric.side_effect = load_local_metric
yield | [
1080,
125,
1097
] |
def METHOD_NAME(self):
"""Defaults are correct when creating groups"""
scratching = self.scratching
felines = self.felines
dog = self.dog
# TODO: check for group existence via uuid handle
self.assertTrue(felines.gaccess.discoverable)
self.assertTrue(felines.gaccess.public)
self.assertTrue(felines.gaccess.shareable)
self.assertTrue(dog.uaccess.owns_group(felines))
self.assertTrue(dog.uaccess.can_change_group(felines))
self.assertTrue(dog.uaccess.can_view_group(felines))
self.assertTrue(dog.uaccess.can_view_resource(scratching))
self.assertTrue(dog.uaccess.can_change_resource(scratching))
self.assertTrue(dog.uaccess.owns_resource(scratching))
assertGroupResourceUnshareCoherence(self) | [
9,
6690,
1618
] |
def METHOD_NAME(self): | [
9,
99,
6725,
870,
130,
2117,
748
] |
def METHOD_NAME(cls, values: Dict[str, Any]) -> Dict[str, Any]:
definition_type = values.get("header_definition_type")
column_names = values.get("user_provided_column_names")
if definition_type == CsvHeaderDefinitionType.USER_PROVIDED and not column_names:
raise ValidationError("`user_provided_column_names` should be defined if the definition 'User Provided'.", model=CsvFormat)
if definition_type != CsvHeaderDefinitionType.USER_PROVIDED and column_names:
raise ValidationError(
"`user_provided_column_names` should not be defined if the definition is not 'User Provided'.", model=CsvFormat
)
return values | [
187,
665,
335
] |
def METHOD_NAME(self, input_file): | [
73,
2395,
171,
365
] |
def METHOD_NAME(self, widget, parent, context):
"""
Inserts the beginning (A) marker
"""
self.beginning_marker.props.position = context['current-position']
providers.register('playback-markers', self.beginning_marker)
context['current-marker'] = self.beginning_marker
playback.MoveMarkerMenuItem.METHOD_NAME(self, widget, parent, context) | [
69,
1284
] |
def METHOD_NAME(p):
params = p.copy()
for key, item in p.items():
if hasattr(item, "unit"):
params[key] = item.value
params[key + "_unit"] = str(item.unit)
if hasattr(params[key], "tolist"): # convert array to list
params[key] = params[key].tolist()
return params | [
1699,
434
] |
def METHOD_NAME(
log_dir: str, ssh_install, nodeip: str, results: list, directory: str
) -> None:
"""Uploading all the collected logs to magna from installer node
Args:
log_dir directory to store all the logs
ssh_install ssh object of installer node
nodeip host Ip address od installer node
results host Ip address which are failed
Returns:
None
"""
try:
file_share = "http://magna002.ceph.redhat.com/cephci-jenkins"
print("uploading logs to Magna")
ssh_install.exec_command("sudo mkdir -p tmp")
cmd = "sudo mount -t nfs -o sec=sys,nfsvers=4.1 reesi004.ceph.redhat.com:/ tmp"
ssh_install.exec_command(cmd)
stdin, stdout, stderr = ssh_install.exec_command(
f"[ -d tmp/cephci-jenkins/{directory} ]; echo $?"
)
if not directory or json.loads(stdout):
print("Either directory is not provided or given diretory does not exist")
ssh_install.exec_command("mkdir -p tmp/cephci-jenkins/ceph_logs")
ssh_install.exec_command(f"mv {log_dir} tmp/cephci-jenkins/ceph_logs/")
print(
f"Logs Successfully uploaded to Magna, location:{file_share}/ceph_logs/{log_dir}"
)
else:
print(f"Given directory {directory} exist")
ssh_install.exec_command(f"mv {log_dir} ceph_logs")
ssh_install.exec_command(f"mv ceph_logs tmp/cephci-jenkins/{directory}/")
print(
f"Logs Successfully uploaded to Magna, location:{file_share}/{directory}/ceph_logs"
)
except Exception:
results.append(nodeip) | [
172,
1099
] |
def METHOD_NAME():
parser = argparse.ArgumentParser("generic-image-rec train script")
parser.add_argument(
'-c',
'--config',
type=str,
default='configs/config.yaml',
help='config file path')
parser.add_argument(
'-o',
'--override',
action='append',
default=[],
help='config options to be overridden')
parser.add_argument(
'-p',
'--profiler_options',
type=str,
default=None,
help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".'
)
args = parser.METHOD_NAME()
return args | [
214,
335
] |
def METHOD_NAME(self):
return self._slice_indices.array | [
55,
1894
] |
def METHOD_NAME(self, result, state):
if self.beaker_url is None:
return
location = self.post_result(state)
if location is None:
return
logfile = state.get("logfile")
self.put_file(location, "logfile", logfile)
ppstate = pprint.pformat(state).encode("utf8")
self.put_data(location, "state", ppstate)
pattern = os.path.join(state.get("logdir"), "*")
filelist = [f for f in glob.glob(pattern) if f != logfile]
self.put_file_list(location, "", filelist) | [
1798,
9
] |
def METHOD_NAME(
username: str = "",
skip: int = 0,
limit: int = 100,
db: Session = Depends(database.get_db), | [
19,
3467
] |
def METHOD_NAME(items, value):
"""
Converts a list into a dict.
"""
key = items.pop()
result = {}
bracket_index = key.find("[")
if bracket_index > 0:
value = [value]
result[key] = value
if items:
result = METHOD_NAME(items, result)
return result | [
245,
24,
553
] |
def METHOD_NAME(self):
"test OnnxMicroRuntime"
opset = self.config.opset
x = np.array([1, 2, 4, 5, 5, 4]).astype(
np.float32).reshape((3, 2))
model_def = helper.make_model(
opset_imports=[helper.make_operatorsetid('', opset)],
ir_version=constants.OPSET_TO_IR_VERSION[opset],
producer_name='tf2onnx',
producer_version='0.0.1',
graph=helper.make_graph(
name='einsum',
inputs=[helper.make_tensor_value_info('X', TensorProto.FLOAT, None)],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, None)],
nodes=[
helper.make_node('Add', ["X", "X"], ["temp"]),
helper.make_node('Add', ["X", "temp"], ["Y"]),
]))
rt = OnnxMicroRuntime(model_def)
out = rt.run({'X': x})
self.assertIn('X', out)
self.assertIn('Y', out)
self.assertIn('temp', out)
self.assertEqual(len(out), 3) | [
9,
4233,
4325,
1888
] |
def METHOD_NAME(self):
assert (len(self._table) == 0) == (len(self._allDoIds) == 0)
return len(self._table) == 0 and len(self._allDoIds) == 0 | [
137,
35
] |
def METHOD_NAME(f, name):
coeff, params = f.to_fenics(mesh)
return FenicsMatrixBasedOperator(factory(coeff), params,
bc=bc, bc_zero=bc_zero, functional=functional, name=name) | [
1893,
441
] |
def METHOD_NAME(self, current: Dict[str, Any], prefix: str) -> None:
if not current or any(filter(lambda key: isinstance(key, str) and (":" in key or "/" in key), current.keys())):
return self.set(prefix, current)
for key, value in current.items():
path = "{}.{}".format(prefix, key)
if isinstance(value, dict):
self.METHOD_NAME(value, path)
else:
self.set(path, value) | [
86,
2203
] |
def METHOD_NAME(index: int) -> 'Pauli':
return Pauli._XYZ[index % 3] | [
604,
724
] |
def METHOD_NAME(self, policy_name, statement, policy_type, iam_resource_type, resource_name):
# Effect
effect = str(statement['Effect'])
# Action or NotAction
action_string = 'Action' if 'Action' in statement else 'NotAction'
if type(statement[action_string]) != list:
statement[action_string] = [statement[action_string]]
# Resource or NotResource
resource_string = 'Resource' if 'Resource' in statement else 'NotResource'
if type(statement[resource_string]) != list:
statement[resource_string] = [statement[resource_string]]
# Condition
condition = statement['Condition'] if 'Condition' in statement else None
self['permissions'].setdefault(action_string, {})
if iam_resource_type is None:
return
self._parse_actions(effect, action_string, statement[action_string], resource_string,
statement[resource_string], iam_resource_type, resource_name, policy_name, policy_type,
condition) | [
214,
925
] |
def METHOD_NAME(self, text):
"""Sends the request to the duckling server and parses the result."""
try:
payload = {"text": text, "locale": self._locale()}
headers = {"Content-Type": "application/x-www-form-urlencoded; "
"charset=UTF-8"}
response = requests.post(self._url() + "/parse",
data=payload,
headers=headers)
if response.status_code == 200:
return simplejson.loads(response.text)
else:
logger.error("Failed to get a proper response from remote "
"duckling. Status Code: {}. Response: {}"
"".format(response.status_code, response.text))
return []
except requests.exceptions.ConnectionError as e:
logger.error("Failed to connect to duckling http server. Make sure "
"the duckling server is running and the proper host "
"and port are set in the configuration. More "
"information on how to run the server can be found on "
"github: "
"https://github.com/facebook/duckling#quickstart "
"Error: {}".format(e))
return [] | [
17256,
214
] |
def METHOD_NAME(cli, ckan_config):
"""CliRunner uses test config automatically, so we have to explicitly
set CLI `-c` option to `None` when using env `CKAN_INI`.
"""
result = cli.invoke(ckan, [u'-c', None, u'-h'],
env={u'CKAN_INI': ckan_config[u'__file__']})
assert not result.exit_code, result.output | [
9,
200,
2499,
485,
486
] |
def METHOD_NAME(db, num_type):
# Create a table that uses the type
db.execute(text('CREATE TABLE test(x INTEGER)'))
# Insert a uuid value into the table
insert_value = num_type(8)
# Hint: https://docs.sqlalchemy.org/en/13/core/tutorial.html#specifying-bound-parameter-behaviors
stmt = text('INSERT INTO test(x) VALUES (:x)')
stmt = stmt.bindparams(bindparam('x', type_=Integer))
db.execute(stmt, x=insert_value)
# Query for the value
stmt = text('SELECT x FROM test')
# Hint: https://docs.sqlalchemy.org/en/13/core/tutorial.html#specifying-result-column-behaviors
stmt = stmt.columns(x=Integer)
results = db.execute(stmt)
selected_value = results.fetchone()[0]
assert selected_value == insert_value | [
9,
2028,
5841
] |
def METHOD_NAME():
window = MyGame()
window.run() | [
57
] |
def METHOD_NAME(self, org):
# Given an org, work out the start date for a new division set
# based on the end date of the most recent previous division set
divsets = OrganisationDivisionSet.objects.filter(organisation=org)
if not divsets:
raise Exception(
"Could not find any previous DivisionSets for Organisation %s"
% org
)
if not divsets.latest().end_date:
raise Exception(
"End date for previous DivisionSets %s is NULL" % divsets[0]
)
return divsets.latest().end_date + datetime.timedelta(days=1) | [
19,
447,
153
] |
def METHOD_NAME(sensor):
if len(sensor.get_active_streams()) > 0:
sensor.stop()
sensor.close() | [
1462,
1614
] |
def METHOD_NAME(plan: "Plan"):
"""Execute a plan on the local machine."""
# NOTE: Quoting string values causes a double quoting when passed to ``subprocess.run``
command_line = plan.to_argv(quote_string=False)
os_env = os.environ.copy()
env = get_workflow_parameters_env_vars(workflow=plan)
os_env.update(env)
try:
command_str = " ".join(plan.to_argv(with_streams=True))
communication.echo(f"Executing step '{plan.name}': '{command_str}' ...")
with get_plan_std_stream_mapping(plan) as std_streams_mappings:
return_code = subprocess.run(command_line, cwd=os.getcwd(), env=os_env, **std_streams_mappings).returncode
except OSError:
tb = "\n ".join(traceback.format_exc().split("\n"))
raise errors.WorkflowExecuteError(f"Execution of step '{plan.name}' failed:\n\n {tb}", show_prefix=False)
success_codes = plan.success_codes or [0]
if return_code not in success_codes:
message = f"Execution of step '{plan.name}' returned {return_code} exit status which is not in {success_codes}"
raise errors.InvalidSuccessCode(return_code=return_code, message=message) | [
750,
145
] |
def METHOD_NAME(f, size):
"""Read the end of a file
This skips to the next line to avoid starting in the middle of a unicode character.
And returns "" in the case of a UnicodeDecodeError
"""
f.seek(0, 2)
end = f.tell()
if end < 1024 * size:
f.seek(0, 0)
else:
f.seek(end - (1024 * size))
data = f.read()
try:
# Find the first newline in the block
newline = min(1+data.find(b'\n'), len(data))
text = data[newline:].decode("UTF-8")
except UnicodeDecodeError:
return ""
return text | [
203,
171,
1798
] |
def METHOD_NAME(_mock_requests_post):
result = run_cli_dev()
assert 1 == result.exit_code
assert result.exception | [
9,
615,
828,
14172,
377,
442
] |
def METHOD_NAME(self, request):
""" Callback for GET to /orgs/<org_id>/types/actioninvocation/fields """
LOG.debug("action_fields_get")
return requests_mock.create_response(request,
status_code=200,
json={}) | [
1006,
342,
19
] |
def METHOD_NAME(self, name, value):
if is_dict_like(value):
name = '&{%s}' % name
elif is_list_like(value):
name = '@{%s}' % name
else:
name = '${%s}' % name
return name, value | [
3949
] |
def METHOD_NAME(self): | [
9,
250,
44
] |
def METHOD_NAME(self) -> str:
"""
Role ID associated with this instance profile.
"""
return pulumi.get(self, "role_id") | [
1018,
147
] |
def METHOD_NAME():
return NativeEnvironment() | [
485
] |
def METHOD_NAME():
if not frappe.db.exists("UAE VAT Settings", "_Test Company UAE VAT"):
vat_accounts = frappe.get_all(
"Account",
fields=["name"],
filters={"company": "_Test Company UAE VAT", "is_group": 0, "account_type": "Tax"},
)
uae_vat_accounts = []
for account in vat_accounts:
uae_vat_accounts.append({"doctype": "UAE VAT Account", "account": account.name})
frappe.get_doc(
{
"company": "_Test Company UAE VAT",
"uae_vat_accounts": uae_vat_accounts,
"doctype": "UAE VAT Settings",
}
).insert() | [
0,
3205,
6736
] |
async def METHOD_NAME(self, interaction: disnake.Interaction, **kwargs):
"""Refresh the interaction's message with the current state of the menu."""
content_kwargs = await self.get_content()
if interaction.response.is_done():
# using interaction feels cleaner, but we could probably do self.message.edit too
await interaction.edit_original_message(view=self, **content_kwargs, **kwargs)
else:
await interaction.response.edit_message(view=self, **content_kwargs, **kwargs) | [
1920,
459
] |
def METHOD_NAME(li, index):
li = li[:index] + li[index+1:]
return li | [
34
] |