text (stringlengths 15-7.82k) | ids (sequencelengths 1-7) |
---|---|
def METHOD_NAME():
"""Returns a remote path to a public integration test repository."""
from tests.fixtures.config import IT_PUBLIC_REMOTE_REPO_URL
return IT_PUBLIC_REMOTE_REPO_URL | [
1807,
2437,
1609,
10226,
522,
274
] |
def METHOD_NAME(status, run_id=None):
mlops.METHOD_NAME(status, run_id) | [
390,
2685,
452
] |
def METHOD_NAME(volumes, lifecycle):
return MockedBoto3Request(
method="describe_volumes",
response={"Volumes": [{"VolumeId": volume, "Lifecycle": lifecycle} for volume in volumes]},
expected_params={"VolumeIds": volumes},
) | [
19,
2517,
4328,
4331,
377
] |
def METHOD_NAME(identity):
"""Find and set autoenablestep"""
event = identity.events.filter(
autoenablestep__isnull=False, justification=identity.justification
).order_by('-event_time')[0]
if event:
return event.autoenablestep
else:
return 0 | [
416,
-1,
367
] |
def METHOD_NAME(self):
"""
The CourseCodeRedirectView should return a 404 if the course is not published.
"""
factories.CourseFactory(should_publish=False)
url = reverse(
"redirect-course-code-to-course-url", kwargs={"course_code": "00000"}
)
request = self.client.get(url, follow=True)
self.assertEqual(request.status_code, 404)
# Under the hood, a Http404 exception is raised
with self.assertRaises(Http404) as context:
CourseCodeRedirectView().get(request, course_code="00000")
self.assertEqual(str(context.exception), "No page found for course 00000.") | [
9,
4632,
1122,
544,
1736,
41,
255
] |
def METHOD_NAME(self,bondFlag):
obj = self.cmd.get_object_list("pkmol")
if self.ligand=="" and len(obj)==1:
self.ligand= obj[0]
self.cmd.refresh_wizard() | [
74,
2981
] |
def METHOD_NAME(self):
if self.settings.os == 'Windows':
del self.options.fPIC
if self.settings.arch not in ["x86", "x86_64"]:
del self.options.sse
del self.options.avx
if "arm" not in self.settings.arch:
del self.options.neon | [
200,
1881
] |
def METHOD_NAME(self):
super().METHOD_NAME()
self.tmp_config_dir = TemporaryDirectory()
self.tmp_runtime = f"{self.tmp_config_dir.name}/runtime.yaml"
self._extend_config(self.tmp_runtime, {})
self.tmp_harmonization = f"{self.tmp_config_dir.name}/harmonization.yaml"
self._extend_config(self.tmp_harmonization, {}, useyaml=False)
self.ctl_conf_patcher = mock.patch.multiple(ctl, RUNTIME_CONF_FILE=self.tmp_runtime,
HARMONIZATION_CONF_FILE=self.tmp_harmonization)
self.ctl_conf_patcher.start()
self.intelmqctl = ctl.IntelMQController() | [
0,
1
] |
def METHOD_NAME(cls):
super(TestTfliteTransferConverter, cls).METHOD_NAME()
cls._default_base_model_dir = tempfile.mkdtemp("tflite-transfer-test-base")
model = tf.keras.Sequential(
[layers.Dense(units=DEFAULT_INPUT_SIZE, input_shape=(DEFAULT_INPUT_SIZE,))]
)
model.build()
tfv1.keras.experimental.export_saved_model(model, cls._default_base_model_dir) | [
0,
1,
2
] |
def METHOD_NAME(self):
accounts = []
for i in range(1,5):
accounts.extend(EthAccount.createEmulatorAccountsFromMnemonic(i, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=32*EthUnit.ETHER.value, total=1, password="admin"))
for account in accounts:
self.assertTrue(self.wallet1._web3.eth.getBalance(account.address) >= 32*EthUnit.ETHER.value) | [
9,
3992,
2365,
598
] |
def METHOD_NAME(string1, string2):
"""Returns first position where string1 and string2 differ."""
for count, c in enumerate(string1):
if len(string2) <= count:
return count
if string2[count] != c:
return count | [
2443,
934
] |
def METHOD_NAME(self) -> int:
return 13 | [
235,
4233,
6979
] |
def METHOD_NAME(
self,
algo_wrapper: InsistSuggest[FixedSuggestionAlgo],
monkeypatch: MonkeyPatch,
):
"""Test that when the algo can't produce a new trial, and there is no InsistWrapper, the
SpaceTransform wrapper fails to sample a new trial.
"""
algo_without_wrapper = algo_wrapper.algorithm
assert isinstance(algo_without_wrapper, FixedSuggestionAlgo)
calls: int = 0
# Make the wrapper insist enough so that it actually
# gets a trial after asking enough times:
fixed_suggestion = algo_without_wrapper.space.sample(1)[0]
def _suggest(num: int) -> list[Trial]:
nonlocal calls
calls += 1
if calls < 5:
return []
return [fixed_suggestion]
monkeypatch.setattr(algo_without_wrapper, "suggest", _suggest)
trials = algo_without_wrapper.suggest(1)
assert calls == 1
assert not trials | [
9,
1993,
-1,
529,
291
] |
def METHOD_NAME(response_data, query_dict):
query_dict.update({
'uid': response_data.get('uid', ''),
'access_token': response_data.get('access_token', '')
})
return query_dict | [
19,
539,
553
] |
def METHOD_NAME(req_version, *, force=False):
cur_version = db.engine.execute("SELECT current_setting('server_version_num')::int / 10000").scalar()
if cur_version >= req_version:
return True
click.secho(f'Postgres version {cur_version} too old; you need at least {req_version} (or newer)', fg='red')
if force:
click.secho('Continuing anyway, you have been warned.', fg='yellow')
return True
return False | [
208,
11,
281
] |
def METHOD_NAME():
return '/gpu:0' if tf.test.is_gpu_available() else '/cpu:0' | [
398
] |
def METHOD_NAME():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name',
type=str,
default='AttentionCluster',
help='name of model to train.')
parser.add_argument('--config',
type=str,
default='configs/attention_cluster.txt',
help='path to config file of model')
parser.add_argument('--use_gpu',
type=ast.literal_eval,
default=True,
help='default use gpu.')
parser.add_argument(
'--weights',
type=str,
default=None,
help=
'weight path, None to automatically download weights provided by Paddle.'
)
parser.add_argument('--batch_size',
type=int,
default=1,
help='sample number in a batch for inference.')
parser.add_argument('--filelist',
type=str,
default='./data/TsnExtractor.list',
help='path to inferenece data file lists file.')
parser.add_argument('--log_interval',
type=int,
default=1,
help='mini-batch interval to log.')
parser.add_argument('--infer_topk',
type=int,
default=20,
help='topk predictions to restore.')
parser.add_argument('--save_dir',
type=str,
default=os.path.join('data', 'tsn_features'),
help='directory to store tsn feature results')
parser.add_argument('--video_path',
type=str,
default=None,
help='directory to store results')
args = parser.METHOD_NAME()
return args | [
214,
335
] |
def METHOD_NAME(self, list_head):
while list_head:
if app := list_head["data"]:
yield app.dereference()
list_head = list_head["next"] | [
4716,
991,
245
] |
def METHOD_NAME(self, grib_api, min_data, max_data):
# Check the use of a mask has been turned on via:
# eccodes.codes_set(grib_message, 'bitmapPresent', 1)
# eccodes.codes_set_double(grib_message, 'missingValue', ...)
# and that a suitable fill value has been chosen.
grib_api.codes_set.assert_called_once_with(GRIB_MESSAGE,
'bitmapPresent', 1)
args, = grib_api.codes_set_double.call_args_list
(message, key, fill_value), kwargs = args
self.assertIs(message, GRIB_MESSAGE)
self.assertEqual(key, 'missingValue')
self.assertTrue(fill_value < min_data or fill_value > max_data,
'Fill value {} is not outside data range '
'{} to {}.'.format(fill_value, min_data, max_data))
return fill_value | [
638,
5651,
661
] |
def METHOD_NAME(self):
if not self.closed:
self.skip()
self.closed = True | [
1462
] |
def METHOD_NAME(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime | [
17295
] |
def METHOD_NAME(self, orect):
"""Placeholder."""
return self.intersect(orect).area() | [
2820
] |
def METHOD_NAME():
display.set_pen(12)
for x in range(5):
display.circle(30 * x, 64, hills1[x])
display.set_pen(9)
for x in range(2):
display.circle(60 * x + 15, 64, hills2[x])
display.set_pen(3)
for x in range(2):
display.circle(60 * x + 30 + 15, 64, hills3[x]) | [
1100,
16181
] |
def METHOD_NAME(tmp_path):
badfile_contents = """
#!jinja|yaml
host: 127.0.0.2
port: 22
THIS FILE IS NOT WELL FORMED YAML
sudo: true
user: scoundrel
"""
basic_contents = """
#!jinja|yaml
host: 127.0.0.2
port: 22
sudo: true
user: scoundrel
"""
domain_contents = """
#!jinja|yaml
port: 2222
user: george
"""
empty_contents = """
"""
with pytest.helpers.temp_file(
"test1_us-east-2_test_badfile", badfile_contents, directory=tmp_path
), pytest.helpers.temp_file(
"test1_us-east-2_test_basic", basic_contents, directory=tmp_path
), pytest.helpers.temp_file(
"test1_us-east-2_test_domain", domain_contents, directory=tmp_path
), pytest.helpers.temp_file(
"test1_us-east-2_test_empty", empty_contents, directory=tmp_path
):
yield | [
129,
10679,
1537
] |
def METHOD_NAME(cls):
_registered_printers[output_format.lower()] = cls
return cls | [
972
] |
def METHOD_NAME(self, shared_dist_ctx_globaldask, my_ds, benchmark):
ctx = Context(executor=DelayedJobExecutor())
udf = EchoMergeUDF()
resources = DelayedJobExecutor.get_resources_from_udfs(udf)
def doit():
result = ctx.run_udf(dataset=my_ds, udf=udf)
return result['intensity'].delayed_raw_data.sum(axis=0).compute(resources=resources)
benchmark(doit) | [
9,
1953,
922,
411
] |
def METHOD_NAME(self):
self.object_position = self.tell() | [
3068,
279,
195
] |
def METHOD_NAME(self, filename):
self.queue_screenshot(filename)
p5.renderer.flush_geometry()
self._save_buffer() | [
3249
] |
def METHOD_NAME(builder: GraphBuilder) -> None:
collected = roundtrip_check(AzureCloudService, builder)
assert len(collected) == 1 | [
9,
4054,
549
] |
def METHOD_NAME(keymap_data):
# Validate to ensure we don't have to deal with bad data - handles stdin/file
validate(keymap_data, 'qmk.keymap.v1')
kb_name = keymap_data['keyboard']
km_name = keymap_data['keymap']
km_folder = keymaps(kb_name)[0] / km_name
keyboard_keymap = km_folder / 'keymap.json'
# This is the deepest folder in the expected tree
keyboard_keymap.parent.mkdir(parents=True, exist_ok=True)
# Dump out all those lovely files
keyboard_keymap.write_text(json.dumps(keymap_data, cls=KeymapJSONEncoder, sort_keys=True))
return (kb_name, km_name) | [
512,
12894
] |
def METHOD_NAME(clrd):
clrd = clrd.groupby("LOB").sum()
assert clrd.iloc[[1, 0], :].iloc[0, 1] == clrd.iloc[1, 1] # row
assert clrd.iloc[[1, 0], [1, 0]].iloc[0, 0] == clrd.iloc[1, 1] # col
assert clrd.loc[:, ["CumPaidLoss", "IncurLoss"]].iloc[0, 0] == clrd.iloc[0, 1]
assert (
clrd.loc[["ppauto", "medmal"], ["CumPaidLoss", "IncurLoss"]].iloc[0, 0]
== clrd.iloc[3]["CumPaidLoss"]
)
assert (
clrd.loc[clrd["LOB"] == "comauto", ["CumPaidLoss", "IncurLoss"]]
== clrd[clrd["LOB"] == "comauto"].iloc[:, [1, 0]]
)
assert clrd.groupby("LOB").sum() == clrd | [
9,
-1,
-1,
852
] |
async def METHOD_NAME():
while True:
for bc in await get_balance_checks():
await redeem_lnurl_withdraw(bc.wallet, bc.url)
await asyncio.sleep(60 * 60 * 6) # every 6 hours | [
407,
3101,
2676
] |
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}",
**self.url_parameters
) | [
274
] |
def METHOD_NAME(self, obj):
if obj.organization:
return OrganizationStubSerializer(obj.organization).data
return None | [
19,
1044
] |
def METHOD_NAME(geo_mean, num_runs):
return util.run_benchmark(geo_mean, num_runs, test_regex_effbot) | [
22
] |
def METHOD_NAME(self):
audit_info = AWS_Audit_Info(
session_config=None,
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audited_account=AWS_ACCOUNT_NUMBER,
audited_account_arn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root",
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=None,
credentials=None,
assumed_role_info=None,
audited_regions=["us-east-1", "eu-west-1"],
organizations_metadata=None,
audit_resources=None,
mfa_enabled=False,
audit_metadata=Audit_Metadata(
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
),
)
return audit_info | [
0,
4331,
1422,
100
] |
def METHOD_NAME(self, chain_length, optimize_dataset): | [
1668,
422,
1927
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(session_browser):
GivenPage(session_browser.driver).opened_with_body(
'''
<ul>Hello:
<li>Yakov</li>
<li>Jakob</li>
</ul>
'''
)
session_browser.all('li').should(have.text('ako').each)
session_browser.all('li').should(have.text('').each)
# TODO: this test is a bit weird here...
# the file is called condition__collection__have_texts_test.py
# specifying that here we tests collection conditions...
# but have.text is not a collection condition
# it is an element condition applied to collection
# i am not sure that this file is a correct place for such test
# but we are going to change how a collection can apply an element condition
# to assert its content
# we either go `collection.should_each(have.text(''))` way
# or `collection.should(have.text('').each)` way
# in the latter case probably
# we will move this test to condition__element__have_texts_test.py
# in the latter case probably we keep it here
# for now let's keep it here, and then we see... | [
9,
427,
384,
526
] |
def METHOD_NAME(P, XDot_flag, XDDot_flag, accuracy):
testFailCount = 0 # zero unit test result counter
testMessages = [] # create empty array to store test log messages
X1 = np.array([0, 1, 2, 3, 4, 5, 6])
X2 = np.array([5, 4, 3, 2, 1, 0, 1])
X3 = np.array([3, 2, 1, 2, 3, 4, 5])
Input = BSpline.InputDataSet(X1, X2, X3)
Input.setT([0, 2, 3, 5, 7, 8, 10])
if XDot_flag:
Input.setXDot_0([0, 0, 0])
Input.setXDot_N([0, 0, 0])
if XDDot_flag:
Input.setXDDot_0([0, 0, 0])
Input.setXDDot_N([0.2, 0, 0])
Output = BSpline.OutputDataSet()
BSpline.interpolate(Input, 101, P, Output)
for i in range(len(Output.T)):
for j in range(len(Input.T)):
if abs(Output.T[i][0] - Input.T[j][0]) < accuracy:
if not abs(Output.X1[i][0] - X1[j]) < accuracy:
testFailCount += 1
testMessages.append("FAILED: BSpline." + " Function of order {} failed coordinate #1 check at time t = {}".format(P,Input.T[j][0]))
if not abs(Output.X2[i][0] - X2[j]) < accuracy:
testFailCount += 1
testMessages.append("FAILED: BSpline." + " Function of order {} failed coordinate #2 check at time t = {}".format(P,Input.T[j][0]))
if not abs(Output.X3[i][0] - X3[j]) < accuracy:
testFailCount += 1
testMessages.append("FAILED: BSpline." + " Function of order {} failed coordinate #3 check at time t = {}".format(P,Input.T[j][0]))
if XDot_flag:
if not ((abs(Output.XD1[0][0]-Input.XDot_0[0][0]) < accuracy) and
(abs(Output.XD2[0][0]-Input.XDot_0[1][0]) < accuracy) and
(abs(Output.XD3[0][0]-Input.XDot_0[2][0]) < accuracy)):
testFailCount += 1
testMessages.append("FAILED: BSpline." + " Function of order {} failed first derivative at starting point".format(P))
if XDDot_flag:
if not ((abs(Output.XDD1[0][0]-Input.XDDot_0[0][0]) < accuracy) and
(abs(Output.XDD2[0][0]-Input.XDDot_0[1][0]) < accuracy) and
(abs(Output.XDD3[0][0]-Input.XDDot_0[2][0]) < accuracy)):
testFailCount += 1
testMessages.append("FAILED: BSpline." + " Function of order {} failed second derivative at starting point".format(P))
return [testFailCount, ''.join(testMessages)] | [
1484,
6990,
9,
559
] |
def METHOD_NAME(self, request, view, obj=None):
if not obj:
# FIXME: For some reason this needs to return True
# because it is first called with obj=None?
return True
if getattr(view, 'is_variable_data', False):
return check_user_access(request.user, view.model, 'change', obj,
dict(variables=request.data))
else:
return check_user_access(request.user, view.model, 'change', obj,
request.data) | [
250,
1276,
804
] |
def METHOD_NAME(self, record: logging.LogRecord) -> None:
if self.shortcut:
logger.info('oci-request-reporting to elasticsearch was shortcut due to previous error')
return
method = record.__dict__.get('method')
url = record.__dict__.get('url')
try:
self.es_client.store_document(
index='oci_request',
body={
'method': method,
'url': url,
'stacktrace': traceback.format_stack(),
}
)
except:
logger.warning(traceback.format_exc())
logger.warning('could not send oci request log to elastic search')
self.shortcut = True | [
2648
] |
def METHOD_NAME(prefix: str, coder: _Coder[_T]):
return _RamdiskIndexFile(lambda index: prefix + str(index + 1), coder) | [
43,
426
] |
def METHOD_NAME(self, source: OAuthSource, request: HttpRequest) -> Challenge:
"""Pre-general all the things required for the JS SDK"""
apple_client = AppleOAuthClient(
source,
request,
callback=reverse(
"authentik_sources_oauth:oauth-client-callback",
kwargs={"source_slug": source.slug},
),
)
args = apple_client.get_redirect_args()
return AppleLoginChallenge(
instance={
"client_id": apple_client.get_client_id(),
"scope": "name email",
"redirect_uri": args["redirect_uri"],
"state": args["state"],
"type": ChallengeTypes.NATIVE.value,
}
) | [
273,
4329
] |
def METHOD_NAME(vm):
"""kill vm process
:param vm: vm object
"""
pid = vm.process.get_pid()
test.log.debug("Ending VM %s process (killing PID %s)",
vm.name, pid)
try:
utils_misc.kill_process_tree(pid, 9, timeout=60)
test.log.debug("VM %s down (process killed)", vm.name)
except RuntimeError:
test.error("VM %s (PID %s) is a zombie!"
% (vm.name, vm.process.get_pid())) | [
643,
944,
356
] |
def METHOD_NAME(**kwargs):
"""
Renews a VOMS proxy in the exact same way that :py:func:`law.wlcg.renew_vomsproxy` does, but
with the *vo* attribute set to ``"cms"`` by default.
"""
kwargs.setdefault("vo", "cms")
return law.wlcg.METHOD_NAME(**kwargs) | [
10627,
-1
] |
def METHOD_NAME(self):
"""Yields a rule-based state machine factory method."""
if self.config.getoption("capture") != "no":
# allows the state machine to disable pytest capturing
capman = self.config.pluginmanager.get_plugin("capturemanager")
_BrownieStateMachine._capman = capman
# for posix systems we disable the cursor to make the progress spinner prettier
if sys.platform != "win32":
with capman.global_and_fixture_disabled():
sys.stdout.write("\033[?25l")
sys.stdout.flush()
yield METHOD_NAME
if self.config.getoption("capture") != "no" and sys.platform != "win32":
# re-enable the cursor
with capman.global_and_fixture_disabled():
sys.stdout.write("\033[?25h")
sys.stdout.flush() | [
551,
1600
] |
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC | [
200,
1881
] |
def METHOD_NAME(project, logger, reactor):
logger.info("Running Cram command line tests")
cram_tests = list(_find_files(project))
if not cram_tests or len(cram_tests) == 0:
if project.get_property("cram_fail_if_no_tests"):
raise BuildFailedException("No Cram tests found!")
else:
return
pyb_venv = reactor.pybuilder_venv
command_and_arguments = pyb_venv.executable + _cram_command_for(project)
command_and_arguments.extend(cram_tests)
report_file = _report_file(project)
pyb_environ = pyb_venv.environ
if project.get_property("cram_run_test_from_target"):
dist_dir = project.expand_path("$dir_dist")
_prepend_path(pyb_environ, "PYTHONPATH", dist_dir)
script_dir_dist = project.get_property("dir_dist_scripts")
_prepend_path(pyb_environ, "PATH", os.path.join(dist_dir, script_dir_dist))
else:
source_dir = project.expand_path("$dir_source_main_python")
_prepend_path(pyb_environ, "PYTHONPATH", source_dir)
script_dir = project.expand_path("$dir_source_main_scripts")
_prepend_path(pyb_environ, "PATH", script_dir)
return_code = pyb_venv.execute_command(command_and_arguments,
report_file,
env=pyb_environ,
error_file_name=report_file)
if return_code != 0:
error_str = "Cram tests failed! See %s for full details:\n%s" % (report_file, tail_log(report_file))
logger.error(error_str)
raise BuildFailedException(error_str)
report = read_file(report_file)
result = report[-1][2:].strip()
logger.info("Cram tests were fine")
logger.info(result) | [
22,
10997,
450
] |
def METHOD_NAME(
self, kwargs: Dict[str, Any], tmp_path: Path
) -> None:
"""Test _validate_runtime_or_docker no runtime if Docker."""
if "file" in kwargs:
dockerfile = tmp_path / "Dockerfile"
dockerfile.touch() # file has to exist
kwargs["file"] = dockerfile
obj = AwsLambdaHookArgs(
bucket_name="test-bucket",
docker=DockerOptions.parse_obj(kwargs),
source_code=tmp_path,
)
assert not obj.runtime | [
9,
187,
1888,
894,
223,
223,
654
] |
def METHOD_NAME (self, name, attrs):
self._stack.append( (self.current, self._text_parts))
self.current = DataNode (nameChangeDict = self._ncDict)
self._text_parts = []
# xml attributes --> python attributes
for k, v in attrs.items():
self.current._add_xml_attr (TreeBuilder._name_mangle(k), v) | [
447,
669
] |
def METHOD_NAME(self, username):
conn = self._get_ldap_conn()
conn.simple_bind_s(self.instance.config.get("bind_dn"), self.instance.config.get("bind_password"))
user_dn = self._get_user_dn(username)
results = conn.search_s(user_dn, ldap.SCOPE_BASE, attrlist=["*"])
return cleanup_value(results[0][1]) | [
19,
21,
100
] |
def METHOD_NAME(self):
test_data = zip(dishes_and_appetizers, dishes_cleaned)
for variant, (item, result) in enumerate(test_data, start=1):
with self.subTest(f"variation #{variant}", inputs="dishes with appetizers", results="appetizers only"):
error_message = "Expected only appetizers returned, but some dishes remain in the group."
result_type_error = f"You returned {type(separate_appetizers(item[0], item[1]))}, but a list was expected."
self.assertIsInstance(separate_appetizers(item[0], item[1]), list, msg=result_type_error)
self.assertEqual(sorted(separate_appetizers(item[0], item[1])), (sorted(result)), msg=error_message) | [
9,
2018,
-1
] |
def METHOD_NAME(o,status):
global currentTest
if status == 0:
print "test "+str(currentTest)+" failed!"
print o
exit(1)
else:
print "test "+str(currentTest)+" succesful!"
clear_local_and_remote()
currentTest+=1
cmd_ef(CD+" /") | [
250,
1423,
61,
537
] |
async def METHOD_NAME(self, event, subscription_id=None):
if hasattr(event, 'Retain') and hasattr(event, 'NodeId'):
if event.Retain:
self._conditions[event.NodeId] = event
elif event.NodeId in self._conditions:
del self._conditions[event.NodeId]
if subscription_id is not None:
if subscription_id in self.subscriptions:
await self.subscriptions[subscription_id].monitored_item_srv.METHOD_NAME(event)
else:
for sub in self.subscriptions.values():
await sub.monitored_item_srv.METHOD_NAME(event) | [
2117,
417
] |
def METHOD_NAME(duthosts, enum_rand_one_per_hwsku_frontend_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
cmd = "sonic-db-cli CONFIG_DB hget \"CRM|Config\" {threshold_name}_{type}_threshold"
crm_res_list = ["ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor",
"nexthop_group_member", "nexthop_group", "acl_counter", "acl_entry", "fdb_entry"]
res = {}
for item in crm_res_list:
high = duthost.command(cmd.format(threshold_name=item, type="high"))["stdout_lines"][0]
low = duthost.command(cmd.format(threshold_name=item, type="low"))["stdout_lines"][0]
res[item] = {}
res[item]["high"] = high
res[item]["low"] = low
return res | [
6623,
827
] |
def METHOD_NAME(val: float, allow_negative: bool = True) -> str:
if not allow_negative and val < 0:
return "- pF"
return str(SITools.Value(val, "F", FMT_REACT)) | [
275,
12417
] |
def METHOD_NAME(
use_normalization_tables,
connection_to_group_fn,
filter_connection,
connection_to_asset_key_fn,
airbyte_instance,
):
if connection_to_group_fn:
ab_cacheable_assets = load_assets_from_airbyte_project(
file_relative_path(__file__, "./test_airbyte_project"),
create_assets_for_normalization_tables=use_normalization_tables,
connection_to_group_fn=connection_to_group_fn,
connection_filter=(lambda _: False) if filter_connection == "filter_fn" else None,
connection_directories=(
["github_snowflake_ben"] if filter_connection == "dirs" else None
),
connection_to_asset_key_fn=connection_to_asset_key_fn,
)
else:
ab_cacheable_assets = load_assets_from_airbyte_project(
file_relative_path(__file__, "./test_airbyte_project"),
create_assets_for_normalization_tables=use_normalization_tables,
connection_filter=(lambda _: False) if filter_connection == "filter_fn" else None,
connection_directories=(
["github_snowflake_ben"] if filter_connection == "dirs" else None
),
connection_to_asset_key_fn=connection_to_asset_key_fn,
)
ab_assets = ab_cacheable_assets.build_definitions(ab_cacheable_assets.compute_cacheable_data())
if filter_connection == "filter_fn":
assert len(ab_assets) == 0
return
tables = {
"dagster_releases",
"dagster_tags",
"dagster_teams",
"dagster_array_test",
"dagster_unknown_test",
} | (
{
"dagster_releases_assets",
"dagster_releases_author",
"dagster_tags_commit",
"dagster_releases_foo",
"dagster_array_test_author",
}
if use_normalization_tables
else set()
)
if connection_to_asset_key_fn:
tables = {
connection_to_asset_key_fn(
AirbyteConnectionMetadata(
"Github <> snowflake-ben", "", use_normalization_tables, []
),
t,
).path[0]
for t in tables
}
assert ab_assets[0].keys == {AssetKey(t) for t in tables}
assert all(
[
ab_assets[0].group_names_by_key.get(AssetKey(t))
== (
connection_to_group_fn("GitHub <> snowflake-ben")
if connection_to_group_fn
else "github_snowflake_ben"
)
for t in tables
]
)
assert len(ab_assets[0].op.output_defs) == len(tables)
responses.add(
method=responses.POST,
url=airbyte_instance.api_base_url + "/connections/get",
json=get_project_connection_json(),
status=200,
)
responses.add(
method=responses.POST,
url=airbyte_instance.api_base_url + "/connections/sync",
json={"job": {"id": 1}},
status=200,
)
responses.add(
method=responses.POST,
url=airbyte_instance.api_base_url + "/jobs/get",
json=get_project_job_json(),
status=200,
)
res = materialize(
with_resources(
ab_assets,
resource_defs={
"airbyte": airbyte_resource.configured(
{
"host": "some_host",
"port": "8000",
"poll_interval": 0,
}
)
},
)
)
materializations = [
event.event_specific_data.materialization
for event in res.events_for_node("airbyte_sync_87b7f")
if event.event_type_value == "ASSET_MATERIALIZATION"
]
assert len(materializations) == len(tables)
assert {m.asset_key for m in materializations} == {AssetKey(t) for t in tables} | [
9,
557,
280,
155
] |
def METHOD_NAME() -> Iterator[tuple[str, AnyDict]]:
for job in _yield_pipeline_jobs(pipeline_iid):
for ui_job_name in jobs_of_interest:
if job["name"] == ui_job_name:
yield job["name"], _get_job_ui_fixtures_results(job) | [
764,
59,
99
] |
def METHOD_NAME(df_factory):
df1 = df_factory(x=[1, 2], y=[4, 5])
df1b = df_factory(x=[1, 2], y=[4, 5])
df2 = df_factory(x=[1, 3], y=[4, 5])
assert df1.fingerprint() == df1b.fingerprint()
assert df1.fingerprint() != df2.fingerprint()
assert df1.fingerprint() == df1b.fingerprint()
df1.add_variable('q', 1) # this changes the state
assert df1.fingerprint() != df1b.fingerprint()
# but if we treeshake, it does not
assert df1.fingerprint(treeshake=True) != df1b.fingerprint() | [
9,
1616
] |
def METHOD_NAME(
multi_pvc_factory, interface, project=None, status="", storageclass=None
):
pvc_num = 1
pvc_size = 5
if interface == "CephBlockPool":
access_modes = ["ReadWriteOnce", "ReadWriteOnce-Block", "ReadWriteMany-Block"]
else:
access_modes = ["ReadWriteOnce", "ReadWriteMany"]
# Create pvcs
pvc_objs = multi_pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=pvc_size,
access_modes=access_modes,
access_modes_selection="distribute_random",
status=status,
num_of_pvc=pvc_num,
wait_each=False,
timeout=360,
)
for pvc_obj in pvc_objs:
pvc_obj.interface = interface
return pvc_objs | [
129,
16824
] |
def METHOD_NAME(self):
s = slice(10, 20, 3)
for protocol in (0,1,2):
t = loads(dumps(s, protocol))
self.assertEqual(s, t)
self.assertEqual(s.indices(15), t.indices(15))
self.assertNotEqual(id(s), id(t)) | [
9,
1385
] |
def METHOD_NAME(self):
self.assertEqual(
check_cfc.get_output_file(['clang', '-o', 'test.o']), 'test.o')
self.assertEqual(
check_cfc.get_output_file(['clang', '-otest.o']), 'test.o')
self.assertIsNone(
check_cfc.get_output_file(['clang', '-gline-tables-only']))
# Can't get output file if more than one input file
self.assertIsNone(
check_cfc.get_output_file(['clang', '-c', 'test.cpp', 'test2.cpp']))
# No output file specified
self.assertIsNone(check_cfc.get_output_file(['clang', '-c', 'test.c'])) | [
9,
19,
146,
171
] |
def METHOD_NAME(self):
# type: () -> ()
"""
Implement the main query / interface of the monitor class.
In order to combine multiple Monitor objects, call `monitor_step` manually.
If Tasks are detected in this call,
:return: None
"""
previous_timestamp = self._previous_timestamp or time()
timestamp = time()
try:
# retrieve experiments orders by last update time
task_filter = {
'page_size': 100,
'page': 0,
'status_changed': ['>{}'.format(datetime.utcfromtimestamp(previous_timestamp)), ],
'project': self._get_projects_ids(),
}
task_filter.update(self.get_query_parameters())
queried_tasks = Task.get_tasks(task_name=self._task_name_filter, task_filter=task_filter)
except Exception as ex:
# do not update the previous timestamp
print('Exception querying Tasks: {}'.format(ex))
return
# process queried tasks
for task in queried_tasks:
try:
self.process_task(task)
except Exception as ex:
print('Exception processing Task ID={}:\n{}'.format(task.id, ex))
self._previous_timestamp = timestamp | [
1863,
367
] |
def METHOD_NAME(self):
def foo():
return len([1, 2, 3])
self.configure_func(foo)
self.assertEqual(foo(), 3)
with swap_item(globals(), "__builtins__", {"len": lambda x: 7}):
self.assertEqual(foo(), 3) | [
9,
2286,
369,
4298,
553,
1720,
1929
] |
def METHOD_NAME(ref_list, save_plot=False, **options):
"""Runs test for a list of refinements and computes error convergence rate"""
l2_err = []
for r in ref_list:
l2_err.append(compute_l2_error(r, **options))
x_log = numpy.log10(numpy.array(ref_list, dtype=float)**-1)
y_log = numpy.log10(numpy.array(l2_err))
setup_name = 'barochead'
fs_type = options['fs_type']
expected_rate = 2
def check_convergence(x_log, y_log, expected_slope, field_str, save_plot):
slope_rtol = 0.2
slope, intercept, r_value, p_value, std_err = stats.linregress(x_log, y_log)
if save_plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
# plot points
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = 0.05*(x_max - x_min)
n = 50
xx = numpy.linspace(x_min - offset, x_max + offset, n)
yy = intercept + slope*xx
# plot line
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[2*n/3], yy[2*n/3], '{:4.2f}'.format(slope),
verticalalignment='top',
horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(field_str)
ref_str = 'ref-' + '-'.join([str(r) for r in ref_list])
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, fs_type])
imgfile += '.png'
img_dir = create_directory('plots')
imgfile = os.path.join(img_dir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if expected_slope is not None:
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert abs(slope - expected_slope)/expected_slope < slope_rtol, err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, expected_rate, 'barochead', save_plot) | [
22,
2240
] |
def METHOD_NAME(event: PodEvent, params: JavaParams):
"""
Displays all java-toolkit debugging options for every java process
"""
pod = event.get_pod()
if not pod:
logging.info(f"Java debugging - pod not found for event: {event}")
return
if not params.interactive:
logging.info("unable to support non interactive jdk events")
return
finding = Finding(
title=f"Java debugging session on pod {pod.metadata.name} in namespace {pod.metadata.namespace}:",
source=FindingSource.MANUAL,
aggregation_key="java_process_inspector",
subject=PodFindingSubject(pod),
finding_type=FindingType.REPORT,
failure=False,
)
process_finder = ProcessFinder(pod, params, ProcessType.JAVA)
if not process_finder.matching_processes:
ERROR_MESSAGE = "No relevant processes found for java debugging."
logging.info(ERROR_MESSAGE)
finding.add_enrichment([MarkdownBlock(ERROR_MESSAGE)])
event.add_finding(finding)
return
finding.add_enrichment(
[
TableBlock(
[[proc.pid, proc.exe, " ".join(proc.cmdline)] for proc in process_finder.matching_processes],
["pid", "exe", "cmdline"],
)
]
)
finding = add_jdk_choices_to_finding(finding, params, process_finder.get_pids(), pod)
event.add_finding(finding) | [
5521,
356,
13166
] |
def METHOD_NAME(self):
"""Return a list of records as specified in the given table config."""
group_by = []
# We replace the selects from the analytics query with
# the aggregate function calls as required, but leave the
# rest intact. This way, we won't need to build a subquery
# around the whole thing and keep it relatively (!) simple
aggregate_query = self.base_table.get_query_object()
selects_by_alias = {
alias: fragment for fragment, alias in aggregate_query.select
}
new_selects = []
for field in self._fields:
key = self._sql_alias(field.alias)
# old field might be directly selected in the base query,
# in which case we need to alias its expression. If it's
# not directly in the base, it won't be in the top-level selects
# list, but will be aliased to the known name in a subquery.
old_field = selects_by_alias.get(key, key)
new_alias = self._sql_alias(field.alias)
if field.function == field.FUNCTION_VALUE:
group_by.append(old_field)
new_selects.append((old_field, new_alias))
elif field.function != field.FUNCTION_VALUE:
cast = FUNCTION_PARAMETER_CAST.get(field.function, "")
new_selects.append((f"{field.function}({old_field}{cast})", new_alias))
# TODO: instead of replacing selects, use sub-query
aggregate_query.select = new_selects
aggregate_query.group_by = group_by
aggregate_query.select_direct_only = True
sql_query, params, _ = sql.QueryRender(aggregate_query).as_sql(alias=None)
return sql_query, params | [
19,
1621,
61,
434
] |
def METHOD_NAME(
self,
queue: str,
on_message: OnMessageCallback,
) -> None:
"""Subscribe to queue with on_message callback.
Listens to an event stream and invokes on_message with an EventContext
built from the incoming message.
Args:
queue: Name of queue.
on_message: Callback to be invoked with incoming messages.
"""
callback = partial(self._on_message_handler, on_message=on_message)
while True:
try:
self.client.run(queue=queue, on_message=callback)
except (AMQPError, ChannelError):
self.client.stop()
reconnect_delay = self._get_reconnect_delay()
time.sleep(
reconnect_delay
) # todo: update this blocking call to asyncio.sleep
except KeyboardInterrupt as e:
self.client.stop()
raise KeyboardInterrupt from e
if self.client.should_reconnect:
self.client.reset()
else:
break # exit | [
1049
] |
def METHOD_NAME(self):
return "DELETE" | [
103
] |
def METHOD_NAME() -> Optional[List[str]]:
if ARG.FACE_PLUGINS not in request.values:
return []
return [
name for name in Constants.split(request.values[ARG.FACE_PLUGINS])
] | [
19,
4805,
2793,
83
] |
def METHOD_NAME(size):
da = make_histogram2d()
result = median_filter(da, size=size)
reference = da.copy()
reference.values = scipy.ndimage.median_filter(
reference.values, size=size if isinstance(size, int) else size.values()
)
assert sc.identical(result, reference) | [
9,
1318,
137,
639,
24,
5809,
1318
] |
def METHOD_NAME(self):
self.session = SessionLocal()
self.w3 = Web3(HTTPProvider())
self.gas_payer = self.w3.eth.account.from_key(DEFAULT_GAS_PAYER_PRIV)
self.w3.middleware_onion.add(
construct_sign_and_send_raw_middleware(self.gas_payer),
"construct_sign_and_send_raw_middleware",
)
self.w3.eth.default_account = self.gas_payer.address | [
0,
1
] |
def METHOD_NAME(self, nc_file):
with pytest.raises(TypeError):
NCFile.df_to_text_file(None, str(nc_file), time_in_row=False)
csv_path = nc_file.parent / (nc_file.name + '.csv')
with pytest.raises(ValueError):
NCFile.df_to_text_file(None, csv_path, time_in_row="False") | [
9,
2057,
24,
526,
171,
1096
] |
async def METHOD_NAME(self):
# Instantiate the QueueServiceClient from a connection string
from azure.storage.queue.aio import QueueServiceClient, QueueClient
queue_service = QueueServiceClient.from_connection_string(conn_str=self.connection_string)
# [START async_get_queue_client]
# Get the queue client to interact with a specific queue
queue = queue_service.get_queue_client(queue="myqueue2")
# [END async_get_queue_client] | [
19,
651,
340,
958
] |
def METHOD_NAME(master_to_slave_vars, network_features, locator, network_type
):
"""
Computes the total pump investment cost
:type master_to_slave_vars : class context
:type network_features : class ntwFeatures
:rtype pumpCosts : float
:returns pumpCosts: pumping cost
"""
if network_type == "DH":
# local variables
nominal_E_pump_W = network_features.E_pump_DHN_W
nominal_massflowrate_kgs = network_features.mass_flow_rate_DHN
#get the mass flow rate of this neworkt and the pressireloss
data = master_to_slave_vars.DH_network_summary_individual
mass_flow_rate_kgs = data["mdot_DH_netw_total_kgpers"].values
if network_type == "DC":
# local variables
nominal_E_pump_W = network_features.E_pump_DCN_W
nominal_massflowrate_kgs = network_features.mass_flow_rate_DCN
#get the mass flow rate of this neworkt and the pressireloss
data = master_to_slave_vars.DC_network_summary_individual
if master_to_slave_vars.WasteServersHeatRecovery == 1:
mass_flow_rate_kgs = data["mdot_cool_space_cooling_and_refrigeration_netw_all_kgpers"].values
else:
mass_flow_rate_kgs = data["mdot_cool_space_cooling_data_center_and_refrigeration_netw_all_kgpers"].values
#get pumping energy and peak load
E_pump_W = nominal_E_pump_W * (mass_flow_rate_kgs / nominal_massflowrate_kgs)
peak_pump_power_W = np.max(E_pump_W)
#get costs
Capex_a_pump_USD, \
Opex_fixed_pump_USD, \
Capex_pump_USD = calc_Cinv_pump(peak_pump_power_W, locator, 'PU1') # investment of Machinery
return Capex_a_pump_USD, Opex_fixed_pump_USD, Capex_pump_USD, E_pump_W | [
1407,
-1,
12525
] |
def METHOD_NAME(
self, response_stderr, expected_response_type, expected_response_code
| [
9,
128,
5674,
17,
119
] |
def METHOD_NAME(self):
callback = Callback()
self.process.activate()
self.process.registerCallback(callback)
self.process.allcallbacks("processCancelled", True)
assert callback._cancelled | [
9,
1076
] |
def METHOD_NAME(self, namespace: str) -> TypeVarLikeScope:
"""A new scope frame for binding a class. Prohibits *this* class's tvars"""
return TypeVarLikeScope(self.get_function_scope(), True, self, namespace=namespace) | [
2,
896
] |
def METHOD_NAME(self):
"""This method is not used in this implementation."""
raise NotImplementedError() | [
656
] |
def METHOD_NAME(private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
vm_group_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkVMGroupResult]:
"""
NSX VM Group
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str vm_group_id: NSX VM Group identifier. Generally the same as the VM Group's display name
"""
... | [
19,
2454,
1228,
944,
846,
146
] |
def METHOD_NAME(self, layername):
"""
Access a layer by its name
@param string name of the layer
@return the layer
"""
return self.playground.METHOD_NAME(layername) | [
-1
] |
def METHOD_NAME(params):
"""
Setup the huge page using given parameters
:param params: dict, parameters to setup huge page
:return: HugePageConfig object created
"""
hpc = test_setup.HugePageConfig(params)
hpc.setup()
return hpc | [
102,
15827
] |
async def METHOD_NAME(self, recipient: Player, sender: Player):
party = self.player_parties.get(sender)
if (
not party or
recipient not in party.invited_players or
party.invited_players[recipient].is_expired()
):
# TODO: Localize with a proper message
raise ClientError("You are not invited to that party (anymore)", recoverable=True)
if sender.state is PlayerState.SEARCHING_LADDER:
# TODO: Localize with a proper message
raise ClientError("That party is already in queue", recoverable=True)
old_party = self.player_parties.get(recipient)
if old_party is not None:
# Preserve state (like faction selection) from the old party
member = old_party.get_member_by_player(recipient)
assert member is not None
await self.leave_party(recipient)
party.add_member(member)
else:
party.add_player(recipient)
self.player_parties[recipient] = party
self.mark_dirty(party) | [
1437,
1048
] |
def METHOD_NAME(hive=winreg.HKEY_LOCAL_MACHINE, key=None):
if reg_key_exists(hive=hive, key=key):
parent, _, base = key.rpartition("\\")
with winreg.OpenKey(hive, parent, 0, winreg.KEY_ALL_ACCESS) as reg:
winreg.DeleteKey(reg, base) | [
34,
59
] |
def METHOD_NAME(self):
with self.assertRaises(NeptuneException):
init_model(key="MOD", mode="offline") | [
9,
8024,
854
] |
def METHOD_NAME(doc):
"""
Move 'max_num_favorites' from the 'corparch' plug-in
to the 'user_items' plug-in.
"""
srch = doc.find('plugins/corparch/max_num_favorites')
if srch is not None:
srch.getparent().remove(srch)
srch2 = doc.find('plugins/user_items')
if srch2 is not None:
new_elm = etree.SubElement(srch2, 'max_num_favorites')
new_elm.attrib['extension-by'] = 'default'
new_elm.tail = '\n ' | [
86,
1629
] |
def METHOD_NAME():
return Int(1) | [
537,
991
] |
def METHOD_NAME(content, pms = False):
# host = socket.gethostname()
host = "localhost"
port = 5236
contextSize = len(content)
preChunkSize = int(contextSize / 60000)
if pms:
print("Send Length ", contextSize, " Size ", preChunkSize)
if preChunkSize != 0:
chunks, chunk_size = len(content), len(content)//preChunkSize
for i in range(0, chunks, chunk_size):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(content[i:i+chunk_size])
s.recv(8192)
s.close()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.sendall(content)
repr(s.recv(1024))
s.close() | [
353,
717
] |
def METHOD_NAME(source: UPDATABLE_DATA, key: str, action: str, to_set: Any, result: UPDATABLE_DATA) -> None:
"""Test that a single level apply works as expected."""
cpy = copy.deepcopy(source)
killswitch._apply(target=cpy, key=key, to_set=to_set, delete=action == 'delete')
assert cpy == result | [
9,
231
] |
def METHOD_NAME(fake_data_dir: Path, osparc_simcore_root_dir: Path) -> dict[str, Any]:
with Path.open(fake_data_dir / "test_activity_config.yml") as fh:
content = fh.read()
config = content.replace(
"${OSPARC_SIMCORE_REPO_ROOTDIR}", str(osparc_simcore_root_dir)
)
return yaml.safe_load(config) | [
991,
200
] |
def METHOD_NAME(hexdigest):
"""Gets name of the hash algorithm for a hex digest."""
bytes = len(hexdigest) / 2
if bytes not in _size_to_hash:
raise ValueError("Spack knows no hash algorithm for this digest: %s" % hexdigest)
return _size_to_hash[bytes] | [
1161,
6080,
43,
5347
] |
def METHOD_NAME(value): | [
3,
24,
201
] |
def METHOD_NAME(flux_handle, jobid=lib.FLUX_JOBID_ANY):
"""Wait for a job to complete
Submit a request to wait for job completion, blocking until a
response is received, then return the job status.
Only jobs submitted with waitable=True can be waited for.
:param flux_handle: handle for Flux broker from flux.Flux()
:type flux_handle: Flux
:param jobid: the job ID to wait for (default is any waitable job)
:returns: job status, a tuple of: Job ID (int), success (bool),
and an error (string) if success=False
:rtype: JobWaitResult
"""
future = wait_async(flux_handle, jobid)
return future.get_status() | [
618
] |
def METHOD_NAME(
self, config_command: str = "config", pattern: str = "", re_flags: int = 0
) -> str:
"""Enter configuration mode."""
return super().METHOD_NAME(
config_command=config_command, pattern=pattern, re_flags=re_flags
) | [
200,
854
] |
def METHOD_NAME(variant, attrs):
for attr, value in attrs.items():
setattr(variant, attr, value)
assert getattr(variant, attr) == value | [
9,
5439
] |
def METHOD_NAME():
return _(
"Manage the default Python interpreter used to run, analyze and "
"profile your code in Spyder."
) | [
19,
1067
] |
def METHOD_NAME(transaction_id, params: Incomplete | None = None): ... | [
86,
2051
] |
def METHOD_NAME(instance, subpath, filename):
return './sapl/audiencia/%s/%s/%s' % (instance.numero, subpath, filename) | [
19,
-1,
1091,
157
] |
def METHOD_NAME(self, items, namespace="default"):
"""Delete resources in kubernetes cluster
:returns: None if succeed
:raises: KubectlError or KubectlExecutionError
"""
for item in items:
            # Ignore resource names in the chart that start with "-", to avoid cases where they cannot be deleted
            # TODO: if other scenarios come up later, add full validation with a regex
if item["name"].startswith("-"):
continue
cmd_arguments = self.kubectl_base + [
"--namespace=%s" % namespace,
"delete",
"--ignore-not-found=true",
item["kind"],
item["name"],
]
try:
self._run_command_with_retry(max_retries=0, cmd_arguments=cmd_arguments)
except KubectlExecutionError as e:
if "the server doesn't have a resource type " in e.output:
pass
else:
raise | [
34,
206,
604,
206
] |