text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self, image_path):
"""
Tests the response plot.
"""
sta = read_inventory()[0][0]
with WarningsCapture():
sta.plot(0.05, channel="*[NE]", plot_degrees=True,
outfile=image_path) | [
9,
17,
1288,
9130
] |
def METHOD_NAME() -> None:
self.client.pop_state() # close confirmation menu | [
2927,
3485
] |
def METHOD_NAME(self):
dt = np.datetime64('2000-01', ('M', 2))
assert np.datetime_data(dt) == ('M', 2)
with pytest.raises(TypeError):
np.datetime64('2000', garbage=True) | [
9,
884
] |
async def METHOD_NAME(self, options: scrypted_sdk.ImageOptions = None) -> Any:
if options and options.get('format', None):
raise Exception('format can only be used with toBuffer')
newVipsImage = await self.toImageInternal(options)
return await createImageMediaObject(newVipsImage) | [
24,
660
] |
def METHOD_NAME(self, path, default=False):
# Return text or False
items = self._xpath(path, namespaces=self._ns)
return items[0].text if items else default | [
4855,
526
] |
def METHOD_NAME( self ) : | [
1186,
1462
] |
def METHOD_NAME(self, epoch, logs=None):
"""
Run on end of each epoch
"""
if logs is None:
logs = dict()
logger.debug(logs)
# TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy`
if 'val_acc' in logs:
nni.report_intermediate_result(logs['val_acc'])
else:
nni.report_intermediate_result(logs['val_accuracy']) | [
69,
1165,
1798
] |
def METHOD_NAME(self) -> Sequence[str]:
"""
Set of S3 canonical user IDs of the matched origin access identities.
"""
return pulumi.get(self, "s3_canonical_user_ids") | [
607,
6208,
21,
308
] |
def METHOD_NAME():
num_deleted = delete_old_traffic_data(session.session)
print(f"Deleted old traffic data ({num_deleted} rows)") | [
188,
2228,
2219,
365
] |
def METHOD_NAME(self):
PyPIRCCommand.METHOD_NAME(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None | [
15,
1881
] |
def METHOD_NAME(self):
self.test_chains_sync_when_long_enough()
self.test_large_reorgs_can_succeed()
self.test_peerinfo_includes_headers_presync_height() | [
22,
9
] |
def METHOD_NAME(self) -> None:
... | [
624
] |
def METHOD_NAME(self, widget):
self.fileselectionDialog = gui.FileSelectionDialog('File Selection Dialog', 'Select an image file', False, '.')
self.fileselectionDialog.confirm_value.do(
self.on_image_file_selected)
self.fileselectionDialog.cancel_dialog.do(
self.on_dialog_cancel)
# show the dialog as the root widget
self.fileselectionDialog.show(self) | [
2470,
1452,
2859
] |
def METHOD_NAME(cls: Type[Self]) -> Type[SinkConfig]:
config_class = get_class_from_annotation(cls, Sink, ConfigModel)
assert config_class, "Sink subclasses must define a config class"
return cast(Type[SinkConfig], config_class) | [
19,
200,
2
] |
def METHOD_NAME(self):
zone = get_thermal_zone(self.location_site)
self.assertEqual(zone, 'upper') | [
9,
19,
4469,
2456
] |
def METHOD_NAME(self):
"""Test generate dockerfile."""
# prepare
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
target_dockerfile = os.path.join(test_data_dir,
'component.temp.dockerfile')
golden_dockerfile_payload_one = '''\ | [
9,
567,
7215
] |
def METHOD_NAME(self, element, mapping):
# parse the mapping
out = element.parse()
assert out.mapping == mapping | [
9,
53,
445
] |
def METHOD_NAME(self):
with pytest.raises(
exceptions.InvalidSyntax,
match=re.escape(r"version string '5.0.0a-SNAPSHOT' does not conform to pattern "
r"'^(\d+)\.(\d+)\.(\d+)(?:-(.+))?$'")
):
versions.VersionVariants("5.0.0a-SNAPSHOT") | [
9,
295,
3795,
532,
281,
3224
] |
def METHOD_NAME(samp_eve):
value = "value"
header_without_prefix = f"{HOST}={value}_1 header={value}_2"
event_body = "event body"
event, metadata, key_fields = samp_eve.update_metadata(
f"***SPLUNK*** {header_without_prefix}\n{event_body}",
{HOST: f"{HOST}_{value}"},
{},
)
assert event == event_body
assert metadata == {HOST: f"{HOST}_{HOST}_{value}", "header": f"{value}_2"}
assert key_fields == {HOST: [f"{HOST}_{HOST}_{value}"]} | [
9,
86,
773
] |
def METHOD_NAME(self):
defs = Defs() + (Suite('s') + Family('f').add((Task('t') + Edit(var="1") + RepeatDate("YMD", 20100111, 20100115, 2)),
(Task('t2') + Edit(var="1") + RepeatDateList("YMD",[20100111, 20100115 ]))))
defs.s.f.t += Meter("meter",0,100)
defs.s.f.t += Event("event")
defs.s.f.t += Limit("limitx",10)
#PrintStyle.set_style(Style.STATE)
#print(defs)
self.assertTrue(defs.ECF_MICRO, "expected generated variable")
self.assertTrue(defs.ECF_HOME, "expected generated variable")
self.assertTrue(defs.ECF_JOB_CMD , "expected generated variable")
self.assertTrue(defs.ECF_KILL_CMD , "expected generated variable")
self.assertTrue(defs.ECF_STATUS_CMD , "expected generated variable")
self.assertTrue(defs.ECF_URL_CMD , "expected generated variable")
self.assertTrue(defs.ECF_LOG , "expected generated variable")
self.assertTrue(defs.ECF_INTERVAL , "expected generated variable")
self.assertTrue(defs.ECF_LISTS , "expected generated variable")
self.assertTrue(defs.ECF_CHECK , "expected generated variable")
self.assertTrue(defs.ECF_CHECKOLD , "expected generated variable")
self.assertTrue(defs.ECF_CHECKINTERVAL , "expected generated variable")
self.assertTrue(defs.ECF_CHECKMODE , "expected generated variable")
self.assertTrue(defs.ECF_TRIES , "expected generated variable")
self.assertTrue(defs.ECF_VERSION , "expected generated variable")
self.assertTrue(defs.ECF_PORT , "expected generated variable")
self.assertTrue(defs.ECF_HOST , "expected generated variable")
self.assertTrue(defs.s.SUITE, "expected generated variable")
self.assertEqual(defs.s.SUITE.value() , 's', "expected suite name of 's' but found")
self.assertTrue(defs.s.ECF_DATE , "expected generated variable")
self.assertTrue(defs.s.YYYY , "expected generated variable")
self.assertTrue(defs.s.DOW , "expected generated variable")
self.assertTrue(defs.s.DOY , "expected generated variable")
self.assertTrue(defs.s.DATE , "expected generated variable")
self.assertTrue(defs.s.DAY , "expected generated variable")
self.assertTrue(defs.s.DD , "expected generated variable")
self.assertTrue(defs.s.MM , "expected generated variable")
self.assertTrue(defs.s.MONTH , "expected generated variable")
self.assertTrue(defs.s.ECF_CLOCK , "expected generated variable")
self.assertTrue(defs.s.ECF_TIME , "expected generated variable")
self.assertTrue(defs.s.TIME , "expected generated variable")
self.assertTrue(defs.s.f.FAMILY , "expected generated variable")
self.assertTrue(defs.s.f.FAMILY1 , "expected generated variable")
self.assertTrue(defs.s.f.t.TASK , "expected generated variable")
self.assertEqual(defs.s.f.t.TASK.value() , 't', "expected task name of 's'")
self.assertTrue(defs.s.f.t.ECF_JOB , "expected generated variable")
self.assertTrue(defs.s.f.t.ECF_SCRIPT , "expected generated variable")
self.assertTrue(defs.s.f.t.ECF_JOBOUT , "expected generated variable")
self.assertTrue(defs.s.f.t.ECF_TRYNO , "expected generated variable")
self.assertEqual(defs.s.f.t.ECF_TRYNO.value() , '0', "expected task try no of '0'")
self.assertTrue(defs.s.f.t.ECF_RID , "expected generated variable")
self.assertTrue(defs.s.f.t.ECF_NAME , "expected generated variable")
self.assertEqual(defs.s.f.t.ECF_NAME.value() , '/s/f/t', "expected task ECF_NAME of '/s/f/t'")
self.assertTrue(defs.s.f.t.ECF_PASS , "expected generated variable")
self.assertEqual(defs.s.f.t.YMD.value() , '20100111', "expected generated YMD of value")
self.assertEqual(defs.s.f.t.YMD_YYYY.value() , '2010', "expected generated YMD of value")
self.assertEqual(defs.s.f.t.YMD_MM.value() , '1', "expected generated YMD of value")
self.assertEqual(defs.s.f.t.YMD_DD.value() , '11', "expected generated YMD of value")
self.assertEqual(defs.s.f.t.YMD_DOW.value() , '1', "expected generated YMD of value")
self.assertEqual(defs.s.f.t.YMD_JULIAN.value() , '2455208', "expected generated YMD of value")
self.assertEqual(defs.s.f.t.event.value() , 0, "expected generated event of value 0 but found " + str(defs.s.f.t.event.value()))
self.assertEqual(defs.s.f.t.meter.value() , 0, "expected generated meter of value 0 but found " + str(defs.s.f.t.meter.value()))
self.assertEqual(defs.s.f.t.limitx.value() , 0, "expected generated limit of value 0 but found " + str(defs.s.f.t.limitx.value()))#
self.assertEqual(defs.s.f.t2.YMD.value() , '20100111', "expected generated YMD of value")
self.assertEqual(defs.s.f.t2.YMD_YYYY.value() , '2010', "expected generated YMD of value")
self.assertEqual(defs.s.f.t2.YMD_MM.value() , '1', "expected generated YMD of value")
self.assertEqual(defs.s.f.t2.YMD_DD.value() , '11', "expected generated YMD of value")
self.assertEqual(defs.s.f.t2.YMD_DOW.value() , '1', "expected generated YMD of value")
self.assertEqual(defs.s.f.t2.YMD_JULIAN.value() , '2455208', "expected generated YMD of value") | [
9,
19,
864,
4207,
2045
] |
def METHOD_NAME(trans, uv):
"""
Function:
----------
apply the inverse of affine transform 'trans' to uv
Parameters:
----------
@trans: 3x3 np.array
transform matrix
@uv: Kx2 np.array
each row is a pair of coordinates (x, y)
Returns:
----------
@xy: Kx2 np.array
each row is a pair of inverse-transformed coordinates (x, y)
"""
Tinv = inv(trans)
xy = tformfwd(Tinv, uv)
return xy | [
-1
] |
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset",
**self.url_parameters
) | [
274
] |
def METHOD_NAME(self, task_type, input_string, input_string2=None, entity1=None, entity2=None, max_predict_len=None):
max_predict_len = max_predict_len if max_predict_len is not None else self.max_predict_len
input_text = create_input_with_prompt(task_type, input_string, input_string2, entity1, entity2)
# tokenize
encodings = self.tokenizer(input_text, max_seq_len=512)
encodings = {k: paddle.to_tensor(v) for k, v in encodings.items()}
outputs = self.model.METHOD_NAME(**encodings, max_length=max_predict_len)[0]
dec_out = list(map(self.token_decode, outputs))
output = self.pick_most_common(dec_out)
print("input_text:", input_text[0])
print("output:", output)
print("=" * 50)
return output | [
567
] |
def METHOD_NAME(network_security_perimeter_name: Optional[str] = None,
profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNspProfileResult:
"""
Gets the specified NSP profile.
Azure REST API version: 2021-02-01-preview.
:param str network_security_perimeter_name: The name of the network security perimeter.
:param str profile_name: The name of the NSP profile.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['networkSecurityPerimeterName'] = network_security_perimeter_name
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:getNspProfile', __args__, opts=opts, typ=GetNspProfileResult).value
return AwaitableGetNspProfileResult(
access_rules_version=pulumi.get(__ret__, 'access_rules_version'),
diagnostic_settings_version=pulumi.get(__ret__, 'diagnostic_settings_version'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type')) | [
19,
5921,
337
] |
def METHOD_NAME(self):
return self.value[0][3] | [
4065
] |
def METHOD_NAME(d1, d2):
"""
dictionary merge.
d1 is the 'default' source; leaf values from d2 will override.
Returns the merged tree.
Set a leaf in d2 to None to create a tombstone (discard any key
from d1).
If a (sub)key in d1 and d2 differs in type (dict vs. non-dict) then
the merge will proceed with the non-dict promoted to a dict using
the default-key schema ('='). Consumers of this function should be
prepared to handle such keys.
"""
merged = {}
q = [(d1, d2, merged)]
while True:
if not q: break
c1, c2, c3 = q.pop(0)
# add in non-overlapping keys
# keys mapped to None in d2 are tombstones
s1 = set(c1.keys())
s2 = set(c2.keys())
for k in s1.difference(s2):
v = c1[k]
if type(v) == dict:
c3.setdefault(k, {})
q.append((v, {}, c3[k],))
else:
c3.setdefault(k, v)
for k in s2.difference(s1):
v = c2[k]
if v is None: continue
if type(v) == dict:
c3.setdefault(k, {})
q.append(({}, v, c3[k],))
else:
c3.setdefault(k, v)
# handle overlapping keys
for k in s1.intersection(s2):
v1 = c1[k]
v2 = c2[k]
if v2 is None: continue
# two dicts, key-by-key reconciliation required
if type(v1) == dict and type(v2) == dict:
c3.setdefault(k, {})
q.append((v1, v2, c3[k],))
continue
# two non-dicts, d2 wins
if type(v1) != dict and type(v2) != dict:
c3[k] = v2
continue
if type(v1) != dict:
v1 = { '=' : v1, }
if type(v2) != dict:
v2 = { '=' : v2, }
c3.setdefault(k, {})
q.append((v1, v2, c3[k],))
return merged | [
-1
] |
def METHOD_NAME(evm: Evm, amount: Uint) -> None:
"""
Subtracts `amount` from `evm.gas_left`.
Parameters
----------
evm :
The current EVM.
amount :
The amount of gas the current operation requires.
"""
if evm.gas_left < amount:
raise OutOfGasError
else:
evm.gas_left -= U256(amount) | [
4503,
1921
] |
def METHOD_NAME():
context = context_wrap(KDUMP_WITH_EQUAL)
kd = kdump.KDumpConf(context)
assert '10.209.136.62' == kd.ip
context = context_wrap(KDUMP_MATCH_1)
kd = kdump.KDumpConf(context)
assert kd.ip is None | [
9,
19,
1213
] |
def METHOD_NAME(self):
assert len(self.parser.find_dir("MOD_DIRECTIVE")) == 2
assert len(self.parser.find_dir("INVALID_MOD_DIRECTIVE")) == 0 | [
9,
756,
-1
] |
def METHOD_NAME(self, method):
inp = self.descriptor.FindMethodByName(method).input_type
return Field.to_json(inp) | [
19,
362,
763
] |
def METHOD_NAME(self):
ns = Namespace(system_assigned=False,
assign_identity=False)
with self.assertRaises(InvalidArgumentValueError) as context:
validate_create_app_with_system_identity_or_warning(ns)
self.assertTrue('Parameter "system-assigned" should not use together with "assign-identity".' in str(context.exception)) | [
9,
4206,
511,
822
] |
def METHOD_NAME(
saas_file_changetype: ChangeTypeV1, saas_file: StubFile
) -> None:
"""
in this testcase, the schema of the bundle change and the schema of the
change types do not match and hence no file context is extracted.
"""
saas_file.fileschema = "/some/other/schema.yml"
bundle_change = saas_file.create_bundle_change(
{"resourceTemplates[0].targets[0].ref": "new-ref"}
)
ctp = change_type_to_processor(saas_file_changetype)
file_refs = ctp.find_context_file_refs(
FileChange(
file_ref=bundle_change.fileref,
old=bundle_change.old,
new=bundle_change.new,
),
set(),
)
assert not file_refs | [
9,
297,
198,
171,
2925,
280,
1727
] |
def METHOD_NAME(self):
return self._a_data | [
385,
365
] |
def METHOD_NAME(self):
"""This tests if Python module works"""
find_program("ls", "--version")
write_command(
"r.category",
map=self.raster,
rules="-",
stdin="1:kůň\n2:kráva\n3:ovečka\n4:býk",
separator=":",
)
res = read_command("r.category", map=self.raster, separator=":").strip()
self.assertEqual(res, "1:kůň\n2:kráva\n3:ovečka\n4:býk")
self.assertIsInstance(res, str) | [
9,
77,
415,
774
] |
def METHOD_NAME(self) -> bool:
return self.vidx < self.vlen | [
220,
4138
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME():
array1 = ak.operations.from_iter(
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}], highlevel=False
)
assert to_list(array1) == [
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
]
array2 = ak.operations.with_field(
array1,
ak.operations.from_iter([[], [1], [2, 2]], highlevel=False),
"z",
)
assert to_list(array2) == [
{"x": 1, "y": 1.1, "z": []},
{"x": 2, "y": 2.2, "z": [1]},
{"x": 3, "y": 3.3, "z": [2, 2]},
]
array3 = ak.operations.with_field(
array1, ak.operations.from_iter([[], [1], [2, 2]], highlevel=False)
)
assert to_list(array3) == [
{"x": 1, "y": 1.1, "2": []},
{"x": 2, "y": 2.2, "2": [1]},
{"x": 3, "y": 3.3, "2": [2, 2]},
]
array3 = ak.operations.with_field(
array1,
ak.operations.from_iter([[], [1], [2, 2]], highlevel=False),
"0",
)
assert to_list(array3) == [
{"x": 1, "y": 1.1, "0": []},
{"x": 2, "y": 2.2, "0": [1]},
{"x": 3, "y": 3.3, "0": [2, 2]},
]
array1 = ak.operations.from_iter([(1, 1.1), (2, 2.2), (3, 3.3)], highlevel=False)
assert to_list(array1) == [(1, 1.1), (2, 2.2), (3, 3.3)]
array2 = ak.operations.with_field(
array1,
ak.operations.from_iter([[], [1], [2, 2]], highlevel=False),
"z",
)
assert to_list(array2) == [
{"0": 1, "1": 1.1, "z": []},
{"0": 2, "1": 2.2, "z": [1]},
{"0": 3, "1": 3.3, "z": [2, 2]},
]
array3 = ak.operations.with_field(
array1, ak.operations.from_iter([[], [1], [2, 2]], highlevel=False)
)
assert to_list(array3) == [(1, 1.1, []), (2, 2.2, [1]), (3, 3.3, [2, 2])]
array3 = ak.operations.with_field(
array1,
ak.operations.from_iter([[], [1], [2, 2]], highlevel=False),
"0",
)
assert to_list(array3) == [
{"0": [], "1": 1.1},
{"0": [1], "1": 2.2},
{"0": [2, 2], "1": 3.3},
]
array3 = ak.operations.with_field(
array1,
ak.operations.from_iter([[], [1], [2, 2]], highlevel=False),
"1",
)
assert to_list(array3) == [
{"0": 1, "1": []},
{"0": 2, "1": [1]},
{"0": 3, "1": [2, 2]},
]
array3 = ak.operations.with_field(
array1,
ak.operations.from_iter([[], [1], [2, 2]], highlevel=False),
"100",
)
assert to_list(array3) == [
{"0": 1, "1": 1.1, "100": []},
{"0": 2, "1": 2.2, "100": [1]},
{"0": 3, "1": 3.3, "100": [2, 2]},
] | [
9,
148
] |
def METHOD_NAME(self, mock_ast_parse):
command = "variable.method()"
mock_ast_parse.assert_not_called()
SampleCodeValidator(command)
mock_ast_parse.assert_called_with(command, mode="eval") | [
9,
7745,
1319,
259,
623,
1171,
854
] |
def METHOD_NAME(
data_folder,
save_json_train,
save_json_valid,
save_json_test,
split_ratio=[80, 10, 10],
):
"""
Prepares the json files for the Mini Librispeech dataset.
Downloads the dataset if it is not found in the `data_folder`.
Arguments
---------
data_folder : str
Path to the folder where the Mini Librispeech dataset is stored.
save_json_train : str
Path where the train data specification file will be saved.
save_json_valid : str
Path where the validation data specification file will be saved.
save_json_test : str
Path where the test data specification file will be saved.
split_ratio: list
List composed of three integers that sets split ratios for train, valid,
and test sets, respectively. For instance split_ratio=[80, 10, 10] will
assign 80% of the sentences to training, 10% for validation, and 10%
for test.
Example
-------
>>> data_folder = '/path/to/mini_librispeech'
>>> prepare_mini_librispeech(data_folder, 'train.json', 'valid.json', 'test.json')
"""
# Check if this phase is already done (if so, skip it)
if skip(save_json_train, save_json_valid, save_json_test):
logger.info("Preparation completed in previous run, skipping.")
return
# If the dataset doesn't exist yet, download it
train_folder = os.path.join(data_folder, "LibriSpeech", "train-clean-5")
if not check_folders(train_folder):
download_mini_librispeech(data_folder)
# List files and create manifest from list
logger.info(
f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}"
)
extension = [".flac"]
wav_list = get_all_files(train_folder, match_and=extension)
# Random split the signal list into train, valid, and test sets.
data_split = split_sets(wav_list, split_ratio)
# Creating json files
create_json(data_split["train"], save_json_train)
create_json(data_split["valid"], save_json_valid)
create_json(data_split["test"], save_json_test) | [
123,
9027,
10302
] |
def METHOD_NAME(ivp, steprule_large, prior_iwp, diffusion_model):
return diffeq.odefilter.ODEFilter(
steprule=steprule_large,
prior_process=prior_iwp,
diffusion_model=diffusion_model,
init_routine=diffeq.odefilter.init_routines.Stack(),
with_smoothing=False,
) | [
1964,
10531,
1953,
367
] |
def METHOD_NAME(params: Dict[str, Any]) -> bool:
if "after" in params:
return False
if "before" in params:
return False
return True | [
434,
472,
-1
] |
def METHOD_NAME(self, win=None, keepMatrix=False):
if self.debug:
# Only draw if in debug mode
ShapeStim.METHOD_NAME(self, win=win, keepMatrix=keepMatrix) | [
1100
] |
def METHOD_NAME(env):
if hasattr(env, "agents"):
for i, agent in enumerate(env.agents):
env.action_space(agent).seed(42 + i) | [
484,
1006,
1041
] |
def METHOD_NAME(
self,
) -> Tuple[PaginatedList[NamedUser], PaginatedList[Team]]: ... | [
19,
4381,
311
] |
def METHOD_NAME(self):
"""This is run at the beginning, needs to be called explicitly. It verifies that the router is connected
properly"""
print('')
CTRexScenario.emu_init_error = 'Unknown error'
if not self.is_loopback:
# Try to configure the DUT device.
try:
self.config_dut()
except Exception as e:
CTRexScenario.emu_init_error = 'Could not configure the DUT device, err: %s' % e
self.fail(CTRexScenario.emu_init_error)
return
print('Configured DUT')
# The DUT device is configured, let's attempt to start TRex.
try:
self.start_trex()
except Exception as e:
CTRexScenario.emu_init_error = 'Could not start TRex, err: %s' % e
self.fail(CTRexScenario.emu_init_error)
return
print('Started TRex')
# Let's attempt to connect to the TRex server we just started.
if not self.connect():
CTRexScenario.emu_init_error = 'Client could not connect to the server.'
self.fail(CTRexScenario.emu_init_error)
return
print('Connected')
#update elk const object
if self.elk:
self.update_elk_obj()
CTRexScenario.emu_init_error = None | [
9,
1939
] |
def METHOD_NAME(edges, reverse=True, flip=True):
result = []
input = reversed(edges) if reverse else edges
for edge in input:
if flip:
edge.reverse()
result.append(edge)
return result | [
1354,
491
] |
async def METHOD_NAME(self):
"""Close previously-opened store, removing it if so configured."""
if self.store:
await self.store.METHOD_NAME(remove=self.config.auto_remove)
self.store = None | [
1462
] |
def METHOD_NAME(self, arch):
env = self.get_recipe_env(arch)
recipe_build_dir = self.get_build_dir(arch.arch)
# Create a subdirectory to actually perform the build
build_dir = join(recipe_build_dir, self.build_subdir)
ensure_dir(build_dir)
# Configure the build
with current_directory(build_dir):
if not Path('config.status').exists():
shprint(sh.Command(join(recipe_build_dir, 'configure')), _env=env)
with current_directory(recipe_build_dir):
# Create the Setup file. This copying from Setup.dist is
# the normal and expected procedure before Python 3.8, but
# after this the file with default options is already named "Setup"
setup_dist_location = join('Modules', 'Setup.dist')
if Path(setup_dist_location).exists():
shprint(sh.cp, setup_dist_location,
join(build_dir, 'Modules', 'Setup'))
else:
# Check the expected file does exist
setup_location = join('Modules', 'Setup')
if not Path(setup_location).exists():
raise BuildInterruptingException(
SETUP_DIST_NOT_FIND_MESSAGE
)
shprint(sh.make, '-j', str(cpu_count()), '-C', build_dir, _env=env)
# Make a copy of the python executable, giving it the name we want,
# because we get different Python executable names depending on
# whether the fs is case-insensitive (Mac OS X, Cygwin...) or
# case-sensitive (Linux). This way we have a unique name
# for our hostpython, regardless of the underlying fs.
for exe_name in ['python.exe', 'python']:
exe = join(self.get_path_to_python(), exe_name)
if Path(exe).is_file():
shprint(sh.cp, exe, self.python_exe)
break
self.ctx.hostpython = self.python_exe | [
56,
2837
] |
def METHOD_NAME(
self, pipelines: List[DialogModelingPipeline]):
result = {}
pipeline_len = len(pipelines)
for step, item in enumerate(self.test_case['sng0073']['log']):
user = item['user']
print('user: {}'.format(user))
result = pipelines[step % pipeline_len]({
'user_input': user,
'history': result
})
print('response : {}'.format(result[OutputKeys.OUTPUT])) | [
567,
61,
38,
1301,
17
] |
def METHOD_NAME(self, keyid, keyserver=None):
"""
This function imports repository keys from a keyserver.
The default keyserver is hkp://keyserver.ubuntu.com.
The user can provide another keyserver with keyserver="hkp://xyz".
"""
try:
WOShellExec.cmd_exec(
self, "apt-key adv --keyserver {serv}"
.format(serv=(keyserver or
"hkp://keyserver.ubuntu.com")) +
" --recv-keys {key}".format(key=keyid))
except Exception as e:
Log.debug(self, "{0}".format(e))
Log.error(self, "Unable to import repo key") | [
238,
59
] |
def METHOD_NAME(parameters: JSONMapping | None) -> bool:
"""
Args:
parameters (dict or None): parameters dictionary, or None
Return True if the parameters dictionary is considered empty, either because it is
None, or because it does not have any meaningful (non-None) values; otherwise,
return False.
"""
if parameters is None:
return True
for item in parameters.values():
if item is not None:
return False
return True | [
386,
472,
35
] |
def METHOD_NAME():
env_template = {
"WEBSITE_RESOURCE_GROUP": "resource_group",
"WEBSITE_SITE_NAME": "site_name",
"WEBSITE_INSTANCE_ID": "instance_id",
}
for owner_name in (
"f5940f10-2e30-3e4d-a259-63451ba6dae4+elastic-apm-AustraliaEastwebspace",
"f5940f10-2e30-3e4d-a259-63451ba6dae4+appsvc_linux_australiaeast-AustraliaEastwebspace-Linux",
):
env = env_template.copy()
env["WEBSITE_OWNER_NAME"] = owner_name
with mock.patch.dict(os.environ, env):
metadata = elasticapm.utils.cloud.azure_app_service_metadata()
assert metadata == {
"account": {"id": "f5940f10-2e30-3e4d-a259-63451ba6dae4"},
"instance": {"id": "instance_id", "name": "site_name"},
"project": {"name": "resource_group"},
"provider": "azure",
"region": "AustraliaEast",
} | [
9,
1507,
991,
549,
773
] |
def METHOD_NAME(self, connection, **kw):
rp = connection.exec_driver_sql("SHOW schemas")
return [r[0] for r in rp] | [
19,
135,
83
] |
def METHOD_NAME(monster):
mon_json = {
'unit_id': monster.com2us_id if monster.com2us_id else random.randint(1, 999999999),
'unit_master_id': monster.monster.com2us_id,
'building_id': 1234567890 if monster.in_storage else 0,
'island_id': 0,
'homunculus': 1 if monster.monster.homunculus else 0,
'attribute': element_map[monster.monster.element],
'unit_level': monster.level,
'class': monster.stars,
'con': monster.base_hp / 15,
'def': monster.base_defense,
'atk': monster.base_attack,
'spd': monster.base_speed,
'critical_rate': monster.base_crit_rate,
'critical_damage': monster.base_crit_damage,
'accuracy': monster.base_accuracy,
'resist': monster.base_resistance,
'skills': [],
'runes': [],
'artifacts': [],
}
# Fill in skills
skill_levels = [
monster.skill_1_level,
monster.skill_2_level,
monster.skill_3_level,
monster.skill_4_level,
]
for idx, skill in enumerate(monster.monster.skills.all().order_by('slot')):
mon_json['skills'].append([
skill.com2us_id,
skill_levels[idx]
])
# Fill in runes and artifacts
for rune in monster.runes.all():
mon_json['runes'].append(_convert_rune_to_win10_json(rune))
for artifact in monster.artifacts.all():
mon_json['artifacts'].append(_convert_artifact_to_win10_json(artifact))
return mon_json | [
197,
1678,
24,
1679,
763
] |
def METHOD_NAME():
hooks = [mock.Mock("hook%d" % _) for _ in range(2)]
lo = min(linenos(injection_target))
lines = list(range(lo, lo + 2))
hook_data = list(zip(hooks, lines, hooks))
failed = inject_hooks(injection_target, hook_data)
assert failed == []
invalid_hook = (hooks[-1], lines[-1] + 200, hooks[-1])
hook_data.append(invalid_hook)
failed = eject_hooks(injection_target, hook_data)
assert failed == [invalid_hook]
assert injection_target(1, 2) == (2, 1)
for hook in hooks:
hook.assert_not_called() | [
9,
4601,
4602,
3368,
532
] |
def METHOD_NAME(cls,
items: 'List[StatDict]',
agg_mode: str = AGG_DEFAULT,
):
"""
Aggregates array of `StatDict` items by a given `mode`
"""
aggregated_stat = cls()
# Return empty item if items array is empty
if not items or len(items) == 0:
return aggregated_stat
gpu_stats = []
for s in items:
# Collect system stats
for k in s.system.keys():
aggregated_stat.system.setdefault(k, [])
aggregated_stat.system[k].append(s.system[k])
# Collect GPU device stats
for stat_item_gpu_idx in range(len(s.gpus)):
stat_item_gpu_stat = s.gpus[stat_item_gpu_idx]
if len(gpu_stats) == stat_item_gpu_idx:
gpu_stats.append({})
for gpu_stat_key in stat_item_gpu_stat.keys():
gpu_stat = stat_item_gpu_stat[gpu_stat_key]
gpu_stats[stat_item_gpu_idx].setdefault(gpu_stat_key, [])
gpu_stats[stat_item_gpu_idx][gpu_stat_key].append(gpu_stat)
# Aggregate system stats
for k in aggregated_stat.system.keys():
aggregated_stat.system[k] = cls.aggregate(aggregated_stat.system[k],
agg_mode)
# Aggregate GPU device stats
for g in range(len(gpu_stats)):
for k in gpu_stats[g].keys():
gpu_stats[g][k] = cls.aggregate(gpu_stats[g][k], agg_mode)
aggregated_stat.gpu = gpu_stats
return aggregated_stat | [
3428,
1768
] |
def METHOD_NAME(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="keyword1", regexp="`[^`]+`") | [
3183,
11258
] |
def METHOD_NAME(self):
tbl = Table({"a": [True]})
assert tbl.schema() == {"a": bool}
tbl.update({"a": np.array(["False"])})
assert tbl.view().to_dict() == {"a": [True, False]} | [
9,
86,
2212,
863,
3
] |
def METHOD_NAME(guid, chat_flags, message, chat_type, lang, channel=None):
message_bytes = PacketWriter.string_to_bytes(message)
data = pack('<BI', chat_type, lang)
if not channel:
data += pack('<Q', guid)
else:
channel_bytes = PacketWriter.string_to_bytes(channel)
data += pack(f'<{len(channel_bytes)}sQ', channel_bytes, guid)
data += pack(f'<{len(message_bytes)}sB', message_bytes, chat_flags)
return PacketWriter.get_packet(OpCode.SMSG_MESSAGECHAT, data) | [
19,
277,
5788
] |
def METHOD_NAME(omni_con, table_name, data):
num_rows = len(data.index)
base_insert_sql = "INSERT INTO " + table_name + "(a, b, c, d) VALUES ({0}, {1}, {2}, {3})"
insert_statements = []
for r in range(num_rows):
insert_statements.append(base_insert_sql.format(data.iat[r,0], data.iat[r,1], data.iat[r,2], data.iat[r,3]))
start_time = time.perf_counter()
for r in range(num_rows):
omni_con.query(insert_statements[r])
end_time = time.perf_counter()
time_diff = end_time - start_time
rows_per_second = num_rows / time_diff
print("Streaming – SQL Inserts: {0} rows in {1} seconds at {2} rows/sec".format(num_rows, time_diff, rows_per_second)) | [
7608,
3560,
1621,
10196
] |
def METHOD_NAME(self) -> BinaryType:
return self._type | [
44
] |
def METHOD_NAME(self, poly, x, y, w, h):
poly = np.array(poly)
if poly[:, 0].max() < x or poly[:, 0].min() > x + w:
return True
if poly[:, 1].max() < y or poly[:, 1].min() > y + h:
return True
return False | [
137,
9229,
261,
1539
] |
async def METHOD_NAME(coros, mock_logger):
results = await logged_gather(*coros, reraise=False, log=mock_logger)
assert results[0] == 0
assert isinstance(results[1], ValueError)
assert results[2] == 2
assert isinstance(results[3], RuntimeError)
assert isinstance(results[4], ValueError)
assert results[5] == 5 | [
9,
2717,
1432,
2137,
3940
] |
def METHOD_NAME(self):
# divset has divisions and no related geographies but has end date
# normally this setup would import fine, but we're going to
# set an end date on the division set so it should fail
self.valid_divset.end_date = "2018-05-02"
self.valid_divset.save()
with self.assertRaises(Exception):
self.run_import_with_test_data(self.valid_org_code, {}) | [
9,
-1,
220,
1798,
153
] |
def METHOD_NAME(self): | [
9,
1496
] |
async def METHOD_NAME():
pool = await mo.create_actor_pool("127.0.0.1", n_process=0)
async with pool:
yield pool | [
7675,
1567
] |
def METHOD_NAME(filename_json):
"""Reading and writing layers with "Unknown" (i.e. any) geometry type"""
# write a layer with a mixture of geometry types
schema = {"geometry": "Unknown", "properties": properties}
with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema) as collection:
write_point(collection)
write_linestring(collection)
write_polygon(collection)
write_geometrycollection(collection)
write_null(collection)
with pytest.raises(GeometryTypeValidationError):
write_invalid(collection)
# copy the features to a new layer, reusing the layer's metadata
with fiona.open(filename_json, "r", driver="GeoJSON") as src:
filename_dst = filename_json.replace(".json", "_v2.json")
assert src.schema["geometry"] == "Unknown"
with fiona.open(filename_dst, "w", **src.meta) as dst:
dst.writerecords(src) | [
9,
46
] |
def METHOD_NAME(self, expected, received):
Ntest = 50
expected = expected[-Ntest:]
received = received[-Ntest:]
expected = [x / expected[0] for x in expected]
received = [x / received[0] for x in received]
self.assertComplexTuplesAlmostEqual(expected, received, 3) | [
979,
365
] |
def METHOD_NAME(self):
"""Simpler test"""
t_rast_what = SimpleModule(
"t.rast.what.aggr",
strds="A",
input="points",
date="2001-04-01",
granularity="2 months",
overwrite=True,
verbose=True,
)
self.assertModule(t_rast_what)
text = """1|2001-04-01|200.0 | [
9,
53
] |
def METHOD_NAME(self, thumbnails_dict):
return [{
'url': url,
'preference': index,
} for index, url in enumerate(
traverse_obj(thumbnails_dict, (('small', 'medium', 'large'),))) if url] | [
297,
3828
] |
def METHOD_NAME(self) -> Optional['outputs.SkuResponse']:
"""
Sku details required for ARM contract for Autoscaling.
"""
return pulumi.get(self, "sku") | [
4162
] |
async def METHOD_NAME(request, atv):
"""Handle incoming websocket requests."""
device_id = request.match_info["id"]
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app["clients"].setdefault(device_id, []).append(ws)
playstatus = await atv.metadata.playing()
await ws.send_str(str(playstatus))
async for msg in ws:
if msg.type == WSMsgType.TEXT:
# Handle custom commands from client here
if msg.data == "close":
await ws.close()
elif msg.type == WSMsgType.ERROR:
print(f"Connection closed with exception: {ws.exception()}")
request.app["clients"][device_id].remove(ws)
return ws | [
4389,
1519
] |
def METHOD_NAME():
return pace.util.CubedSphereCommunicator(
DummyComm(0, 6, {}),
pace.util.CubedSpherePartitioner(pace.util.TilePartitioner((1, 1))),
) | [
19,
784,
11060
] |
def METHOD_NAME():
"""
:rtype: Iterable[class[EventExtension]]
"""
for app_config in apps.get_app_configs():
if hasattr(app_config, "event_extension"):
yield app_config.event_extension | [
19,
583
] |
def METHOD_NAME():
env_variables = {
"some_env_1": "some-value",
"SOMETHING": "ELSE",
"and_another": "like_this",
}
function = mlrun.new_function(
"function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job
)
assert function.spec.env == []
# Using a dictionary
function.apply(mlrun.platforms.set_env_variables(env_variables))
env_dict = {var["name"]: var.get("value") for var in function.spec.env}
assert env_dict == env_variables
function = mlrun.new_function(
"function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job
)
assert function.spec.env == []
# And using key=value parameters
function.apply(mlrun.platforms.set_env_variables(**env_variables))
env_dict = {var["name"]: var.get("value") for var in function.spec.env}
assert env_dict == env_variables | [
9,
0,
485,
2045
] |
def METHOD_NAME(s):
tags = []
for _ in s.strip(';').split(';'):
tag = _.strip(' ').split(' ')
tags.append((tag[0], tag[1].strip('"')))
return dict(tags) | [
203,
114
] |
def METHOD_NAME(self):
self.assertEqual(si._expand_ethnicity("Polish"), '(subject:"Polish" OR subject:"Poles" OR subject:"Polish Americans")')
self.assertEqual(si._expand_ethnicity("French"), '(subject:"French" OR subject:"French Americans" OR subject:"French in South Carolina" OR subject:"French-American newspapers." OR subject:"French-Canadians")') | [
9,
2450,
7801
] |
def METHOD_NAME(self):
"""
Returns:
The global access config shared across all access mixin modules.
"""
global _ACCESS_CFG
return _ACCESS_CFG | [
1089,
2610
] |
def METHOD_NAME(self):
return self.backend.get_object_url() | [
279,
274
] |
def METHOD_NAME(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True | [
137,
769
] |
def METHOD_NAME(resource_group_name: Optional[str] = None,
secret_resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretResult:
"""
Gets the information about the secret resource with the given name. The information include the description and other properties of the secret.
Azure REST API version: 2018-09-01-preview.
:param str resource_group_name: Azure resource group name
:param str secret_resource_name: The name of the secret resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['secretResourceName'] = secret_resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:servicefabricmesh:getSecret', __args__, opts=opts, typ=GetSecretResult).value
return AwaitableGetSecretResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type')) | [
19,
444
] |
async def METHOD_NAME():
await analyze_identity_documents_async() | [
57
] |
def METHOD_NAME(file, multiline_file):
# https://github.com/saltstack/salt/pull/61326
try:
# Create a symlink to target
sym_link = multiline_file.parent / "symlink.lnk"
sym_link.symlink_to(multiline_file)
# file.replace on the symlink
file.replace(str(sym_link), r"Etiam", "Salticus")
# test that the target was changed
assert "Salticus" in multiline_file.read_text()
finally:
if os.path.exists(str(sym_link)):
sym_link.unlink() | [
9,
953
] |
def METHOD_NAME(self) -> None:
self.solvers = [x for x in QP_SOLVERS if x in INSTALLED_SOLVERS] | [
0,
1
] |
def METHOD_NAME(stream, size):
"""Read the Element Data of type :data:`DATE`
:param stream: file-like object from which to read
:param int size: size of element's data
:raise ReadError: when not all the required bytes could be read
:raise SizeError: if size is incorrect
:return: the read date
:rtype: datetime
"""
if size != 8:
raise SizeError(size)
nanoseconds = unpack('>q', _read(stream, 8))[0]
return datetime(2001, 1, 1, 0, 0, 0, 0, None) + timedelta(microseconds=nanoseconds // 1000) | [
203,
669,
153
] |
def METHOD_NAME(self):
assert (dates.get_numerical_date_from_value("2000-03-29", "%Y-%m-%d")
== pytest.approx(dates.numeric_date(datetime.date(year=2000, month=3, day=29)), abs=1e-3)
== pytest.approx(2000.242, abs=1e-3)
) | [
9,
19,
7365,
153,
280,
99,
130
] |
def METHOD_NAME():
input_map = dict(
allcostx=dict(
argstr="-allcostx |& tee %s",
extensions=None,
position=-1,
xor=["out_file", "out_matrix", "out_param_file", "out_weight_file"],
),
args=dict(
argstr="%s",
),
autobox=dict(
argstr="-autobox",
),
automask=dict(
argstr="-automask+%d",
),
autoweight=dict(
argstr="-autoweight%s",
),
center_of_mass=dict(
argstr="-cmass%s",
),
check=dict(
argstr="-check %s",
),
convergence=dict(
argstr="-conv %f",
),
cost=dict(
argstr="-cost %s",
),
environ=dict(
nohash=True,
usedefault=True,
),
epi=dict(
argstr="-EPI",
),
final_interpolation=dict(
argstr="-final %s",
),
fine_blur=dict(
argstr="-fineblur %f",
),
in_file=dict(
argstr="-source %s",
copyfile=False,
extensions=None,
mandatory=True,
),
in_matrix=dict(
argstr="-1Dmatrix_apply %s",
extensions=None,
position=-3,
xor=["out_matrix"],
),
in_param_file=dict(
argstr="-1Dparam_apply %s",
extensions=None,
xor=["out_param_file"],
),
interpolation=dict(
argstr="-interp %s",
),
master=dict(
argstr="-master %s",
extensions=None,
),
maxrot=dict(
argstr="-maxrot %f",
),
maxscl=dict(
argstr="-maxscl %f",
),
maxshf=dict(
argstr="-maxshf %f",
),
maxshr=dict(
argstr="-maxshr %f",
),
newgrid=dict(
argstr="-newgrid %f",
),
nmatch=dict(
argstr="-nmatch %d",
),
no_pad=dict(
argstr="-nopad",
),
nomask=dict(
argstr="-nomask",
),
num_threads=dict(
nohash=True,
usedefault=True,
),
nwarp=dict(
argstr="-nwarp %s",
),
nwarp_fixdep=dict(
argstr="-nwarp_fixdep%s...",
),
nwarp_fixmot=dict(
argstr="-nwarp_fixmot%s...",
),
one_pass=dict(
argstr="-onepass",
),
out_file=dict(
argstr="-prefix %s",
extensions=None,
hash_files=False,
name_source="in_file",
name_template="%s_allineate",
xor=["allcostx"],
),
out_matrix=dict(
argstr="-1Dmatrix_save %s",
extensions=None,
xor=["in_matrix", "allcostx"],
),
out_param_file=dict(
argstr="-1Dparam_save %s",
extensions=None,
xor=["in_param_file", "allcostx"],
),
out_weight_file=dict(
argstr="-wtprefix %s",
extensions=None,
xor=["allcostx"],
),
outputtype=dict(),
overwrite=dict(
argstr="-overwrite",
),
quiet=dict(
argstr="-quiet",
),
reference=dict(
argstr="-base %s",
extensions=None,
),
replacebase=dict(
argstr="-replacebase",
),
replacemeth=dict(
argstr="-replacemeth %s",
),
source_automask=dict(
argstr="-source_automask+%d",
),
source_mask=dict(
argstr="-source_mask %s",
extensions=None,
),
two_best=dict(
argstr="-twobest %d",
),
two_blur=dict(
argstr="-twoblur %f",
),
two_first=dict(
argstr="-twofirst",
),
two_pass=dict(
argstr="-twopass",
),
usetemp=dict(
argstr="-usetemp",
),
verbose=dict(
argstr="-verb",
),
warp_type=dict(
argstr="-warp %s",
),
warpfreeze=dict(
argstr="-warpfreeze",
),
weight=dict(
argstr="-weight %s",
),
weight_file=dict(
argstr="-weight %s",
deprecated="1.0.0",
extensions=None,
new_name="weight",
),
zclip=dict(
argstr="-zclip",
),
)
inputs = Allineate.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value | [
9,
-1,
1461
] |
def METHOD_NAME(check, instance, aggregator):
check = check({}, instance)
check.check(instance)
metrics = {}
for d in (
check.CLUSTER_TASKS_METRICS,
check.CLUSTER_SLAVES_METRICS,
check.CLUSTER_RESOURCES_METRICS,
check.CLUSTER_REGISTRAR_METRICS,
check.CLUSTER_FRAMEWORK_METRICS,
check.SYSTEM_METRICS,
check.STATS_METRICS,
):
metrics.update(d)
for _, v in iteritems(check.FRAMEWORK_METRICS):
aggregator.assert_metric(v[0])
for _, v in iteritems(metrics):
aggregator.assert_metric(v[0])
for _, v in iteritems(check.ROLE_RESOURCES_METRICS):
aggregator.assert_metric(v[0])
aggregator.assert_metric('mesos.cluster.total_frameworks')
aggregator.assert_metric('mesos.framework.total_tasks')
aggregator.assert_metric('mesos.role.frameworks.count')
aggregator.assert_metric('mesos.role.weight') | [
9,
250
] |
def METHOD_NAME(scoping):
from ansys.grpc.dpf import scoping_pb2, base_pb2
request = scoping_pb2.CountRequest()
request.entity = base_pb2.NUM_ELEMENTARY_DATA
request.scoping.CopyFrom(scoping._internal_obj)
return _get_stub(scoping._server).Count(request).count | [
7628,
19,
1318
] |
def METHOD_NAME(self, duration: int) -> None:
"""Set the duration in seconds.
This will convert the given amount of seconds into an amount of days, hours, minutes and seconds.
Note that this is mostly a workaround for issues with PyQt, as a value type this class should not
really have a setter.
"""
if duration < 0:
self._days = -1
self._hours = -1
self._minutes = -1
self._seconds = -1
else:
try:
duration = round(duration)
except OverflowError:
Logger.log("w", "Duration was too large to convert, so resetting it.")
duration = 0
# If a Python int goes above the upper bound of a C++ int, which is 2^31 - 1, you will get an error when Qt
# tries to convert the Python int to a C++ int:
# TypeError: unable to convert a Python 'int' object to a C++ 'int' instance
# So we make sure here that the number won't exceed the limit due to a CuraEngine bug or whatever, and
# Cura won't crash.
if int(duration) >= (2**31):
Logger.log("w", "Duration was too large to convert, so resetting it.")
duration = 0
self._days = math.floor(duration / (3600 * 24))
duration -= self._days * 3600 * 24
self._hours = math.floor(duration / 3600)
duration -= self._hours * 3600
self._minutes = math.floor(duration / 60)
duration -= self._minutes * 60
self._seconds = duration
self.durationChanged.emit() | [
0,
2205
] |
def METHOD_NAME(program_config, predictor_config):
input_data_type = program_config.inputs["input_data_x"].dtype
if input_data_type == np.float32:
err_msg = "Paddle's elementwise_floordiv op doesn't support float32 datatype!"
return True | [
1351
] |
def METHOD_NAME(self):
self.requires("tinyxml2/9.0.0")
self.requires("asio/1.28.0") # This is now a package_type = header
self.requires("fast-cdr/1.0.27", transitive_headers=True, transitive_libs=True)
self.requires("foonathan-memory/0.7.3")
if self.options.with_ssl:
self.requires("openssl/[>=1.1 <4]") | [
5186
] |
def METHOD_NAME(self):
process_cpu_usage = ProcessCpuUsage(2,1.34,self.__metric_repository)
total_percent = process_cpu_usage.total_percent()
self.assertIsNone(total_percent, None) | [
9,
395,
1597,
217,
1511,
99,
98
] |
def METHOD_NAME(self):
NC = self.number_of_cells()
NEC = self.NEC
edge2cell = self.edge2cell
cell2edgeSign = torch.zeros((NC, NEC), dtype=torch.bool, device=self.device)
cell2edgeSign[edge2cell[:, 0], edge2cell[:, 2]] = True
return cell2edgeSign | [
118,
24,
540,
2452
] |
def METHOD_NAME(self): | [
22,
9
] |
def METHOD_NAME(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 3 bytes, expected no more than 2 bytes",
) | [
9,
203,
534,
564,
524,
562,
4138
] |
def METHOD_NAME(self):
if self.product_id.type == "product" and self.is_l10n_ro_record:
if self.move_id.is_purchase_document():
purchase = self.purchase_order_id
if purchase and self.product_id.purchase_method == "receive":
# Control bills based on received quantities
if any(
[
p.l10n_ro_notice or p._is_dropshipped()
for p in purchase.picking_ids
]
):
self = self.with_context(valued_type="invoice_in_notice")
if self.move_id.is_sale_document():
sales = self.sale_line_ids
if sales and self.product_id.invoice_policy == "delivery":
# Control bills based on received quantities
sale = self.sale_line_ids[0].order_id
if any(
[
p.l10n_ro_notice and not p._is_dropshipped()
for p in sale.picking_ids
]
):
self = self.with_context(valued_type="invoice_out_notice")
return super(AccountMoveLine, self).METHOD_NAME() | [
19,
3704,
598
] |
def METHOD_NAME(self):
return "button" | [
19,
573,
156
] |
def METHOD_NAME(self, data: str, meta: dict = None):
self.data.append({ProtoKey.TYPE: ProtoKey.STRING, ProtoKey.DATA: data})
self.update_meta(meta) | [
1459,
144
] |
def METHOD_NAME(doc):
"""
conc_persistence_time
"""
x = doc.find('/global/conc_persistence_time')
if x is not None:
remove_element(x) | [
86,
1842
] |