text (string, length 15–7.82k) | ids (sequence, length 1–7)
---|---|
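Each row below pairs a "text" cell (a Python function with its name masked as METHOD_NAME) with an "ids" cell (a sequence of one to seven integers). As a minimal sketch, assuming this dump corresponds to a Hugging Face dataset with exactly these two columns, it could be loaded and inspected as follows; the dataset path "user/method-name-corpus" and the interpretation of "ids" are assumptions, not confirmed by the dump:

from datasets import load_dataset  # pip install datasets

# Hypothetical repository id; substitute the real one.
dataset = load_dataset("user/method-name-corpus", split="train")

for row in dataset.select(range(3)):
    snippet = row["text"]  # Python source with the method name masked as METHOD_NAME
    ids = row["ids"]       # 1-7 integers per sample (meaning not given by the dump)
    print(f"{len(snippet):>5} chars, ids={ids}")
    print(snippet.splitlines()[0])  # the masked signature line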
def METHOD_NAME(self):
    """test save/load """
    testValues = [
        "string", 123, 123.456,
        ["list", 789, 10.1],
        {"dict1": "value", "dict2": 10.0}
    ]
    config = Configuration()
    for x in range(0, 5):
        config.section_("Section%s" % x)
        config.component_("Component%s" % x)
        sect = getattr(config, "Section%s" % x)
        comp = getattr(config, "Component%s" % x)
        sect.document_("This is Section%s" % x)
        comp.document_("This is Component%s" % x)
        for i in range(0, 5):
            setattr(comp, "Parameter%s" % i, testValues[i])
            setattr(sect, "Parameter%s" % i, testValues[i])
            comp.document_("This is Parameter%s" % i,
                           "Parameter%s" % i)
            sect.document_("This is Parameter%s" % i,
                           "Parameter%s" % i)
    stringSave = str(config)
    documentSave = config.documentedString_()
    commentSave = config.commentedString_()
    saveConfigurationFile(config, self.normalSave)
    saveConfigurationFile(config, self.docSave, document = True)
    saveConfigurationFile(config, self.commentSave, comment = True)
    plainConfig = loadConfigurationFile(self.normalSave)
    docConfig = loadConfigurationFile(self.docSave)
    commentConfig = loadConfigurationFile(self.commentSave)
    #print commentConfig.commentedString_()
    #print docConfig.documentedString_()
    #print docConfig.commentedString_() | [
9,
1178
] |
def METHOD_NAME():
    return True | [
137,
2659,
1111
] |
def METHOD_NAME(self):
    method.xc = 'b3lypg'
    self.assertAlmostEqual(method.scf(), -2.9070540942168002, 9)
    m = mol.UKS()
    m.xc = 'b3lyp5'
    self.assertAlmostEqual(m.scf(), -2.89992555753, 9) | [
9,
6166,
8179
] |
def METHOD_NAME(
        cls,
        parent: 'ExecutePlugin',
        test: 'Test',
        guest: 'Guest',
        logger: tmt.log.Logger) -> List[tmt.result.Result]:
    """ Check result of a beakerlib test """
    # Initialize data, prepare log paths
    note: Optional[str] = None
    log: List[Path] = []
    for filename in [tmt.steps.execute.TEST_OUTPUT_FILENAME, 'journal.txt']:
        if parent.data_path(test, guest, filename, full=True).is_file():
            log.append(parent.data_path(test, guest, filename))
    # Check beakerlib log for the result
    try:
        beakerlib_results_file = parent.data_path(test, guest, 'TestResults', full=True)
        results = parent.read(beakerlib_results_file, level=3)
    except tmt.utils.FileError:
        logger.debug(f"Unable to read '{beakerlib_results_file}'.", level=3)
        note = 'beakerlib: TestResults FileError'
        return [tmt.Result.from_test(
            test=test,
            result=ResultOutcome.ERROR,
            note=note,
            log=log,
            guest=guest)]
    search_result = re.search('TESTRESULT_RESULT_STRING=(.*)', results)
    # States are: started, incomplete and complete
    # FIXME In quotes until beakerlib/beakerlib/pull/92 is merged
    search_state = re.search(r'TESTRESULT_STATE="?(\w+)"?', results)
    if search_result is None or search_state is None:
        # Same outcome but make it easier to debug
        if search_result is None:
            missing_piece = 'TESTRESULT_RESULT_STRING='
            hint = ''
        else:
            missing_piece = 'TESTRESULT_STATE='
            hint = ', possibly outdated beakerlib (requires 1.23+)'
        logger.debug(
            f"No '{missing_piece}' found in '{beakerlib_results_file}'{hint}.",
            level=3)
        note = 'beakerlib: Result/State missing'
        return [tmt.Result.from_test(
            test=test,
            result=ResultOutcome.ERROR,
            note=note,
            log=log,
            guest=guest)]
    result = search_result.group(1)
    state = search_state.group(1)
    # Check if it was killed by timeout (set by tmt executor)
    actual_result = ResultOutcome.ERROR
    if test.returncode == tmt.utils.PROCESS_TIMEOUT:
        note = 'timeout'
        parent.timeout_hint(test, guest)
    # Test results should be in complete state
    elif state != 'complete':
        note = f"beakerlib: State '{state}'"
    # Finally we have a valid result
    else:
        actual_result = ResultOutcome.from_spec(result.lower())
    return [tmt.Result.from_test(
        test=test,
        result=actual_result,
        note=note,
        log=log,
        guest=guest)] | [
297,
51
] |
def METHOD_NAME(uid2refs, uid2hyps, verbose):
    (p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped) = comp_joint_prob(
        uid2refs, uid2hyps
    )
    ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0)
    hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1)
    (mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp) = comp_norm_mutual_info(p_xy)
    outputs = {
        "ref pur": ref_pur,
        "hyp pur": hyp_pur,
        "H(ref)": h_ref,
        "H(hyp)": h_hyp,
        "MI": mi,
        "MI/H(ref)": mi_norm_by_ref,
        "ref segL": comp_avg_seg_dur(uid2refs.values()),
        "hyp segL": comp_avg_seg_dur(uid2hyps.values()),
        "p_xy shape": p_xy.shape,
        "frm tot": tot,
        "frm diff": frmdiff,
        "utt tot": len(uid2refs),
        "utt miss": len(skipped),
    }
    print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f")) | [
57
] |
def METHOD_NAME(self, cmd):
    # appends the null character and sends the string over I2C
    cmd += "\00"
    self.file_write.METHOD_NAME(cmd) | [
77
] |
def METHOD_NAME():
    with warnings.catch_warnings(record=True) as w:
        assert db.get_halo("sim/ts1/1").calculate('testlink_univalued.testval') == 1.0
        assert len(w) == 0 | [
9,
14338,
548
] |
def METHOD_NAME(john_output, expected_result):
    assert parse_john_output(john_output) == expected_result | [
9,
214,
146
] |
def METHOD_NAME(self, install_req, parent_req_name=None,
                extras_requested=None):
    """Add install_req as a requirement to install.
    :param parent_req_name: The name of the requirement that needed this
        added. The name is used because when multiple unnamed requirements
        resolve to the same name, we could otherwise end up with dependency
        links that point outside the Requirements set. parent_req must
        already be added. Note that None implies that this is a user
        supplied requirement, vs an inferred one.
    :param extras_requested: an iterable of extras used to evaluate the
        environment markers.
    :return: Additional requirements to scan. That is either [] if
        the requirement is not applicable, or [install_req] if the
        requirement is applicable and has just been added.
    """
    name = install_req.name
    if not install_req.match_markers(extras_requested):
        logger.info("Ignoring %s: markers '%s' don't match your "
                    "environment", install_req.name,
                    install_req.markers)
        return [], None
    # This check has to come after we filter requirements with the
    # environment markers.
    if install_req.link and install_req.link.is_wheel:
        wheel = Wheel(install_req.link.filename)
        if not wheel.supported():
            raise InstallationError(
                "%s is not a supported wheel on this platform." %
                wheel.filename
            )
    # This next bit is really a sanity check.
    assert install_req.is_direct == (parent_req_name is None), (
        "a direct req shouldn't have a parent and also, "
        "a non direct req should have a parent"
    )
    if not name:
        # url or path requirement w/o an egg fragment
        self.unnamed_requirements.append(install_req)
        return [install_req], None
    else:
        try:
            existing_req = self.get_requirement(name)
        except KeyError:
            existing_req = None
        if (parent_req_name is None and existing_req and not
                existing_req.constraint and
                existing_req.extras == install_req.extras and not
                existing_req.req.specifier == install_req.req.specifier):
            raise InstallationError(
                'Double requirement given: %s (already in %s, name=%r)'
                % (install_req, existing_req, name))
        if not existing_req:
            # Add requirement
            self.requirements[name] = install_req
            # FIXME: what about other normalizations? E.g., _ vs. -?
            if name.lower() != name:
                self.requirement_aliases[name.lower()] = name
            result = [install_req]
        else:
            # Assume there's no need to scan, and that we've already
            # encountered this for scanning.
            result = []
            if not install_req.constraint and existing_req.constraint:
                if (install_req.link and not (existing_req.link and
                        install_req.link.path == existing_req.link.path)):
                    self.reqs_to_cleanup.append(install_req)
                    raise InstallationError(
                        "Could not satisfy constraints for '%s': "
                        "installation from path or url cannot be "
                        "constrained to a version" % name,
                    )
                # If we're now installing a constraint, mark the existing
                # object for real installation.
                existing_req.constraint = False
                existing_req.extras = tuple(
                    sorted(set(existing_req.extras).union(
                        set(install_req.extras))))
                logger.debug("Setting %s extras to: %s",
                             existing_req, existing_req.extras)
                # And now we need to scan this.
                result = [existing_req]
            # Canonicalise to the already-added object for the backref
            # check below.
            install_req = existing_req
        # We return install_req here to allow for the caller to add it to
        # the dependency information for the parent package.
        return result, install_req | [
238,
6577
] |
def METHOD_NAME(self): | [
531,
481
] |
def METHOD_NAME(self):
    if self.settings.os == "Windows":
        del self.options.fPIC | [
200,
1881
] |
def METHOD_NAME():
    #### Create an ActionsParser object
    actions_parser = ActionsParser()
    #### Set a simple list of actions
    actions_list = [
        "clear_results",
        "evaluate_query_graph",
        "filter(start_node=1, maximum_results=10, minimum_probability=0.5, maximum_ngd=0.8)",
        "expand(command=expand_nodes, edge_id=[e00,e01,e02], add_virtual_nodes=true)",
        "expand(edge_id=[e00,e01,e02], sort_by=[type,name])",
        "expand(edge_id=[e00], sort_by=[type], empty_list=[] , dangling_comma=[p,q,])",
        "overlay(compute_ngd=true,default)",
        "overlay(add_pubmed_ids=true, start_node=2)",
        "return(message=true,store=false,test=accept=true)"
    ]
    #### Parse the action_list
    result = actions_parser.parse(actions_list)
    #### Print the response information (including debug information)
    print(result.show(level=ARAXResponse.DEBUG))
    #### Print the final response data envelope
    print(json.dumps(ast.literal_eval(repr(result.data)), sort_keys=True, indent=2)) | [
57
] |
def METHOD_NAME(self):
    # syntax error in GRUB_CMDLINE_LINUX, recoverable
    for error in [err for err in self.consume(GrubConfigError)
                  if err.error_type == GrubConfigError.ERROR_GRUB_CMDLINE_LINUX_SYNTAX]:
        _create_grub_error_report(
            error=error,
            title='Syntax error detected in grub configuration',
            summary=('Syntax error was detected in GRUB_CMDLINE_LINUX value of grub configuration. '
                     'This error is causing booting and other issues. '
                     'Error is automatically fixed by add_upgrade_boot_entry actor.'),
        )
        break
    # missing newline, recoverable
    for error in [err for err in self.consume(GrubConfigError)
                  if err.error_type == GrubConfigError.ERROR_MISSING_NEWLINE]:
        _create_grub_error_report(
            error=error,
            title='Detected a missing newline at the end of grub configuration file',
            summary=('The missing newline in /etc/default/grub causes booting issues when appending '
                     'new entries to this file during the upgrade. Leapp will automatically fix this '
                     'problem by appending the missing newline to the grub configuration file.')
        )
        break
    # corrupted configuration, inhibitor
    for error in [err for err in self.consume(GrubConfigError)
                  if err.error_type == GrubConfigError.ERROR_CORRUPTED_GRUBENV]:
        _create_grub_error_report(
            error=error,
            title='Detected a corrupted grubenv file',
            summary=('The grubenv file must be valid to pass the upgrade correctly: \n'
                     '- an exact size of 1024 bytes is expected \n'
                     '- it cannot end with a newline. \n'
                     'The corruption could be caused by a manual modification of the file which '
                     'is not recommended.'),
            severity=reporting.Severity.HIGH,
            is_inhibitor=True,
            remediation=reporting.Remediation(
                hint='Delete {} file(s) and regenerate grubenv using the grub2-mkconfig tool'.format(
                    ','.join(error.files))))
        break | [
356
] |
def METHOD_NAME(key, value, subkey):
    column = AnalysisEntry.result
    if isinstance(value, dict) and '$exists' in value:
        # "$exists" (aka key exists in json document) is a special case because
        # we need to query the element one level above the actual key
        for nested_key in subkey.split('.')[:-1]:
            column = column[nested_key]
    else:
        for nested_key in subkey.split('.'):
            column = column[nested_key]
    if isinstance(value, dict):
        for key_, value_ in value.items():
            if key_ in ['$in', '$like']:
                column = column.astext
                break
            value[key_] = dumps(value_)
    else:
        value = type_coerce(value, JSONB)
    return _dict_key_to_filter(column, key, value) | [
238,
763,
527
] |
def METHOD_NAME(self, *, block: Block, height: int) -> Block:
    """
    Given a block, returns its ancestor at a specific height.
    Uses the height index if the block is in the best blockchain, or search iteratively otherwise.
    """
    assert height < block.get_height(), (
        f"ancestor height must be lower than the block's height: {height} >= {block.get_height()}"
    )
    metadata = block.get_metadata()
    if not metadata.voided_by and (ancestor := self._tx_storage.get_transaction_by_height(height)):
        assert isinstance(ancestor, Block)
        return ancestor
    return _get_ancestor_iteratively(block=block, ancestor_height=height) | [
19,
4490,
1541,
1877
] |
def METHOD_NAME(input_file, output, settings):
    sorted_scores, accum_hits, accum_decoys = _accum_decoys(input_file, settings)
    fdr_array = compute_fdr(sorted_scores, accum_hits, accum_decoys, settings)
    index = 0
    for line in __read_lines(input_file):
        if not line or line.startswith('#'):
            continue
        entry = Entry(line, settings, index)
        this_fdr = fdr_array[entry.score]
        new_line = "%s%s%f" % (line, settings["separator"], this_fdr)
        print >> output, new_line
        index += 1 | [
1459,
5480
] |
def METHOD_NAME(hService: _win32typing.PySC_HANDLE, InfoLevel): ... | [
539,
549,
3660
] |
def METHOD_NAME(test_case):
    arg_dict = OrderedDict()
    arg_dict["device"] = ["cuda"]
    if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
        arg_dict["device"] = ["cpu"]
    arg_dict["x_shape"] = [(10,)]
    arg_dict["learning_rate"] = [0.1, 1e-3]
    arg_dict["train_iters"] = [10]
    arg_dict["betas"] = [(0.99, 0.9)]
    arg_dict["weight_decay"] = [0.001, 0.1]
    arg_dict["eps"] = [1e-6]
    arg_dict["do_bias_correction"] = [True, False]
    arg_dict["adam_w_mode"] = [True, False]
    # NOTE(l1aoxingyu): max_norm = -1 means no clip grad
    arg_dict["clip_grad_max_norm"] = [-1, 0.0, 0.5, 1.0]
    arg_dict["clip_grad_norm_type"] = ["inf", "-inf", 0.0, 1.0, 2.0, 3.5]
    arg_dict["reload_state_step"] = [5]
    arg_dict["save_load_by_pickle"] = [False, True]
    arg_dict["contiguous_params"] = [False, True]
    for arg in GenArgList(arg_dict):
        compare_with_numpy_lamb(test_case, *arg) | [
9,
9898
] |
def METHOD_NAME(self, *args, **kw):
    self._publish(f"will_{name}", *args, can_raise=can_raise, **kw)
    result = func(self, *args, **kw)
    self._publish(f"did_{name}", result, can_raise=can_raise)
    return result | [
80,
559
] |
def METHOD_NAME(
    cls,
    settings: SupportedSettingType
) -> Dict: | [
19,
200,
1457,
43,
817
] |
def METHOD_NAME(self):
    return None | [
1952
] |
def METHOD_NAME():
    """Test the ``revert_nans_columns`` method."""
    # Setup
    data = pd.DataFrame({
        'a': [1, 2, 3, 2], 'b': [2.5, 2, 3, 2.5], 'c': [1, 2, 3, 2],
        'a#b#c.nan_component': ['b', 'a, c', 'None', 'a, b, c']
    })
    nan_column_name = 'a#b#c.nan_component'
    # Run
    result = revert_nans_columns(data, nan_column_name)
    expected_data = pd.DataFrame({
        'a': [1, np.nan, 3, np.nan], 'b': [np.nan, 2, 3, np.nan],
        'c': [1, np.nan, 3, np.nan]
    })
    # Assert
    pd.testing.assert_frame_equal(result, expected_data) | [
9,
4026,
8386,
1951
] |
def METHOD_NAME(self):
    Fake, FakeForm = self.factory()
    fake = Fake()
    form = FakeForm()
    form.populate_obj(fake)
    assert fake.extras == {} | [
9,
35,
365
] |
def METHOD_NAME(self, obj):
    if obj.channel is None:
        return None
    return {
        "display_name": obj.slack_channel_name,
        "slack_id": obj.channel,
        "id": obj.slack_channel_pk,
    } | [
19,
729,
307
] |
def METHOD_NAME(in_folder=TMP_FOLDER):
    """
    Generate spm module in the specified folder, default
    to the system's tmp folder.
    After generation, user can simply use the prompt SPM
    code to include the ANTLR4 Swift runtime package.
    :param in_folder: the folder where we generate the SPM module.
    :return: None
    """
    tmp_antlr_folder = in_folder + "Antlr4-tmp-" + str(int(time.time()))
    os.mkdir(tmp_antlr_folder)
    # Copy folders and SPM manifest file.
    dirs_to_copy = ["Sources", "Tests"]
    for dir_to_copy in dirs_to_copy:
        shutil.copytree(DIR + "/" + dir_to_copy, tmp_antlr_folder + "/" + dir_to_copy)
    shutil.copy("Package.swift", tmp_antlr_folder)
    os.chdir(tmp_antlr_folder)
    check_call(["git", "init"])
    check_call(["git", "add", "*"])
    check_call(["git", "commit", "-m", "Initial commit."])
    check_call(["git", "tag", "{}.0.0".format(MAJOR_VERSION)])
    antlr_says("Created local repository.")
    antlr_says("(swift-tools-version:3.0) "
               "Put .Package(url: \"{}\", majorVersion: {}) in Package.swift.".format(os.getcwd(), MAJOR_VERSION))
    antlr_says("(swift-tools-version:4.0) "
               "Put .package(url: \"{}\", from: \"{}.0.0\") in Package.swift "
               "and add \"Antlr4\" to target dependencies. ".format(os.getcwd(), MAJOR_VERSION)) | [
567,
14483,
298
] |
async def METHOD_NAME(self, retries: int):
    backoff = BackoffMock()
    retry = Retry(backoff, retries)
    with pytest.raises(ConnectionError):
        await retry.call_with_retry(self._do, self._fail)
    assert self.actual_attempts == 1 + retries
    assert self.actual_failures == 1 + retries
    assert backoff.reset_calls == 1
    assert backoff.calls == retries | [
9,
2052
] |
def METHOD_NAME(self, polygon: Geometry) -> Iterable[Tuple[int, int]]:
    """ Return tile indexes overlapping with a given geometry.
    """
    if self._gbox.crs is None:
        poly = polygon
    else:
        poly = polygon.to_crs(self._gbox.crs)
    yy, xx = self.range_from_bbox(poly.boundingbox)
    for idx in itertools.product(yy, xx):
        gbox = self[idx]
        if gbox.extent.intersects(poly):
            yield idx | [
299
] |
def METHOD_NAME(self):
    msg_id = 'msg-id'
    get_ipython = self._mock_get_ipython(msg_id)
    clear_output = self._mock_clear_output()
    with self._mocked_ipython(get_ipython, clear_output):
        widget = widget_output.Output()

        @widget.capture(clear_output=True, wait=True)
        def captee(*args, **kwargs):
            # Check that we are capturing output
            assert widget.msg_id == msg_id

        captee()
        captee()
    assert len(clear_output.calls) == 2
    assert clear_output.calls[0] == clear_output.calls[1] == \
        ((), {'wait': True}) | [
9,
4987,
972,
537,
146
] |
def METHOD_NAME(container_name: Optional[str] = None,
                device_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                storage_account_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContainerResult:
    """
    Represents a container on the Data Box Edge/Gateway device.
    Azure REST API version: 2022-03-01.

    :param str container_name: The container Name
    :param str device_name: The device name.
    :param str resource_group_name: The resource group name.
    :param str storage_account_name: The Storage Account Name
    """
    __args__ = dict()
    __args__['containerName'] = container_name
    __args__['deviceName'] = device_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['storageAccountName'] = storage_account_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:databoxedge:getContainer', __args__, opts=opts, typ=GetContainerResult).value
    return AwaitableGetContainerResult(
        container_status=pulumi.get(__ret__, 'container_status'),
        created_date_time=pulumi.get(__ret__, 'created_date_time'),
        data_format=pulumi.get(__ret__, 'data_format'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        refresh_details=pulumi.get(__ret__, 'refresh_details'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type')) | [
19,
224
] |
def METHOD_NAME(instance):
    """
    Return the `ContentType` ID for the given object.
    """
    content_type = ContentType.objects.get_for_model(instance)
    if content_type:
        return content_type.pk
    else:
        return None | [
459,
44,
147
] |
def METHOD_NAME(self):
    self._test_qt_labels_right_resizing(HResizeTestDialog) | [
9,
3076,
415,
2786,
10272,
3493
] |
def METHOD_NAME(self) -> pd.Series:
    METHOD_NAME = self._capital
    if METHOD_NAME is arg_not_supplied:
        METHOD_NAME = 1.0
    if type(METHOD_NAME) is float or type(METHOD_NAME) is int:
        align_index = self._index_to_align_capital_to
        METHOD_NAME = pd.Series([METHOD_NAME] * len(align_index), align_index)
    return METHOD_NAME | [
11552
] |
def METHOD_NAME(cls):
    """
    Obtain an instance of the site service ~~->Site:Service~~
    :return: SiteService
    """
    from portality.bll.services import site
    return site.SiteService() | [
1055,
549
] |
def METHOD_NAME(self):
    print('waiting for second process')
    self.sock, addr = self.__server_socket.accept()
    print('accepted second process')
    from tests_python.debugger_unittest import ReaderThread
    self.reader_thread = ReaderThread(self.sock)
    self.reader_thread.start()
    self._sequence = -1
    # initial command is always the version
    self.write_version()
    self.log.append('start_socket')
    self.write_make_initial_run()
    time.sleep(1)
    seq = self.write_list_threads()
    msg = self.wait_for_list_threads(seq)
    assert msg.thread['name'] == 'MainThread'
    assert msg.thread['id'] == 'console_main'
    self.write_get_frame('console_main', '1')
    self.wait_for_vars([
        [
            '<var name="a" type="int" qualifier="%s" value="int: 10"' % (builtin_qualifier,),
            '<var name="a" type="int" value="int',  # jython
        ],
    ])
    self.finished_ok = True | [
22
] |
def METHOD_NAME(self, loc, dictionary):
    """
    Writes the supplied dictionary back to the advancedsettings.xml file
    """
    if not dictionary.get('advancedsettings', None):
        self.log('Empty dictionary passed to advancedsettings file writer. '
                 'Preventing write, backing up and removing file.')
        subprocess.call(['sudo', 'cp', loc, loc.replace('advancedsettings.xml',
                                                        'advancedsettings_backup.xml')])
        subprocess.call(['sudo', 'rm', '-f', loc])
        return
    with open(loc, 'w', encoding='utf-8') as open_file:
        xmltodict.unparse(input_dict=dictionary, output=open_file, pretty=True) | [
77,
-1
] |
def METHOD_NAME(self, elapsed):
    if self.active and self.owner.is_alive:
        self.chunk_elapsed += elapsed
        if self.chunk_elapsed >= self.interval:
            if self.stop_on_next_tick:
                self.stop()
            else:
                self.set_remaining(self.chunk_elapsed)
                self.chunk_elapsed = 0
                if self.type == MirrorTimerTypes.BREATH:
                    self.handle_damage_timer(0.10)  # Damage: 10% of players max health.
                elif self.type == MirrorTimerTypes.FATIGUE:
                    self.handle_damage_timer(0.20)  # Damage: 20% of players max health.
                else:  # Feign Death.
                    self.handle_feign_death_timer() | [
86
] |
def METHOD_NAME(self, Create):
    Create.side_effect = fake_migration_create
    source = self.client.Dataset(1)
    target = self.client.Dataset(2)
    migration = source.migrate(target=target.id, follow=False)
    self.assertEqual(migration.source_id, source.id)
    self.assertEqual(migration.target_id, target.id)
    self.assertEqual(migration.commit_mode, 'append') | [
9,
2672,
1030,
126,
147
] |
def METHOD_NAME(self) -> SimpleHeaderPaginator:
    """Return a new paginator for GitLab API endpoints.
    Returns:
        A new paginator.
    """
    return SimpleHeaderPaginator("X-Next-Page") | [
19,
80,
4427
] |
def METHOD_NAME(self): | [
9,
539,
6679,
69
] |
def METHOD_NAME(self, output_channels):
    if output_channels not in [1, 3]:
        raise ValueError(
            "Received invalid argument output_channels. "
            f"output_channels must be in 1 or 3. Got {output_channels}"
        )
    self.output_channels = output_channels | [
250,
362,
434
] |
def METHOD_NAME(self):
    self.module_util_all.exists_runner("test-1-20150125")
    rvalue = self.module_util_all.delete_runner()
    self.assertEqual(rvalue, None) | [
9,
34,
1102
] |
def METHOD_NAME(manifest_string):
    """Does the stubification on an XML string for mobile-install.
    Args:
        manifest_string: the input manifest as a string.
    Returns:
        A tuple of (output manifest, old application class, app package)
    Raises:
        Exception: if something goes wrong
    """
    manifest, application = _ParseManifest(manifest_string)
    old_application = application.get(
        "{%s}name" % ANDROID, "android.app.Application")
    application.set("{%s}name" % ANDROID, MOBILE_INSTALL_STUB_APPLICATION)
    application.attrib.pop("{%s}hasCode" % ANDROID, None)
    read_permission = manifest.findall(
        './uses-permission[@android:name="%s"]' % READ_EXTERNAL_STORAGE,
        namespaces={"android": ANDROID})
    if not read_permission:
        read_permission = ElementTree.Element("uses-permission")
        read_permission.set("{%s}name" % ANDROID, READ_EXTERNAL_STORAGE)
        manifest.insert(0, read_permission)
    new_manifest = ElementTree.tostring(manifest)
    app_package = manifest.get("package")
    if not app_package:
        raise BadManifestException("manifest tag does not have a package specified")
    return (new_manifest, old_application, app_package) | [
-1,
18,
428
] |
def METHOD_NAME(self):
    return ContentType.objects.get_for_model(self) | [
459,
44
] |
def METHOD_NAME(self): | [
9,
293,
564,
1953,
1541,
1171
] |
def METHOD_NAME(self, ctx, thing):
    print("test_i64({})".format(thing))
    return thing | [
9,
10574
] |
def METHOD_NAME(cond_type: Type[Any], return_orig: bool = True, return_const: Any = None):
    def helper(fn):
        if return_orig:
            @functools.wraps(fn)
            def wrapper_orig(*args):
                if _orig_isinstance(args[-1], cond_type):
                    return fn(*args)
                return args[-1]
            return wrapper_orig
        else:
            @functools.wraps(fn)
            def wrapper_const(*args):
                if _orig_isinstance(args[-1], cond_type):
                    return fn(*args)
                return return_const
            return wrapper_const
    return helper | [
22,
-1,
89
] |
def METHOD_NAME(self, tags, requestContext=None):
    headers = [
        header + '=' + value
        for (header, value)
        in (requestContext or {}).get('forwardHeaders', {}).items()
    ]
    return 'TagDB.find_series:' + ':'.join(sorted(tags)) + ':' + ':'.join(sorted(headers)) | [
416,
4045,
6017
] |
def METHOD_NAME(self):
    hex_str_v = self._api_common.get_reg(self.getreg_path, MMC_VERSION_REG)
    return int(hex_str_v, 16) | [
19,
16438,
281
] |
def METHOD_NAME():
    @I.ir_module
    class TestModule:
        I.module_attrs({"device_num": 10})
        I.module_global_infos(
            {
                "mesh": [
                    R.device_mesh((2, 2), I.Range(0, 4)),  # mesh[0]
                    R.device_mesh((1,), I.Range(4, 5)),  # mesh[1]
                ]
            }
        )

        @T.prim_func
        def tir_func(
            x: T.Buffer((T.int64(128), T.int64(128)), "float32"),
            y: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        ):
            T.func_attr({"tir.noalias": True})
            for i, j in T.grid(T.int64(128), T.int64(128)):
                with T.block():
                    vi, vj = T.axis.remap("SS", [i, j])
                    y[vi, vj] = x[vi, vj] + 1.0

        @R.function
        def foo(
            x: R.DTensor((128, 128), "float32", device_mesh="mesh[0]", placement="S[0], R"),
        ) -> R.DTensor((128, 128), "float32", device_mesh="mesh[0]", placement="S[0], R"):
            gv0 = R.dist.call_tir(
                TestModule.tir_func,
                x,
                R.DTensor(
                    shape=(128, 128), dtype="float32", device_mesh="mesh[0]", placement="S[0], R"
                ),
            )
            gv1 = R.add(
                gv0, R.dist.const(1.0, struct_info=R.DTensor((), "float32", "mesh[0]", "R, R"))
            )
            return gv1

    _check(TestModule) | [
9,
928
] |
def METHOD_NAME() -> PerformanceCalculator:
    return PerformanceCalculator(
        timestamp_column_name='timestamp',
        y_pred='y_pred',
        y_true='y_true',
        metrics=['mae', 'mape', 'mse', 'msle', 'rmse', 'rmsle'],
        problem_type='regression',
    ) | [
3731,
8688
] |
def METHOD_NAME():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-n', '--messages-number', metavar='<messages_number>', type=int,
                            help='Number of messages to send', required=True, default=0,
                            dest='messages_number')
    arg_parser.add_argument('-m', '--message', metavar='<message>', type=str,
                            help='Message to send', required=False, default=DEFAULT_MESSAGE,
                            dest='message')
    arg_parser.add_argument('-a', '--address', metavar='<address>', type=str,
                            help='Sender IP address', required=False, default='localhost',
                            dest='address')
    arg_parser.add_argument('-p', '--port', metavar='<port>', type=int,
                            help='Sender destination port', required=False, default=514,
                            dest='port')
    arg_parser.add_argument('--protocol', metavar='<protocol>', type=str,
                            help='Sender protocol', required=False, default='tcp', choices=['tcp', 'udp'],
                            dest='protocol')
    arg_parser.add_argument('--numbered-messages', metavar='<numbered_messages>', required=False, type=int,
                            help='Add number of message at the end of its content starting with the indicated number '
                                 'and increasing by 1 for each of them', dest='numbered_messages', default=-1)
    arg_parser.add_argument('-e', '--eps', metavar='<eps>', type=int,
                            help='Event per second', required=False, default=-1, dest='eps')
    arg_parser.add_argument('-d', '--debug', action='store_true', required=False, help='Activate debug logging')
    return arg_parser.parse_args() | [
19,
386
] |
def METHOD_NAME(self):
    buf = CircularBuffer(1)
    buf.write("1")
    self.assertEqual(buf.read(), "1") | [
9,
1046,
203,
3589,
1024,
7852,
6878
] |
async def METHOD_NAME(self, init):
    config = validate(init)
    resource = await resource_db.load(config.source.id)
    self.config = config
    self.resource = resource
    self.client = HubSpotClient(**resource.credentials.get_credentials(self, None))
    self.client.set_retries(self.node.on_connection_error_repeat) | [
0,
1
] |
def METHOD_NAME(self, command_args):
    super().METHOD_NAME(command_args)
    return self.build_paging(self._execute_operations, self._output) | [
1519
] |
def METHOD_NAME(self):
    # Test with a known magnetic field - optimized Cary&Hanson configuration
    # with a magnetic axis at R=0.9413. Field created using the Biot-Savart
    # solver given a set of two helical coils created using the CurveHelical
    # class. The total magnetic field is a superposition of a helical and
    # a toroidal magnetic field.
    curves = [CurveHelical(200, 2, 5, 2, 1., 0.3) for i in range(2)]
    curves[0].set_dofs(np.concatenate(([np.pi / 2, 0.2841], [0, 0])))
    curves[1].set_dofs(np.concatenate(([0, 0], [0, 0.2933])))
    currents = [3.07e5, -3.07e5]
    Btoroidal = ToroidalField(1.0, 1.0)
    Bhelical = BiotSavart([
        Coil(curves[0], Current(currents[0])),
        Coil(curves[1], Current(currents[1]))])
    bs = Bhelical + Btoroidal
    ma = CurveXYZFourier(300, 1)
    magnetic_axis_radius = 0.9413
    ma.set_dofs([0, 0, magnetic_axis_radius, 0, magnetic_axis_radius, 0, 0, 0, 0])
    R0 = [np.linalg.norm(ma.gamma()[0, :2])]
    Z0 = [ma.gamma()[0, 2]]
    phis = np.arctan2(ma.gamma()[:, 1], ma.gamma()[:, 0])
    res_tys, res_phi_hits = compute_fieldlines(
        bs, R0, Z0, tmax=2, phis=phis, stopping_criteria=[])
    for i in range(len(res_phi_hits[0])):
        assert np.linalg.norm(ma.gamma()[i + 1, :] - res_phi_hits[0][i, 2:5]) < 2e-3 | [
9,
11326,
-1
] |
def METHOD_NAME(self) -> Optional[str]:
    """
    Resource location.
    """
    return pulumi.get(self, "location") | [
708
] |
def METHOD_NAME(name: str) -> bool:
    return name.endswith(".tar.gz") or name.endswith(".whl") | [
137,
-1,
894,
754
] |
def METHOD_NAME(self) -> Optional['outputs.ArmIdWrapperResponse']:
    """
    PrivateEndpoint of a remote private endpoint connection
    """
    return pulumi.get(self, "private_endpoint") | [
547,
841
] |
def METHOD_NAME(repos, sentinel=True):
    """Select only virtual repos.
    repos can be either a list of repos, or a repo to descend through.
    if sentinel is False, will select all non virtual repos
    """
    if not isinstance(repos, (RepositoryGroup, list, tuple)):
        repos = get_raw_repos(repos)
    return [x for x in repos if isinstance(x, (virtual.tree, SimpleTree)) == sentinel] | [
19,
162,
4822
] |
def METHOD_NAME(con):
    table = con.table("array_table")
    expr = table.group_by(table.key).aggregate(collected=table.array_int.collect())
    result = expr.execute().sort_values("key").reset_index(drop=True)
    df = table.compile().toPandas()
    expected = (
        df.groupby("key")
        .array_int.apply(list)
        .reset_index()
        .rename(columns={"array_int": "collected"})
    )
    tm.assert_frame_equal(result, expected) | [
9,
877,
1444
] |
def METHOD_NAME(self, y):
    y_scale = (self.bounding_box[1][1] - self.bounding_box[0][1]) \
        / (y.max() - y.min() + 1e-10)  # to avoid division by zero
    y_trafo = y_scale * (y - y.min()) + self.y + self.offset[1]
    return y_trafo | [
1053,
320
] |
def METHOD_NAME(data):
    _id, rp_id, user_handle, privatekey, sign_count = data
    credential = Credential.create_non_resident_credential(_id, rp_id, privatekey, sign_count)
    assert credential.id == urlsafe_b64encode(bytearray({1, 2, 3, 4})).decode()
    assert credential.private_key == urlsafe_b64encode(privatekey).decode()
    assert credential.sign_count == 0
    assert credential.rp_id == "localhost"
    if credential.is_resident_credential is False:
        assert True
    else:
        assert False
    if credential.user_handle is None:
        assert True
    else:
        assert False | [
9,
-1,
1295,
3568
] |
def METHOD_NAME(self, test):
    self._add_result(test)
    super().METHOD_NAME(test) | [
238,
1434
] |
def METHOD_NAME(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
    """Like os.pipe() but with overlapped support and using handles not fds."""
    address = tempfile.mktemp(
        prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
            os.getpid(), next(_mmap_counter)))
    if duplex:
        openmode = _winapi.PIPE_ACCESS_DUPLEX
        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
        obsize, ibsize = bufsize, bufsize
    else:
        openmode = _winapi.PIPE_ACCESS_INBOUND
        access = _winapi.GENERIC_WRITE
        obsize, ibsize = 0, bufsize
    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
    if overlapped[0]:
        openmode |= _winapi.FILE_FLAG_OVERLAPPED
    if overlapped[1]:
        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
    else:
        flags_and_attribs = 0
    h1 = h2 = None
    try:
        h1 = _winapi.CreateNamedPipe(
            address, openmode, _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            flags_and_attribs, _winapi.NULL)
        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
        ov.GetOverlappedResult(True)
        return h1, h2
    except:
        if h1 is not None:
            _winapi.CloseHandle(h1)
        if h2 is not None:
            _winapi.CloseHandle(h2)
        raise | [
890
] |
def METHOD_NAME(response_body):
    # type: (bytes) -> Project
    with mock.patch.object(URLFetcher, "get_body_stream", return_value=BytesIO(response_body)):
        return Client().request(
            Endpoint(
                url="https://example.org/simple/",
                content_type="application/vnd.pypi.simple.v1+json",
            )
        ) | [
340,
377
] |
def METHOD_NAME():
    buf = BytesIO(b"""\ | [
9,
684,
559,
2706
] |
def METHOD_NAME(
    tmp_path_factory,
    testdata_cbma_full,
    estimator,
    corrector,
    diagnostics,
):
    """Run smoke test for CBMA workflow."""
    tmpdir = tmp_path_factory.mktemp("test_cbma_workflow_function_smoke")
    dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
    dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
    if estimator in [ALE, "mkdachi"]:
        with pytest.raises(ValueError):
            PairwiseCBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
    elif estimator == Fishers:
        with pytest.raises((AttributeError, ValueError)):
            PairwiseCBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
    else:
        workflow = PairwiseCBMAWorkflow(
            estimator=estimator,
            corrector=corrector,
            diagnostics=diagnostics,
            output_dir=tmpdir,
        )
        cres = workflow.fit(dset1, dset2)
        assert isinstance(cres, nimare.results.MetaResult)
        assert op.isfile(op.join(tmpdir, "boilerplate.txt"))
        assert op.isfile(op.join(tmpdir, "references.bib"))
        for imgtype in cres.maps.keys():
            filename = f"{imgtype}.nii.gz"
            outpath = op.join(tmpdir, filename)
            # For MKDAChi2 maps are None
            if cres.maps[imgtype] is not None:
                assert op.isfile(outpath)
        for tabletype in cres.tables.keys():
            filename = f"{tabletype}.tsv"
            outpath = op.join(tmpdir, filename)
            # For MKDAChi2 tables are None
            if cres.tables[tabletype] is not None:
                assert op.isfile(outpath) | [
9,
6289,
-1,
3855,
3515
] |
def METHOD_NAME(ql: qlast.Base) -> bytes:
    raise NotImplementedError(
        f'cannot get status for the {type(ql).__name__!r} AST node') | [
19,
452
] |
def METHOD_NAME(self):
    # Note: remove this if C git and dulwich implement dumb web shallow
    # clones.
    raise SkipTest("Dumb web shallow cloning not supported.") | [
9,
5194,
670,
280,
1493,
137,
1741
] |
def METHOD_NAME():
    shutil.rmtree(build_dir) | [
1356,
56,
1190
] |
def METHOD_NAME(self) -> 'outputs.ModernizeProjectModelResponseSystemData':
    return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os
    # Basic info: features and contributors
    chatbot.append([
        "What does this plugin do?",
        "Understand the content of a PDF paper and answer academic questions using its context. Plugin contributors: Hanzoe, binary-husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    # Try to import dependencies; if any are missing, suggest how to install them
    try:
        import fitz
    except:
        report_execption(chatbot, history,
                         a = f"Parsing project: {txt}",
                         b = f"Failed to import software dependencies. This module requires an extra dependency; install it with ```pip install --upgrade pymupdf```.")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    # Clear the history to avoid input overflow
    history = []
    # Check the input arguments; exit immediately if none are given
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "":
            txt = 'the input field is empty'
        report_execption(chatbot, history,
                         a=f"Parsing project: {txt}", b=f"Cannot find the local project or access denied: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    # Build the list of files to process
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    # If no file was found
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"Parsing project: {txt}", b=f"Cannot find any .tex or .pdf file: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    txt = file_manifest[0]
    # Start the actual task
    yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) | [
-1,
2918,
-1
] |
def METHOD_NAME(opt):
    build(opt)
    data_path = os.path.join(opt['datapath'], 'Flickr30k', 'dataset.json')
    image_path = os.path.join(opt['datapath'], 'Flickr30k', 'flickr30k_images')
    return data_path, image_path | [
157
] |
def METHOD_NAME(entry: Dict, similarity_key: str = "distance") -> float:
    """Get converted node similarity from distance."""
    distance = entry["_additional"].get(similarity_key, 0.0)
    if distance is None:
        return 1.0
    # convert distance https://forum.weaviate.io/t/distance-vs-certainty-scores/258
    return 1.0 - float(distance) | [
19,
1716,
5038
] |
def METHOD_NAME(self):
    self.none_removed('\v')
    self.none_removed('\n\v')
    self.check_result('\v\n', '\v')
    self.none_removed(' \n\v')
    self.check_result('\v\n ', '\v') | [
9,
1950,
1859
] |
def METHOD_NAME(self, position):
    """
    Shows the table's context menu.
    Args:
        position (QPoint): menu's position on the table
    """
    menu = IndexedValueTableContextMenu(self._ui.time_series_table, position)
    menu.exec(self._ui.time_series_table.mapToGlobal(position)) | [
697,
410,
198,
2470
] |
def METHOD_NAME(self):
    self.cpp_info.set_property("cmake_file_name", "Hexl")
    # TODO: Remove in Conan 2.0
    self.cpp_info.names["cmake_find_package"] = "Hexl"
    self.cpp_info.names["cmake_find_package_multi"] = "Hexl"
    if self.settings.build_type == "Debug":
        if not is_msvc(self):
            self.cpp_info.components["Hexl"].libs = ["hexl_debug"]
        else:
            self.cpp_info.components["Hexl"].libs = ["hexl"]
        self.cpp_info.components["Hexl"].requires.append("easyloggingpp::easyloggingpp")
    else:
        self.cpp_info.components["Hexl"].libs = ["hexl"]
    self.cpp_info.components["Hexl"].names["cmake_find_package"] = "hexl"
    self.cpp_info.components["Hexl"].names["cmake_find_package_multi"] = "hexl"
    self.cpp_info.components["Hexl"].set_property("cmake_target_name", "Hexl::hexl")
    self.cpp_info.components["Hexl"].set_property("pkg_config_name", "hexl")
    self.cpp_info.components["Hexl"].requires.append("cpu_features::libcpu_features")
    if self.settings.os in ["Linux", "FreeBSD"]:
        self.cpp_info.components["Hexl"].system_libs = ["pthread", "m"] | [
360,
100
] |
def METHOD_NAME(self, *args):
    self.executions.append((self.command, self.launch_order.copy(), args))
    return {stack: self.statuses_to_return[stack] for stack in self} | [
750
] |
def METHOD_NAME(filter_gen):
    _validator = hasValidFormula()
    def _wrapper(*args, **kwargs):
        _filter = filter_gen(*args, **kwargs)
        def _validating_filter(term):
            if not _validator(term):
                return False
            return _filter(term)
        return _validating_filter
    return _wrapper | [
2676,
43,
1205,
14598
] |
def METHOD_NAME(cls, api_version):
    return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} | [
379,
553
] |
def METHOD_NAME(self):
    self.check_args("Belyi/?field=Qsqrt-3", "6T15-4.2_4.2_4.2-a")
    self.not_check_args("Belyi/?field=Qsqrt-3", "1T1-1_1_1-a") | [
9,
101,
1070
] |
def METHOD_NAME(imgIn, imgOut):
    y1 = dace.define_local([W, H], dtype=datatype)
    y2 = dace.define_local([W, H], dtype=datatype)
    ym1 = dace.define_local([1], datatype)
    ym2 = dace.define_local([1], datatype)
    xm1 = dace.define_local([1], datatype)
    tm1 = dace.define_local([1], datatype)
    yp1 = dace.define_local([1], datatype)
    yp2 = dace.define_local([1], datatype)
    xp1 = dace.define_local([1], datatype)
    xp2 = dace.define_local([1], datatype)
    tp1 = dace.define_local([1], datatype)
    tp2 = dace.define_local([1], datatype)
    for i in range(W):
        @dace.tasklet
        def reset():
            in_ym1 >> ym1
            in_ym2 >> ym2
            in_xm1 >> xm1
            in_ym1 = 0
            in_ym2 = 0
            in_xm1 = 0
        for j in range(H):
            @dace.tasklet
            def comp_y1():
                in_img << imgIn[i, j]
                in_xm1 << xm1
                in_ym1 << ym1
                in_ym2 << ym2
                out_y1 >> y1[i, j]
                out_xm1 >> xm1
                out_ym1 >> ym1
                out_ym2 >> ym2
                out_y1 = a1 * in_img + a2 * in_xm1 + b1 * in_ym1 + b2 * in_ym2
                out_xm1 = in_img
                out_ym2 = in_ym1
                out_ym1 = out_y1
    for i in range(W):
        @dace.tasklet
        def reset2():
            in_yp1 >> yp1
            in_yp2 >> yp2
            in_xp1 >> xp1
            in_xp2 >> xp2
            in_yp1 = 0
            in_yp2 = 0
            in_xp1 = 0
            in_xp2 = 0
        for j in range(H - 1, -1, -1):
            @dace.tasklet
            def comp_y2():
                in_img << imgIn[i, j]
                in_xp1 << xp1
                in_xp2 << xp2
                in_yp1 << yp1
                in_yp2 << yp2
                out_y2 >> y2[i, j]
                out_xp1 >> xp1
                out_xp2 >> xp2
                out_yp1 >> yp1
                out_yp2 >> yp2
                out_y2 = a3 * in_xp1 + a4 * in_xp2 + b1 * in_yp1 + b2 * in_yp2
                out_xp2 = in_xp1
                out_xp1 = in_img
                out_yp2 = in_yp1
                out_yp1 = out_y2
    @dace.map
    def comp_iout(i: _[0:W], j: _[0:H]):
        in_y1 << y1[i, j]
        in_y2 << y2[i, j]
        out_img >> imgOut[i, j]
        out_img = c1 * (in_y1 + in_y2)
    for j in range(H):
        @dace.tasklet
        def reset3():
            in_ym1 >> ym1
            in_ym2 >> ym2
            in_tm1 >> tm1
            in_ym1 = 0
            in_ym2 = 0
            in_tm1 = 0
        for i in range(W):
            @dace.tasklet
            def comp_y12():
                in_img << imgOut[i, j]
                in_tm1 << tm1
                in_ym1 << ym1
                in_ym2 << ym2
                out_y1 >> y1[i, j]
                out_tm1 >> tm1
                out_ym1 >> ym1
                out_ym2 >> ym2
                out_y1 = a5 * in_img + a6 * in_tm1 + b1 * in_ym1 + b2 * in_ym2
                out_tm1 = in_img
                out_ym2 = in_ym1
                out_ym1 = out_y1
    for j in range(H):
        @dace.tasklet
        def reset4():
            in_yp1 >> yp1
            in_yp2 >> yp2
            in_tp1 >> tp1
            in_tp2 >> tp2
            in_yp1 = 0
            in_yp2 = 0
            in_tp1 = 0
            in_tp2 = 0
        for i in range(W - 1, -1, -1):
            @dace.tasklet
            def comp_y22():
                in_img << imgOut[i, j]
                in_tp1 << tp1
                in_tp2 << tp2
                in_yp1 << yp1
                in_yp2 << yp2
                out_y2 >> y2[i, j]
                out_tp1 >> tp1
                out_tp2 >> tp2
                out_yp1 >> yp1
                out_yp2 >> yp2
                out_y2 = a7 * in_tp1 + a8 * in_tp2 + b1 * in_yp1 + b2 * in_yp2
                out_tp2 = in_tp1
                out_tp1 = in_img
                out_yp2 = in_yp1
                out_yp1 = out_y2
    @dace.map
    def comp_iout2(i: _[0:W], j: _[0:H]):
        in_y1 << y1[i, j]
        in_y2 << y2[i, j]
        out_img >> imgOut[i, j]
        out_img = c1 * (in_y1 + in_y2) | [
14394
] |
def METHOD_NAME(self):
    def draw_circle(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDBLCLK:
            cv2.circle(
                self.overlay,
                (x, int(y - self.side_img.shape[0])),
                self.circle_scale,
                self.color_lst[self.bp_cnt],
                -1,
            )
            cv2.putText(
                self.overlay,
                str(self.bp_cnt + 1),
                (x + 4, int(y - self.side_img.shape[0])),
                cv2.FONT_HERSHEY_SIMPLEX,
                self.font_size,
                self.color_lst[self.bp_cnt],
                2,
            )
            self.cord_written = True

    for bp_cnt, bp_name in enumerate(self.bp_list):
        self.cord_written = False
        self.bp_cnt = bp_cnt
        self.side_img = np.zeros(
            (int(self.img_height / 4), self.img_width, 3), np.uint8
        )
        cv2.putText(
            self.side_img,
            "Double left click on body part {}.".format(bp_name),
            (10, 50),
            self.font,
            self.font_size,
            self.color_lst[bp_cnt],
            2,
        )
        img_concat = cv2.vconcat([self.side_img, self.overlay])
        cv2.namedWindow("Define pose", cv2.WINDOW_NORMAL)
        cv2.imshow("Define pose", img_concat)
        while not self.cord_written:
            cv2.setMouseCallback("Define pose", draw_circle)
            img_concat = cv2.vconcat([self.side_img, self.overlay])
            cv2.namedWindow("Define pose", cv2.WINDOW_NORMAL)
            cv2.imshow("Define pose", img_concat)
            cv2.waitKey(1)
        cv2.destroyWindow("Define pose")
    self.save() | [
1440
] |
def METHOD_NAME(self, particulator):
    if len(particulator.products) != 0:
        path = (
            self.products_file_path
            + "_num"
            + self.add_leading_zeros(particulator.n_steps)
        )
        self.exported_times["products"][path] = (
            particulator.n_steps * particulator.dt
        )
        if self.verbose:
            print("Exporting Products to vtk, path: " + path)
        payload = {}
        if particulator.mesh.dimension != 2:
            raise NotImplementedError(
                "Only 2 dimensions data is supported at the moment."
            )
        data_shape = (particulator.mesh.grid[1], particulator.mesh.grid[0], 1)
        for k in particulator.products.keys():
            v = particulator.products[k].get()
            if isinstance(v, np.ndarray):
                if v.shape == particulator.mesh.grid:
                    payload[k] = v[:, :, np.newaxis]
                else:
                    if self.verbose:
                        print(
                            f"{k} shape {v.shape} not equals data shape {data_shape}"
                            f" and will not be exported",
                            file=sys.stderr,
                        )
            elif isinstance(v, numbers.Number):
                if self.verbose:
                    print(
                        f"{k} is a Number and will not be exported", file=sys.stderr
                    )
            else:
                if self.verbose:
                    print(f"{k} export is not possible", file=sys.stderr)
        y, x, z = np.mgrid[
            : particulator.mesh.grid[0] + 1, : particulator.mesh.grid[1] + 1, :1
        ]
        y = y * particulator.mesh.size[0] / particulator.mesh.grid[0]
        x = x * particulator.mesh.size[1] / particulator.mesh.grid[1]
        z = z * 1.0
        gridToVTK(path, x, y, z, cellData=payload)
    else:
        if self.verbose:
            print("No products to export") | [
294,
4866
] |
def METHOD_NAME(params: List[torch.Tensor], arg: Any) -> Any:
    ColoParamOpHookManager._trigger_post_forward(params)
    return PostFwdPreBwd.apply(params, arg) | [
72,
441
] |
def METHOD_NAME(self) -> Sequence['outputs.ResourceReferenceResponse']:
    """
    Collector Policies for Azure Traffic Collector.
    """
    return pulumi.get(self, "collector_policies") | [
4523,
4152
] |
def METHOD_NAME(self, state: SDFGState, sdfg: SDFG):
    map_entry = self.map_entry
    current_map = map_entry.map
    # Expand the innermost map if multidimensional
    if len(current_map.params) > 1:
        ext, rem = dace.transformation.helpers.extract_map_dims(sdfg, map_entry,
                                                                list(range(len(current_map.params) - 1)))
        map_entry = rem
        current_map = map_entry.map
    subgraph = state.scope_subgraph(map_entry)
    # Set the schedule
    current_map.schedule = dace.dtypes.ScheduleType.SVE_Map
    # Infer all connector types and apply them
    inferred = infer_types.infer_connector_types(sdfg, state, subgraph)
    infer_types.apply_connector_types(inferred)
    # Infer vector connectors and AccessNodes and apply them
    vector_inference.infer_vectors(sdfg,
                                   state,
                                   map_entry,
                                   self.vec_len,
                                   flags=vector_inference.VectorInferenceFlags.Allow_Stride,
                                   METHOD_NAME=True) | [
231
] |
def METHOD_NAME(json_result, observable):
    links = set()
    for page in json_result:
        if not page.get("page"):
            continue
        # IP iocs has more data than the rest
        if not isinstance(observable, Ip) and page["page"].get("ip"):
            try:
                ip = page["page"]["ip"]
                new_ip = Ip.get_or_create(value=ip)
                new_ip.add_context({"source": "UrlScanIo"})
                links.update(
                    new_ip.active_link_to(observable, "ip", "UrlScanIo Query")
                )
            except ObservableValidationError:
                logging.error("This ip address is not valid %s" % ip)
        if not isinstance(observable, Hostname) and page["page"].get("domain"):
            try:
                hostname = page["page"]["domain"]
                new_host = Hostname.get_or_create(value=hostname)
                new_host.add_context({"source": "UrlScanIo"})
                links.update(
                    new_host.active_link_to(
                        observable, "hostname", "UrlScanIo Query"
                    )
                )
            except ObservableValidationError:
                logging.error("This hostname not valid: %s" % hostname)
        if not isinstance(observable, Url) and page["page"].get("url"):
            try:
                url = page["page"]["url"]
                new_url = Url.get_or_create(value=url)
                new_url.add_context({"source": "UrlScanIo"})
                links.update(
                    new_url.active_link_to(observable, "url", "UrlScanIo Query")
                )
            except ObservableValidationError:
                logging.error("This url is not valid %s" % url)
        links.update(UrlScanIoApi._process_asn_data(page, observable)) | [
356,
365
] |
def METHOD_NAME(path):
    '''Checks if file is in json format and returns data in dictionary form'''
    with open(path) as f:
        dashboards_dict = validate_json(f)
    if not dashboards_dict:
        raise JsonFormattingError("{} content could not be loaded".format(path))
    return dashboards_dict | [
19,
734,
4774,
763
] |
def METHOD_NAME(name="MultiTaskProduction"):
    workload = newWorkload("MultiTaskProduction")
    workload.setOwner("DMWMTest")
    workload.setStartPolicy('MonteCarlo')
    workload.setEndPolicy('SingleShot')
    # //
    # // set up the production task
    #//
    production = workload.newTask("Production1")
    production.addProduction(totalevents = 1000)
    prodCmssw = production.makeStep("cmsRun1")
    prodCmssw.setStepType("CMSSW")
    prodStageOut = prodCmssw.addStep("stageOut1")
    prodStageOut.setStepType("StageOut")
    production.applyTemplates()
    production.setSiteWhitelist(["T2_XX_SiteA"])
    # //
    # // set up the merge task
    #//
    merge = production.addTask("Merge")
    mergeCmssw = merge.makeStep("cmsRun1")
    mergeCmssw.setStepType("CMSSW")
    mergeStageOut = mergeCmssw.addStep("stageOut1")
    mergeStageOut.setStepType("StageOut")
    merge.applyTemplates()
    # //
    # // populate the details of the production tasks
    #//
    # //
    # // production cmssw step
    #//
    #
    # TODO: Anywhere helper.data is accessed means we need a method added to the
    # type based helper class to provide a clear API.
    prodCmsswHelper = prodCmssw.getTypeHelper()
    prodCmsswHelper.data.application.setup.cmsswVersion = "CMSSW_X_Y_Z"
    prodCmsswHelper.data.application.setup.softwareEnvironment = " . /uscmst1/prod/sw/cms/bashrc prod"
    prodCmsswHelper.data.application.configuration.configCacheUrl = "http://whatever"
    prodCmsswHelper.addOutputModule("writeData", primaryDataset = "Primary",
                                    processedDataset = "Processed",
                                    dataTier = "TIER")
    #print prodCmsswHelper.data
    # //
    # // production stage out step
    #//
    prodStageOutHelper = prodStageOut.getTypeHelper()
    # //
    # // merge cmssw step
    #//
    # point it at the input step from the previous task
    merge.setInputReference(prodCmssw, outputModule = "writeData")
    # //
    # // populate the details of the merge tasks
    #//
    # print workload.data
    production = workload.newTask("Production2")
    production.addProduction(totalevents = 2000)
    prodCmssw = production.makeStep("cmsRun1")
    prodCmssw.setStepType("CMSSW")
    prodStageOut = prodCmssw.addStep("stageOut1")
    prodStageOut.setStepType("StageOut")
    production.applyTemplates()
    production.setSiteWhitelist(["T2_XX_SiteA"])
    # //
    # // set up the merge task
    #//
    merge = production.addTask("Merge")
    mergeCmssw = merge.makeStep("cmsRun1")
    mergeCmssw.setStepType("CMSSW")
    mergeStageOut = mergeCmssw.addStep("stageOut1")
    mergeStageOut.setStepType("StageOut")
    merge.applyTemplates()
    # //
    # // populate the details of the production tasks
    #//
    # //
    # // production cmssw step
    #//
    #
    # TODO: Anywhere helper.data is accessed means we need a method added to the
    # type based helper class to provide a clear API.
    prodCmsswHelper = prodCmssw.getTypeHelper()
    prodCmsswHelper.data.application.setup.cmsswVersion = "CMSSW_X_Y_Z"
    prodCmsswHelper.data.application.setup.softwareEnvironment = " . /uscmst1/prod/sw/cms/bashrc prod"
    prodCmsswHelper.data.application.configuration.configCacheUrl = "http://whatever"
    prodCmsswHelper.addOutputModule("writeData", primaryDataset = "Primary",
                                    processedDataset = "Processed",
                                    dataTier = "TIER")
    #print prodCmsswHelper.data
    # //
    # // production stage out step
    #//
    prodStageOutHelper = prodStageOut.getTypeHelper()
    # //
    # // merge cmssw step
    #//
    # point it at the input step from the previous task
    merge.setInputReference(prodCmssw, outputModule = "writeData")
    return workload | [
129,
2454
] |
def METHOD_NAME(self, fsm):
    if fsm.getDebugFlag() == True:
        fsm.getDebugStream().write('TRANSITION : Default\n')
    msg = "\n\tState: %s\n\tTransition: %s" % (
        fsm.getState().getName(), fsm.getTransition())
    raise statemap.TransitionUndefinedException, msg | [
235
] |
def METHOD_NAME(tmp_path: Path):
    with Cache(tmp_path) as cache:
        hello = cache.directory / "hello.txt"
        hello.write_text("world")
    assert (tmp_path / "hello.txt").is_file() | [
9,
596,
2851
] |
def METHOD_NAME(self, item):
    return not item.passed or self._warning_or_error(item) | [
1423,
894,
3437,
894,
168
] |
def METHOD_NAME(self, event) -> None:
    self.set_debug_mode(False) | [
276,
538
] |
def METHOD_NAME(args):
    assert len(args) in [0, 1]
    if (len(args) == 0):
        n_trials = 10
    else:
        n_trials = int(args[0])
    print("n_trials:", n_trials)
    assert n_trials >= 0
    def exercise_array_alignment(f):
        for n in range(2, 6):
            exercise_given_members_size(array_alignment=f, n=n, n_trials=n_trials)
        exercise_exceptions(array_alignment=f)
    exercise_array_alignment(fable.equivalence.array_alignment)
    if (fable.ext is not None):
        exercise_array_alignment(fem_array_alignment)
    exercise_cluster_unions()
    print("OK") | [
22
] |
def METHOD_NAME(request):
    mqtt = etree.Element("net.xmeter.samplers.ConnectSampler",
                         guiclass="net.xmeter.gui.ConnectSamplerUI",
                         testclass="net.xmeter.samplers.ConnectSampler",
                         testname=request.label)
    mqtt.append(JMX._string_prop("mqtt.server", request.config.get("addr", "127.0.0.1")))
    mqtt.append(JMX._string_prop("mqtt.port", "1883"))
    mqtt.append(JMX._string_prop("mqtt.version", "3.1"))
    mqtt.append(JMX._string_prop("mqtt.conn_timeout", "10"))
    mqtt.append(JMX._string_prop("mqtt.protocol", "TCP"))
    mqtt.append(JMX._string_prop("mqtt.ws_path", ""))
    mqtt.append(JMX._bool_prop("mqtt.dual_ssl_authentication", False))
    mqtt.append(JMX._string_prop("mqtt.clientcert_file_path", ""))
    mqtt.append(JMX._string_prop("mqtt.clientcert_password", ""))
    mqtt.append(JMX._string_prop("mqtt.client_id_prefix", "conn_"))
    mqtt.append(JMX._bool_prop("mqtt.client_id_suffix", True))
    mqtt.append(JMX._string_prop("mqtt.conn_keep_alive", "300"))
    mqtt.append(JMX._string_prop("mqtt.conn_attampt_max", "0"))
    mqtt.append(JMX._string_prop("mqtt.reconn_attampt_max", "0"))
    mqtt.append(JMX._string_prop("mqtt.conn_clean_session", "true"))
    return mqtt | [
19,
707,
8755
] |
def METHOD_NAME(s):
    """The custom error message only happens when the test has already failed.""" | [
9,
1441,
972,
1186,
5133
] |
def METHOD_NAME():
    """Test dumping of a configuration with root settings."""
    conf = metomi.rose.config.ConfigNode({}, comments=["hello"])
    conf.set(["foo"], "foo", comments=["foo foo", "foo foo"])
    conf.set(["bar"], "bar")
    conf.set(["baz"], {})
    conf.set(["baz", "egg"], "egg")
    conf.set(["baz", "ham"], "ham")
    dumper = metomi.rose.config.ConfigDumper()
    target = StringIO()
    dumper.dump(conf, target)
    assert (
        target.getvalue()
        == """#hello | [
9,
278,
1563
] |
def METHOD_NAME(self):
    """
    Test filtering of a set(run) object
    """
    mask = Mask()
    mask.addRunWithLumiRanges(run=1, lumiList=[[1, 9], [12, 12], [31, 31], [38, 39], [49, 49]])
    runs = set()
    runs.add(Run(1, 148, 166, 185, 195, 203, 212))
    newRuns = mask.filterRunLumisByMask(runs=runs)
    self.assertEqual(len(newRuns), 0)
    runs = set()
    runs.add(Run(1, 2, 148, 166, 185, 195, 203, 212))
    runs.add(Run(2, 148, 166, 185, 195, 203, 212))
    newRuns = mask.filterRunLumisByMask(runs=runs)
    self.assertEqual(len(newRuns), 1)
    runs = set()
    runs.add(Run(1, 2, 9, 148, 166, 185, 195, 203, 212))
    newRuns = mask.filterRunLumisByMask(runs=runs)
    self.assertEqual(len(newRuns), 1)
    run = newRuns.pop()
    self.assertEqual(run.run, 1)
    self.assertEqual(run.lumis, [2, 9]) | [
9,
527
] |
def METHOD_NAME(self):
    with self.assertRaisesRegex(SyntaxError, "cannot assign"):
        compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
                "exec")
    with self.assertRaisesRegex(SyntaxError, "illegal expression"):
        compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
                "exec") | [
9,
8163,
776
] |