text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7) |
---|---|
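Each row below pairs a `text` field (a Python snippet whose target identifier is masked as `METHOD_NAME`) with an `ids` list of one to seven integers. As a minimal sketch of how such a two-column split might be consumed, assuming the table is published as a Hugging Face dataset (the dataset name used here is a hypothetical placeholder, not the real identifier):

```python
# Minimal sketch: iterating a text/ids split with the `datasets` library.
# Assumption: "example/method-name-corpus" is a placeholder dataset name.
from datasets import load_dataset

ds = load_dataset("example/method-name-corpus", split="train")
for row in ds.select(range(3)):   # peek at the first three rows
    snippet = row["text"]         # Python source with the target masked as METHOD_NAME
    ids = row["ids"]              # list of 1-7 integers paired with the snippet
    print(len(snippet), ids)
```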
def METHOD_NAME(args: argparse.Namespace) -> None:
"""Create vimiv cache, config and data directories.
The directories are either the directories defined in the freedesktop
standard or located in a temporary base directory.
Args:
args: Arguments returned from parser.parse_args().
"""
if args.temp_basedir:
global _tmpdir
# We want the temporary directory to stick around until the end
# pylint: disable=consider-using-with
_tmpdir = tempfile.TemporaryDirectory(prefix="vimiv-tempdir-")
args.basedir = _tmpdir.name
if args.basedir is not None:
xdg.basedir = args.basedir
xdg.makedirs(xdg.vimiv_cache_dir(), xdg.vimiv_config_dir(), xdg.vimiv_data_dir()) | [
176,
6031
] |
def METHOD_NAME(cls, args):
config = paddle.inference.Config(args.model_path + ".pdmodel", args.model_path + ".pdiparams")
if args.device == "gpu":
# set GPU configs accordingly
config.enable_use_gpu(100, 0)
elif args.device == "cpu":
# set CPU configs accordingly,
# such as enable_mkldnn, set_cpu_math_library_num_threads
config.disable_gpu()
elif args.device == "xpu":
# set XPU configs accordingly
config.enable_xpu(100)
config.switch_use_feed_fetch_ops(False)
predictor = paddle.inference.METHOD_NAME(config)
input_handles = [predictor.get_input_handle(name) for name in predictor.get_input_names()]
output_handles = [predictor.get_output_handle(name) for name in predictor.get_output_names()]
return cls(predictor, input_handles, output_handles) | [
129,
4476
] |
def METHOD_NAME(self):
self._ensureNamesStartUppercase = True
return self | [
602,
83,
447,
8391
] |
async def METHOD_NAME(
self,
resource_group_name: str,
automation_account_name: str,
count_type: Union[str, _models.CountType],
**kwargs: Any
) -> _models.NodeCounts:
"""Retrieve counts for Dsc Nodes.
:param resource_group_name: Name of an Azure Resource group. Required.
:type resource_group_name: str
:param automation_account_name: The name of the automation account. Required.
:type automation_account_name: str
:param count_type: The type of counts to retrieve. Known values are: "status" and
"nodeconfiguration". Required.
:type count_type: str or ~azure.mgmt.automation.models.CountType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NodeCounts or the result of cls(response)
:rtype: ~azure.mgmt.automation.models.NodeCounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-01-13-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-01-13-preview")
)
cls: ClsType[_models.NodeCounts] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
count_type=count_type,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("NodeCounts", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | [
19
] |
async def METHOD_NAME(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
[server] = await self.loop.start_serving_pipe(
UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader,
loop=self.loop)
trans, proto = await self.loop.create_pipe_connection(
lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream_reader, trans))
for i, (r, w) in enumerate(clients):
w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
response = await r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close()
server.close()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
return 'done' | [
9,
890
] |
def METHOD_NAME(test, params, env):
"""
Test virsh nodedev-detach and virsh nodedev-reattach
Step1.Init variables for test.
Step2.Check variables.
Step3.Do nodedev_detach_reattach.
"""
def get_driver_readlink(device_address):
"""
Readlink the driver of device.
"""
nodedevxml = nodedev_xml.NodedevXML.new_from_dumpxml(device_address)
driver_path = ('%s/driver') % (nodedevxml.get_sysfs_path())
try:
driver = os.readlink(driver_path)
except (OSError, UnicodeError):
return None
return driver
def get_device_driver(device_address):
"""
Get the driver of device.
:param device_address: The address of device, such as pci_0000_19_00_0
:return: The driver of device, such as ixgbe, igb
"""
driver_strings = get_driver_readlink(device_address).strip().split('/')
driver = driver_strings[-1]
return driver
def detach_reattach_nodedev(device_address, params, options=""):
"""
Do the detach and reattach.
Step1.Do detach.
Step2.Check the result of detach.
Step3.Do reattach.
Step4.Check the result of reattach
"""
# Libvirt acl polkit related params
uri = params.get("virsh_uri")
# Nodedev-detach/reattach are special: with the split daemon the connect
# driver is still qemu, and the connect_driver in the polkit rule should be
# 'QEMU' for detach and 'nodedev' for read. Update the polkit rule to
# include both QEMU and nodedev in that situation.
set_polkit = 'yes' == params.get('setup_libvirt_polkit', 'no')
if utils_split_daemons.is_modular_daemon() and set_polkit:
rule_path = '/etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules'
cmd = '''sed -i "s/'nodedev'/'nodedev'||'QEMU'/g" %s''' % rule_path
process.METHOD_NAME(cmd)
process.METHOD_NAME('cat /etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules')
unprivileged_user = params.get('unprivileged_user')
readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
# Do the detach
logging.debug('Node device name is %s.', device_address)
CmdResult = virsh.nodedev_detach(device_address, options,
unprivileged_user=unprivileged_user,
uri=uri, readonly=readonly, debug=True)
# Check the exit_status.
libvirt.check_exit_status(CmdResult)
# Check the driver.
driver = get_driver_readlink(device_address)
logging.debug('Driver after detach is %s.', driver)
if libvirt_version.version_compare(1, 1, 1):
device_driver_name = 'vfio-pci'
else:
device_driver_name = 'pci-stub'
if (driver is None) or (not driver.endswith(device_driver_name)):
test.fail("Driver for %s is not %s "
"after nodedev-detach" % (device_address, device_driver_name))
# Do the reattach.
CmdResult = virsh.nodedev_reattach(device_address, options)
# Check the exit_status.
libvirt.check_exit_status(CmdResult)
# Check the driver.
driver = get_driver_readlink(device_address)
if libvirt_version.version_compare(1, 1, 1):
device_driver_name = 'vfio-pci'
else:
device_driver_name = 'pci-stub'
if driver and driver.endswith(device_driver_name):
test.fail("Driver for %s is still %s "
"after nodedev-reattach" % (device_address, device_driver_name))
def pci_device_address():
"""
Get the address of network device which is safe to detach
"""
# find a physical interface to be detached by nodedev-detach
cmd = "ip l | grep NO-CARRIER"
out = process.METHOD_NAME(cmd, shell=True).stdout_text.strip().splitlines()
net_name = None
for link in out:
if "lo" not in link and "virbr0" not in link:
logging.debug(link)
net_name = link.split(":")[1].strip()
logging.debug("The interface to be detached is %s", net_name)
break
if not net_name:
test.cancel("There is no available network device to detach!")
# get the pci address of the interface
net_list = virsh.nodedev_list(tree='', cap='net')
net_lists = net_list.stdout.strip().splitlines()
net_name_string = '_' + net_name + '_'
for net_name_ in net_lists:
if net_name_string in net_name_:
eth_detach = net_name_
break
eth_addr = nodedev_xml.NodedevXML.new_from_dumpxml(eth_detach).parent
return eth_addr
def check_kernel_option():
"""
Check the kernel option if the kernel cmdline include "iommu=on" option
"""
check_cmd = "egrep '(intel|amd)_iommu=on' /proc/cmdline"
try:
check_result = process.METHOD_NAME(check_cmd, shell=True)
except Exception:
test.cancel("Operation not supported: neither VFIO nor KVM device assignment"
"is currently supported on this system")
else:
logging.debug('IOMMU is enabled')
#Check kernel iommu option
check_kernel_option()
# Init variables
device_address = params.get('nodedev_device', 'ENTER.YOUR.PCI.DEVICE.TO.DETACH')
if device_address.find('ENTER.YOUR.PCI.DEVICE.TO.DETACH') != -1:
replace_address = pci_device_address()
if replace_address:
device_address = device_address.replace('ENTER.YOUR.PCI.DEVICE.TO.DETACH', replace_address)
else:
test.cancel('Param device_address is not configured.')
device_opt = params.get('nodedev_device_opt', '')
status_error = ('yes' == params.get('status_error', 'no'))
with_driver = params.get('with_driver', 'yes') == 'yes'
# check variables.
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
test.cancel("API acl test not supported in current"
" libvirt version.")
# check the device driver and delete the driver
if not with_driver:
device_driver = get_device_driver(device_address)
remove_cmd = "modprobe -r %s" % device_driver
remove_opt = process.system(remove_cmd, shell=True)
if remove_opt != 0:
test.fail("Fail to remove the device driver : %s" % device_driver)
# Do nodedev_detach_reattach
try:
detach_reattach_nodedev(device_address, params, device_opt)
except exceptions.TestFail as e:
# Do nodedev detach and reattach failed.
if status_error:
return
else:
test.fail("Test failed in positive case."
"error: %s" % e)
# Do nodedev detach and reattach success.
if status_error:
test.fail('Test succeeded in negative case.')
# reload the device driver
if not with_driver:
reload_cmd = "modprobe %s" % device_driver
reload_opt = process.system(reload_cmd, shell=True)
if reload_opt != 0:
test.fail("Fail to reload the device driver : %s" % device_driver) | [
22
] |
def METHOD_NAME(tmpdir):
"""Ensure that the custom checkpoint IO plugin and torch checkpoint IO plugin is called when saving/loading."""
checkpoint_plugin = CustomCheckpointIO()
checkpoint_plugin = MagicMock(wraps=checkpoint_plugin, spec=CustomCheckpointIO)
ck = ModelCheckpoint(dirpath=tmpdir, save_last=True)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=SingleDeviceStrategy("cpu", checkpoint_io=checkpoint_plugin),
callbacks=ck,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=0,
limit_test_batches=1,
)
trainer.fit(model)
ckpt_files = {fn.name for fn in Path(tmpdir).glob("*.ckpt")}
assert ckpt_files == {"epoch=1-step=2.ckpt", "last.ckpt"}
assert trainer.checkpoint_callback.best_model_path == tmpdir / "epoch=1-step=2.ckpt"
assert trainer.checkpoint_callback.last_model_path == tmpdir / "last.ckpt"
assert checkpoint_plugin.save_checkpoint.call_count == 4
assert checkpoint_plugin.remove_checkpoint.call_count == 1
trainer.test(model, ckpt_path=ck.last_model_path)
checkpoint_plugin.load_checkpoint.assert_called_with(tmpdir / "last.ckpt")
checkpoint_plugin.reset_mock()
ck = ModelCheckpoint(dirpath=tmpdir, save_last=True)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=SingleDeviceStrategy("cpu"),
plugins=[checkpoint_plugin],
callbacks=ck,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=0,
limit_test_batches=1,
)
trainer.fit(model)
ckpt_files = {fn.name for fn in Path(tmpdir).glob("*.ckpt")}
assert ckpt_files == {"epoch=1-step=2.ckpt", "last.ckpt", "epoch=1-step=2-v1.ckpt", "last-v1.ckpt"}
assert trainer.checkpoint_callback.best_model_path == tmpdir / "epoch=1-step=2-v1.ckpt"
assert trainer.checkpoint_callback.last_model_path == tmpdir / "last-v1.ckpt"
assert checkpoint_plugin.save_checkpoint.call_count == 4
assert checkpoint_plugin.remove_checkpoint.call_count == 1
trainer.test(model, ckpt_path=ck.last_model_path)
checkpoint_plugin.load_checkpoint.assert_called_once()
checkpoint_plugin.load_checkpoint.assert_called_with(tmpdir / "last-v1.ckpt") | [
9,
1830,
2793,
259
] |
def METHOD_NAME(value: int) -> int: ... | [
0,
679,
168
] |
def METHOD_NAME(module: None | str | types.ModuleType, name: str) -> str: ... | [
11779
] |
def METHOD_NAME(node: NNCFNode, nncf_graph: NNCFGraph) -> bool:
return is_node_with_fused_bias(node, nncf_graph) | [
137,
1716,
41,
1173
] |
def METHOD_NAME(cls, routingKey):
def ifNone(v, default):
return default if v is None else v
# replace None values by "" in routing key
routingKey = [ifNone(key, "") for key in routingKey]
# then join them with "dot", and add the prefix
return cls.NAMESPACE + "." + ".".join(routingKey) | [
277,
39
] |
def METHOD_NAME(
premium_data_fixture, alternative_per_workspace_license_service
):
user = premium_data_fixture.create_user(
first_name="Test User", has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
handler = ViewHandler()
alternative_per_workspace_license_service.restrict_user_premium_to(
user, [table.database.workspace.id]
)
handler.create_view(
user=user, table=table, type_name="form", name="Form", mode="survey"
) | [
9,
129,
1281,
1029,
41,
12208,
2130
] |
def METHOD_NAME(projects: Sequence[Project]) -> None:
num_clustered = 0
try:
for project in projects:
with sentry_sdk.start_span(op="span_descs-cluster") as span:
span.set_data("project_id", project.id)
descriptions = list(redis.get_span_descriptions(project))
new_rules = []
if len(descriptions) >= MERGE_THRESHOLD:
clusterer = TreeClusterer(merge_threshold=MERGE_THRESHOLD)
clusterer.add_input(descriptions)
new_rules = clusterer.get_rules()
# Span description rules must match a prefix in the string
# (HTTP verb, domain...), but we only feed the URL path to
# the clusterer to avoid scrubbing other tokens. The prefix
# `**` in the glob ensures we match the prefix but we don't
# scrub it.
new_rules = [ReplacementRule(f"**{r}") for r in new_rules]
track_clusterer_run(ClustererNamespace.SPANS, project)
# The Redis store may have more up-to-date last_seen values,
# so we must update the stores to bring these values to
# project options, even if there aren't any new rules.
num_rules_added = rules.update_rules(ClustererNamespace.SPANS, project, new_rules)
# Track a global counter of new rules:
metrics.incr("span_descs.new_rules_discovered", num_rules_added, sample_rate=1.0)
# Clear transaction names to prevent the set from picking up
# noise over a long time range.
redis.clear_samples(ClustererNamespace.SPANS, project)
num_clustered += 1
finally:
unclustered = len(projects) - num_clustered
if unclustered > 0:
metrics.incr(
"span_descs.cluster_projects.unclustered", amount=unclustered, sample_rate=1.0
) | [
2059,
2847,
1244,
18082
] |
async def METHOD_NAME(tracer, test_spans):
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
pin = Pin.get_from(r)
assert pin is not None
pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(r)
await r.get("cheese")
test_spans.assert_trace_count(1)
test_spans.assert_span_count(1)
assert test_spans.spans[0].service == "redis"
assert test_spans.spans[0].get_tag("component") == "aredis"
assert test_spans.spans[0].get_tag("span.kind") == "client"
assert test_spans.spans[0].get_tag("db.system") == "redis"
assert "cheese" in test_spans.spans[0].get_tags() and test_spans.spans[0].get_tag("cheese") == "camembert" | [
9,
1094,
345
] |
def METHOD_NAME(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['inside'] = df.geo.inside_polygon(df.x, df.y, px, py)
assert df.inside.values.tolist() == [False, True, False] | [
9,
3005,
1117,
97
] |
def METHOD_NAME(self, path, binary):
tmp_path = path + ".__tmp__"
self.call_method("save_checkpoint", path=tmp_path, mode=int(binary))
os.rename(tmp_path, path) | [
73,
1830
] |
def METHOD_NAME(iris_split_dataset) -> Tuple[Dataset, Dataset, RandomForestClassifier]:
"""Return Iris train and val datasets and trained RF model."""
train_ds, test_ds = iris_split_dataset
clf = RandomForestClassifier(random_state=0, n_estimators=10, max_depth=2)
clf.fit(train_ds.features_columns, train_ds.label_col)
return train_ds, test_ds, clf | [
8245,
265,
126,
61,
578,
5207
] |
def METHOD_NAME(j, n):
"""
Computes the flip set of the j-th orbital in n modes.
Args:
j (int) : the orbital index
n (int) : the total number of modes
Returns:
numpy.ndarray: Array of mode indices
"""
indices = np.array([])
if n % 2 != 0:
return indices
if j < n / 2:
indices = np.append(indices, METHOD_NAME(j, n / 2))
elif n / 2 <= j < n - 1:
indices = np.append(indices, METHOD_NAME(j - n / 2, n / 2) + n / 2)
else:
indices = np.append(
np.append(indices, METHOD_NAME(j - n / 2, n / 2) + n / 2), n / 2 - 1
)
return indices | [
4864,
0
] |
def METHOD_NAME(topology_st, request):
"""
Enable USN plug-in
Enable MEMBEROF plugin
Add test entries
"""
inst = topology_st.standalone
log.info("Enable the USN plugin...")
plugin = USNPlugin(inst)
plugin.enable()
log.info("Enable the MEMBEROF plugin...")
plugin = MemberOfPlugin(inst)
plugin.enable()
inst.restart()
users_list = []
log.info("Adding test entries...")
users = UserAccounts(inst, DEFAULT_SUFFIX)
for id in range(USER_NUM):
user = users.create_test_user(uid=id)
users_list.append(user)
groups_list = []
log.info("Adding test groups...")
groups = Groups(inst, DEFAULT_SUFFIX)
for id in range(GROUP_NUM):
group = groups.create(properties={'cn': f'test_group{id}'})
groups_list.append(group)
def fin():
for user in users_list:
try:
user.delete()
except ldap.NO_SUCH_OBJECT:
pass
for group in groups_list:
try:
group.delete()
except ldap.NO_SUCH_OBJECT:
pass
request.addfinalizer(fin)
return {"users": users_list,
"groups": groups_list} | [
102
] |
def METHOD_NAME(dskey, export_types):
"""
Extract the first pair (dskey, exptype) found in export
"""
for exptype in export_types:
if (dskey, exptype) in export:
return (dskey, exptype) | [
19,
-1
] |
def METHOD_NAME(self):
"Test if there is an item in the cluster that has a different cardinality"
data = [ (1,5), (2,5), (2,6), (3,4), (3,5), (3,6,7), (7,3), (8,1), (8,2), (8), (9,2), (9,3) ]
self.assertRaises(ValueError, KMeansClustering, data) | [
9,
4431,
799
] |
def METHOD_NAME(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True) | [
1458
] |
def METHOD_NAME(
flow_definition_path: str, extra_context: Optional[Dict] = None
) -> Tuple[Dict[str, Question], str]:
previous_question: Optional[Question] = None
first_question_key: str = ""
questions: Dict[str, Question] = {}
questions_definition = InteractiveFlowCreator._parse_questions_definition(flow_definition_path, extra_context)
try:
for question in questions_definition.get("questions", []):
q = QuestionFactory.create_question_from_json(question)
if not first_question_key:
first_question_key = q.key
elif previous_question and not previous_question.default_next_question_key:
previous_question.set_default_next_question_key(q.key)
questions[q.key] = q
previous_question = q
return questions, first_question_key
except (KeyError, ValueError, AttributeError, TypeError) as ex:
raise QuestionsFailedParsingException(f"Failed to parse questions: {str(ex)}") from ex | [
557,
6756
] |
def METHOD_NAME(iterator):
seen = set()
for model in iterator:
if model.pk not in seen:
seen.add(model.pk)
yield model | [
2768
] |
def METHOD_NAME(self, indices):
"""Highlight the single paren that matches"""
self.text.tag_add("paren", indices[0])
self.text.tag_config("paren", self.HILITE_CONFIG) | [
129,
82,
235
] |
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Gets a list of ElasticSan operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.elasticsan.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245
] |
def METHOD_NAME(self) -> None:
model = beam_job_services.create_beam_job_run_model('FooJob')
model.put()
run = beam_job_domain.BeamJobRun(
model.id, 'FooJob', 'CANCELLING',
datetime.datetime.utcnow(), datetime.datetime.utcnow(), False)
swap_cancel_beam_job = self.swap_to_always_return(
beam_job_services, 'cancel_beam_job', value=run)
with swap_cancel_beam_job:
response = self.delete_json('/beam_job_run', {'job_id': model.id})
self.assertEqual(response, run.to_dict()) | [
9,
34,
10472,
202
] |
async def METHOD_NAME(self, user_input=None):
return await self.async_step_user(user_input) | [
958,
367,
176
] |
METHOD_NAME(self, id, source, target, directed=True, **attributes): | [
238,
540
] |
def METHOD_NAME(adata):
"""Perform geometric normalization on CITEseq data.
Add description of why geometric normalization
parameters
----------
adata: :class:`~anndata.AnnData`
The annotated data matrix.
returns
-------
AnnData
updates adata.X to contain the geometrically normalized values.
Example
-------
NEED TO IMPLEMENT AN EXAMPLE
"""
X = adata.X
X = np.nan_to_num(X)
# if the matrix is sparse make to array
if type(X) == csr_matrix:
X = X.todense()
# need to add a catch for newly encountered datatype
elif type(X) == SparseCSRView:
X = X.todense()
# ensure that X is an array otherwise this will cause type issue with multiplicative replacement function
X = np.array(X)
# replacement of zero values with very small numbers without changing the overall sums
X = multiplicative_replacement(X)
# centre log ratio transformation
X = clr(X)
adata.X = X
return None # adata object is automatically updated | [
1137,
5957
] |
def METHOD_NAME(self, size="unsigned", oldFirstGhostNode="unsigned"):
"Set the number of internal elements"
return "void" | [
1128,
101,
2026
] |
def METHOD_NAME(self):
msg = "cannot retrieve Fortran compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes["compilers"].get("fortran", None)
return str(self.spec.prefix.bin.nagfor) | [
7125
] |
async def METHOD_NAME(tracer, test_spans, traced_yaaredis):
service = str(uuid.uuid4())
Pin.override(traced_yaaredis, service=service, tracer=tracer)
await traced_yaaredis.set("cheese", "1")
test_spans.assert_trace_count(1)
test_spans.assert_span_count(1)
assert test_spans.spans[0].service == service | [
9,
549,
156
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(self):
return "natsbenchsize" | [
19,
44
] |
def METHOD_NAME():
my_cache = cache.LRUCache[int, Union[int, str]](
max_cache_items=5,
expiration_horizon=datetime.timedelta(hours=1),
retrieval_function=retrieval_function,
)
for i in range(5):
my_cache.put(i)
assert i in my_cache
my_cache.clear()
assert len(my_cache) == 0
with pytest.raises(KeyError):
my_cache.get(1, False) | [
9,
596,
537,
61,
1992
] |
def METHOD_NAME(self, line: str) -> Optional[PostfixLogLine]:
"""Parse a single postfix logline
Lines consist of date, host and process info, optional queue id followed by information on the logged event
Lines should look like:
```
Jan 1 01:02:03 mail postfix/something[1234566]: ABCDEF1234: field=value
```
Args:
line (str): The log line to be parsed
Returns:
Optional[PostfixLogLine]: If the log line was parsed successfully, a PostfixLogLine namedtuple is returned. If it could not be parsed, None is returned
"""
match = re.match(self.LINE_RE, line)
if not match:
return
line_data = match.groupdict()
# We only care for postfix lines
if "postfix" not in line_data["process"]:
return None
data = self._parse_fields(line_data["fields"].split(","), self.relevant_fields)
return PostfixLogLine(line_data["timestamp"], line_data["queue_id"], data) | [
214,
534
] |
def METHOD_NAME():
@T.prim_func
# Buffer A should be remapped
def elementwise(A: T.Buffer((128, 128), "float32")):
# Buffer B should be remapped
B = T.alloc_buffer((128, 128), "float32")
# i, j should be remapped
for i, j in T.grid(128, 128):
with T.block("B"):
# vi, vj should be remapped
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * 2.0
f1 = elementwise
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
_check_func_signature_remap(f1, f2)
# check root block
_check_block_signature_remap(f1.body.block, f2.body.block)
# check remap of i
assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
# check remap of j
assert f1.body.block.body.body.loop_var != f2.body.block.body.body.loop_var
# check inner block
def _get_block(f):
return f.body.block.body.body.body.block
_check_block_signature_remap(_get_block(f1), _get_block(f2)) | [
9,
53
] |
def METHOD_NAME(template, args=None):
"""URI template converter"""
if not args:
args = []
paths = template % dict([p, "{%s}" % p] for p in args)
return "%s/%s" % (prefix, paths) | [
197
] |
def METHOD_NAME(self):
self.round_trip(
b"\x82\x84\x53\xcd\xe2\x89\x16\xaa\x85\xfa",
Frame(True, OP_BINARY, b"Eggs"),
mask=True,
) | [
9,
808,
2062
] |
def METHOD_NAME(self, x):
h = self.encoder(x)
return self.enc_mu(h), self.enc_logvar(h) | [
421
] |
def METHOD_NAME(database, ethereum_inquirer, ethereum_accounts):
"""Test a simple beacon chain deposit contract"""
dbeth2 = DBEth2(database)
validator = Eth2Validator(index=507258, public_key=Eth2PubKey('0xa685b19738ac8d7ee301f434f77fdbca50f7a2b8d287f4ab6f75cae251aa821576262b79ae9d58d9b458ba748968dfda'), ownership_proportion=ONE) # noqa: E501
with database.user_write() as write_cursor:
dbeth2.add_validators( # add validator in DB so decoder can map pubkey -> index
write_cursor,
validators=[validator],
)
evmhash = deserialize_evm_tx_hash('0xb3658f940cab23f95273bb7c199eec5c71424a8bf2f111201f5cc2a1491d3471') # noqa: E501
user_address = ethereum_accounts[0]
events, _ = get_decoded_events_of_transaction(
evm_inquirer=ethereum_inquirer,
database=database,
tx_hash=evmhash,
)
assert events == [
EvmEvent(
tx_hash=evmhash,
sequence_index=0,
timestamp=TimestampMS(1674558203000),
location=Location.ETHEREUM,
event_type=HistoryEventType.SPEND,
event_subtype=HistoryEventSubType.FEE,
asset=A_ETH,
balance=Balance(amount=FVal('0.000788637337054068')),
location_label=user_address,
notes='Burned 0.000788637337054068 ETH for gas',
counterparty=CPT_GAS,
), EthDepositEvent(
tx_hash=evmhash,
validator_index=validator.index,
sequence_index=2,
timestamp=TimestampMS(1674558203000),
balance=Balance(amount=FVal('32')),
depositor=user_address,
),
] | [
9,
4634
] |
def METHOD_NAME(self):
'''
Generate dataset metadata
'''
# read labels list
labels = self._read_labels(get_path(self.data_dir, is_directory=True) / 'labels.txt')
# create label map
label_map = dict(enumerate(labels))
return {'label_map': label_map} | [
19,
1094
] |
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2015-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.ResourceHealth/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) | [
56,
245,
377
] |
def METHOD_NAME(self, workerName, errorMessage):
self.updateErrorWorker.execute(self.componentName, workerName, errorMessage,
conn=self.getDBConn(),
transaction=self.existingTransaction()) | [
86,
1794,
168
] |
def METHOD_NAME(key, value):
field = {'key': key}
if isinstance(value, dict) and list(value.keys()) == ['ref']:
field['refValue'] = value['ref']
else:
field['stringValue'] = value
return field | [
197,
97,
101
] |
def METHOD_NAME():
"""Test that the usage of use_legacy_supplementaries is deprecated."""
msg = "use_legacy_supplementaries"
session = CFG.start_session('my_session')
with pytest.warns(ESMValCoreDeprecationWarning, match=msg):
session.update({'use_legacy_supplementaries': False})
assert session['use_legacy_supplementaries'] is False | [
9,
1080,
3116,
12224,
3527,
240,
86
] |
def METHOD_NAME(self):
"""
Call this method after entering the map.
"""
self._hp = {}
self._hp_has_ship = {} | [
1754,
656
] |
def METHOD_NAME(
self,
bk_user,
project_id,
cluster_id,
config_id,
log_collect_result,
log_collect_meta_data,
allcontainers_update_params,
):
t_view = views.LogCollectViewSet.as_view({'put': 'update'})
request = factory.put(
f'/api/log_collect/projects/{project_id}/clusters/{cluster_id}/configs/{config_id}/',
allcontainers_update_params['req_params'],
)
force_authenticate(request, bk_user)
with mock.patch.object(
LogCollectorClient,
'update_collect_config',
return_value=log_collect_result,
) as update_collect_config:
t_view(request, project_id=project_id, cluster_id=cluster_id, pk=config_id)
update_collect_config.assert_called_with(
config_id=config_id, config=allcontainers_update_params['config_params']
)
assert (
LogCollectMetadata.objects.get(
config_id=config_id, project_id=project_id, cluster_id=cluster_id
).updator
== bk_user.username
) | [
9,
86,
-1,
200
] |
def METHOD_NAME(self):
"""Test the behaviour of the sessionizer when given zero events."""
index = "test_index"
sketch_id = 1
analyzer = SessionizerSketchPlugin(index, sketch_id)
analyzer.datastore.client = mock.Mock()
datastore = analyzer.datastore
_create_mock_event(datastore, 0, 0)
message = analyzer.run()
self.assertEqual(
message, "Sessionizing completed, number of session created: 0"
) | [
9,
313,
239
] |
def METHOD_NAME(cls):
pass | [
531,
481,
2
] |
def METHOD_NAME(
project_doc,
asset_doc=None,
task_name=None,
host_name=None,
system_settings=None
):
"""Prepare data for templates filling from entered documents and info.
This function does not "auto fill" any values except system settings and
it's on purpose.
Universal function to receive template data from passed arguments. Only
required argument is the project document; all other arguments are optional
and their values won't be added to the template data if they are not passed.
Required document fields:
Project: 'name', 'data.code', 'config.tasks'
Asset: 'name', 'data.parents', 'data.tasks'
Args:
project_doc (Dict[str, Any]): Mongo document of project from MongoDB.
asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB.
task_name (Union[str, None]): Task name under passed asset.
host_name (Union[str, None]): Used to fill '{app}' key.
system_settings (Union[Dict, None]): Prepared system settings.
They're queried if not passed (may be slower).
Returns:
Dict[str, Any]: Data prepared for filling workdir template.
"""
template_data = get_general_template_data(system_settings)
template_data.update(get_project_template_data(project_doc))
if asset_doc:
template_data.update(get_asset_template_data(
asset_doc, project_doc["name"]
))
if task_name:
template_data.update(get_task_template_data(
project_doc, asset_doc, task_name
))
if host_name:
template_data["app"] = host_name
return template_data | [
19,
671,
365
] |
def METHOD_NAME(self):
"""Return the max speech and sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions) | [
232,
2758
] |
def METHOD_NAME(self):
return util.format.astemperature(self.__tempmax, self.__unit) | [
-1
] |
def METHOD_NAME(self, request, queryset, view):
# Filter works only for "list" requests and allows to return
# only non-organization objects if org isn't specified
if (
view.detail or not view.iam_organization_field or
# FIXME: It should be handled in another way. For example, if we try to get information for a specific job
# and org isn't specified, we need to return the full list of labels, issues, comments.
# Allow crowdsourcing users to get labels/issues/comments related to specific job.
# Crowdsourcing user always has worker group and isn't a member of an organization.
(
view.__class__.__name__ in ('LabelViewSet', 'IssueViewSet', 'CommentViewSet') and
request.query_params.get('job_id') and
request.iam_context.get('organization') is None and
request.iam_context.get('privilege') == 'worker'
)
):
return queryset
visibility = None
org = request.iam_context['organization']
if org:
visibility = {'organization': org.id}
elif not org and self._parameter_is_provided(request):
visibility = {'organization': None}
if visibility:
org_id = visibility.pop("organization")
query = self._construct_filter_query(view.iam_organization_field, org_id)
return queryset.filter(query).distinct()
return queryset | [
527,
2386
] |
def METHOD_NAME(self):
# Even though this call retrieves the last 3 alarms,
# the last minor alarm won't be included as it's already cleared
alarms = self.cumulocity.get_last_n_alarms_from_device(self.project.deviceid, 3)
# Validate the last 3 measurements of thin-edge-child device
self.validate_alarm(
alarms[0],
"temperature_dangerous",
"CRITICAL",
"temperature is dangerously high",
)
self.validate_alarm(
alarms[1], "temperature_very_high", "MAJOR", "temperature is very high"
)
self.validate_alarm(
alarms[2],
"temperature_high",
"WARNING",
"temperature is high",
"2021-12-15T15:22:06.464247777+00:30",
) | [
187
] |
def METHOD_NAME(version: str, hashes: Dict[str, str], arch: str, edition="community") -> int:
if version >= "v5.2.0" and Components.importer in COMP_TO_BINARY.keys():
COMP_TO_BINARY.pop(Components.importer)
def do_valiate(cmd: str, comp: str) -> int:
nerrs = 0 # can't modify variable in closure in Python
cmd = f"{cmd} --version || {cmd} -V || {cmd} version"
try:
version_string = shell_cmd(cmd)
logging.debug(version_string)
matcher = Matcher(comp, version_string, version, hashes[comp], "community")
mismatches = matcher.match()
if len(mismatches) > 0:
nerrs += 1
msg = "\n\t\t".join(mismatches)
logging.error(f"{comp}:\n\t\t{msg}\n")
except Exception as e:
nerrs += 1
logging.error(f"On command: {cmd}\n Exception: {e}")
return nerrs
err_count = 0
# Depends on the actual package contents; no consistent pattern was found
for comp, hashsum in hashes.items():
if comp not in COMP_TO_BINARY.keys():
logging.warn(f"[{comp}] not supported")
continue
if edition == "community":
if in_community_tools_package(comp): # overlap in toolkit tarball and tidb tarball
for binary in COMP_TO_BINARY[comp]:
cmd = f"{tmp_dir}/tidb-community-toolkit-{version}-{arch}/bin/{binary}"
err_count += do_valiate(cmd, comp)
for binary in COMP_TO_BINARY[comp]:
env = f"LD_LIBRARY_PATH={tmp_dir}/tidb-community-server-{version}-{arch}/bin/tiflash"
cmd = f"{env} {tmp_dir}/tidb-community-server-{version}-{arch}/bin/{binary}"
err_count += do_valiate(cmd, comp)
else:
if comp not in COMP_TO_BINARY_COMMERCIRL.keys():
continue # not in commercial tarball, ignore
if in_commercial_tools_package(comp):
path = f"{tmp_dir}/tidb-toolkit-{version}-{arch}/bin"
elif comp == Components.tiflash and need_tiflash(version):
env = f"LD_LIBRARY_PATH={tmp_dir}/tiflash-{version}-{arch}"
path = f"{env} {tmp_dir}/tiflash-{version}-{arch}"
else:
path = f"{tmp_dir}/tidb-{version}-{arch}/bin"
for binary in COMP_TO_BINARY_COMMERCIRL[comp]:
cmd = f"{path}/{binary}"
err_count += do_valiate(cmd, comp)
return err_count | [
7763
] |
def METHOD_NAME(self, classname, cls=None, module=None, is_template=False,
baseclasses=None, filename=None, warn=False):
'''Register a new classname referring to a real class or
class definition in a module. Warn, if True will emit a warning message
when a class is re-declared.
.. versionchanged:: 1.9.0
`warn` was added.
.. versionchanged:: 1.7.0
:attr:`baseclasses` and :attr:`filename` added
.. versionchanged:: 1.0.5
:attr:`is_template` has been added in 1.0.5.
'''
if cls is None and module is None and baseclasses is None:
raise ValueError(
'You must specify either cls= or module= or baseclasses =')
if classname in self.classes:
if warn:
info = self.classes[classname]
Logger.warning('Factory: Ignored class "{}" re-declaration. '
'Current - module: {}, cls: {}, baseclass: {}, filename: {}. '
'Ignored - module: {}, cls: {}, baseclass: {}, filename: {}.'.
format(classname, info['module'], info['cls'],
info['baseclasses'], info['filename'], module, cls,
baseclasses, filename))
return
self.classes[classname] = {
'module': module,
'cls': cls,
'is_template': is_template,
'baseclasses': baseclasses,
'filename': filename} | [
372
] |
def METHOD_NAME(class_, default=True):
if default: return "";
memcpyList = []
for item in class_.iter():
if not item.subtype.isArray: continue;
name = filter.variableName(item.name)
type = filter.typeName(item.subtype.name)
count = item.subtype.count;
memcpyList.append("memcpy(this->%s, %s, %s * sizeof(%s));" % (name, name, count, type))
return " ".join(memcpyList) | [
527,
877,
2821
] |
def METHOD_NAME(self):
self.dialog.close() | [
531,
481
] |
def METHOD_NAME(self) -> None:
super().METHOD_NAME()
self.lv.deleteLater()
self.lv = None | [
531,
481
] |
def METHOD_NAME(self, *args, **kwargs):
return self._synchronize("solver", *args, **kwargs) | [
2644
] |
def METHOD_NAME(self, encoder):
return {v: k for k, v in encoder.items()} | [
129,
3642
] |
def METHOD_NAME(
self, mock_create_image, mock_send_mail, mock_create_logged_action
) -> None:
self.form.cleaned_data = {
"decision": QueuedImage.APPROVED,
"x_min": 0,
"y_min": 0,
"x_max": 200,
"y_max": 200,
"moderator_why_allowed": CopyrightOptions.PUBLIC_DOMAIN,
}
with patch.object(self.form.queued_image, "save"):
self.form.approved()
self.form.queued_image.save.assert_called_once()
mock_create_image.assert_called_once()
mock_send_mail.assert_called_once()
mock_create_logged_action.assert_called_once()
self.form.create_logged_action.assert_called_once()
self.assertEqual(
self.form.queued_image.decision, QueuedImage.APPROVED
)
self.assertEqual(self.form.queued_image.crop_min_x, 0)
self.assertEqual(self.form.queued_image.crop_min_y, 0)
self.assertEqual(self.form.queued_image.crop_max_x, 200)
self.assertEqual(self.form.queued_image.crop_max_y, 200) | [
9,
12427
] |
async def METHOD_NAME(self):
with self.assertRaisesRegex(
ConfigurationError, 'there is no field named "uuids" in model "Tournament"'
):
await Tortoise.init(
{
"connections": {
"default": {
"engine": "tortoise.backends.sqlite",
"credentials": {"file_path": ":memory:"},
}
},
"apps": {
"models": {
"models": ["tests.model_setup.model_bad_rel7"],
"default_connection": "default",
}
},
}
) | [
9,
256,
1985,
101,
623,
5405,
272
] |
def METHOD_NAME(lnglat: Tuple[float, float]) -> bool:
return Borough.objects.filter(geom__contains=Point(*lnglat)).exists() | [
137,
-1,
623,
17216
] |
def METHOD_NAME():
launcher = mlrun.launcher.local.ClientLocalLauncher(local=False)
runtime = mlrun.code_to_function(
name="test", kind="job", filename=str(func_path), handler=handler
)
run = mlrun.run.RunObject(spec=mlrun.model.RunSpec(inputs={"input1": 1}))
with pytest.raises(mlrun.errors.MLRunInvalidArgumentTypeError) as exc:
launcher._validate_runtime(runtime, run)
assert "Inputs should be of type Dict[str,str]" in str(exc.value) | [
9,
187,
1461
] |
def METHOD_NAME(self, extension = '*', newer = True):
"""Returns a list of existing files matching our basenames and the given extension.
First the files basename + extension are returned,
then basename + '-[0-9]+' + extension,
then basename + '-.+' + extension.
If newer is True (the default), only files that are newer than the jobfile() are returned.
"""
jobfile = self.jobfile()
if jobfile:
METHOD_NAME = util.METHOD_NAME(self.basenames(), extension)
if newer:
try:
return util.newer_files(METHOD_NAME, os.path.getmtime(jobfile))
except OSError:
pass
return list(METHOD_NAME)
return [] | [
1537
] |
def METHOD_NAME(module, toplevel_only=False):
return lineoffsets_in_file(module.__file__, toplevel_only) | [
-1,
623,
298
] |
async def METHOD_NAME(self, api_resource, api_version='v1.0'):
scopes = ['https://graph.microsoft.com/.default']
client = GraphClient(credential=self.credentials.get_credentials(), scopes=scopes)
endpoint = 'https://graph.microsoft.com/{}/{}'.format(api_version, api_resource)
try:
response = client.get(endpoint)
if response.status_code == 200:
return response.json()
# If response is 404 then it means there is no resource associated with the provided id
elif response.status_code == 404:
return {}
else:
print_exception('Failed to query Microsoft Graph endpoint \"{}\": status code {}'.
format(api_resource, response.status_code))
return {}
except Exception as e:
print_exception('Failed to query Microsoft Graph endpoint \"{}\": {}'.format(api_resource, e))
return {} | [
19,
10103,
303,
17
] |
def METHOD_NAME(self):
code = dedent("""\
def func():
yield 1
""")
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.index("func")) | [
9,
442,
1646,
9437,
385,
559,
41
] |
def METHOD_NAME() -> ChartData:
return ChartData(
"balances",
gettext("Net Worth"),
g.ledger.charts.net_worth(g.filtered, g.interval, g.conversion),
) | [
2508,
819,
-1
] |
async def METHOD_NAME(self) -> list:
"""
Returns a list of connection objects.
Requires the connections OAuth2 scope.
"""
return await self.request(Route("GET", "/users/@me/connections")) | [
19,
21,
560
] |
def METHOD_NAME(self):
self._clean()
token = str(uuid())
self[token] = int(time.time())
return token | [
370,
466
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
return self.build_lro_poller(self._execute_operations, None) | [
1519
] |
def METHOD_NAME(recipe, schema):
"""This is a custom validator for validating JSON documents. We implement a
custom resolver using `RefResolver <https://python-jsonschema.readthedocs.io/en/stable/references/#jsonschema.RefResolver>`_
to find schemas locally in order to validate buildspecs with schema files on local filesystem. This ensures changes to
schema can be done in sync with change to code base.
This method uses `Draft7Validator <https://python-jsonschema.readthedocs.io/en/stable/validate/#jsonschema.Draft7Validator>`_
for validating schemas. If there is an error during validation jsonschema will raise an exception of type
`jsonschema.exceptions.ValidationError <https://python-jsonschema.readthedocs.io/en/stable/errors/#jsonschema.exceptions.ValidationError>`_
Args:
recipe (dict): Loaded test recipe as YAML document
schema (dict): Schema document loaded in JSON format
Raises:
jsonschema.exceptions.ValidationError: if recipe fails to validate with schema
"""
# making sure input recipe and schema are dictionary
assert isinstance(recipe, dict)
assert isinstance(schema, dict)
validator = Draft7Validator(schema, resolver=resolver)
validator.validate(recipe) | [
343,
2889
] |
def METHOD_NAME(self):
self.zone.add_mx(
self.base_domain,
[
'10 mx1.%s' % self.base_domain,
'20 mx2.%s' % self.base_domain,
],
1000
)
record = self.zone.get_mx(self.base_domain)
self.assertEquals(set(record.resource_records),
set([u'10 mx1.%s.' % self.base_domain,
u'20 mx2.%s.' % self.base_domain]))
self.assertEquals(record.ttl, u'1000')
self.zone.update_mx(
self.base_domain,
[
'10 mail1.%s' % self.base_domain,
'20 mail2.%s' % self.base_domain,
],
50
)
record = self.zone.get_mx(self.base_domain)
self.assertEquals(set(record.resource_records),
set([u'10 mail1.%s.' % self.base_domain,
'20 mail2.%s.' % self.base_domain]))
self.assertEquals(record.ttl, u'50') | [
9,
5611
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Notes about the lock. Maximum of 512 characters.
"""
return pulumi.get(self, "notes") | [
5160
] |
def METHOD_NAME(x,lo,hi):
return max(min(x,hi),lo) | [
4897
] |
def METHOD_NAME(string: Account) -> bool:
"""Return true if the given string is a valid account name.
This does not check for the root account types, just the general syntax.
Args:
string: A string, to be checked for account name pattern.
Returns:
A boolean, true if the string has the form of an account's name.
"""
return (isinstance(string, str) and
bool(re.match('{}$'.format(ACCOUNT_RE), string))) | [
137,
1205
] |
def METHOD_NAME(self):
self.setup_clean_chain = True
self.num_nodes = 1 | [
0,
9,
434
] |
def METHOD_NAME(self):
def perform_update():
try:
self.update()
finally:
self._update_scheduled = False
if not self._update_scheduled:
self._update_scheduled = True
self.text.after_idle(perform_update) | [
507,
86
] |
def METHOD_NAME(self, variables):
param = self.param
eps_s = variables["Separator porosity"]
eps_p = variables["Positive electrode porosity"]
eps = pybamm.concatenation(eps_s, eps_p)
deps_dt_s = variables["Separator porosity change [s-1]"]
deps_dt_p = variables["Positive electrode porosity change [s-1]"]
deps_dt = pybamm.concatenation(deps_dt_s, deps_dt_p)
c_ox = variables[
"Separator and positive electrode oxygen concentration [mol.m-3]"
]
N_ox = variables["Oxygen flux [mol.m-2.s-1]"].orphans[1]
a_j_ox = variables[
"Positive electrode oxygen volumetric interfacial current density [A.m-3]"
]
source_terms = pybamm.concatenation(
pybamm.FullBroadcast(0, "separator", "current collector"),
param.s_ox_Ox * a_j_ox,
)
self.rhs = {
c_ox: (1 / eps)
* (-pybamm.div(N_ox) + source_terms / param.F - c_ox * deps_dt)
} | [
0,
8919
] |
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None,
content_key_policy_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetContentKeyPolicyResult]:
"""
Get the details of a Content Key Policy in the Media Services account
Azure REST API version: 2023-01-01.
:param str account_name: The Media Services account name.
:param str content_key_policy_name: The Content Key Policy name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
"""
... | [
19,
459,
59,
54,
146
] |
def METHOD_NAME(self, experience: Experience) -> None:
if self.disable:
return
self.make_experience_duration += time() - self.make_experience_start_time
batch_size, seq_len = experience.sequences.shape
self.make_experience_num_samples += batch_size
# actor generate
num_actions = experience.action_mask.size(1)
input_len = seq_len - num_actions
total_seq_len = (input_len + seq_len - 1) * num_actions / 2
self.make_experience_flop += (
self.actor_num_params * batch_size * total_seq_len * 2
)
# actor forward
self.make_experience_flop += self.actor_num_params * batch_size * seq_len * 2
# critic forward
self.make_experience_flop += self.critic_num_params * batch_size * seq_len * 2
# initial model forward
self.make_experience_flop += (
self.initial_model_num_params * batch_size * seq_len * 2
)
# reward model forward
self.make_experience_flop += (
self.reward_model_num_params * batch_size * seq_len * 2
) | [
69,
93,
13029,
1798
] |
def METHOD_NAME(instance, *args, **kwargs):
# This check must take place in the background thread.
if should_raise and not getattr(instance, self.method_name).running:
raise StopExecution
# This call must be sent to the main thread.
return async_method.__get__(instance, method)(*args, **kwargs) | [
291
] |
def METHOD_NAME(password: str, salt: str) -> bytes:
mykdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=bytes(salt, encoding="utf8"),
iterations=100000,
backend=default_backend(),
)
return mykdf.derive(password.encode("utf-8")) | [
13020
] |
def METHOD_NAME():
tokens = tokenize_target("foo -opt1='v, a, l, u, e', bar")
expected_tokens = ["foo", "-opt1='v, a, l, u, e'", ",", "bar"]
assert len(tokens) == len(expected_tokens)
assert tokens == expected_tokens | [
9,
4022,
1030,
41,
6512,
61,
97
] |
def METHOD_NAME(command, fail_msg=None):
"""Executes a command and prefixes the appropriate command for
Windows or Linux/UNIX.
Args:
command: Command list to execute.
fail_msg: Message describing the error in case the command fails.
Raises:
NetworkEmulatorError: If command fails a message is set by the fail_msg
parameter.
"""
if sys.platform == 'win32':
ipfw_command = ['ipfw.exe']
else:
ipfw_command = ['sudo', '-n', 'ipfw']
cmd_list = ipfw_command[:] + [str(x) for x in command]
cmd_string = ' '.join(cmd_list)
logging.debug('Running command: %s', cmd_string)
process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
if process.returncode != 0:
raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode, output,
error)
return output.strip() | [
22,
12569,
462
] |
def METHOD_NAME(tensor):
# noinspection PyProtectedMember
from jax._src.dlpack import to_dlpack
tensor = to_dlpack(tensor)
tensor = tpack.from_dlpack(tensor)
return tensor | [
757,
24,
3296
] |
def METHOD_NAME(cobbler_api: CobblerAPI):
"""
Test that validates the catalog subroutine.
"""
# Arrange
test_status = status.CobblerStatusReport(cobbler_api, "text")
expected_result = InstallStatus()
expected_result.most_recent_start = 0
expected_result.most_recent_stop = -1
expected_result.most_recent_target = "system:test"
expected_result.seen_start = 0
expected_result.seen_stop = -1
expected_result.state = "?"
# Act
test_status.catalog("system", "test", "192.168.0.1", "start", 0.0)
# Assert
assert "192.168.0.1" in test_status.ip_data
assert test_status.ip_data["192.168.0.1"] == expected_result | [
9,
2824
] |
def METHOD_NAME():
"""Execute main client."""
cli(ap, main) | [
-1
] |
def METHOD_NAME(test):
"""Phase with dimensioned measurements."""
for dim in range(5):
test.measurements.METHOD_NAME[dim] = 1 << dim
for x, y, z in zip(
list(range(1, 5)), list(range(21, 25)), list(range(101, 105))):
test.measurements.lots_of_dims[x, y, z] = x + y + z | [
5164
] |
def METHOD_NAME(self, doi, ref=None, extra_args=None):
"""Trigger this provider for things that resolve to a Dataverse dataset.
Handles:
- DOI pointing to {siteURL}/dataset.xhtml?persistentId={persistentId}
- DOI pointing to {siteURL}/file.xhtml?persistentId={persistentId}&...
- URL {siteURL}/api/access/datafile/{fileId}
Examples:
- https://dataverse.harvard.edu/api/access/datafile/3323458
- doi:10.7910/DVN/6ZXAGT
- doi:10.7910/DVN/6ZXAGT/3YRRYJ
"""
url = self.doi2url(doi)
# Parse the url, to get the base for later API calls
parsed_url = urlparse(url)
# Check if the url matches any known Dataverse installation, bail if not.
host = next(
(
host
for host in self.hosts
if urlparse(host["url"]).netloc == parsed_url.netloc
),
None,
)
if host is None:
return
query_args = parse_qs(parsed_url.query)
# Corner case handling
if parsed_url.path.startswith("/file.xhtml"):
# There's no way of getting file information using its persistentId, the only thing we can do is assume that doi
# is structured as "doi:<dataset_doi>/<file_doi>" and try to handle dataset that way.
new_doi = doi.rsplit("/", 1)[0]
if new_doi == doi:
# tough luck :( Avoid infinite recursion and exit.
return
return self.METHOD_NAME(new_doi)
elif parsed_url.path.startswith("/api/access/datafile"):
# Raw url pointing to a datafile is a typical output from an External Tool integration
entity_id = os.path.basename(parsed_url.path)
search_query = "q=entityId:" + entity_id + "&type=file"
# Knowing the file identifier query search api to get parent dataset
search_url = urlunparse(
parsed_url._replace(path="/api/search", query=search_query)
)
self.log.debug("Querying Dataverse: " + search_url)
data = self.urlopen(search_url).json()["data"]
if data["count_in_response"] != 1:
self.log.debug(
f"Dataverse search query failed!\n - doi: {doi}\n - url: {url}\n - resp: {json.dump(data)}\n"
)
return
self.record_id = deep_get(data, "items.0.dataset_persistent_id")
elif (
parsed_url.path.startswith("/dataset.xhtml")
and "persistentId" in query_args
):
self.record_id = deep_get(query_args, "persistentId.0")
if hasattr(self, "record_id"):
return {"record": self.record_id, "host": host} | [
2991
] |
def METHOD_NAME(self):
node = parser.DataNode('foo')
self.assertEqual(node.data, 'foo') | [
9,
144,
365
] |
def METHOD_NAME(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location, splitter, num_partitions)
spatial_rdd = self.create_rectangle_rdd(input_location, splitter, num_partitions)
self.partition_rdds(query_rdd, spatial_rdd, grid_type)
result = JoinQuery.SpatialJoinQuery(
spatial_rdd, query_rdd, False, True).collect()
count = 0
for el in result:
count += el[1].__len__()
self.sanity_check_join_results(result)
expected_count = match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else match_count
assert expected_count == self.count_join_results(result) | [
9,
612,
1751
] |
def METHOD_NAME():
return [
np.random.randint(0, num_classes, size=np.random.randint(2, 8, size=(3,)), dtype=np.int32)
for _ in range(batch_size)
] | [
236,
1529,
4261,
2277
] |
def METHOD_NAME(values):
"""Generic encoder for data stored using two bits.
This returns an unsigned integer array containing encoded sample
values that range from 0 to 3. The conversion from floating point
sample value to unsigned int is given below, with
``lv = TWO_BIT_1_SIGMA = 2.1745``:
================= ======
Input range Output
================= ======
value < -lv 0
-lv < value < 0. 2
0. < value < lv 1
lv < value 3
================= ======
This does not pack the samples into bytes.
"""
# Optimized for speed by doing calculations in-place, and ensuring that
# the dtypes match.
values = np.clip(values, clip_low, clip_high)
values += two_bit_2_sigma
bitvalues = np.empty(values.shape, np.uint8)
return np.floor_divide(values, TWO_BIT_1_SIGMA, out=bitvalues,
casting='unsafe') | [
421,
8418,
414
] |
def METHOD_NAME(s: str, expected: List[str]) -> None:
symbols = mana.parse(s)
works = symbols == expected
if not works:
print('\nInput: {s}\nExpected: {expected}\n Actual: {actual}'.format(s=s, expected=expected, actual=symbols))
assert works | [
74,
9
] |
def METHOD_NAME():
parser = ArgumentParser(
description='Convert MMPose models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.METHOD_NAME()
return args | [
214,
335
] |