| text (string, lengths 15-7.82k) | ids (sequence, lengths 1-7) |
|---|---|
def METHOD_NAME(self, Y, category):
r"""
Return the homset from ``self`` to ``Y`` in the category ``category``.
INPUT:
- ``Y`` -- a Hecke module
- ``category`` -- a subcategory of :class:`HeckeModules()
<HeckeModules>` or ``None``
The sole purpose of this method is to construct the homset
as a :class:`~sage.modular.hecke.homspace.HeckeModuleHomspace`. If
``category`` is specified and is not a subcategory of
:class:`HeckeModules`, a ``TypeError`` is raised instead.
This method is not meant to be called directly. Please use
:func:`sage.categories.homset.Hom` instead.
EXAMPLES::
sage: # needs sage.modular
sage: M = ModularForms(Gamma0(7), 4)
sage: H = M._Hom_(M, category=HeckeModules(QQ)); H
Set of Morphisms
from Modular Forms space of dimension 3 for Congruence Subgroup Gamma0(7) of weight 4 over Rational Field
to Modular Forms space of dimension 3 for Congruence Subgroup Gamma0(7) of weight 4 over Rational Field
in Category of Hecke modules over Rational Field
sage: H.__class__
<class 'sage.modular.hecke.homspace.HeckeModuleHomspace_with_category'>
sage: TestSuite(H).run(skip=["_test_elements", "_test_an_element", "_test_elements_eq",
....: "_test_elements_eq_reflexive", "_test_elements_eq_transitive",
....: "_test_elements_eq_symmetric", "_test_elements_neq", "_test_some_elements",
....: "_test_zero", "_test_additive_associativity",
....: "_test_one", "_test_associativity", "_test_prod"])
Fixing :meth:`_test_zero` (``__call__`` should accept a
function as input) and :meth:`_test_elements*` (modular
form morphisms elements should inherit from categories) is
:trac:`12879`.
TESTS::
sage: H = M._Hom_(M, category=HeckeModules(GF(5))); H # needs sage.modular sage.rings.finite_rings
Traceback (most recent call last):
...
TypeError: Category of Hecke modules over Finite Field of size 5
is not a subcategory of Category of Hecke modules over Rational Field
"""
# TODO: double check that it's the correct HeckeModules category below:
if category is not None and not category.is_subcategory(HeckeModules(self.base_ring())):
raise TypeError("%s is not a subcategory of %s"%(category, HeckeModules(self.base_ring())))
from sage.modular.hecke.homspace import HeckeModuleHomspace
return HeckeModuleHomspace(self, Y, category=category) | [
11145
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(
app_with_extensions,
staff_user,
permission_manage_apps,
permission_manage_products,
permission_manage_channels,
site_settings,
):
# given
staff_user.user_permissions.set([permission_manage_products])
audience = f"https://{site_settings.site.domain}.com/app-123"
app, extensions = app_with_extensions
app.audience = audience
app.save()
extension = extensions[0]
extension.permissions.set(
[permission_manage_channels, permission_manage_apps, permission_manage_products]
)
# when
access_token = create_access_token_for_app_extension(
app_extension=extension,
permissions=extension.permissions.all(),
user=staff_user,
app=app,
)
# then
decoded_token = jwt_decode(access_token, verify_expiration=False)
assert decoded_token["permissions"] == ["MANAGE_PRODUCTS"]
_, decode_extension_id = graphene.Node.from_global_id(
decoded_token["app_extension"]
)
_, decode_app_id = graphene.Node.from_global_id(decoded_token["app"])
assert decoded_token["user_permissions"] == ["MANAGE_PRODUCTS"]
assert int(decode_extension_id) == extension.id
assert int(decode_app_id) == app.id
assert decoded_token["aud"] == audience | [
9,
129,
1089,
466,
43,
991,
2916
] |
def METHOD_NAME(self, text: str) -> None:
if self._filter_box.isHidden():
return
self.filter_signal.emit(text) | [
69,
1070,
526,
1180
] |
def METHOD_NAME(self):
# TODO: Delete storage objects: uploads, outputs
# TODO: Delete images from registry
while True:
db = dbpool.get()
try:
logger.info('Starting next GC run')
self._gc(db)
logger.info('Finished GC run')
logger.info('')
except Exception as e:
logger.exception(e)
finally:
dbpool.put(db)
time.sleep(3600) | [
22
] |
def METHOD_NAME(self, output_chain):
msg = (
"In chain {}, left end of first domino ({}) and "
"right end of last domino ({}) must match"
)
msg = msg.format(output_chain, output_chain[0], output_chain[-1])
self.assertEqual(output_chain[0][0], output_chain[-1][1], msg) | [
638,
-1,
1541,
1531,
590
] |
def METHOD_NAME(make_napari_viewer):
"""Test adding key bindings to a layer"""
np.random.seed(0)
viewer = make_napari_viewer()
canvas = viewer.window._qt_viewer.canvas
layer = viewer.add_image(np.random.random((10, 20)))
viewer.layers.selection.add(layer)
mock_press = Mock()
mock_release = Mock()
mock_shift_press = Mock()
mock_shift_release = Mock()
@layer.bind_key('F')
def key_callback(_layer):
assert layer == _layer
# on press
mock_press.method()
yield
# on release
mock_release.method()
@layer.bind_key('Shift-F')
def key_shift_callback(_layer):
assert layer == _layer
# on press
mock_shift_press.method()
yield
# on release
mock_shift_release.method()
# Simulate press only
canvas._scene_canvas.events.key_press(key=keys.Key('F'))
mock_press.method.assert_called_once()
mock_press.reset_mock()
mock_release.method.assert_not_called()
mock_shift_press.method.assert_not_called()
mock_shift_release.method.assert_not_called()
# Simulate release only
canvas._scene_canvas.events.key_release(key=keys.Key('F'))
mock_press.method.assert_not_called()
mock_release.method.assert_called_once()
mock_release.reset_mock()
mock_shift_press.method.assert_not_called()
mock_shift_release.method.assert_not_called()
# Simulate press only
canvas._scene_canvas.events.key_press(
key=keys.Key('F'), modifiers=[keys.SHIFT]
)
mock_press.method.assert_not_called()
mock_release.method.assert_not_called()
mock_shift_press.method.assert_called_once()
mock_shift_press.reset_mock()
mock_shift_release.method.assert_not_called()
# Simulate release only
canvas._scene_canvas.events.key_release(
key=keys.Key('F'), modifiers=[keys.SHIFT]
)
mock_press.method.assert_not_called()
mock_release.method.assert_not_called()
mock_shift_press.method.assert_not_called()
mock_shift_release.method.assert_called_once()
mock_shift_release.reset_mock() | [
9,
94,
59,
5992
] |
def METHOD_NAME(s):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
Source: https://github.com/django/django/blob/master/django/utils/text.py
"""
s = str(s).strip().replace(" ", "_")
return re.sub(r"(?u)[^-\w.]", "", s) | [
19,
1205,
1147
] |
def METHOD_NAME(self):
"""
Access non existing secret, expect error
"""
tag = "?{vaultkv:secret/joker}"
env = {"auth": "token"}
file_data = "foo:some_random_value".encode()
REF_CONTROLLER[tag] = VaultSecret(file_data, env)
# confirming secret file exists
self.assertTrue(
os.path.isfile(os.path.join(REFS_HOME, "secret/joker")), msg="Secret file doesn't exist"
)
file_with_secret_tags = tempfile.mktemp()
with open(file_with_secret_tags, "w") as fp:
fp.write("File contents revealed: {}".format(tag))
with self.assertRaises(VaultError):
REVEALER.reveal_raw_file(file_with_secret_tags) | [
9,
78,
1038,
444
] |
def METHOD_NAME(self):
value, _ = extract_timestamp(self._test_line_2)
expected = datetime(2016, 11, 30, 16, 17, 19, 81000)
self.assertEqual(expected, value) | [
9,
534,
988
] |
def METHOD_NAME():
"""#328: first member in a class can't be used in operators"""
a = m.NestA()
b = m.NestB()
c = m.NestC()
a += 10
assert m.get_NestA(a) == 13
b.a += 100
assert m.get_NestA(b.a) == 103
c.b.a += 1000
assert m.get_NestA(c.b.a) == 1003
b -= 1
assert m.get_NestB(b) == 3
c.b -= 3
assert m.get_NestB(c.b) == 1
c *= 7
assert m.get_NestC(c) == 35
abase = a.as_base()
assert abase.value == -2
a.as_base().value += 44
assert abase.value == 42
assert c.b.a.as_base().value == -2
c.b.a.as_base().value += 44
assert c.b.a.as_base().value == 42
del c
pytest.gc_collect()
del a # Shouldn't delete while abase is still alive
pytest.gc_collect()
assert abase.value == 42
del abase, b
pytest.gc_collect() | [
9,
612
] |
def METHOD_NAME(server: str, group: str, level, members: tuple[Incomplete, Incomplete]) -> None: ... | [
819,
846,
0,
3467
] |
def METHOD_NAME(self, request, spider):
if request.meta.get("dont_merge_cookies", False):
return
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
cookies = self._get_request_cookies(jar, request)
self._process_cookies(cookies, jar=jar, request=request)
# set Cookie header
request.headers.pop("Cookie", None)
jar.add_cookie_header(request)
self._debug_cookie(request, spider) | [
356,
377
] |
def METHOD_NAME(
model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None
) -> PyTorchFasterRCNN:
num_classes = model_kwargs.pop("num_classes", 3)
frcnn_kwargs = {
arg: model_kwargs.pop(arg)
for arg in ["min_size", "max_size"]
if arg in model_kwargs
}
backbone = MultimodalNaive(**model_kwargs)
model = FasterRCNN(
backbone,
num_classes=num_classes,
image_mean=[0.485, 0.456, 0.406, 0.0, 0.0, 0.0],
image_std=[0.229, 0.224, 0.225, 1.0, 1.0, 1.0],
**frcnn_kwargs,
)
model.to(DEVICE)
if weights_path:
checkpoint = torch.load(weights_path, map_location=DEVICE)
model.load_state_dict(checkpoint)
wrapped_model = PyTorchFasterRCNN(
model,
clip_values=(0.0, 1.0),
channels_first=False,
**wrapper_kwargs,
)
return wrapped_model | [
19,
10744,
578,
3074
] |
def METHOD_NAME(self, data, package_name):
affected_version_range = None
unaffected_version_range = None
fixed_version = None
vulnerable_range = data.get("vulnerable_versions") or ""
patched_range = data.get("patched_versions") or ""
# https://github.com/nodejs/security-wg/blob/cfaa51cc5c83f01eea61b69658f7bc76a77c5979/vuln/npm/213.json#L14
if vulnerable_range == "<=99.999.99999":
vulnerable_range = "*"
if vulnerable_range:
affected_version_range = NpmVersionRange.from_native(vulnerable_range)
# https://github.com/nodejs/security-wg/blob/cfaa51cc5c83f01eea61b69658f7bc76a77c5979/vuln/npm/213.json#L15
if patched_range == "<0.0.0":
patched_range = None
if patched_range:
unaffected_version_range = NpmVersionRange.from_native(patched_range)
# We only store single fixed versions and not a range of fixed versions
# If there is a single constraint in the unaffected_version_range
# having comparator as ">=" then we store that as the fixed version
if unaffected_version_range and len(unaffected_version_range.constraints) == 1:
constraint = unaffected_version_range.constraints[0]
if constraint.comparator == ">=":
fixed_version = constraint.version
return AffectedPackage(
package=PackageURL(
type="npm",
name=package_name,
),
affected_version_range=affected_version_range,
fixed_version=fixed_version,
) | [
19,
859,
360
] |
def METHOD_NAME(t):
day = DayWithinYear(t)
leap = InLeapYear(t)
if day < 31:
return 0
day -= leap
if day < 59:
return 1
elif day < 90:
return 2
elif day < 120:
return 3
elif day < 151:
return 4
elif day < 181:
return 5
elif day < 212:
return 6
elif day < 243:
return 7
elif day < 273:
return 8
elif day < 304:
return 9
elif day < 334:
return 10
else:
return 11 | [
1485,
280,
104
] |
def METHOD_NAME(self, model):
input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
output_file_name = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
text = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
tmp_dir = Path(self.get_auto_remove_tmp_dir())
score_path = str(tmp_dir / "scores.json")
reference_path = str(tmp_dir / "val.target")
_dump_articles(input_file_name, text["en"])
_dump_articles(reference_path, text["de"])
task = "translation_en_to_de" if model == T5_TINY else "summarization"
testargs = f"""
run_eval_search.py
{model}
{str(input_file_name)}
{str(output_file_name)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
with patch.object(sys, "argv", testargs):
with CaptureStdout() as cs:
run_search()
expected_strings = [" num_beams | length_penalty", model, "Best score args"]
un_expected_strings = ["Info"]
if "translation" in task:
expected_strings.append("bleu")
else:
expected_strings.extend(ROUGE_KEYS)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(output_file_name).exists()
os.remove(Path(output_file_name)) | [
9,
22,
1171,
1070
] |
def METHOD_NAME(self):
for file in self.files:
os.unlink(
os.path.join(
settings.BASE_DIR,
'files',
'articles',
self.pk_string,
file.uuid_filename,
)
) | [
531,
481
] |
def METHOD_NAME(workspace_setting_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkspaceSettingResult]:
"""
Settings about where we should store your security data and logs. If the result is empty, it means that no custom-workspace configuration was set
Azure REST API version: 2017-08-01-preview.
:param str workspace_setting_name: Name of the security setting
"""
... | [
19,
1976,
1333,
146
] |
def METHOD_NAME(self, value):
self.settings.display_flags = value | [
0,
52,
1106
] |
def METHOD_NAME(
dim,
size1D,
banks,
mem_type,
test_name,
unroll_map_inside=False,
):
in1, in2, expected, target = create_test_set(dim, size1D, banks)
sdfg = create_vadd_multibank_sdfg(banks, dim, unroll_map_inside, mem_type, test_name)
if (dim == 1):
sdfg(in1=in1, in2=in2, out=target, N=size1D)
elif (dim == 2):
sdfg(in1=in1, in2=in2, out=target, N=size1D, M=size1D)
else:
sdfg(in1=in1, in2=in2, out=target, N=size1D, M=size1D, S=size1D)
assert np.allclose(expected, target, rtol=1e-6)
return sdfg | [
1005,
9
] |
def METHOD_NAME(self, name: str) -> str:
return underscore(name) | [
93,
58,
156
] |
def METHOD_NAME(ibs, aid_list, config2_=None, **kwargs):
"""
very hacky, but cute way to cache keypoint distinctiveness
Args:
ibs (IBEISController): wbia controller object
aid_list (list):
dstncvs_normer (None):
Returns:
list: dstncvs_list
CommandLine:
python -m wbia.control.manual_wbiacontrol_funcs --test-get_annot_kpts_distinctiveness
Example:
>>> # SLOW_DOCTEST
>>> # xdoctest: +SKIP
>>> from wbia.control.manual_wbiacontrol_funcs import * # NOQA
>>> from wbia.algo.hots import distinctiveness_normalizer
>>> import wbia
>>> import numpy as np
>>> config2_ = None
>>> # build test data
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids(species=const.TEST_SPECIES.ZEB_PLAIN)
>>> # execute function
>>> aid_list1 = aid_list[::2]
>>> aid_list2 = aid_list[1::3]
>>> dstncvs_list1 = get_annot_kpts_distinctiveness(ibs, aid_list1)
>>> dstncvs_list2 = get_annot_kpts_distinctiveness(ibs, aid_list2)
>>> dstncvs_list = get_annot_kpts_distinctiveness(ibs, aid_list)
>>> print(ut.depth_profile(dstncvs_list1))
>>> stats_dict = ut.dict_stack([ut.get_stats(dstncvs) for dstncvs in dstncvs_list])
>>> print(ut.repr2(stats_dict))
>>> assert np.all(np.array(stats_dict['min']) >= 0), 'distinctiveness was out of bounds'
>>> assert np.all(np.array(stats_dict['max']) <= 1), 'distinctiveness was out of bounds'
"""
from wbia.algo.hots import distinctiveness_normalizer as dcvs_normer
# per-species distinctiveness wrapper around wbia cached function
# get feature rowids
aid_list = np.array(aid_list)
fid_list = np.array(
ibs.get_annot_feat_rowids(
aid_list, ensure=True, eager=True, nInput=None, config2_=config2_
)
)
species_rowid_list = np.array(ibs.get_annot_species_rowids(aid_list))
# Compute distinctiveness separately for each species
unique_sids, groupxs = vt.group_indices(species_rowid_list)
fids_groups = vt.apply_grouping(fid_list, groupxs)
species_text_list = ibs.get_species_texts(unique_sids)
# Map distinctivness computation
normer_list = [
dcvs_normer.request_species_distinctiveness_normalizer(species)
for species in species_text_list
]
# Reduce to get results
dstncvs_groups = [
get_feat_kpts_distinctiveness(
ibs, fids, dstncvs_normer=dstncvs_normer, species_rowid=sid, **kwargs
)
for dstncvs_normer, fids, sid in zip(normer_list, fids_groups, unique_sids)
]
dstncvs_list = vt.invert_apply_grouping(dstncvs_groups, groupxs)
return dstncvs_list | [
19,
4000,
13092,
-1
] |
def METHOD_NAME(self):
# should return matched substring
pattern = re.compile(r"how")
patternmatch = self.inputscanner.readUntilAfter(pattern)
self.assertEqual(patternmatch, "how") | [
9,
203,
1238,
1887
] |
def METHOD_NAME(self):
@cuda.jit
def f(r, x):
iv1 = Interval(x[0], x[1])
iv2 = Interval(x[2], x[3])
iv_sum = sum_intervals(iv1, iv2)
r[0] = iv_sum.lo
r[1] = iv_sum.hi
x = np.asarray((1.5, 2.5, 3.0, 4.0))
r = np.zeros(2)
f[1, 1](r, x)
expected = np.asarray((x[0] + x[2], x[1] + x[3]))
np.testing.assert_allclose(r, expected) | [
9,
2916,
44,
947,
-1
] |
def METHOD_NAME(tflite_mobilenet_v1_1_quant, tflite_compile_model):
pytest.importorskip("tflite")
tflite_compiled_model_mlf = tflite_compile_model(
tflite_mobilenet_v1_1_quant,
target="c",
executor=backend.Executor("aot"),
output_format="mlf",
pass_context_configs=["tir.disable_vectorize=1"],
)
# Compile and export a model to a MLF archive so it can be imported.
exported_tvmc_package = tflite_compiled_model_mlf
archive_path = exported_tvmc_package.package_path
# Import the MLF archive. TVMCPackage constructor will call import_package method.
tvmc_package = TVMCPackage(archive_path)
assert tvmc_package.lib_name is None, ".lib_name must not be set in the MLF archive."
assert tvmc_package.lib_path is None, ".lib_path must not be set in the MLF archive."
assert tvmc_package.graph is None, ".graph must not be set in the MLF archive for AOT executor."
assert tvmc_package.params is not None, ".params must be set in the MLF archive."
assert tvmc_package.type == "mlf", ".type must be set to 'mlf' in the MLF format." | [
9,
13638,
512,
360,
16534,
4442
] |
def METHOD_NAME(self) -> ID:
return self.peer_info.METHOD_NAME | [
502,
147
] |
def METHOD_NAME(self): | [
9
] |
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.workspace_name = AAZStrArg(
options=["-n", "--name", "--workspace-name"],
help="The name of the workspace.",
required=True,
fmt=AAZStrArgFormat(
pattern="^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$",
max_length=63,
min_length=4,
),
)
return cls._args_schema | [
56,
134,
135
] |
def METHOD_NAME(mesh2d, spaces):
(name, order), (vname, vorder) = spaces
return utility.get_functionspace(mesh2d, name, order, vector=True) | [
3597,
1085
] |
def METHOD_NAME(self, plot=None):
if plot is None:
plot = self.plot
# Load the image with the user supplied filename
image = ImageData.fromfile(self._load_file)
# Update the plot data. NB we must extract _data from the image
# for the time being, until ImageData is made more friendly
self.pd.set_data("imagedata", image._data)
# Set the title and redraw
plot.title = os.path.basename(self._load_file)
plot.request_redraw() | [
557
] |
def METHOD_NAME(self):
self.aedtapp.insert_design("sweepsbr")
self.aedtapp.solution_type = "SBR+"
self.aedtapp.insert_infinite_sphere()
setup1 = self.aedtapp.create_setup("My_HFSS_Setup4", self.aedtapp.SETUPS.HFSSSBR)
assert setup1.add_subrange("LinearStep", 1, 10, 0.1, clear=False)
assert setup1.add_subrange("LinearCount", 10, 20, 10, clear=True) | [
9,
11496,
981,
-1
] |
def METHOD_NAME(date):
d = dt.strptime(date, "%Y%m%d")
yy = float(d.year) + float(d.month-1)/12 + float(d.day-1)/365
return yy | [
-1
] |
def METHOD_NAME(self):
# Folders not used for header-only
self.cpp_info.bindirs = []
self.cpp_info.libdirs = []
# Set these to the appropriate values if the package has an official FindPACKAGE.cmake
# listed in https://cmake.org/cmake/help/latest/manual/cmake-modules.7.html#find-modules
# examples: bzip2, freetype, gdal, icu, libcurl, libjpeg, libpng, libtiff, openssl, sqlite3, zlib...
self.cpp_info.set_property("cmake_module_file_name", "ChefFun")
self.cpp_info.set_property("cmake_module_target_name", "ChefFun::ChefFun")
# Set these to the appropriate values if package provides a CMake config file
# (package-config.cmake or packageConfig.cmake, with package::package target, usually installed in <prefix>/lib/cmake/<package>/)
self.cpp_info.set_property("cmake_file_name", "chef-fun")
self.cpp_info.set_property("cmake_target_name", "chef-fun::chef-fun")
# Set this to the appropriate value if the package provides a pkgconfig file
# (package.pc, usually installed in <prefix>/lib/pkgconfig/)
self.cpp_info.set_property("pkg_config_name", "chef-fun")
## Add m, pthread and dl if needed in Linux/FreeBSD
#if self.settings.os in ["Linux", "FreeBSD"]:
# self.cpp_info.system_libs.extend(["dl", "m", "pthread"])
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "chef-fun"
self.cpp_info.names["cmake_find_package_multi"] = "chef-fun"
self.cpp_info.filenames["cmake_find_package"] = "chef-fun"
self.cpp_info.filenames["cmake_find_package_multi"] = "chef-fun" | [
360,
100
] |
def METHOD_NAME(self):
value = ""
cube = self.run_testcase(ukmo__um_stash_source=value)
self.assertNotIn("STASH", cube.attributes)
self.assertEqual(cube.attributes["ukmo__um_stash_source"], value) | [
9,
11336,
35
] |
def METHOD_NAME(
self,
domain: Domain,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
runtime_configuration: Optional[dict] = None,
) -> Attributes:
"""
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and details.
Returns:
Attributes object, containing computed parameter values and parameter computation details metadata.
"""
fully_qualified_column_values_nonnull_count_metric_parameter_builder_name: str = f"{RAW_PARAMETER_KEY}{self._column_values_nonnull_count_metric_single_batch_parameter_builder_config.name}"
# Obtain "column_values.nonnull.count" from "rule state" (i.e., variables and parameters); from instance variable otherwise.
column_values_nonnull_count_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=fully_qualified_column_values_nonnull_count_metric_parameter_builder_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
fully_qualified_column_value_counts_metric_single_batch_parameter_builder_name: str = f"{RAW_PARAMETER_KEY}{self._column_value_counts_metric_single_batch_parameter_builder_config.name}"
# Obtain "column.value_counts" from "rule state" (i.e., variables and parameters); from instance variable otherwise.
column_value_counts_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=fully_qualified_column_value_counts_metric_single_batch_parameter_builder_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
values: list = list(
column_value_counts_parameter_node[
FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY
].index
)
weights: np.ndarray = np.asarray(
column_value_counts_parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY]
) / (
column_values_nonnull_count_parameter_node[
FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY
]
+ NP_EPSILON
)
partition_object: dict = {
"values": values,
"weights": weights.tolist(),
}
details: dict = column_value_counts_parameter_node[
FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY
]
return Attributes(
{
FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY: partition_object,
FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY: details,
}
) | [
56,
386
] |
def METHOD_NAME(
model: Input[BQMLModel],
model_destination_path: str,
exported_model_path: OutputPath(str),
gcp_resources: OutputPath(str),
location: str = 'us-central1',
job_configuration_extract: Dict[str, str] = {},
labels: Dict[str, str] = {},
project: str = _placeholders.PROJECT_ID_PLACEHOLDER, | [
4701,
294,
578,
202
] |
def METHOD_NAME(sch, out):
"""Schedule for injective op from existing schedule.
Parameters
----------
sch: Schedule
The schedule to update.
out: Tensor
The tensor representing the injective op.
Returns
-------
sch: Schedule
The updated schedule.
"""
def find_nearest_small_factor(num, target):
"""Find the nearest factor of the given number that is smaller than the target."""
for i in range(target, 0, -1):
if num % i == 0:
return i
# Unreachable because i=1 must hold.
return -1
fused = sch[out].fuse(*sch[out].op.axis)
num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
max_block = 256
# Vectorize on fp16 data type to enable half2 for better memory bandwidth utilization.
vector_width = 2 if out.dtype == "float16" else 1
is_dynamic_output = False
for dim in out.shape:
if not isinstance(dim, tvm.tir.IntImm):
is_dynamic_output = True
break
out_len = utils.prod(out.shape)
try:
const_size = utils.get_const_int(out_len)
# Adjust block and thread to make sure they are dividable so that vectorize can be
# correctly applied.
if vector_width > 1 and const_size % vector_width == 0:
remain_total_size = const_size // vector_width
cand_sizes = []
for max_size in [num_thread, max_block]:
cand_sizes.append(
max_size
if remain_total_size % max_size == 0
else find_nearest_small_factor(remain_total_size, max_size)
)
remain_total_size //= cand_sizes[-1]
# If the product of candidate dividable (block * thread) is too small,
# then the performance may be worse even half2 is enabled. Note that 0.7
# is just a heuristic ratio and may not be optimal for all workloads.
if np.prod(cand_sizes) / (max_block * num_thread) >= 0.7:
num_thread, max_block = cand_sizes
need_block_split = const_size > max_block * num_thread * vector_width
except ValueError:
need_block_split = False
const_size = 0
if vector_width > 1:
fused, v = sch[out].split(fused, vector_width)
sch[out].vectorize(v)
if need_block_split:
xo, xi = sch[out].split(fused, factor=num_thread * max_block)
bx, tx = sch[out].split(xi, factor=num_thread)
sch[out].reorder(bx, tx, xo)
sch[out].bind(bx, te.thread_axis("blockIdx.x"))
sch[out].bind(tx, te.thread_axis("threadIdx.x"))
else:
# Use less threads for dynamic shape ops to avoid runtime error.
if is_dynamic_output:
num_thread //= 2
if const_size != 0 and const_size < num_thread:
bx, tx = sch[out].split(fused, factor=const_size)
else:
bx, tx = sch[out].split(fused, factor=num_thread)
sch[out].bind(tx, te.thread_axis("threadIdx.x"))
sch[out].bind(bx, te.thread_axis("blockIdx.x"))
return sch | [
507,
10943,
280,
1153
] |
def METHOD_NAME(wc_obj, par, P, lep):
r"""Branching ratio of $P^+\to\ell^+\nu_\ell$."""
return sum([ _br_plnu(wc_obj, par, P, lep, nu) for nu in ['e','mu','tau']]) | [
1369,
14615
] |
def METHOD_NAME(self, y: Tensor) -> Tensor:
"""
Inverse transformation of `iResnet`
Parameters
----------
y
input tensor
Returns
-------
Tensor
transformed tensor `\text{iResnet}^{-1}(y)`
"""
if y is self._cached_y:
return self._cached_x
x = y + self._block(y)
if self._use_caching:
self._cached_x = x
self._cached_y = y
return x | [
474,
5862
] |
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags") | [
114
] |
def METHOD_NAME(self):
data = self.cleaned_data["mimetypes"]
if data:
return self.clean_list(data) | [
1356,
4890
] |
def METHOD_NAME(self):
infile = self._create_infile()
rc, out, err = assert_python_ok('-m', 'json.tool', infile)
self.assertEqual(rc, 0)
self.assertEqual(out.splitlines(), self.expect.encode().splitlines())
self.assertEqual(err, b'') | [
9,
8238,
720
] |
def METHOD_NAME(aggregator, instance):
exception_msg = (
'[IBM][CLI Driver][DB2/NT64] SQL0440N No authorized routine named "MON_GET_INSTANCE" of type '
'"FUNCTION" having compatible arguments was found. SQLSTATE=42884'
)
def query_instance(*args, **kwargs):
raise Exception(exception_msg)
ibmdb2 = IbmDb2Check('ibm_db2', {}, [instance])
ibmdb2.log = mock.MagicMock()
ibmdb2._conn = mock.MagicMock()
ibmdb2.get_connection = mock.MagicMock()
ibmdb2.query_instance = query_instance
with pytest.raises(Exception):
ibmdb2.query_instance()
ibmdb2.log.warning.assert_called_with('Encountered error running `%s`: %s', 'query_instance', exception_msg) | [
9,
539,
559,
168
] |
def METHOD_NAME(self):
self.run.archived = False | [
531,
481
] |
def METHOD_NAME(builder, declarationFile):
EnumAddDeclarationFile(builder, declarationFile) | [
238,
1134,
171
] |
def METHOD_NAME(self, emulator, filename, md5sum, size, strict_md5_check=True):
"""
Search an image based on its MD5 checksum
:param emulator: Emulator type
:param filename: Image filename (used for ova in order to return the correct file in the archive)
:param md5sum: Hash of the image
:param size: File size
:param strict_md5_check: If `True` then performs MD5 checksum checks, otherwise ignores them
:returns: Image object or None
"""
for remote_image in list(self._remote_images):
if remote_image.md5sum == md5sum:
return remote_image
elif md5sum is None or strict_md5_check is False: # We create a new version or allow custom files
if filename == remote_image.filename:
return remote_image
for directory in self._images_dirs:
log.debug("Search image {} (MD5={} SIZE={}) in '{}'".format(filename, md5sum, size, directory))
if os.path.exists(directory):
try:
for file in os.listdir(directory):
if not file.endswith(".md5sum") and not file.startswith("."):
path = os.path.join(directory, file)
if os.path.isfile(path):
if md5sum is None or strict_md5_check is False:
if filename == os.path.basename(path):
return Image(emulator, path)
else:
# We take all the file with almost the size of the image
# Almost to avoid round issue with system.
file_size = os.stat(path).st_size
if size is None or (file_size - 10 < size and file_size + 10 > size):
image = Image(emulator, path)
if image.md5sum == md5sum:
log.debug("Found image {} (MD5={}) in {}".format(filename, md5sum, image.path))
return image
except (OSError, PermissionError) as e:
log.error("Cannot scan {}: {}".format(path, e))
return None | [
1070,
660,
171
] |
def METHOD_NAME():
def wrapper_maker(channel):
def wrapper(f, args, kwargs):
channel.append(True)
__ddgen = f(*args, **kwargs)
__ddgensend = __ddgen.send
try:
value = next(__ddgen)
channel.append(value)
except StopIteration:
return
while True:
try:
tosend = yield value
except GeneratorExit:
channel.append("GeneratorExit")
__ddgen.close()
raise GeneratorExit()
except: # noqa
channel.append(sys.exc_info()[0])
value = __ddgen.throw(*sys.exc_info())
channel.append(value)
else:
try:
value = __ddgensend(tosend)
channel.append(value)
except StopIteration:
return
return wrapper
channel = []
def g():
while True:
try:
yield 0
except ValueError:
yield 1
wrap(g, wrapper_maker(channel))
inspect.isgeneratorfunction(g)
gen = g()
inspect.isgenerator(gen)
for _ in range(10):
assert next(gen) == 0
assert gen.throw(ValueError) == 1
gen.close()
assert channel == [True] + [0, ValueError, 1] * 10 + ["GeneratorExit"] | [
9,
503,
1443,
1471,
1462
] |
def METHOD_NAME(self, database: RedisDatabase) -> RedisClientSDK:
return self._client_sdks[database] | [
340
] |
def METHOD_NAME(model: ov.Model, done_queue: multiprocessing.Queue) -> None:
compiled_model = ov.Core().METHOD_NAME(model, "CPU")
model_stream = compiled_model.export_model()
done_queue.put(model_stream) | [
296,
578
] |
def METHOD_NAME(self):
ba1 = taint_pyobject(
pyobject=bytearray(b"123"), source_name="test", source_value="foo", source_origin=OriginType.PARAMETER
)
ba2 = bytearray(b"456")
result = mod.do_bytearray_extend(ba1, ba2)
assert result == bytearray(b"123456")
assert ba1 == bytearray(b"123456")
ranges = get_tainted_ranges(result)
assert ranges == [TaintRange(0, 3, Source("test", "foo", OriginType.PARAMETER))]
assert get_tainted_ranges(ba1) == [TaintRange(0, 3, Source("test", "foo", OriginType.PARAMETER))]
assert not get_tainted_ranges(ba2) | [
9,
978,
865,
4571
] |
def METHOD_NAME(self, **kwargs):
# maybe interesting: --with(out)-pymalloc disable/enable specialized mallocs
if self.should_include_debug_info:
self.configure_args.append("--with-pydebug")
# XXXAR: always add assertions?
self.configure_args.append("--with-assertions")
if self.compiling_for_cheri():
# computed gotos currently crash the compiler...
self.configure_args.append("--without-computed-gotos")
self.configure_args.append("--without-pymalloc") # use system malloc
else:
self.configure_args.append("--with-computed-gotos")
# fails to cross-compile and does weird stuff on host (uses wrong python version?)
self.configure_args.append("--without-ensurepip")
if self.compiling_for_host() and self.compiling_for_cheri():
self.check_required_system_tool("/usr/local64/bin/python3.8", freebsd="python38", compat_abi=True)
# Can't use the local python build for bootstrapping tasks yet:
self.add_configure_vars(PYTHON_FOR_BUILD="/usr/local64/bin/python3.8")
self.add_configure_vars(PYTHON_FOR_REGEN="/usr/local64/bin/python3.8")
if not self.compiling_for_host():
self.configure_args.append("--without-doc-strings") # should reduce size
native_python = self.get_instance_for_cross_target(CompilationTargets.NATIVE_NON_PURECAP,
self.config).install_dir / "bin/python3"
if not native_python.exists():
self.dependency_error("Native python3 doesn't exist, you must build the `python-native` target first.",
cheribuild_target="python",
cheribuild_xtarget=CompilationTargets.NATIVE_NON_PURECAP)
self.add_configure_vars(
ac_cv_buggy_getaddrinfo="no",
# Doesn't work since that remove all flags, need to set PATH instead
# PYTHON_FOR_BUILD=str(native_python),
# PYTHON_FOR_REGEN=str(native_python),
PATH=str(native_python.parent) + ":" + os.getenv("PATH"),
READELF=str(self.sdk_bindir / "llvm-readelf"),
AR=str(self.sdk_bindir / "llvm-ar"),
ac_cv_file__dev_ptmx="no", # no /dev/ptmx file on cheribsd
ac_cv_file__dev_ptc="no", # no /dev/ptc file on cheribsd
)
# self.configure_environment["ac_cv_file__dev_ptmx+set"] = "set"
# self.configure_environment["ac_cv_file__dev_ptc+set"] = "set"
# TODO: do I need to set? ac_sys_release=13.0
super().METHOD_NAME(**kwargs) | [
111
] |
def METHOD_NAME(self):
time.sleep(3)
print("\n")
show_mem("AT THE END ")
print("\n")
missing = self.mem_at_start - free_mem()
assert missing <= 0.1* self.mem_at_start, "possible mem leak: %s at start, missing %s" % (self.mem_at_start, missing) | [
531,
481
] |
def METHOD_NAME(self):
cookies = {'bb_data': self.helper.real_bb_data}
download_url = "http://tapochek.net/download.php?id=110717"
with patch.object(self.plugin.tracker, 'get_cookies', result=cookies), patch.object(self.plugin.tracker, 'get_download_url', return_value=download_url):
url = 'http://tapochek.net/viewtopic.php?t=174801'
request = self.plugin._prepare_request(TapochekNetTopic(url=url))
self.assertIsNotNone(request)
self.assertEqual(request.headers['referer'], url)
self.assertEqual(request.headers['host'], 'tapochek.net')
self.assertEqual(request.url, 'http://tapochek.net/download.php?id=110717') | [
9,
123,
377
] |
def METHOD_NAME(default_task="translation"):
parser = options.get_parser("Reranking tuning", default_task)
add_reranking_args(parser)
add_tuning_args(parser)
return parser | [
19,
3290,
1319
] |
def METHOD_NAME(self, values: tp.Sequence[TLabel]) -> None:
def property_values(cls: tp.Type[Index], values: tp.Iterable[TLabel]) -> None:
index = cls(values)
# must cast both sides to the dtype, as some int to float conversions result in different floats
self.assertAlmostEqualValues(index.values, np.array(values, dtype=index.values.dtype))
property_values(Index, values)
property_values(IndexGO, values) | [
9,
724,
199,
245
] |
def METHOD_NAME(
proj_name,
user_name,
repo_name,
known_link_fname,
out_link_fname,
url=None,
ml_url=None,
):
"""Check and make link targets
If url is None or ml_url is None, check if there are links present for
these in `known_link_fname`. If not, raise error. The check is:
Look for a target `proj_name`.
Look for a target `proj_name` + ' mailing list'
Also, look for a target `proj_name` + 'github'. If this exists, don't
write this target into the new file below.
If we are writing any of the url, ml_url, or github address, then write new
file with these links, of form:
.. _`proj_name`
.. _`proj_name`: url
.. _`proj_name` mailing list: url
"""
link_contents = open(known_link_fname).readlines()
have_url = url is not None
have_ml_url = ml_url is not None
have_gh_url = None
for line in link_contents:
if not have_url:
match = re.match(r"..\s+_%s:\s+" % proj_name, line)
if match:
have_url = True
if not have_ml_url:
match = re.match(r"..\s+_`%s mailing list`:\s+" % proj_name, line)
if match:
have_ml_url = True
if not have_gh_url:
match = re.match(r"..\s+_`%s github`:\s+" % proj_name, line)
if match:
have_gh_url = True
if not have_url or not have_ml_url:
raise RuntimeError(
"Need command line or known project " "and / or mailing list URLs"
)
lines = []
if url is not None:
lines.append(f".. _{proj_name}: {url}\n")
if not have_gh_url:
gh_url = f"http://github.com/{user_name}/{repo_name}\n"
lines.append(f".. _`{proj_name} github`: {gh_url}\n")
if ml_url is not None:
lines.append(f".. _`{proj_name} mailing list`: {ml_url}\n")
if len(lines) == 0:
# Nothing to do
return
# A neat little header line
lines = [".. %s\n" % proj_name] + lines
out_links = open(out_link_fname, "w")
out_links.writelines(lines)
out_links.close() | [
93,
548,
465
] |
def METHOD_NAME(self):
"""Load HDU as appropriate class.
TODO: this should probably go via an extensible registry.
"""
from gammapy.irf import IRF_REGISTRY
hdu_class = self.hdu_class
filename = self.path()
hdu = self.hdu_name
if hdu_class == "events":
from gammapy.data import EventList
return EventList.read(filename, hdu=hdu)
elif hdu_class == "gti":
from gammapy.data.gti import GTI
return GTI.read(filename, hdu=hdu)
elif hdu_class == "map":
from gammapy.maps import Map
return Map.read(filename, hdu=hdu, format=self.format)
elif hdu_class == "pointing":
# FIXME: support loading the pointing table
from gammapy.data import FixedPointingInfo
return FixedPointingInfo.read(filename, hdu=hdu)
else:
cls = IRF_REGISTRY.get_cls(hdu_class)
return cls.read(filename, hdu=hdu) | [
557
] |
def METHOD_NAME(self): | [
19,
137,
2242
] |
def METHOD_NAME(self):
if not hasattr(self, '_qs'):
METHOD_NAME = super(UserProfileFilter, self).METHOD_NAME
if not hasattr(self, '_users_info'):
qs_filter = {}
if u'courses' in self.data:
qs_filter['user__group__course__id__in'] = self.data.getlist(u'courses')
if u'groups' in self.data:
qs_filter['user__group__id__in'] = self.data.getlist(u'groups')
profiles_info = METHOD_NAME.filter(**qs_filter).values(
'id',
'user__id',
'user__username',
'user__email',
'user__last_name',
'user__first_name',
'user_status__id',
'user_status__name',
'user_status__color'
)
extra_sql = self.get_extra_sql_statuses()
if extra_sql:
profiles_info = profiles_info.extra(where=[extra_sql])
users_info = {}
for info in profiles_info:
if info['user__id'] not in users_info:
users_info[info['user__id']] = defaultdict(dict)
users_info[info['user__id']]['id_profile'] = info['id']
users_info[info['user__id']]['username'] = info['user__username']
users_info[info['user__id']]['email'] = info['user__email']
users_info[info['user__id']]['last_name'] = info['user__last_name']
users_info[info['user__id']]['first_name'] = info['user__first_name']
if info['user_status__id']:
users_info[info['user__id']]['statuses'][info['user_status__id']] = {
'name': info['user_status__name'],
'color': info['user_status__color'],
}
self.users_info = users_info
return self._qs | [
5343
] |
def METHOD_NAME(self, max_iterations=None, tol=1e-7, debug_callback=None):
check_interval = 20
if max_iterations is None:
max_iterations = max(3 * check_interval + 1, 10 * self.operator.shape[0])
real_resid_interval = min(self.operator.shape[0], 50)
iterations = 0
delta_0 = None
while iterations < max_iterations:
compute_real_residual = iterations % real_resid_interval == 0
self.one_iteration(compute_real_residual=compute_real_residual)
if debug_callback is not None:
if compute_real_residual:
what = "it+residual"
else:
what = "it"
debug_callback(
what, iterations, self.x, self.residual, self.d, self.delta
)
# do often enough to allow AsyncInnerProduct
# to progress through (polled) event chain
rdq = self.real_delta_queue
if iterations % check_interval == 0:
if delta_0 is None:
delta_0 = rdq[0].get_host_result()
if delta_0 is not None:
rdq.pop(0)
if delta_0 is not None:
i = 0
while i < len(rdq):
delta = rdq[i].get_host_result()
if delta is not None:
if abs(delta) < tol * tol * abs(delta_0):
if debug_callback is not None:
debug_callback(
"end",
iterations,
self.x,
self.residual,
self.d,
self.delta,
)
return self.x
rdq.pop(i)
else:
i += 1
iterations += 1
raise ConvergenceError("cg failed to converge") | [
22
] |
def METHOD_NAME(self) -> str:
return self.__class__.__name__ | [
156
] |
def METHOD_NAME(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
Connection state of the private endpoint connection
"""
return pulumi.get(self, "private_link_service_connection_state") | [
547,
548,
549,
550,
551
] |
def METHOD_NAME(self):
super().METHOD_NAME()
self.script_short.remove()
self.script_long.remove()
self.py.remove() | [
531,
481
] |
def METHOD_NAME(x, *args):
return numpy.exp(x) - 1 | [
-1
] |
def METHOD_NAME(self, addon: Addon, service: str, config: dict[str, Any]) -> Message:
"""Send a discovery message to Home Assistant."""
try:
config = valid_discovery_config(service, config)
except vol.Invalid as err:
_LOGGER.error("Invalid discovery %s config", humanize_error(config, err))
raise DiscoveryError() from err
# Create message
message = Message(addon.slug, service, config)
# Already exists?
for exists_msg in self.list_messages:
if exists_msg != message:
continue
if exists_msg.config != config:
message = exists_msg
message.config = config
else:
_LOGGER.debug("Duplicate discovery message from %s", addon.slug)
return exists_msg
break
_LOGGER.info(
"Sending discovery to Home Assistant %s from %s", service, addon.slug
)
self.message_obj[message.uuid] = message
self.save()
self.sys_create_task(self._push_discovery(message, CMD_NEW))
return message | [
353
] |
def METHOD_NAME(dut, fanouthosts, result, wait_time):
action = None
for port in result['down_ports']:
logging.warning("Restoring port: {}".format(port))
pn = str(port).lower()
if 'portchannel' in pn or 'vlan' in pn:
action = 'config_reload'
continue
# If internal port is down, do 'config_reload' to recover.
# Here we do lowercase string search as pn is converted to lowercase
if '-ib' in pn or '-rec' in pn or '-bp' in pn:
action = 'config_reload'
continue
fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, dut.hostname, port)
if fanout and fanout_port:
fanout.shutdown(fanout_port)
fanout.no_shutdown(fanout_port)
if dut.facts["num_asic"] > 1:
asic = dut.get_port_asic_instance(port)
dut.asic_instance(asic.asic_index).startup_interface(port)
else:
dut.no_shutdown(port)
wait(wait_time, msg="Wait {} seconds for interface(s) to restore.".format(wait_time))
return action | [
2986,
703
] |
def METHOD_NAME(self, port, timeout=None): | [
707,
4364
] |
def METHOD_NAME(self, prefix): | [
1042,
2107
] |
def METHOD_NAME(first, second, *rest):
if not rest:
return urljoin(first, second)
return urljoin(urljoin(first, second), *rest) | [
2831
] |
def METHOD_NAME(self):
for name in self._attrs_to_clear:
delattr(self, name)
self._attrs_to_clear = []
for base in self._alloced_bases:
self.u.free(base)
self._alloced_bases = []
self.symbols = [(a, b, objname) for (a, b, objname) \
in self.symbols if objname == self.base_object] | [
537,
3873
] |
def METHOD_NAME(app_name: Optional[str] = None,
binding_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBindingResult:
"""
Get a Binding and its properties.
Azure REST API version: 2023-05-01-preview.
:param str app_name: The name of the App resource.
:param str binding_name: The name of the Binding resource.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
"""
__args__ = dict()
__args__['appName'] = app_name
__args__['bindingName'] = binding_name
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:appplatform:getBinding', __args__, opts=opts, typ=GetBindingResult).value
return AwaitableGetBindingResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type')) | [
19,
4320
] |
def METHOD_NAME(self): | [
9,
2728,
4182,
-1,
227
] |
def METHOD_NAME(x):
return branin(np.array([x[0], 2.275])) | [
17755,
2481
] |
def METHOD_NAME(self):
self.assertLess(abs(tutorial.PHI_1), 1.0)
self.assertLess(abs(tutorial.PHI_2), 1.0)
# Test manual binning analysis
ref_bin_avgs = np.mean(
tutorial.time_series_1[:tutorial.N_BINS * tutorial.BIN_SIZE].reshape((tutorial.N_BINS, -1)), axis=1)
np.testing.assert_allclose(
tutorial.bin_avgs,
ref_bin_avgs,
atol=1e-12,
rtol=0)
self.assertAlmostEqual(
tutorial.avg,
np.mean(ref_bin_avgs),
delta=1e-10)
self.assertAlmostEqual(
tutorial.sem,
np.std(ref_bin_avgs, ddof=1.5) / np.sqrt(tutorial.N_BINS),
delta=1e-10)
# Test binning analysis function
for bin_size in [2, 10, 76, 100]:
data = np.random.random(500)
n_bins = 500 // bin_size
sem = tutorial.do_binning_analysis(data, bin_size)
ref_bin_avgs = np.mean(
data[:n_bins * bin_size].reshape((n_bins, -1)), axis=1)
ref_sem = np.std(ref_bin_avgs, ddof=1.5) / np.sqrt(n_bins)
self.assertAlmostEqual(sem, ref_sem, delta=1e-10)
# The analytic expressions for the AR(1) process are taken from
# https://en.wikipedia.org/wiki/Autoregressive_model#Example:_An_AR(1)_process
# (accessed June 2021)
SIGMA_1 = np.sqrt(tutorial.EPS_1 ** 2 / (1 - tutorial.PHI_1 ** 2))
TAU_EXP_1 = -1 / np.log(tutorial.PHI_1)
# The autocorrelation is exponential, thus tau_exp = tau_int, and
# therefore
SEM_1 = np.sqrt(2 * SIGMA_1 ** 2 * TAU_EXP_1 / tutorial.N_SAMPLES)
self.assertAlmostEqual(
tutorial.fit_params[2],
SEM_1,
delta=0.1 * SEM_1)
self.assertAlmostEqual(tutorial.AN_SEM_1, SEM_1, delta=1e-10 * SEM_1)
SIGMA_2 = np.sqrt(tutorial.EPS_2 ** 2 / (1 - tutorial.PHI_2 ** 2))
TAU_EXP_2 = -1 / np.log(tutorial.PHI_2)
SEM_2 = np.sqrt(2 * SIGMA_2 ** 2 * TAU_EXP_2 / tutorial.N_SAMPLES)
self.assertAlmostEqual(tutorial.AN_SEM_2, SEM_2, delta=1e-10 * SEM_2) | [
9
] |
def METHOD_NAME(self):
self.pre_operations()
self.RolesGet(ctx=self.ctx)()
self.post_operations() | [
750,
710
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME():
d = {"foo.x": 3, "foo.y": 5, "foo.z": None, "bar.z": 7}
scoped_dict = make_scoped_dictionary(d, exclude_nones=False)
assert "foo" in scoped_dict
assert "bar" in scoped_dict
assert scoped_dict["foo"]["x"] == 3
assert scoped_dict["foo"]["y"] == 5
assert scoped_dict["foo"]["z"] is None
assert scoped_dict["bar"]["z"] == 7
scoped_dict = make_scoped_dictionary(d, exclude_nones=True)
assert "foo" in scoped_dict
assert "bar" in scoped_dict
assert scoped_dict["foo"]["x"] == 3
assert scoped_dict["foo"]["y"] == 5
assert "z" not in scoped_dict["foo"]
assert scoped_dict["bar"]["z"] == 7 | [
9,
3270,
553
] |
def METHOD_NAME(self, suffix, append=" "):
comment_chars = {
"sql": "--",
"yaml": "#",
"yml": "#",
}
return comment_chars[suffix[1:]] + append | [
19,
1591,
3874
] |
def METHOD_NAME(self):
self.assertEqual(self.subject.max_temp, 75) | [
9,
232,
963
] |
def METHOD_NAME(user_info):
gate.ActorBase.METHOD_NAME(user_info)
mm = gate.g4_units("mm")
user_info.output = False
user_info.input_digi_collections = ["Hits"]
user_info.spacing = [4 * mm, 4 * mm]
user_info.size = [128, 128]
user_info.physical_volume_index = None
user_info.origin_as_image_center = True
user_info.detector_orientation_matrix = Rotation.from_euler("x", 0).as_matrix() | [
0,
235,
21,
100
] |
def METHOD_NAME(img, target_size, center=True):
"""crop image
Args:
img: images data
target_size: crop target size
center: crop mode
Returns:
img: cropped image data
"""
height, width = img.shape[:2]
size = target_size
if center:
w_start = (width - size) // 2
h_start = (height - size) // 2
else:
w_start = np.random.randint(0, width - size + 1)
h_start = np.random.randint(0, height - size + 1)
w_end = w_start + size
h_end = h_start + size
img = img[h_start:h_end, w_start:w_end, :]
return img | [
712,
660
] |
async def METHOD_NAME(self, from_cfg_id: ConfigId, to_cfg_id: ConfigId) -> Optional[ConfigEntity]:
pass | [
215,
200
] |
def METHOD_NAME(self, content):
if not self.is_opened():
self.open(mode="w+")
self.write(content)
self.close() | [
77,
459,
61,
1462
] |
def METHOD_NAME(self):
fields_iter = self._dummy_fields_iter(lbprocs=[991, 995, 991])
result = self._group_result(fields_iter)
self.assertEqual(result, self._test_fields([(1001, 1003), (1002,)])) | [
9,
-1,
955
] |
def METHOD_NAME(data):
"""Test instantiating Tracks layer, check 3D+t dimensionality."""
layer = Tracks(data)
assert layer.ndim == 4 | [
9,
6520,
94,
-1,
4333
] |
def METHOD_NAME(self, expected):
if expected == 'in geometry':
with self.context.db.cursor() as cur:
cur.execute("""SELECT ST_Within(ST_SetSRID(ST_Point({cx}, {cy}), 4326),
ST_SetSRID('{geomtxt}'::geometry, 4326))""".format(**self.db_row))
return cur.fetchone()[0]
if ' ' in expected:
x, y = expected.split(' ')
else:
x, y = self.context.osm.grid_node(int(expected))
return Almost(float(x)) == self.db_row['cx'] and Almost(float(y)) == self.db_row['cy'] | [
220,
4902
] |
def METHOD_NAME(aws_accounts):
existing_statement_list = {
"effect": "Allow",
"sid": "ExpireStatement",
"expires_at": "2023-01-24",
}
existing_document = ManagedPolicyDocument(
policy_name="foo", version="bar", statement=existing_statement_list
)
new_statement_list = [{"effect": "Allow", "sid": "ExpireStatement"}]
new_document = ManagedPolicyDocument(
policy_name="foo", version="bar", statement=new_statement_list
)
merged_document: ManagedPolicyDocument = merge_model(
new_document, existing_document, aws_accounts
)
assert (
merged_document.statement[0].expires_at
== existing_document.statement.expires_at
) | [
9,
411,
3627,
54,
352,
280,
553
] |
def METHOD_NAME():
bst.load_model(fname=global_args["model_path"]) | [
86,
578
] |
def METHOD_NAME(self):
with self.no_rights_user_logged_in:
response: Response = self.client.post(
path=reverse(viewname="v1:config-history", kwargs={"adcm_pk": self.adcm.pk}),
data={"config": {}},
content_type=APPLICATION_JSON,
)
log: AuditLog = AuditLog.objects.order_by("operation_time").last()
self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)
self.check_adcm_updated(
log=log,
operation_name=self.adcm_conf_updated_str,
operation_result=AuditLogOperationResult.DENIED,
user=self.no_rights_user,
) | [
9,
4496
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(wandb_init, git_repo):
# disable_git is False is by default
# so run object should have git info
run = wandb_init()
assert run._commit is not None
assert run._commit == git_repo.last_commit
assert run._remote_url is None
run.finish() | [
9,
-1,
1161,
2876,
3705
] |
def METHOD_NAME(self): | [
9,
3533,
40,
41,
6526,
3399,
9
] |
def METHOD_NAME(self, response):
# type: (requests.Response) -> None
try:
response.raise_for_status()
except Exception as exc:
message = 'Error response from VoltDB: {}'.format(exc)
try:
# Try including detailed error message from response.
details = response.json()['statusstring']
except Exception:
pass
else:
message += ' (details: {})'.format(details)
raise_from(Exception(message), exc) | [
241,
43,
452,
41,
2051
] |
def METHOD_NAME(fileBase, start, end):
if start == 0 and end == 0:
return
url1 = buildUrl(URL1_FORMAT, fileBase, start, end)
url2 = buildUrl(URL2_FORMAT, fileBase, start, end)
print('curling %s' % url1)
code1, data1 = getUrl(url1)
print('curling %s' % url2)
code2, data2 = getUrl(url2)
if code1 != code2:
if set([code1, code2]) == set([400, 500]):
return
print('Error: different codes %s %s' % (code1, code2))
return
if data1 != data2:
print('Error: %s %s' % (url1, url2)) | [
22,
97,
9
] |
def METHOD_NAME(self):
return 11 | [
19,
58,
281
] |
def METHOD_NAME(self, cmd):
os.chdir(currdir)
output = main.main(['convert', '--logging=quiet', '-c'] + cmd)
return output | [
-1
] |
def METHOD_NAME(self, experience: Experience) -> None:
for callback in self.callbacks:
callback.on_make_experience_end(experience) | [
69,
93,
13029,
1798
] |
def METHOD_NAME(ref_filename, hyp_filename, case_sensitive=False):
"""Compute BLEU for two files (reference and hypothesis translation)."""
ref_lines = tf.io.gfile.GFile(ref_filename).read().strip().splitlines()
hyp_lines = tf.io.gfile.GFile(hyp_filename).read().strip().splitlines()
if len(ref_lines) != len(hyp_lines):
raise ValueError("Reference and translation files have different number of "
"lines.")
if not case_sensitive:
ref_lines = [x.lower() for x in ref_lines]
hyp_lines = [x.lower() for x in hyp_lines]
ref_tokens = [bleu_tokenize(x) for x in ref_lines]
hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]
return metrics.compute_bleu(ref_tokens, hyp_tokens) * 100 | [
8612,
291
] |
def METHOD_NAME(context):
if context.ret_code != 0:
raise Exception('%s : %s' % (context.error_message, context.stdout_message)) | [
638,
3163,
462
] |