text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
def METHOD_NAME(self) -> list:
"""
Get a list of :class:`~kivymd.uix.hero.MDHeroFrom` objects according
to the tag names specified in the :attr:`~current_heroes` list.
"""
hero_from_widget = []
for name_hero in self.current_heroes:
for hero_widget in self._heroes_data:
if isinstance(hero_widget, MDHeroFrom) or issubclass(
hero_widget.__class__, MDHeroFrom
):
if hero_widget.tag == name_hero:
hero_from_widget.append(hero_widget)
return hero_from_widget | [
19,
15003,
280,
706
] |
def METHOD_NAME(request, **kwargs):
SessionInfo.just_logged_in(request) | [
2717,
623,
4130
] |
def METHOD_NAME(dataset):
if isinstance(dataset, LanguagePairDataset):
return dataset
if hasattr(dataset, "tgt_dataset"):
return METHOD_NAME(dataset.tgt_dataset)
if hasattr(dataset, "dataset"):
return METHOD_NAME(dataset.dataset)
raise Exception(f"Don't know how to unwrap this dataset: {dataset}") | [
3670,
1238,
2938,
637
] |
def METHOD_NAME(self, app: AppT, *, url_prefix: Optional[str] = None) -> None:
"""Register blueprint with app."""
url_prefix = url_prefix or self.url_prefix
# Apply routes
for route in self.routes:
self._apply_route(app, route, url_prefix)
for static_route in self.static_routes:
self._apply_static_route(app.web, static_route, url_prefix) | [
372
] |
def METHOD_NAME():
# Make dummy classes for easier distinguishing the transforms
class DummyTrans(tr.BaseTransform):
glsl_map = "vec4 trans(vec4 pos) {return pos;}"
glsl_imap = "vec4 trans(vec4 pos) {return pos;}"
class TransA(DummyTrans):
pass
class TransB(DummyTrans):
pass
class TransC(DummyTrans):
pass
# Create test transforms
a, b, c = TransA(), TransB(), TransC()
# Test Chain creation
assert tr.ChainTransform().transforms == []
assert tr.ChainTransform(a).transforms == [a]
assert tr.ChainTransform(a, b).transforms == [a, b]
assert tr.ChainTransform(a, b, c, a).transforms == [a, b, c, a]
# Test composition by multiplication
assert_chain_objects(a * b, tr.ChainTransform(a, b))
assert_chain_objects(a * b * c, tr.ChainTransform(a, b, c))
assert_chain_objects(a * b * c * a, tr.ChainTransform(a, b, c, a))
# Test adding/prepending to transform
chain = tr.ChainTransform()
chain.append(a)
assert chain.transforms == [a]
chain.append(b)
assert chain.transforms == [a, b]
chain.append(c)
assert chain.transforms == [a, b, c]
chain.prepend(b)
assert chain.transforms == [b, a, b, c]
chain.prepend(c)
assert chain.transforms == [c, b, a, b, c]
# Test simplifying
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
t3 = tr.STTransform(translate=(3, 4))
# Create multiplied versions
t123 = t1*t2*t3
t321 = t3*t2*t1
c123 = tr.ChainTransform(t1, t2, t3)
c321 = tr.ChainTransform(t3, t2, t1)
c123s = c123.simplified
c321s = c321.simplified
#
assert isinstance(t123, tr.STTransform) # or the test is useless
assert isinstance(t321, tr.STTransform) # or the test is useless
assert isinstance(c123s, tr.ChainTransform) # or the test is useless
assert isinstance(c321s, tr.ChainTransform) # or the test is useless
# Test Mapping
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
chain1 = tr.ChainTransform(t1, t2)
chain2 = tr.ChainTransform(t2, t1)
#
assert chain1.transforms == [t1, t2] # or the test is useless
assert chain2.transforms == [t2, t1] # or the test is useless
#
m12 = (t1*t2).map((1, 1)).tolist()
m21 = (t2*t1).map((1, 1)).tolist()
m12_ = chain1.map((1, 1)).tolist()
m21_ = chain2.map((1, 1)).tolist()
#
# print(m12, m21, m12_, m21_)
assert m12 != m21
assert m12 == m12_
assert m21 == m21_
# Test shader map
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
chain = tr.ChainTransform(t1, t2)
#
funcs = chain.shader_map().dependencies()
funcsi = chain.shader_imap().dependencies()
#
assert t1.shader_map() in funcs
assert t2.shader_map() in funcs
assert t1.shader_imap() in funcsi
assert t2.shader_imap() in funcsi | [
9,
1053,
357
] |
def METHOD_NAME(empty_dag: DAG) -> None:
"""Test transitive reduction no reduction."""
dag = empty_dag
dag.from_dict({"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []})
dag.transitive_reduction()
assert dag.graph == {"a": {"b", "c"}, "b": {"d"}, "c": {"d"}, "d": set()} | [
9,
9170,
6603,
654,
6603
] |
def METHOD_NAME(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
# local validation with global support vectors
# fit a standalone SVM with the global support vectors
svm_global = self.svm_lib.SVC(kernel=self.kernel)
support_x = global_param["support_x"]
support_y = global_param["support_y"]
svm_global.fit(support_x, support_y)
# validate global model
(x_valid, y_valid, valid_size) = self.valid_data
y_pred = svm_global.predict(x_valid)
auc = roc_auc_score(y_valid, y_pred)
self.log_info(fl_ctx, f"AUC {auc:.4f}")
metrics = {"AUC": auc}
return metrics, svm_global | [
187
] |
def METHOD_NAME( quiet=False ):
global status_log_file
original_log_file = status_log_file
status_log_file= "NULL" if quiet else status_log_file
try:
execute_cmd( "pg_ctl", [ "-D", sql_dir, "-w", "-t", "20", "-l", sql_log_file, "start" ] )
status_log_file = original_log_file
return True
except:
status_log_file = original_log_file
return False | [
447
] |
def METHOD_NAME(self, pressure, temperature, volume, params):
"""
Not implemented in the Modified Tait EoS. :math:`[Pa]`
Returns 0.
Could potentially apply a fixed Poisson's ratio as a rough estimate.
"""
return 0.0 | [
8321,
8322
] |
def METHOD_NAME(module_session, config):
return UserFactory.create(
with_host=True,
with_membership=True,
membership__group=config.member_group,
) | [
21
] |
def METHOD_NAME(self, options: Values, args: List[Any]) -> None:
if len(args) > 1:
raise CommandError("Too many arguments")
if args:
pattern = args[0]
else:
pattern = "*"
files = self._find_wheels(options, pattern)
if options.list_format == "human":
self.format_for_human(files)
else:
self.format_for_abspath(files) | [
245,
596,
1768
] |
def METHOD_NAME():
return {
"broker_url": TEST_BROKER_URL,
"imports": ["src.tasks.index_nethermind"],
"task_serializer": "json",
"accept_content": ["json"],
"task_always_eager": True,
} | [
5595,
200
] |
def METHOD_NAME(self, _init_pygame, default_ui_manager, _display_surface_return_none):
healthy_sprite = HealthySprite()
health_bar = UIStatusBar(relative_rect=pygame.Rect(100, 100, 150, 30),
sprite=healthy_sprite,
manager=default_ui_manager)
assert health_bar.visible == 1
health_bar.hide()
assert health_bar.visible == 0 | [
9,
1243
] |
def METHOD_NAME(cls, fig):
"""Destroy figure *fig*."""
num = next((manager.num for manager in cls.figs.values()
if manager.canvas.figure == fig), None)
if num is not None:
cls.destroy(num) | [
2656,
8617
] |
def METHOD_NAME(scheme):
return scheme & 0x7fff | [
4089,
106
] |
def METHOD_NAME(self, binary_form=False):
return self.sslobj.METHOD_NAME(binary_form) | [
-1
] |
def METHOD_NAME(self, region_summary: can_api_v2_definition.RegionSummary, file_type):
if self.level is AggregationLevel.STATE:
return self.region_subdir / f"{region_summary.state}.{file_type.suffix}"
if self.level is AggregationLevel.COUNTRY:
return self.region_subdir / f"{region_summary.country}.{file_type.suffix}"
if self.level in (AggregationLevel.COUNTY, AggregationLevel.CBSA, AggregationLevel.PLACE):
return self.region_subdir / f"{region_summary.fips}.{file_type.suffix}"
raise NotImplementedError("Level not supported") | [
97,
2718
] |
def METHOD_NAME(self):
"""
Returns force parameter.
"""
return self.get_bool_parameter("relations") | [
19,
1015
] |
def METHOD_NAME(self, samples):
samples = [
s for s in samples if s["source"] is not None and len(s["source"]) > 0
]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
target_size = min(min(sizes), self.max_sample_size)
if target_size < self.min_length:
return {}
if self.min_sample_size < target_size:
target_size = np.random.randint(self.min_sample_size, target_size + 1)
collated_sources = sources[0].new(len(sources), target_size)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
assert diff >= 0
if diff == 0:
collated_sources[i] = source
else:
collated_sources[i] = self.crop_to_max_size(source, target_size)
return {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {"source": collated_sources},
} | [
3632
] |
def METHOD_NAME(self) -> None: ... | [
2869
] |
def METHOD_NAME(
*, org_id: int, upsert_data: OrganizationCreateAndUpdateOptions
) -> Organization:
with outbox_context(transaction.atomic(router.db_for_write(Organization))):
org, created = Organization.objects.update_or_create(id=org_id, defaults=upsert_data)
return org | [
5592,
1044,
604,
3411,
147,
41,
9098
] |
def METHOD_NAME():
if using_pyside:
# this will return None, unless properly inited
inst = GetPySideViewerInstance()
if not inst is None:
return inst.METHOD_NAME()
return None | [
19,
882,
1092
] |
def METHOD_NAME(self, return_url=None):
return self._logout_url | [
19,
2431,
274
] |
def METHOD_NAME(x):
return np.exp(-(x**2) / 50**2) | [
3435
] |
def METHOD_NAME(self, vals_list):
res = super().METHOD_NAME(vals_list)
res._create_calendar_event()
return res | [
129
] |
def METHOD_NAME(self, inputDict): | [
15
] |
def METHOD_NAME( self ) : | [
9,
0,
527
] |
def METHOD_NAME(session_multihost, request):
""" Multihost fixture to be used by tests
:param obj session_multihost: multihost object
:return obj session_multihost: return multihost object
:Exceptions: None
"""
if hasattr(request.cls(), 'class_setup'):
request.cls().class_setup(session_multihost)
request.addfinalizer(
lambda: request.cls().class_teardown(session_multihost))
return session_multihost | [
-1
] |
def METHOD_NAME(self):
"""
Calculates the executed value of the goal.
"""
algoritmo = self.algoritmo.lower()
valor = getattr(self, algoritmo)()
try:
valor = float(valor)
except:
valor = 0.0
return valor | [
-1,
-1
] |
def METHOD_NAME(self):
opts = self.menu_item_model._meta
self.assertEqual(opts.verbose_name, 'menu item')
self.assertEqual(opts.verbose_name_plural, 'menu items')
self.assertEqual(opts.ordering, ('sort_order',)) | [
9,
1094,
2
] |
def METHOD_NAME(full_op, type_param_non_default):
"""Tests _apply_typeparam_dict and by necessity getattr."""
for key, value in type_param_non_default.items():
full_op.type_param[key] = value
cpp_obj = DummyCppObj()
full_op._cpp_obj = cpp_obj
full_op._apply_typeparam_dict(cpp_obj, DummySimulation())
for key, value in type_param_non_default.items():
expected_dict = {"foo": 1, **value}
assert cpp_obj.getTypeParam(key) == expected_dict | [
9,
231,
-1,
553
] |
def METHOD_NAME(self):
if self.method is not None:
descr = "scale param's method"
self.method = self.check_and_change_lower(self.method,
[consts.MINMAXSCALE, consts.STANDARDSCALE],
descr)
descr = "scale param's mode"
self.mode = self.check_and_change_lower(self.mode,
[consts.NORMAL, consts.CAP],
descr)
# LOGGER.debug("self.feat_upper:{}, type:{}".format(self.feat_upper, type(self.feat_upper)))
# if type(self.feat_upper).__name__ not in ["float", "int"]:
# raise ValueError("scale param's feat_upper {} not supported, should be float or int".format(
# self.feat_upper))
if self.scale_col_indexes != -1 and not isinstance(self.scale_col_indexes, list):
raise ValueError("scale_col_indexes is should be -1 or a list")
if self.scale_names is None:
self.scale_names = []
if not isinstance(self.scale_names, list):
raise ValueError("scale_names is should be a list of string")
else:
for e in self.scale_names:
if not isinstance(e, str):
raise ValueError("scale_names is should be a list of string")
self.check_boolean(self.with_mean, "scale_param with_mean")
self.check_boolean(self.with_std, "scale_param with_std")
self.check_boolean(self.need_run, "scale_param need_run")
return True | [
250
] |
def METHOD_NAME(self, no_salt):
root_password_msg = "insert into password values (0,\'%s\');"
return root_password_msg % (hash_map[("0", no_salt)]) | [
782,
146
] |
def METHOD_NAME(self):
self.info.clear() | [
360,
147
] |
def METHOD_NAME(rpr_context, obj: bpy.types.Object, **kwargs):
""" sync the object and any data attached """
from rprblender.engine.render_engine import RenderEngine
log("sync", obj, obj.type)
if obj.type == 'MESH':
if obj.mode == 'OBJECT':
# if in edit mode use to_mesh
mesh.METHOD_NAME(rpr_context, obj, **kwargs)
else:
to_mesh.METHOD_NAME(rpr_context, obj, **kwargs)
elif obj.type == 'LIGHT':
light.METHOD_NAME(rpr_context, obj)
elif obj.type == 'CAMERA':
camera.METHOD_NAME(rpr_context, obj)
elif obj.type in ('CURVE', 'FONT', 'SURFACE', 'META'):
to_mesh.METHOD_NAME(rpr_context, obj, **kwargs)
elif obj.type == 'VOLUME':
openvdb.METHOD_NAME(rpr_context, obj, **kwargs)
elif obj.type == 'CURVES':
hair.sync_curves(rpr_context, obj)
elif obj.type == 'EMPTY':
pass
else:
log.warn("Object to sync not supported", obj, obj.type)
if obj.type in ('MESH', 'CURVE', 'FONT', 'SURFACE', 'META'):
volume.METHOD_NAME(rpr_context, obj)
hair.METHOD_NAME(rpr_context, obj)
# Note: particles should be exported separately in final render engine
# after motion blur, otherwise prev_location of particle will be (0, 0, 0)
if rpr_context.engine_type != RenderEngine.TYPE:
particle.METHOD_NAME(rpr_context, obj) | [
164
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The type of billing. Will be MAU for all new customers. If 'Auths', it can be updated to 'MAU'. Cannot be changed if value is 'MAU'. Learn more about Azure AD B2C billing at [aka.ms/b2cBilling](https://aka.ms/b2cbilling).
"""
return pulumi.get(self, "billing_type") | [
4094,
44
] |
def METHOD_NAME(self, identity, data, uow=None):
"""Create a new OAI set."""
self.require_permission(identity, "create")
valid_data, errors = self.schema.load(
data,
context={"identity": identity},
raise_errors=True,
)
self._validate_spec(valid_data["spec"])
system_created = valid_data["spec"].startswith(tuple(self.reserved_prefixes))
new_set = OAISet(**valid_data, system_created=system_created)
existing_set, errors = self._get_one(spec=new_set.spec, raise_error=False)
if existing_set:
raise OAIPMHSetSpecAlreadyExistsError(new_set.spec)
uow.register(OAISetCommitOp(new_set))
return self.result_item(
service=self,
identity=identity,
item=new_set,
links_tpl=self.links_item_tpl,
) | [
129
] |
def METHOD_NAME(msg=""):
"""Raise a SkipTest if we appear to be running the pytest test loader.
Parameters
----------
msg : string, optional
The message issued when a test is skipped.
"""
if are_tests_running():
pytest.skip(msg) | [
2423,
217,
1340,
450
] |
def METHOD_NAME(cursor, user_id):
try:
cursor.execute(
'DELETE FROM kontext_user_access '
'WHERE user_id = %s', (user_id,))
except Exception as ex:
print(ex) | [
188,
75,
11815
] |
def METHOD_NAME(
cls: t.Type[T],
method: t.Callable[t.Concatenate[T, P], t.Any],
name: str,
*,
batchable: bool = False,
batch_dim: tuple[int, int] | int = 0,
input_spec: LazyType[t.Any] | t.Tuple[LazyType[t.Any], ...] | None = None,
output_spec: LazyType[t.Any] | None = None,
):
meth = Runnable.method(
method,
batchable=batchable,
batch_dim=batch_dim,
input_spec=input_spec,
output_spec=output_spec,
)
setattr(cls, name, meth)
meth.__set_name__(cls, name) | [
238,
103
] |
def METHOD_NAME(version, message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s %s" % \
(version, (message or ''))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exc.SADeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate | [
2497
] |
def METHOD_NAME(self, entity: prop_exploding_futbol, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol', obj, 'props') | [
276,
1302,
6149,
6150
] |
def METHOD_NAME(container, summary):
phantom.debug("on_finish() called")
build_observable__observable_array = json.loads(_ if (_ := phantom.get_run_data(key="build_observable:observable_array")) != "" else "null") # pylint: disable=used-before-assignment
output = {
"observable": build_observable__observable_array,
}
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.save_playbook_output_data(output=output)
return | [
69,
1239
] |
def METHOD_NAME(self):
# deal with inconsistent intermediate folders of tarballs
# 02.00.00 only has 'R02-00-00'
# but 02.02.00 has 'EvtGen/R02-02-00'
if self.spec.satisfies("@02.02.00:"):
return "R" + str(self.version).replace(".", "-")
else:
return "" | [
1563,
1703,
1190
] |
def METHOD_NAME(self, mocked_exec):
err = batchprocessors.BATCHCODE.code_exec(
'# batchcode code:\n\nprint("Hello")\n', extra_environ={}
)
self.assertIsNone(err) | [
9,
13812,
7727
] |
def METHOD_NAME(self):
g = Graph.Ring(5, directed=True)
lo = g.layout("auto")
plot(
g,
layout=lo,
target=result_image_folder / "graph_mark_groups_squares_directed.png",
backend="cairo",
mark_groups=True,
vertex_shape="square",
) | [
9,
1743,
861,
8582
] |
def METHOD_NAME(vm, total_cpus):
output = vm.monitor.send_args_cmd("info cpus")
test.log.debug("Output of info CPUs:\n%s", output)
cpu_regexp = re.compile(r"CPU #(\d+)")
total_cpus_monitor = len(cpu_regexp.findall(output))
if total_cpus_monitor != total_cpus:
test.fail("Monitor reports %s CPUs, when VM should have"
" %s" % (total_cpus_monitor, total_cpus))
error_context.context("hotplugging finished, let's wait a few sec and"
" check CPUs quantity in guest.", test.log.info)
if not utils_misc.wait_for(lambda: cpu.check_if_vm_vcpu_match(
total_cpus, vm),
60 + total_cpus, first=10,
step=5.0, text="retry later"):
test.fail("CPU quantity mismatch cmd after hotplug !")
error_context.context("rebooting the vm and check CPU quantity !",
test.log.info)
session = vm.reboot()
if not cpu.check_if_vm_vcpu_match(total_cpus, vm):
test.fail("CPU quantity mismatch cmd after hotplug and reboot !") | [
1162
] |
def METHOD_NAME():
uis = UserImage.objects.filter(image__disk_file__file_type='FITS image data')
print(uis.count(), 'UserImages are FITS')
for ui in uis:
im = ui.image
im.display_image = None
im.thumbnail = None
im.save()
print('Updated', len(uis), 'UserImages') | [
4384,
12273,
3298
] |
def METHOD_NAME(syn):
synapse_without_hpc_suffix = syn[: len(syn) - 4]
net_without_hpc_suffix = prepare_neuron(synapse_without_hpc_suffix)
net_with_hpc_suffix = prepare_neuron(syn)
assert net_with_hpc_suffix == net_without_hpc_suffix | [
9,
5753,
629
] |
def METHOD_NAME(mutable_mock_repo, extra_repo):
mutable_mock_repo.put_first(extra_repo[0])
mutable_mock_repo.get_pkg_class("a")
mutable_mock_repo.get_pkg_class("builtin.mock.a") | [
9,
522,
457,
-1
] |
def METHOD_NAME(self):
self.filter_one.insert = self.filter_one.uppercase_insert
self.text.insert('insert', 'bAr')
self.assertEqual(self.text.get('1.0', END), 'BAR\n') | [
9,
2444,
408
] |
def METHOD_NAME(self, session, path):
target = os.path.basename(path)
try:
session.put(path, target)
except Exception as ex:
return False, repr(ex)
return True, 'OK' | [
172,
171
] |
def METHOD_NAME(self, ml_dict: dict):
"""Update the persistence data with the learned values.
Args:
ml_dict (Dict of ModelLearnable): updated information to be merged into existing Dict of ModelLearnable
"""
for model_id in ml_dict.keys():
if model_id != "meta":
ml = ml_dict[model_id]
# update with value of the model learnable
# note that the original weights that are not learned are still kept!
learned_weights = ml[ModelLearnableKey.WEIGHTS]
for k, v in learned_weights.items():
self.model_set[model_id][k] = v | [
86
] |
def METHOD_NAME(self):
r1 = fakeredis.FakeStrictRedis.from_url('redis://localhost:6379/11')
r2 = fakeredis.FakeStrictRedis.from_url('redis://localhost:6379/11')
r3 = fakeredis.FakeStrictRedis(server=fakeredis.FakeServer())
r1.set('foo', 'bar')
assert r2.get('foo') == b'bar'
assert not r3.exists('foo') | [
9,
1101,
550,
434
] |
def METHOD_NAME(self) -> None:
file_path = _download(DEMO_DATA_URL, dir_str=TEST_DATA_PATH)
self.gef_file = file_path
file_path = _download(DEMO_135_CELL_BIN_GEF_URL, dir_str=TEST_DATA_PATH)
self.cgef_file = file_path | [
0,
1
] |
def METHOD_NAME(thermoml_string, supported_phases):
"""A decorator which wraps around the `register_thermoml_property`
method.
Parameters
----------
thermoml_string: str
The ThermoML string identifier (ePropName) for this property.
supported_phases: PropertyPhase:
An enum which encodes all of the phases for which this
property supports being estimated in.
"""
def decorator(cls):
register_thermoml_property(thermoml_string, supported_phases, cls)
return cls
return decorator | [
-1,
1042
] |
def METHOD_NAME(self):
rating = self.pop('rating')
if rating is not None and rating.strip():
context = self._get_context()
context['media_rating']['content'] = rating | [
1798,
1091,
5390
] |
def METHOD_NAME(self):
"""
@brief Direction of the force on the right boundary
Notes
-----
The displacement is along the (1, 0) direction, i.e. stretching in the positive x direction
"""
return np.array([1, 0], np.float_) | [
2786,
1818,
4065
] |
def METHOD_NAME(self): | [
9,
1181,
1461
] |
def METHOD_NAME(self, ids: List[str]) -> None:
pass | [
34,
2465,
280,
798,
1267
] |
def METHOD_NAME(source=None):
return [] | [
86,
1458,
459,
44
] |
def METHOD_NAME(self, ref): | [
549,
137,
1865
] |
def METHOD_NAME(self, txt):
"""
Process YAML file
"""
txt = self._inject_constants_dict(txt)
if LooseVersion(platform.python_version()) < LooseVersion(u'2.7'):
self.parsed_yeb = yaml.load(txt)
else:
self.parsed_yeb = yaml.load(txt, Loader=yaml.SafeLoader) | [
214
] |
def METHOD_NAME(db_session):
name = "My Hypothesis Group"
org = models.Organization(name="My Organization", authority="foobar.com")
db_session.add(org)
db_session.flush()
group = models.Group(name=name, authority="foobar.com", organization=org)
db_session.add(group)
db_session.flush()
assert group.organization == org
assert group.organization_id == org.id | [
9,
846,
1044
] |
def METHOD_NAME(self):
if self._fp:
return self._fp
ip, port = self.get_web_port_and_ip()
wc = webclient.SecureHTTPConnection(ip, verifycallback=self._savecert, port=port)
try:
wc.connect()
except IOError as ie:
if ie.errno == errno.ECONNREFUSED:
self._certfailreason = 1
return None
elif ie.errno == errno.EHOSTUNREACH:
self._certfailreason = 2
return None
self._certfailreason = 2
return None
except Exception:
self._certfailreason = 2
return None
return self._fp | [
3905,
1941
] |
def METHOD_NAME(self, **kwargs: Any) -> None:
"""Delete the project's artifacts on the server.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
path = self._compute_path("/projects/{project_id}/artifacts")
if TYPE_CHECKING:
assert path is not None
self.gitlab.http_delete(path, **kwargs) | [
34
] |
def METHOD_NAME(self, isSynchronized):
""""
Set if the filename and the data are synchronized.
"""
self.__isSynchronized = isSynchronized | [
0,
8900
] |
def METHOD_NAME():
# [START multiple_translation]
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document import (
DocumentTranslationClient,
DocumentTranslationInput,
TranslationTarget
)
endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
source_container_url_1 = os.environ["AZURE_SOURCE_CONTAINER_URL_1"]
source_container_url_2 = os.environ["AZURE_SOURCE_CONTAINER_URL_2"]
target_container_url_fr = os.environ["AZURE_TARGET_CONTAINER_URL_FR"]
target_container_url_ar = os.environ["AZURE_TARGET_CONTAINER_URL_AR"]
target_container_url_es = os.environ["AZURE_TARGET_CONTAINER_URL_ES"]
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
poller = client.begin_translation(inputs=[
DocumentTranslationInput(
source_url=source_container_url_1,
targets=[
TranslationTarget(
target_url=target_container_url_fr,
language="fr"
),
TranslationTarget(
target_url=target_container_url_ar,
language="ar"
)
]
),
DocumentTranslationInput(
source_url=source_container_url_2,
targets=[
TranslationTarget(
target_url=target_container_url_es,
language="es"
)
]
)
]
)
result = poller.result()
print(f"Status: {poller.status()}")
print(f"Created on: {poller.details.created_on}")
print(f"Last updated on: {poller.details.last_updated_on}")
print(f"Total number of translations on documents: {poller.details.documents_total_count}")
print("\nOf total documents...")
print(f"{poller.details.documents_failed_count} failed")
print(f"{poller.details.documents_succeeded_count} succeeded")
for document in result:
print(f"Document ID: {document.id}")
print(f"Document status: {document.status}")
if document.status == "Succeeded":
print(f"Source document location: {document.source_document_url}")
print(f"Translated document location: {document.translated_document_url}")
print(f"Translated to language: {document.translated_to}\n")
elif document.error:
print(f"Error Code: {document.error.code}, Message: {document.error.message}\n")
# [END multiple_translation] | [
734,
107,
2518
] |
def METHOD_NAME(self):
self.log.debug("jupyter run: starting...")
super(RunApp, self).METHOD_NAME()
if self.filenames_to_run:
for filename in self.filenames_to_run:
self.log.debug("jupyter run: executing `%s`" % filename)
with open(filename) as fp:
code = fp.read()
reply = self.kernel_client.execute_interactive(code, timeout=OUTPUT_TIMEOUT)
return_code = 0 if reply['content']['status'] == 'ok' else 1
if return_code:
raise Exception("jupyter-run error running '%s'" % filename)
else:
code = sys.stdin.read()
reply = self.kernel_client.execute_interactive(code, timeout=OUTPUT_TIMEOUT)
return_code = 0 if reply['content']['status'] == 'ok' else 1
if return_code:
raise Exception("jupyter-run error running 'stdin'") | [
447
] |
def METHOD_NAME(self):
clone_clf = clone(self.clf) | [
9,
578,
670
] |
def METHOD_NAME(self):
# external helper method to view proposal choices
choices = self.proposal_data["choices"]
console.print(f"Choices for proposal {self.proposal_id}: {choices}") | [
697,
4229,
998
] |
def METHOD_NAME(self):
# Generate KMS Client
kms_client = client("kms", region_name=AWS_REGION)
# Create KMS keys
key1 = kms_client.create_key()["KeyMetadata"]
key2 = kms_client.create_key()["KeyMetadata"]
kms_client.enable_key_rotation(KeyId=key2["KeyId"])
# KMS client for this test class
audit_info = self.set_mocked_audit_info()
kms = KMS(audit_info)
assert len(kms.keys) == 2
assert kms.keys[0].arn == key1["Arn"]
assert kms.keys[0].rotation_enabled is False
assert kms.keys[1].arn == key2["Arn"]
assert kms.keys[1].rotation_enabled is True | [
9,
19,
59,
2271,
452
] |
def METHOD_NAME(self) -> None:
"""Test ``BaseModule.predict_scores``."""
batch = torch.zeros(self.batch_size, 3, dtype=torch.long, device=self.model.device)
# Set into training mode to check if it is correctly set to evaluation mode.
self.model.train()
scores = self.model.predict_hrt(batch)
assert scores.shape == (self.batch_size, 1)
self._check_scores(scores)
assert not self.model.training | [
9,
2103,
3295
] |
def METHOD_NAME(self):
self.add_argument('--triples', dest='triples', required=True)
self.add_argument('--queries', dest='queries', default=None)
self.add_argument('--collection', dest='collection', default=None)
def check_training_input(args):
assert (args.collection is None) == (args.queries is None), \
"For training, both (or neither) --collection and --queries must be supplied." \
"If neither is supplied, the --triples file must contain texts (not PIDs)."
self.checks.append(check_training_input) | [
238,
2685,
362
] |
def METHOD_NAME(self, input_data, expected_value):
result = compute_variance(**input_data)
np.testing.assert_allclose(result.cpu().numpy(), expected_value, atol=1e-4) | [
9,
6580,
331
] |
def METHOD_NAME():
config = _get_mock_config("A")
builder = A(config, True, 1)
builder.state_value += 1
saved_state = builder.get_state()
builder = A(config, True, 1)
builder.load_state(saved_state)
assert builder.state_value == 2 | [
9,
348,
551,
557
] |
def METHOD_NAME(self):
num_classes = 1
num_instance = 2
bbox_head = MultiInstanceBBoxHead(
num_instance=num_instance,
num_shared_fcs=2,
reg_class_agnostic=True,
num_classes=num_classes)
s = 128
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
num_samples = 2
rois = [torch.rand((num_samples, 5))]
cls_scores = []
bbox_preds = []
for k in range(num_instance):
cls_scores.append(torch.rand((num_samples, num_classes + 1)))
bbox_preds.append(torch.rand((num_samples, 4)))
cls_scores = [torch.cat(cls_scores, dim=1)]
bbox_preds = [torch.cat(bbox_preds, dim=1)]
# with nms
rcnn_test_cfg = ConfigDict(
nms=dict(type='nms', iou_threshold=0.5),
score_thr=0.01,
max_per_img=500)
result_list = bbox_head.predict_by_feat(
rois=tuple(rois),
cls_scores=tuple(cls_scores),
bbox_preds=tuple(bbox_preds),
batch_img_metas=img_metas,
rcnn_test_cfg=rcnn_test_cfg)
self.assertLessEqual(
len(result_list[0]), num_samples * num_instance * num_classes)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(result_list[0].bboxes.shape[1], 4)
self.assertEqual(len(result_list[0].scores.shape), 1)
self.assertEqual(len(result_list[0].labels.shape), 1)
# without nms
result_list = bbox_head.predict_by_feat(
rois=tuple(rois),
cls_scores=tuple(cls_scores),
bbox_preds=tuple(bbox_preds),
batch_img_metas=img_metas)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), num_samples * num_instance)
self.assertIsNone(result_list[0].get('label', None))
# num_samples is 0
num_samples = 0
rois = [torch.rand((num_samples, 5))]
cls_scores = []
bbox_preds = []
for k in range(num_instance):
cls_scores.append(torch.rand((num_samples, num_classes + 1)))
bbox_preds.append(torch.rand((num_samples, 4)))
cls_scores = [torch.cat(cls_scores, dim=1)]
bbox_preds = [torch.cat(bbox_preds, dim=1)]
# with nms
rcnn_test_cfg = ConfigDict(
score_thr=0.,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
result_list = bbox_head.predict_by_feat(
rois=tuple(rois),
cls_scores=tuple(cls_scores),
bbox_preds=tuple(bbox_preds),
batch_img_metas=img_metas,
rcnn_test_cfg=rcnn_test_cfg)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), 0)
self.assertEqual(result_list[0].bboxes.shape[1], 4)
# without nms
result_list = bbox_head.predict_by_feat(
rois=tuple(rois),
cls_scores=tuple(cls_scores),
bbox_preds=tuple(bbox_preds),
batch_img_metas=img_metas)
self.assertIsInstance(result_list[0], InstanceData)
self.assertEqual(len(result_list[0]), 0 * num_instance)
self.assertIsNone(result_list[0].get('label', None)) | [
9,
2739,
373,
19,
51
] |
def METHOD_NAME(code, msg=''):
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code) | [
558
] |
def METHOD_NAME(self, t, status):
self._status = status | [
0,
452
] |
def METHOD_NAME(x: da.Array, data: da.Array) -> da.Array:
xrank = da.from_delayed(nanrankdata(x), dtype=float, shape=x.shape)
ranks = da.from_delayed(nanrankdata(data), dtype=float, shape=data.shape)
return _pearson_1xn(xrank, ranks) | [
4753,
11522
] |
def METHOD_NAME(self, video_id):
json = self.session.http.post(
f"https://api-web.trovo.live/graphql?qid={self.generate_qid()}",
json=[{
"operationName": "vod_VodReaderService_BatchGetVodDetailInfo",
"variables": {
"params": {
"vids": [video_id],
},
},
"extensions": {},
}],
schema=validate.Schema(
validate.parse_json(),
[{
"data": {
"vod_VodReaderService_BatchGetVodDetailInfo": {
"VodDetailInfos": validate.any(
{
video_id: {
"streamerInfo": {
"userName": str,
},
"vodInfo": {
"playInfos": [{
"desc": validate.all(validate.transform(lambda s: s.lower()), str),
"playUrl": validate.url(),
}],
"vid": str,
"title": str,
"categoryName": str,
"playbackRights": {
"playbackRightsSetting": str,
"playbackRights": str,
},
},
},
},
{},
),
},
},
}],
validate.get((0, "data", "vod_VodReaderService_BatchGetVodDetailInfo", "VodDetailInfos", video_id)),
),
)
if not json:
log.error("Video not found")
return
log.debug(json["vodInfo"]["playbackRights"])
self.id = json["vodInfo"]["vid"]
self.author = json["streamerInfo"]["userName"]
self.title = json["vodInfo"]["title"]
self.category = json["vodInfo"]["categoryName"]
for s in json["vodInfo"]["playInfos"]:
q = s["desc"]
if "(source)" in q:
q = "source"
yield q, HLSStream(self.session, update_scheme("https:", s["playUrl"])) | [
19,
17498
] |
def METHOD_NAME():
# Bit of a hack without installing
# python bindings
gdal_translate = shutil.which('gdal_translate')
if not gdal_translate:
return None
# Get version
version_output = subprocess.check_output([gdal_translate, "--version"]).decode('utf-8')
m = re.match(r"GDAL\s+([\d+])\.([\d+])\.([\d+]),\s+released", version_output)
if not m:
return None
return tuple(map(int, m.groups())) | [
19,
3908,
281
] |
def METHOD_NAME(self):
tuples = [
('USD', '1111.00'),
('CAD', '1333.33'),
]
table_object = table.create_table(tuples, [(0, 'Currency'), 1])
self.assertEqual(table.Table(columns=[0, 1],
header=['Currency', 'Field 1'],
body=[['USD', '1111.00'],
['CAD', '1333.33']]),
table_object) | [
9,
129,
410,
41,
724
] |
def METHOD_NAME(self):
# Layout for buttons
buttons = QtWidgets.QDialogButtonBox()
self.cancel_button = buttons.addButton(QtWidgets.QDialogButtonBox.Cancel)
self.delete_button = buttons.addButton(
"Delete", QtWidgets.QDialogButtonBox.AcceptRole
)
buttons_layout = QtWidgets.QHBoxLayout()
buttons_layout.addWidget(buttons, alignment=QtCore.Qt.AlignTop)
buttons_layout_widget = QtWidgets.QWidget()
buttons_layout_widget.setLayout(buttons_layout)
# Connect actions for buttons
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
return buttons_layout_widget | [
93,
1974,
706
] |
def METHOD_NAME(self, M, Rs, c):
# NFW normalization from mass, radius and concentration
return M / (4 * np.pi * Rs**3 * (np.log(1+c) - c/(1+c))) | [
387
] |
def METHOD_NAME(fieldstype=3, n=10, L=12, options=None):
quadtree = quadmesh(n)
NN = quadtree.number_of_nodes()
print('The number of mesh:', NN)
mesh = quadtree.to_pmesh()
obj = SCFTVEMModel(mesh, options=options)
mu = obj.init_value(fieldstype=fieldstype) # get initial value
problem = {'objective': obj, 'x0': mu, 'quadtree': quadtree}
return problem | [
5825,
578
] |
def METHOD_NAME(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n) | [
9,
5745,
4403,
7844
] |
def METHOD_NAME(self) -> None:
result: dict[str, Handler] = {}
def load(filename: str):
handler = bahs.ServerLifecycleHandler(filename=filename)
result['handler'] = handler
with_file_contents(""" | [
9,
2526,
1068,
163,
4024,
1334
] |
def METHOD_NAME(app_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
sync_status: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAppResult:
"""
Get an App and its properties.
:param str app_name: The name of the App resource.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
:param str sync_status: Indicates whether sync status
"""
__args__ = dict()
__args__['appName'] = app_name
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['syncStatus'] = sync_status
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230501preview:getApp', __args__, opts=opts, typ=GetAppResult).value
return AwaitableGetAppResult(
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type')) | [
19,
991
] |
def METHOD_NAME():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
x = f.createDimension('x', 4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert len_single == len_double | [
9,
1579,
6411
] |
def METHOD_NAME(key):
# Remove menu entry labels
# In our data model, they conflict with the actual configuration
# they are associated to
# i.e. Nucleo_144.menu.pnum.NUCLEO_F207ZG would be both
# a string ("Nucleo F207ZG")
# and a dict (.build.variant_h=..., .upload.maximum_size=...)
if key[0] == "menu":
# menu.xserial=U(S)ART support
return True
if len(key) == 4 and key[1] == "menu":
# Nucleo_144.menu.pnum.NUCLEO_F207ZG=Nucleo F207ZG
# Midatronics.menu.upload_method.MassStorage=Mass Storage
return True
# keep bootloader flags that impact the build
if len(key) >= 6 and key[1] == "menu" and key[2] == "upload_method":
if key[3] != "build":
return False
return True
return False | [
-1,
527
] |
def METHOD_NAME(self):
return self._options | [
1881
] |
def METHOD_NAME(self, attrs):
c = self.test()
c.tests.append(Testcase(attrs['from'], attrs['import'])) | [
447,
1954
] |
def METHOD_NAME(self) -> global___ExportTracePartialSuccess:
"""The details of a partially successful export request.
If the request is only partially accepted
(i.e. when the server accepts only parts of the data and rejects the rest)
the server MUST initialize the `partial_success` field and MUST
set the `rejected_<signal>` with the number of items it rejected.
Servers MAY also make use of the `partial_success` field to convey
warnings/suggestions to senders even when the request was fully accepted.
In such cases, the `rejected_<signal>` MUST have a value of `0` and
the `error_message` MUST be non-empty.
A `partial_success` message with an empty value (rejected_<signal> = 0 and
`error_message` = "") is equivalent to it not being set/present. Senders
SHOULD interpret it the same way as in the full success case.
"""
pass | [
2351,
1434
] |
def METHOD_NAME(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)(
"delete", url, **params
) | [
34,
612,
191
] |
def METHOD_NAME(sstate_dir):
"""
This function is going to be used for generating the target and host manifest files packages of eSDK.
"""
import math
extra_info = {}
extra_info['tasksizes'] = {}
extra_info['filesizes'] = {}
for root, _, files in os.walk(sstate_dir):
for fn in files:
if fn.endswith('.tgz'):
fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
task = fn.rsplit(':',1)[1].split('_',1)[1].split(',')[0]
origtotal = extra_info['tasksizes'].get(task, 0)
extra_info['tasksizes'][task] = origtotal + fsize
extra_info['filesizes'][fn] = fsize
return extra_info | [
19,
1967,
-1
] |
def METHOD_NAME(self):
with self.assertRaises(ValueError):
# 'blah' is not a valid operator
Condition('x', 1, 'blah') | [
9,
532,
441
] |
def METHOD_NAME(self):
return self._casting | [
3215
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self):
got = _run_test(
"TEST_VAR",
".*",
[
# Passing RegexTest because 'test' matches '.*'.
RegexTest(val="test", should_match=True),
# Failing RegexTest because 'test' matches '.*'.
RegexTest(val="test", should_match=False)
])
self.assertEqual(got.regex_error, "")
self.assertTrue(got.failing_matches) | [
9,
3368,
7405,
3368,
3587,
590
] |
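
Each row above pairs a code snippet (the `text` column) with a short list of integer `ids`. The sketch below is illustrative only and assumes the rows are available as plain (text, ids) records outside of any particular loading library; the `Row` dataclass, the `validate` helper, and the sample row are hypothetical conveniences, not part of the dataset tooling. It simply checks records against the column statistics in the header (string lengths of 15 to roughly 7.82k characters, id sequences of length 1 to 7).

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Row:
    text: str       # the code snippet, e.g. a masked METHOD_NAME definition
    ids: List[int]  # the associated id sequence (1 to 7 integers per row)


def validate(rows: List[Row]) -> List[Row]:
    """Keep only rows that match the column statistics from the table header."""
    return [
        r for r in rows
        if 15 <= len(r.text) <= 7820 and 1 <= len(r.ids) <= 7
    ]


# Hypothetical example row mirroring one entry in the table above.
sample = Row(text="def METHOD_NAME(scheme):\n    return scheme & 0x7fff", ids=[4089, 106])
print(len(validate([sample])))  # -> 1
```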