text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME():
def compute_fn(y_preds, y_targets):
raise Exception
em = EpochMetric(compute_fn, check_compute_fn=True)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output1)
em = EpochMetric(compute_fn, check_compute_fn=False)
em.update(output1) | [
9,
250,
226,
667
] |
def METHOD_NAME(
self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
prev_states: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
if self.do_pad_trim:
xs_pad = self.pad_or_trim(xs_pad, self.pad_samples)
feats, feats_lens = self.log_mel_spectrogram(xs_pad, ilens)
if self.specaug is not None and self.encoders.training:
feats = torch.transpose(feats, 1, 2)
feats, feats_lens = self.specaug(feats, feats_lens)
feats = torch.transpose(feats, 1, 2)
xs_pad, olens = self.whisper_encode(feats, feats_lens)
return xs_pad, olens, None | [
76
] |
def METHOD_NAME():
# Arrange
old_settings_dict = {"server": "192.168.0.1"}
# Act
settings = V3_4_0.normalize(old_settings_dict)
# Assert
assert old_settings_dict == settings | [
9,
1137,
7278,
1842,
1506,
2351
] |
def METHOD_NAME(self, *args, **kwargs):
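# Delegate to the wrapped dataset, then tag any "net_input" batch with the target language id and dataset name.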
ans = self.dataset.METHOD_NAME(*args, **kwargs)
if "net_input" in ans:
ans["net_input"]["target_language_id"] = self.target_language_id
ans["net_input"]["dataset_name"] = self.name
return ans | [
3632
] |
def METHOD_NAME(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME():
if os.path.exists(diff_meta_path):
for file in os.listdir(diff_meta_path):
if file.endswith('.json'):
cmd = ['azdev', 'command-change', 'meta-diff', '--base-meta-file', os.path.join(base_meta_path, file), '--diff-meta-file', os.path.join(diff_meta_path, file), '--output-file', os.path.join(output_path, file)]
print(cmd)
subprocess.run(cmd)
cmd = ['ls', '-al', output_path]
print(cmd)
subprocess.run(cmd) | [
1094,
2443
] |
def METHOD_NAME(field, value):
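# Stub predicate: always reports False for the given field/value pair.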
return False | [
89,
684,
550,
1096
] |
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result | [
24,
553
] |
def METHOD_NAME(ssh_sub_container):
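# Resolve the host port that the container maps to its SSH port (22/tcp).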
return ssh_sub_container.get_host_port_binding(22, protocol="tcp") | [
1264,
1066,
237
] |
def METHOD_NAME():
check_2d_r2c(N=(28, 10))
check_2d_r2c(N=(28, 10), dtype=np.float32) | [
9,
1085
] |
def METHOD_NAME(obj):
img = obj['filename'] if 'filename' in obj else obj['file']['filename']
return get_image_size(img) | [
19,
9138,
1318
] |
def METHOD_NAME(error_msg):
return PingCommandResult(
error=error_msg,
host_or_ip=param.host_or_ip,
num_packets=param.num_packets or DEFAULT_NUM_PACKETS,
stats=None,
) | [
129,
168,
1571
] |
def METHOD_NAME( self, bean, newState ) :
# sort out states
print "from Bean", bean
print "with state", newState
# JSON, if set to True
if PUBLISHJSON :
data = {}
if ( not bean.getInverted( ) ) :
if ( newState & jmri.Turnout.CLOSED ) :
data['state'] = "CLOSED"
else:
data['state'] = "THROWN"
else :
if ( newState & jmri.Turnout.CLOSED ) :
data['state'] = "THROWN"
else:
data['state'] = "CLOSED"
json_data = json.dumps( data )
return json_data
# Without JSON - this is the same as the default behavior, here so you can modify it as desired
else:
if ( not bean.getInverted( ) ) :
if ( newState & jmri.Turnout.CLOSED ) :
return "CLOSED"
else :
return "THROWN"
else :
if ( newState & jmri.Turnout.CLOSED ) :
return "THROWN"
else :
return "CLOSED" | [
288,
280,
13723
] |
def METHOD_NAME(ostree_user_credentials, ostree_repo_with_user):
"""Get info for Ostree branch by id
:id: 7838c9a8-56da-44de-883c-28571ecfa75c
:expectedresults: Ostree Branch Info is displayed
"""
result = OstreeBranch.with_user(*ostree_user_credentials).list()
assert len(result) > 0
# Grab a random branch
branch = random.choice(result)
result = OstreeBranch.with_user(*ostree_user_credentials).info({'id': branch['id']})
assert branch['id'] == result['id'] | [
9,
2302,
100,
604,
147
] |
def METHOD_NAME(self):
viewer = self.get_viewer()
self.assertTrue(viewer.is_server_running)
self.assertIsInstance(viewer.datasets_ctx, DatasetsContext)
viewer.stop_server()
self.assertFalse(viewer.is_server_running) | [
9,
447,
61,
631,
163
] |
def METHOD_NAME(self, context):
utils = Utils(self.context, self.request)
target = utils.get_object_url(context)
return self.request.response.redirect(target) | [
74,
1736
] |
def METHOD_NAME(self):
with self.assertRaises(ValueError):
APIKey.generate_secret(31) | [
9,
4207,
444,
1923,
673,
524,
937
] |
def METHOD_NAME(raw_y_batch):
y_batch = [letter_to_index(c) for c in raw_y_batch]
return y_batch | [
356,
320
] |
def METHOD_NAME() -> bool:
"""Detect whether Python can start new threads.
Some WebAssembly platforms do not provide a working pthread
implementation. Thread support is stubbed and any attempt
to create a new thread fails.
- wasm32-wasi does not have threading.
- wasm32-emscripten can be compiled with or without pthread
support (-s USE_PTHREADS / __EMSCRIPTEN_PTHREADS__).
"""
if sys.platform == "emscripten":
return sys._emscripten_info.pthreads
elif sys.platform == "wasi":
return False
else:
# assume all other platforms have working thread support.
return True | [
1046,
447,
600
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(self, inv_quad_rhs=None, logdet=False, improper_logdet=False, add_diag=False):
# Set up
x = torch.randn(*self.__class__.matrix_shape[:-1], 3)
ls = torch.tensor(2.0).requires_grad_(True)
ls_clone = torch.tensor(2.0).requires_grad_(True)
mat = (x[..., :, None, :] - x[..., None, :, :]).pow(2.0).sum(dim=-1).mul(-0.5 * ls).exp()
mat_clone = (x[..., :, None, :] - x[..., None, :, :]).pow(2.0).sum(dim=-1).mul(-0.5 * ls_clone).exp()
if inv_quad_rhs is not None:
inv_quad_rhs.requires_grad_(True)
inv_quad_rhs_clone = inv_quad_rhs.detach().clone().requires_grad_(True)
mat_clone_with_diag = mat_clone
if add_diag:
mat_clone_with_diag = mat_clone_with_diag + torch.eye(mat_clone.size(-1))
if inv_quad_rhs is not None:
actual_inv_quad = mat_clone_with_diag.inverse().matmul(inv_quad_rhs_clone).mul(inv_quad_rhs_clone)
actual_inv_quad = actual_inv_quad.sum([-1, -2]) if inv_quad_rhs.dim() >= 2 else actual_inv_quad.sum()
if logdet:
flattened_tensor = mat_clone_with_diag.view(-1, *mat_clone.shape[-2:])
logdets = torch.cat([mat.logdet().unsqueeze(0) for mat in flattened_tensor])
if mat_clone.dim() > 2:
actual_logdet = logdets.view(*mat_clone.shape[:-2])
else:
actual_logdet = logdets.squeeze()
# Compute values with LinearOperator
_wrapped_cg = MagicMock(wraps=linear_operator.utils.linear_cg)
with linear_operator.settings.num_trace_samples(2000), linear_operator.settings.max_cholesky_size(
0
), linear_operator.settings.cg_tolerance(1e-5), linear_operator.settings.skip_logdet_forward(
improper_logdet
), patch(
"linear_operator.utils.linear_cg", new=_wrapped_cg
) as linear_cg_mock, linear_operator.settings.min_preconditioning_size(
0
), linear_operator.settings.max_preconditioner_size(
30
):
linear_op = DenseLinearOperator(mat)
if add_diag:
linear_op = linear_op.add_jitter(1.0)
res_inv_quad, res_logdet = linear_operator.inv_quad_logdet(
linear_op, inv_quad_rhs=inv_quad_rhs, logdet=logdet
)
# Compare forward pass
if inv_quad_rhs is not None:
self.assertAllClose(res_inv_quad, actual_inv_quad, rtol=1e-2)
if logdet and not improper_logdet:
self.assertAllClose(res_logdet, actual_logdet, rtol=1e-1, atol=2e-1)
# Backward
if inv_quad_rhs is not None:
actual_inv_quad.sum().backward(retain_graph=True)
res_inv_quad.sum().backward(retain_graph=True)
if logdet:
actual_logdet.sum().backward()
res_logdet.sum().backward()
self.assertAllClose(ls.grad, ls_clone.grad, rtol=1e-2, atol=1e-2)
if inv_quad_rhs is not None:
self.assertAllClose(inv_quad_rhs.grad, inv_quad_rhs_clone.grad, rtol=2e-2, atol=1e-2)
# Make sure CG was called
self.assertTrue(linear_cg_mock.called) | [
9,
5862,
5825,
8290
] |
def METHOD_NAME(self, server):
"""Creates an inventory service and registers it in the server.
Args:
server (object): Server to register service to.
Returns:
object: The instantiated gRPC service for inventory.
"""
service = GrpcInventory(
inventory_api=inventory.Inventory(
self.config))
inventory_pb2_grpc.add_InventoryServicer_to_server(service, server)
return service | [
129,
61,
372,
549
] |
def METHOD_NAME(radius_authport, radius_acctport):
"""
Start the dummy radius server
We need to start the radius server for every test, since every test
instantiates a new TestClass and thus the radius server process will
not be accessible outside of a test anymore
"""
import subprocess
try:
radius_server_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"..",
"tools",
"dummy_radius_server.py",
)
dictionary_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"..",
"..",
"config",
"dictionary",
)
proc = subprocess.Popen(
[
radius_server_file,
"--dict",
dictionary_file,
"--authport",
radius_authport,
"--acctport",
radius_acctport,
]
)
except Exception as exx:
raise exx
assert proc is not None
return proc | [
447,
3662,
163
] |
def METHOD_NAME(node, name):
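# Return the first child element of node whose tag matches name, or None if absent.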
for i in node.childNodes:
if (i.nodeType == i.ELEMENT_NODE) and (i.tagName == name):
return i
return None | [
186,
1716
] |
def METHOD_NAME(self, l, key):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda item: [convert(c) for c in re.split('([0-9]+)', key(item))]
return sorted(l, key=alphanum_key) | [
1389,
6026
] |
def METHOD_NAME(self):
# 1. known service -> composite url fields are filled
self.besluit_known.refresh_from_db()
self.bio_known.refresh_from_db()
self.assertEqual(self.besluit_known._besluittype_base_url, self.ztc_known)
self.assertEqual(
self.besluit_known._besluittype_relative_url,
"besluittypen/56750100-c537-45cb-a1d8-f39c2385a868",
)
self.assertEqual(self.besluit_known._zaak_base_url, self.zrc_known)
self.assertEqual(
self.besluit_known._zaak_relative_url,
"zaken/7ebd86f8-ce22-4ecf-972b-b2ac20b219c0",
)
self.assertEqual(self.bio_known._informatieobject_base_url, self.drc_known)
self.assertEqual(
self.bio_known._informatieobject_relative_url,
"informatieobjecten/50c9a565-51dc-49ec-804c-99ac72f9ae6e",
)
# 2. unknown service -> composite url fields are filled and services are created
Service = self.apps.get_model("zgw_consumers", "Service")
self.assertTrue(
Service.objects.filter(
api_root="https://andere.catalogus.nl/api/v1/"
).exists()
)
self.assertTrue(
Service.objects.filter(api_root="https://andere.zaken.nl/api/v1/").exists()
)
self.assertTrue(
Service.objects.filter(
api_root="https://andere.documenten.nl/api/v1/"
).exists()
)
ztc_new = Service.objects.get(api_root="https://andere.catalogus.nl/api/v1/")
zrc_new = Service.objects.get(api_root="https://andere.zaken.nl/api/v1/")
drc_new = Service.objects.get(api_root="https://andere.documenten.nl/api/v1/")
for service in [ztc_new, zrc_new, drc_new]:
self.assertEqual(service.label, "FIXME")
self.assertEqual(service.api_type, APITypes.orc)
self.besluit_unknown.refresh_from_db()
self.bio_unknown.refresh_from_db()
self.assertEqual(self.besluit_unknown._besluittype_base_url.id, ztc_new.id)
self.assertEqual(
self.besluit_unknown._besluittype_relative_url,
"besluittypen/56750100-c537-45cb-a1d8-f39c2385a868",
)
self.assertEqual(self.besluit_unknown._zaak_base_url.id, zrc_new.id)
self.assertEqual(
self.besluit_unknown._zaak_relative_url,
"zaken/7ebd86f8-ce22-4ecf-972b-b2ac20b219c0",
)
self.assertEqual(self.bio_unknown._informatieobject_base_url.id, drc_new.id)
self.assertEqual(
self.bio_unknown._informatieobject_relative_url,
"informatieobjecten/50c9a565-51dc-49ec-804c-99ac72f9ae6e",
) | [
9,
3209,
2248,
7133
] |
def METHOD_NAME(config: dict) -> dict:
def _process(obj: Any) -> Any:
if isinstance(obj, dict):
new_obj = {}
for k, v in obj.items():
if isinstance(k, str) and k.startswith(WRITE_TO_FILE_DIRECTIVE_PREFIX):
# This writes the value to a temporary file and replaces the value with the path to the file.
config_option = k[len(WRITE_TO_FILE_DIRECTIVE_PREFIX) :]
with tempfile.NamedTemporaryFile("w", delete=False) as f:
filepath = f.name
f.write(v)
new_obj[config_option] = filepath
else:
new_obj[k] = _process(v)
return new_obj
else:
return obj
return _process(config) | [
356,
10533
] |
def METHOD_NAME(encrypted_data_key):
"""
_cfg_data_key returns the aws_kms:data_key from configuration.
"""
config = {"aws_kms": {"data_key": encrypted_data_key}}
with patch.dict(aws_kms.__salt__, {"config.get": config.get}):
assert (
aws_kms._cfg_data_key() == encrypted_data_key
), "_cfg_data_key did not return the data key configured in __salt__."
with patch.dict(aws_kms.__opts__, config):
assert (
aws_kms._cfg_data_key() == encrypted_data_key
), "_cfg_data_key did not return the data key configured in __opts__." | [
9,
2610,
365,
59
] |
async def METHOD_NAME(
container:
'FutureResult[Callable[[_ValueType], _NewValueType], _ErrorType]',
inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
"""Async maps a function over a value."""
return (await inner_value).apply((await container)._inner_value) | [
958,
231
] |
def METHOD_NAME(args, packet):
ip6 = packet.getlayer(sp.IPv6)
if not ip6:
return False
oip6 = sp.IPv6(src=args.src[0], dst=args.to[0])
if ip6.dst != oip6.src:
return False
icmp6 = packet.getlayer(sp.ICMPv6ParamProblem)
if not icmp6:
return False
# ICMP6_PARAMPROB_HEADER 0
if icmp6.code != 0:
return False
# Should we check the payload as well?
# We are running in a very isolated environment and nothing else
# should trigger an ICMPv6 Param Prob so leave it.
#icmp6.display()
return True | [
250,
5425,
168
] |
def METHOD_NAME(job):
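# Assuming a Kubernetes Job: status.succeeded is a pod count, so == True holds only when exactly one pod succeeded.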
return job.status.succeeded == True | [
202,
3372
] |
def METHOD_NAME(self, model: Model, file_name: str):
"""Save a Model instance to a spec file.
**NOT IMPLEMENTED**
Parameters
----------
model: Model
Model instance to save to specs file.
file_name: str
File to write the model specs to.
.. # noqa: DAR101
.. # noqa: DAR401
"""
raise NotImplementedError(f"Cannot save models with format {self.format!r}") | [
73,
578
] |
def METHOD_NAME(self) -> None:
"""Test quad form with a parameter.
"""
P = cp.Parameter((2, 2), PSD=True)
Q = np.eye(2)
x = cp.Variable(2)
cost = cp.quad_form(x, P)
P.value = Q
prob = cp.Problem(cp.Minimize(cost), [x == [1, 2]])
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assertAlmostEqual(prob.solve(solver=cp.SCS), 5) | [
9,
49,
5825,
1029
] |
def METHOD_NAME():
return str(FIXTURES_FOLDER / "simple/swagger.yaml") | [
1457,
171
] |
async def METHOD_NAME(test_service: TestInterface, dbus_session_bus: MessageBus):
"""Test initialize for reusing connected dbus object."""
proxy = DBusInterface()
proxy.bus_name = "service.test.TestInterface"
proxy.object_path = "/service/test/TestInterface"
assert proxy.is_connected is False
# Not connected
with pytest.raises(ValueError, match="must be a connected DBus object"):
await proxy.initialize(
DBus(
dbus_session_bus,
"service.test.TestInterface",
"/service/test/TestInterface",
)
)
# Connected to wrong bus
await dbus_session_bus.request_name("service.test.TestInterface2")
with pytest.raises(
ValueError,
match="must be a DBus object connected to bus service.test.TestInterface and object /service/test/TestInterface",
):
await proxy.initialize(
await DBus.connect(
dbus_session_bus,
"service.test.TestInterface2",
"/service/test/TestInterface",
)
)
# Connected to wrong object
test_service_2 = TestInterface("/service/test/TestInterface/2")
test_service_2.export(dbus_session_bus)
with pytest.raises(
ValueError,
match="must be a DBus object connected to bus service.test.TestInterface and object /service/test/TestInterface",
):
await proxy.initialize(
await DBus.connect(
dbus_session_bus,
"service.test.TestInterface",
"/service/test/TestInterface/2",
)
)
# Connected to correct object on the correct bus
await proxy.initialize(
await DBus.connect(
dbus_session_bus,
"service.test.TestInterface",
"/service/test/TestInterface",
)
)
assert proxy.is_connected is True | [
9,
15
] |
def METHOD_NAME(traj2):
traj2_line1 = traj2.atom_slice((0, 1))
traj2_line2 = traj2.atom_slice((2, 3))
directors = order._compute_director(traj2_line1)
assert eq(directors,
np.array([[1, 0, 0], # Frame 0
[0, 1, 0], # Frame 1
[0, 0, 1], # Frame 2
[1, 0, 0], # Frame 3
[1, 0, 0], # Frame 4
]))
directors = order._compute_director(traj2_line2)
assert eq(directors,
np.array([[1, 0, 0], # Frame 0
[0, 1, 0], # Frame 1
[0, 0, 1], # Frame 2
[0, 1, 0], # Frame 3
[0, 0, 1], # Frame 4
])) | [
9,
-1
] |
def METHOD_NAME(self, module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
save(self, module_file, content) | [
129,
334,
298,
533,
465
] |
def METHOD_NAME(self): | [
9,
13947,
1116
] |
def METHOD_NAME(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME(self, *args, **kwargs):
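# Closure over f and lock_name (likely from a locking decorator): run f under the instance's named lock.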
return getattr(self, lock_name).run(f, self, *args, **kwargs) | [
291
] |
def METHOD_NAME(wav_name, x, freq):
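# Clamp samples to [-1, 1], convert to 16-bit PCM, and write a mono WAV at the given sample rate.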
x = np.clip(x, -1, +1)
x = (x*np.iinfo(np.int16).max).astype(np.int16)
with wave.open(wav_name, "wb") as wav:
wav.setnchannels(1)
wav.setframerate(freq)
wav.setsampwidth(2)
wav.writeframes(x.tobytes()) | [
4097,
77
] |
def METHOD_NAME(test_client_factory):
app = Starlette(
routes=[
Route("/view_session", endpoint=view_session),
Route("/update_session", endpoint=update_session, methods=["POST"]),
],
middleware=[Middleware(SessionMiddleware, secret_key="example", max_age=None)],
)
client: TestClient = test_client_factory(app)
response = client.post("/update_session", json={"some": "data"})
assert response.json() == {"session": {"some": "data"}}
# check cookie max-age
set_cookie = response.headers["set-cookie"]
assert "Max-Age" not in set_cookie
client.cookies.delete("session")
response = client.get("/view_session")
assert response.json() == {"session": {}} | [
9,
240,
4177
] |
def METHOD_NAME(self):
"""Define custom configure options for MVAPICH2."""
# additional configuration options
add_configopts = []
if self.cfg['rdma_type']:
add_configopts.append('--with-rdma=%s' % self.cfg['rdma_type'])
# enable specific support options (if desired)
if self.cfg['withmpe']:
# --enable-mpe is a configure option of MPICH itself.
# It is not available anymore in the MPICH package since version 3.0, which corresponds to MVAPICH2 1.9.
# MPE can be downloaded separately at http://www.mpich.org/static/mpe/downloads/
# However, the 'withmpe' option should be maintained for backward compatibility purpose
if LooseVersion(self.version) < LooseVersion('1.9'):
add_configopts.append('--enable-mpe')
else:
raise EasyBuildError("MPI Parallel Environment (MPE) is not available anymore starting MVAPICH2 1.9")
if self.cfg['withlimic2']:
add_configopts.append('--enable-limic2')
if self.cfg['withchkpt']:
add_configopts.extend(['--enable-ckpt'])
# The --with-hwloc/--without-hwloc option is not available anymore in MVAPICH2 >= 2.0. Starting with this version,
# hwloc is apparently distributed with MVAPICH2 and always compiled with it, and it cannot be disabled.
# This check happens only if 'withhwloc = False' is explicitly specified in an easyconfig with MVAPICH2 >= 2.0
if LooseVersion(self.version) >= LooseVersion('2.0'):
if self.cfg['withhwloc']:
self.log.debug("hwloc support is always enabled in MVAPICH >= 2.0, nothing to do")
else:
raise EasyBuildError("Disabling hwloc is not supported in MVAPICH >= 2.0")
else:
if self.cfg['withhwloc']:
add_configopts.append('--with-hwloc')
else:
add_configopts.append('--without-hwloc')
# pass BLCR paths if specified
if self.cfg['blcr_path']:
add_configopts.append('--with-blcr=%s' % self.cfg['blcr_path'])
if self.cfg['blcr_inc_path']:
add_configopts.append('--with-blcr-include=%s' % self.cfg['blcr_inc_path'])
if self.cfg['blcr_lib_path']:
add_configopts.append('--with-blcr-libpath=%s' % self.cfg['blcr_lib_path'])
self.cfg.update('configopts', ' '.join(add_configopts))
super(EB_MVAPICH2, self).METHOD_NAME() | [
111,
367
] |
def METHOD_NAME(self):
self.declare_artifact(
self.source.url_path,
sources=list(self.source.iter_source_filenames())) | [
3117,
5277
] |
def METHOD_NAME(
registration, user, user_api_client
):
signup_group = SignUpGroupFactory(registration=registration)
user.get_default_organization().admin_users.remove(user)
RegistrationUserAccessFactory(registration=registration, email=user.email)
assert_get_detail(user_api_client, signup_group.id) | [
9,
2213,
21,
1089,
1046,
19,
3569
] |
def METHOD_NAME(self, arg):
if arg.is_extended_real:
return (sign(arg)+1)/2 | [
1171,
2887,
947,
2452
] |
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Connection name.",
required=True,
id_part="name",
)
return cls._args_schema | [
56,
134,
135
] |
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.environment_name = AAZStrArg(
options=["-n", "--name", "--environment-name"],
help="The name of the Time Series Insights environment associated with the specified resource group.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema | [
56,
134,
135
] |
def METHOD_NAME(self): | [
9,
226,
1867,
1572,
532,
734,
733
] |
def METHOD_NAME(np_image):
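# Wrap a NumPy image array in an Open3D geometry.Image.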
return o3d.geometry.Image(np_image) | [
1053,
660
] |
def METHOD_NAME(self):
mapping = [
("out_invoice", [(100, ["s_iva10b"]), (200, ["s_iva21s"])], {}),
("out_invoice", [(100, ["s_iva10b"]), (200, ["s_iva0_ns"])], {}),
(
"out_invoice",
[(100, ["s_iva10b", "s_req014"]), (200, ["s_iva21s", "s_req52"])],
{},
),
(
"out_refund",
[(100, ["s_iva10b"]), (100, ["s_iva10b"]), (200, ["s_iva21s"])],
{},
),
("out_invoice", [(100, ["s_iva0_sp_i"]), (200, ["s_iva0_ic"])], {}),
("out_refund", [(100, ["s_iva0_sp_i"]), (200, ["s_iva0_ic"])], {}),
("out_invoice", [(100, ["s_iva_e"]), (200, ["s_iva0_e"])], {}),
("out_refund", [(100, ["s_iva_e"]), (200, ["s_iva0_e"])], {}),
(
"in_invoice",
[(100, ["p_iva10_bc", "p_irpf19"]), (200, ["p_iva21_sc", "p_irpf19"])],
{
"ref": "sup0001",
"date": "2020-02-01",
"sii_account_registration_date": "2020-10-01",
},
),
(
"in_refund",
[(100, ["p_iva10_bc"])],
{"ref": "sup0002", "sii_account_registration_date": "2020-10-01"},
),
(
"in_invoice",
[(100, ["p_iva10_bc", "p_req014"]), (200, ["p_iva21_sc", "p_req52"])],
{"ref": "sup0003", "sii_account_registration_date": "2020-10-01"},
),
(
"in_invoice",
[(100, ["p_iva21_sp_ex"])],
{"ref": "sup0004", "sii_account_registration_date": "2020-10-01"},
),
(
"in_invoice",
[(100, ["p_iva0_ns"]), (200, ["p_iva10_bc"])],
{"ref": "sup0005", "sii_account_registration_date": "2020-10-01"},
),
# Out invoice with currency
("out_invoice", [(100, ["s_iva10b"])], {"currency_id": self.usd.id}),
# Out invoice with currency and with not included in total amount
(
"out_invoice",
[(100, ["s_iva10b", "s_irpf1"])],
{"currency_id": self.usd.id},
),
# In invoice with currency
(
"in_invoice",
[(100, ["p_iva10_bc"])],
{
"ref": "sup0006",
"sii_account_registration_date": "2020-10-01",
"currency_id": self.usd.id,
},
),
# In invoice with currency and with not included in total amount
(
"in_invoice",
[(100, ["p_iva10_bc", "p_irpf1"])],
{
"ref": "sup0007",
"sii_account_registration_date": "2020-10-01",
"currency_id": self.usd.id,
},
),
# Intra-community supplier refund with ImporteTotal with "one side"
(
"in_refund",
[(100, ["p_iva21_sp_in"])],
{"ref": "sup0008", "sii_account_registration_date": "2020-10-01"},
),
]
for inv_type, lines, extra_vals in mapping:
self._create_and_test_invoice_sii_dict(
inv_type, lines, extra_vals, module="l10n_es_vat_prorate"
)
return | [
9,
19,
2486,
365
] |
def METHOD_NAME(self, rec, position, model=None):
if not model:
model = GameModel()
model.tags["Event"] = rec["Event"]
model.tags["Site"] = rec["Site"]
model.tags["Date"] = self.get_date(rec)
model.tags["Round"] = ""
model.tags["White"] = "?"
model.tags["Black"] = "?"
model.tags["Termination"] = rec["Termination"]
fen = rec["FEN"]
model.tags["FEN"] = fen
model.boards = [model.variant(setup=fen)]
model.variations = [model.boards]
model.status = WAITING_TO_START
return model | [
557,
24,
578
] |
def METHOD_NAME(model: ov.Model, val_loader: torch.utils.data.DataLoader) -> float:
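# Compile the OpenVINO model, run inference over the validation loader, and return top-1 accuracy.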
predictions = []
references = []
compiled_model = ov.compile_model(model)
output = compiled_model.outputs[0]
for images, target in tqdm(val_loader):
pred = compiled_model(images)[output]
predictions.append(np.argmax(pred, axis=1))
references.append(target)
predictions = np.concatenate(predictions, axis=0)
references = np.concatenate(references, axis=0)
return accuracy_score(predictions, references) | [
187
] |
def METHOD_NAME(test, props, keys, row, col):
test.start_server(get_app(props))
target = test.table("table")
c1 = target.cell(3, 1)
c2 = target.cell(row, col)
# focus on cell content
c1.double_click()
assert c1.is_focused()
assert c1.is_value_focused()
# moves to next cell
test.send_keys(keys)
# unfocus previous cell & content, focus new cell but not content
assert not c1.is_focused()
assert not c1.is_value_focused()
assert c2.is_focused()
assert not c2.is_value_focused()
assert test.get_log_errors() == [] | [
9,
-1,
264,
118,
69,
7576
] |
def METHOD_NAME(self) -> str:
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(
a: T.Buffer((1, 32), "int32"), b: T.Buffer((1, 32), "int32")
): | [
285,
2376,
41,
-1
] |
def METHOD_NAME(self, data_sample, **kwargs):
"""Get text instance predictions of one image.
Args:
pred_result (tuple(Tensor)): Prediction results of an image.
data_sample (TextDetDataSample): Datasample of an image.
**kwargs: Other parameters. Configurable via ``__init__.train_cfg``
and ``__init__.test_cfg``.
Returns:
TextDetDataSample: A new DataSample with predictions filled in.
The polygon/bbox results are usually saved in
``TextDetDataSample.pred_instances.polygons`` or
``TextDetDataSample.pred_instances.bboxes``. The confidence scores
are saved in ``TextDetDataSample.pred_instances.scores``.
"""
data_sample = data_sample.cpu().numpy()
pred_instances = data_sample.pred_instances
data_sample.pred_instances.polygons = list(
map(bezier2poly, pred_instances.beziers))
return data_sample | [
19,
526,
2553
] |
def METHOD_NAME(cmd):
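# For a d.text call with no inline text, read instructions from stdin into a temporary file and pass it as the input option.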
opt = None
if (
cmd[0] == "d.text"
and "text" not in cmd[1]
and ("input" not in cmd[1] or cmd[1]["input"] == "-")
):
if sys.stdin.isatty():
sys.stderr.write(
"\nPlease enter text instructions."
" Enter EOF (ctrl-d) on last line to quit.\n"
)
opt = "input"
if opt:
tmpfile = tempfile.NamedTemporaryFile(dir=path).name + ".txt"
fd = open(tmpfile, "w")
while 1:
line = sys.stdin.readline()
if not line:
break
fd.write(line)
fd.close()
cmd[1][opt] = tmpfile | [
203,
2195
] |
def METHOD_NAME(self):
P_loss_kWh = self.P_loss_kWh.fillna(value=0)
P_loss_kWh = pd.DataFrame(P_loss_kWh.sum(axis=0), columns=['P_loss_kWh']) # format individual loss data
Q_loss_kWh = abs(self.thermal_loss_edges_kWh).fillna(value=0)
Q_loss_kWh = pd.DataFrame(Q_loss_kWh.sum(axis=0), columns=['Q_loss_kWh']) # format individual loss data
total_df = pd.DataFrame(P_loss_kWh.values + Q_loss_kWh.values, index=Q_loss_kWh.index,
columns=['total']) # calculate total loss
merged_df = P_loss_kWh.join(Q_loss_kWh).join(total_df)
anchors = []
load_names = []
median = []
peak = []
total_perc = []
for field in ['P_loss_kWh', 'Q_loss_kWh']:
# calculate graph
anchors.append(', '.join(calc_top_three_anchor_loads(merged_df, field)))
load_names.append(NAMING[field]) # get correct name
median.append(round(merged_df[field].median(), 2)) # calculate median
peak.append(round(merged_df[field].abs().max(), 2)) # calculate peak value
local_total = round(merged_df[field].sum(), 2) # calculate total for this building
total_perc.append(str(local_total) + " (" + str(
min((local_total / total_df.sum().values * 100).round(1),
100.0)) + " %)") # transform value to percentage
column_names = ['Loss Name', 'Total [kWh/yr]', 'Peak [kWh/yr]', 'Median [kWh/yr]', 'Highest 3 Losses']
column_values = [load_names, total_perc, peak, median, anchors]
table_df = pd.DataFrame({cn: cv for cn, cv in zip(column_names, column_values)}, columns=column_names)
return table_df | [
1407,
410
] |
def METHOD_NAME(self) -> None:
expected_topic = "test / test-branch"
expected_message = "kostekIV created [test-branch](https://try.gitea.io/kostekIV/test/src/test-branch) branch."
self.check_webhook("create__branch", expected_topic, expected_message) | [
9,
80,
3653
] |
def METHOD_NAME(self, node, node_to_id):
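# Emit a "Func" visualization node plus an edge from the function body to it.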
node_id = node_to_id[node]
viz_node = VizNode(node_id, "Func", str(node.params))
viz_edges = [VizEdge(node_to_id[node.body], node_id)]
return viz_node, viz_edges | [
559
] |
def METHOD_NAME(self):
"""
The Amplifier object from this bundle.
"""
return self._get_item_instance('.amp') | [
8106
] |
def METHOD_NAME(self, data: bytearray):
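# Intentional no-op; presumably overridden by subclasses that consume the data.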
pass | [
77
] |
def METHOD_NAME(self):
"""Test cutting out an ellipse."""
data = self._setup_cutout_test(self.dc.Ellipse(49.5, 49.5, 12.0, 30.0))
assert data.shape == (61, 25)
assert np.ma.count_masked(data) == 389 | [
9,
1979,
279,
3586
] |
def METHOD_NAME(self, beforeTaskName, *args):
"""@brief Insert a task or tasks before a named task.
@param beforeTaskName The name of an existing task. The new tasks will be inserted
prior to this task.
After the task name parameter, any number of task description 2-tuples may be
passed.
@exception KeyError Raised if the named task does not exist in the sequence.
"""
self._validate_tasks(args)
if not self.has_task(beforeTaskName):
raise KeyError(beforeTaskName)
seq = list(self._calls.items())
for i, v in enumerate(seq):
if v[0] == beforeTaskName:
for c in args:
# List insert() inserts before the given index.
seq.insert(i, c)
i += 1
break
self._calls = OrderedDict(seq)
return self | [
408,
1553
] |
def METHOD_NAME(self): | [
9,
90,
61,
1195,
1097
] |
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags") | [
114
] |
def METHOD_NAME(member):
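# Predicate for inspect.getmembers: keep public functions (names not starting with "_").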
return inspect.isfunction(member) and not member.__name__.startswith('_') | [
427,
503
] |
def METHOD_NAME(self, cmd: str, fp: SupportsReadline[bytes], callback: Callable[[bytes], object] | None = None) -> str: ... | [
-1
] |
def METHOD_NAME(self, metric_name):
# The metric reports 1 if connected and 0 if not. The mapping used here:
#   value 1 (connected)     -> OK
#   value 0 (not connected) -> Critical
#   anything else           -> Unknown
status_map = defaultdict(lambda: ServiceCheck.UNKNOWN)
status_map[0], status_map[1] = ServiceCheck.CRITICAL, ServiceCheck.OK
service_check_method = self.service_check
def argocd_cluster_connection_status_transformer(_metric, sample_data, _runtime_data):
for sample, tags, hostname in sample_data:
service_check_method(metric_name, status_map[int(sample.value)], hostname=hostname, tags=tags)
return argocd_cluster_connection_status_transformer | [
111,
1052,
-1,
2059,
550,
452
] |
def METHOD_NAME(obj):
return obj.local_end | [
19,
1798
] |
def METHOD_NAME():
# A Mobject with no points and no submobjects has no dimensions
empty = Mobject()
assert empty.width == 0
assert empty.height == 0
assert empty.depth == 0
has_points = Mobject()
has_points.points = np.array([[-1, -2, -3], [1, 3, 5]])
assert has_points.width == 2
assert has_points.height == 5
assert has_points.depth == 8
rect = Rectangle(width=3, height=5)
assert rect.width == 3
assert rect.height == 5
assert rect.depth == 0
# Dimensions should be recalculated after scaling
rect.scale(2.0)
assert rect.width == 6
assert rect.height == 10
assert rect.depth == 0
# Dimensions should not be dependent on location
rect.move_to([-3, -4, -5])
assert rect.width == 6
assert rect.height == 10
assert rect.depth == 0
circ = Circle(radius=2)
assert circ.width == 4
assert circ.height == 4
assert circ.depth == 0 | [
9,
11324,
5164,
97,
11324
] |
def METHOD_NAME(self):
images = irsa_dust.core.IrsaDust().get_images("m81")
assert images is not None | [
9,
19,
3669,
89
] |
def METHOD_NAME(self, context):
self.inputs.new('SvVerticesSocket', "Location")
self.inputs.new('SvStringsSocket', "Text").prop_name = 'text'
self.inputs.new('SvStringsSocket', "Font Size").prop_name = 'font_size'
self.inputs.new('SvStringsSocket', "Angle").prop_name = 'angle'
self.inputs.new('SvSvgSocket', "Fill / Stroke")
self.outputs.new('SvSvgSocket', "SVG Objects") | [
2153,
176
] |
def METHOD_NAME(self, stage, stage_loss, epoch=None):
"""Gets called at the end of a stage."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
elif stage == sb.Stage.VALID:
print("Completed epoch %d" % epoch)
print("Train SI-SNR: %.3f" % -self.train_loss)
print("Valid SI-SNR: %.3f" % -stage_loss)
elif stage == sb.Stage.TEST:
print("Test SI-SNR: %.3f" % -stage_loss) | [
69,
3164,
1798
] |
def METHOD_NAME(self):
self.enrolled_device.awaiting_configuration = True
cmd = DeviceConfigured.create_for_device(self.enrolled_device)
cmd.process_response(
{"UDID": self.enrolled_device.udid,
"Status": "Acknowledged",
"CommandUUID": str(cmd.uuid).upper()},
self.dep_enrollment_session,
self.mbu
)
cmd.db_command.refresh_from_db()
self.assertEqual(cmd.status, Command.Status.ACKNOWLEDGED)
self.assertEqual(cmd.db_command.status, Command.Status.ACKNOWLEDGED)
self.enrolled_device.refresh_from_db()
self.assertFalse(self.enrolled_device.awaiting_configuration) | [
9,
356,
6376,
17
] |
def METHOD_NAME(self):
sm = SnapshotSession(scope_key="A", verify=True, file_path="", update=False)
sm.recorded_state = {"key_a": {"a": 3}}
sm.match("key_a", {"a": 3})
sm._assert_all() | [
9,
53,
2443,
13360
] |
def METHOD_NAME(batch: SLCWABatch) -> int: # noqa: D102
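# Batch size taken as the number of rows of the batch's first tensor (the positive triples in SLCWA training).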
return batch[0].shape[0] | [
19,
2277,
1318
] |
def METHOD_NAME(self, base_quantity_value):
self.RectHeight = base_quantity_value | [
2560,
1877,
1180
] |
def METHOD_NAME(self):
self.model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for _batch_idx, (inputs, targets) in enumerate(self.testloader):
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
self.accuracy = 100. * correct / total | [
9,
1751
] |
def METHOD_NAME(pred):
"""Parse a single version comparison.
Return (comparison string, StrictVersion)
"""
res = re_splitComparison.match(pred)
if not res:
raise ValueError("bad package restriction syntax: %r" % pred)
comp, verStr = res.groups()
return (comp, distutils.version.StrictVersion(verStr)) | [
265,
1
] |
def METHOD_NAME(): | [
203,
894,
631
] |
async def METHOD_NAME(pipeline_response):
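# Deserialize one page of an Azure operations listing and return (continuation token, items); here the token is always None.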
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem) | [
297,
365
] |
def METHOD_NAME(self):
"""Test JWKS request with RS256"""
provider = OAuth2Provider.objects.create(
name="test",
client_id="test",
authorization_flow=create_test_flow(),
redirect_uris="http://local.invalid",
signing_key=create_test_cert(),
)
app = Application.objects.create(name="test", slug="test", provider=provider)
response = self.client.get(
reverse("authentik_providers_oauth2:jwks", kwargs={"application_slug": app.slug})
)
body = json.loads(response.content.decode())
self.assertEqual(len(body["keys"]), 1)
PyJWKSet.from_dict(body)
key = body["keys"][0]
load_der_x509_certificate(base64.b64decode(key["x5c"][0]), default_backend()).public_key() | [
9,
-1
] |
def METHOD_NAME(self):
self.memH_test_template("omptriangle") | [
9,
13533,
13534
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
return self.build_paging(self._execute_operations, self._output) | [
1519
] |
def METHOD_NAME( # type: ignore[override]
self, key: _KT, default: _VT = DEFAULT_NONE # type: ignore[assignment]
) -> _VT:
if key in self:
self.__track_write(key)
if isinstance(default, DefaultValue):
return super().METHOD_NAME(key)
return super().METHOD_NAME(key, default=default) | [
760
] |
def METHOD_NAME():
if not _running_on_ci():
raise RuntimeError('Pipeline-metadata is only available if running on CI infrastructure')
current_cfg_set_name = check_env('CONCOURSE_CURRENT_CFG')
team_name = check_env('CONCOURSE_CURRENT_TEAM')
pipeline_name = check_env('PIPELINE_NAME')
job_name = check_env('BUILD_JOB_NAME')
return PipelineMetaData(
pipeline_name=pipeline_name,
job_name=job_name,
current_config_set_name=current_cfg_set_name,
team_name=team_name,
) | [
19,
1148,
773
] |
def METHOD_NAME(self):
"""Returns the natural logarithm of the quaternion.
(not tested)
"""
# Init
norm = self.norm()
vecNorm = self.x**2 + self.y**2 + self.z**2
tmp = self.w / norm
q = Quaternion()
# Calculate
q.w = np.METHOD_NAME(norm)
q.x = np.METHOD_NAME(norm) * self.x * np.arccos(tmp) / vecNorm
q.y = np.METHOD_NAME(norm) * self.y * np.arccos(tmp) / vecNorm
q.z = np.METHOD_NAME(norm) * self.z * np.arccos(tmp) / vecNorm
return q | [
390
] |
def METHOD_NAME() -> Iterator[Mapping]:
for reg, text in linereg.items():
for row in text.splitlines():
if line := row.strip():
tokens = coalesce(
self._unifying_chars,
include_syms=True,
backwards=False,
chars=line,
)
if word := next(tokens, None):
yield {"register": reg, "word": word, "line": line} | [
9572
] |
def METHOD_NAME(self):
response = '{ "stdout": "Hi", "returncode": 0 }'
expected_stdout = "Hi"
expected_returncode = "0"
stdout, returncode = passive.ncpacheck.NCPACheck.handle_agent_response(response)
self.assertEqual(expected_returncode, returncode)
self.assertEqual(expected_stdout, stdout)
invalid_json = "234 } : }{fl;"
stdout, returncode = passive.ncpacheck.NCPACheck.handle_agent_response(invalid_json)
self.assertIsNone(stdout)
self.assertIsNone(returncode) | [
9,
276,
1849,
17
] |
def METHOD_NAME(self):
pattern_view = self.widget.pattern_widget
self.load_phase('au_Anderson.jcpds')
num_line_items = len(pattern_view.phases[0].line_items)
self.model.phase_model.delete_reflection(0, 0)
self.assertEqual(len(pattern_view.phases[0].line_items),
num_line_items-1) | [
9,
34,
4974
] |
def METHOD_NAME(self):
self.cline += 1
line = self.lines[self.cline]
self.format = {}
while line.find('END') == -1:
self.cline += 1
line = self.lines[self.cline] | [
203,
-1
] |
def METHOD_NAME(self):
miniwallet = MiniWallet(self.nodes[0])
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 200)
txid1 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
txid2 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
# This will raise an exception because the transaction is not yet in a block
assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
self.generate(self.nodes[0], 1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = miniwallet.get_utxo(txid=txid2) # Get the change from txid2
tx3 = miniwallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=txin_spent)
txid3 = tx3['txid']
self.generate(self.nodes[0], 1)
txid_spent = txin_spent["txid"]
txid_unspent = txid1 # Input was change from txid2, so txid1 should be unspent
# Invalid txids
assert_raises_rpc_error(-8, "txid must be of length 64 (not 32, for '00000000000000000000000000000000')", self.nodes[0].gettxoutproof, ["00000000000000000000000000000000"], blockhash)
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].gettxoutproof, ["ZZZ0000000000000000000000000000000000000000000000000000000000000"], blockhash)
# Invalid blockhashes
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 32, for '00000000000000000000000000000000')", self.nodes[0].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].gettxoutproof, [txid_spent], "ZZZ0000000000000000000000000000000000000000000000000000000000000")
# We can't find the block from a fully-spent tx
assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid_spent])
# We can get the proof if we specify the block
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# We can't get the proof if we specify a non-existent block
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].gettxoutproof, [txid_spent], "0000000000000000000000000000000000000000000000000000000000000000")
# We can get the proof if the transaction is unspent
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_unspent])), [txid_unspent])
# We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2]))), sorted(txlist))
assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid2, txid1]))), sorted(txlist))
# We can always get a proof if we have a -txindex
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[1].gettxoutproof([txid_spent])), [txid_spent])
# We can't get a proof if we specify transactions from different blocks
assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[0].gettxoutproof, [txid1, txid3])
# Test empty list
assert_raises_rpc_error(-8, "Parameter 'txids' cannot be empty", self.nodes[0].gettxoutproof, [])
# Test duplicate txid
assert_raises_rpc_error(-8, 'Invalid parameter, duplicated txid', self.nodes[0].gettxoutproof, [txid1, txid1])
# Now we'll try tweaking a proof.
proof = self.nodes[1].gettxoutproof([txid1, txid2])
assert txid1 in self.nodes[0].verifytxoutproof(proof)
assert txid2 in self.nodes[1].verifytxoutproof(proof)
tweaked_proof = from_hex(CMerkleBlock(), proof)
# Make sure that our serialization/deserialization is working
assert txid1 in self.nodes[0].verifytxoutproof(tweaked_proof.serialize().hex())
# Check to see if we can go up the merkle tree and pass this off as a
# single-transaction block
tweaked_proof.txn.nTransactions = 1
tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
tweaked_proof.txn.vBits = [True] + [False]*7
for n in self.nodes:
assert not n.verifytxoutproof(tweaked_proof.serialize().hex())
# TODO: try more variants, eg transactions at different depths, and
# verify that the proofs are invalid | [
22,
9
] |
def METHOD_NAME(self):
return emformer_rnnt_model(**self._get_model_config()).to(device=self.device, dtype=self.dtype).eval() | [
19,
578
] |
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
) | [
69,
1072
] |
def METHOD_NAME(
self, models: list[EntityTypeSchema], owned_by_id: UUID
) -> MaybeListOfOntologyElementMetadata:
"""Create entity types."""
return async_to_sync(self.inner.METHOD_NAME(models, owned_by_id)) | [
129,
2419,
119
] |
def METHOD_NAME(wf):
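# Serialized-workflow migration: rename BoundaryEventParent specs to BoundaryEventSplit and graft a BoundaryEventJoin child onto each branch, deriving the join state from the old child states.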
new_tasks = {}
for task in wf['tasks'].values():
if task['task_spec'].endswith('BoundaryEventParent'):
task['task_spec'] = task['task_spec'].replace('BoundaryEventParent', 'BoundaryEventSplit')
completed = all([ wf['tasks'][child]['state'] in [64, 256] for child in task['children'] ])
for child in task['children']:
child_task = wf['tasks'][child]
if child_task['state'] < 8:
# MAYBE, LIKELY, FUTURE: use parent state
state = child_task['state']
elif child_task['state'] < 64:
# WAITING, READY, STARTED (definite): join is FUTURE
state = 4
elif child_task['state'] == 64:
# COMPLETED: if the join is not finished, WAITING, otherwise COMPLETED
state = 64 if completed else 8
elif child_task['state'] == 128:
# ERROR: we don't know what the original state was, but we can't proceed through the gateway
state = 8
else:
# Cancelled tasks don't have children
continue
new_task = {
'id': str(uuid4()),
'parent': child_task['id'],
'children': [],
'state': state,
'task_spec': task['task_spec'].replace('BoundaryEventSplit', 'BoundaryEventJoin'),
'last_state_change': None,
'triggered': False,
'internal_data': {},
'data': {},
}
child_task['children'].append(new_task['id'])
new_tasks[new_task['id']] = new_task
wf['tasks'].update(new_tasks)
pass | [
86,
620
] |
def METHOD_NAME(self):
self.session.rollback()
self.session.close() | [
1462,
240
] |
def METHOD_NAME(self):
return BBB | [
13235
] |