text (string, length 15 to 7.82k) | ids (sequence, length 1 to 7)
---|---|
def METHOD_NAME(self) -> None:
pass | [
0,
2455,
43,
21,
3627,
1228
] |
def METHOD_NAME(self, fake_smcli):
fake_smcli.return_value = {
'overallRC': 0, 'rc': 0,
'rs': 0, 'errno': 0,
'response': 'Partition mode: Z/VM\n\n'
'ADDRESS STATUS TYPE CORE_ID\n'
'0000 MASTER-PROCESSOR CP 0000\n'
'0002 ALTERNATE IFL 0001\n'
'0003 PARKED IFL 0001\n'
'0004 PARKED IFL 0002\n',
'strError': ''
}
rh = ReqHandle.ReqHandle(captureLogs=False,
smt=mock.Mock())
ret_total, ret_used = getHost.getCPUCount(rh)
print("return value1:", ret_total, ret_used)
self.assertEqual(4, ret_total)
self.assertEqual(4, ret_used) | [
9,
19,
2265,
29,
529,
5297
] |
def METHOD_NAME(self):
return C._dispatch_dump(f"{self.namespace}::{self.name}") # type: ignore[attr-defined] | [
772,
10632
] |
def METHOD_NAME(kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]) -> List["ScheduleRule"]:
"""Create a list of schedule rules for the given kind.
Parameters
----------
kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
The kind of the schedule rules.
Returns
-------
rules : List[ScheduleRule]
The list of schedule rules.
"""
funcs = {
# pylint: disable=no-member
"llvm": _ffi_api.ScheduleRuleDefaultLLVM, # type: ignore
"cuda": _ffi_api.ScheduleRuleDefaultCUDA, # type: ignore
"cuda-tensorcore": _ffi_api.ScheduleRuleDefaultCUDATensorCore, # type: ignore
"hexagon": _ffi_api.ScheduleRuleDefaultHexagon, # type: ignore
# pylint: enable=no-member
}
for k, v in funcs.items():
if k == kind:
return v()
raise ValueError(f"Unsupported kind {kind} for schedule rule creation.") | [
129
] |
def METHOD_NAME(self):
"""Cleanup"""
self._s1ap_wrapper.cleanup() | [
531,
481
] |
def METHOD_NAME(config: Config) -> None:
"""
Test if the set_debug_mode() method updates the debug_mode attribute.
"""
# Store debug mode to reset it after the test
debug_mode = config.debug_mode
config.debug_mode = True
assert config.debug_mode == True
# Reset debug mode
config.debug_mode = debug_mode | [
9,
0,
290,
854
] |
def METHOD_NAME(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hv", ('help', 'verbose'))
except getopt.error as msg:
print('Error: %s\n' % msg, file=sys.stderr)
print(__usage__.strip(), file=sys.stderr)
return 1
# check arguments
if len(args) != 0:
print('Error: no arguments allowed\n', file=sys.stderr)
print(__usage__.strip(), file=sys.stderr)
return 2
# process options
verbose = 0
for o, a in opts:
if o in ('-h', '--help'):
print(__usage__.strip())
return 0
elif o in ('-v', '--verbose'):
verbose += 1
# do it!
pywws.logger.setup_handler(verbose)
ws = pywws.weatherstation.WeatherStation()
fixed_block = ws.get_fixed_block()
if not fixed_block:
print("No valid data block found")
return 3
# loop
ptr = ws.data_start
total_count = 0
bad_count = 0
while True:
if total_count % 1000 == 0:
active = ws.current_pos()
while True:
ptr += 0x20
if ptr >= 0x10000:
ptr = ws.data_start
if active < ptr - 0x10 or active >= ptr + 0x20:
break
result_1 = ws._read_block(ptr, retry=False)
result_2 = ws._read_block(ptr, retry=False)
if result_1 != result_2:
logger.warning('read_block changing %06x', ptr)
logger.warning(' %s', str(result_1))
logger.warning(' %s', str(result_2))
bad_count += 1
total_count += 1
print("\r %d/%d " % (bad_count, total_count), end='', flush=True)
print('')
return 0 | [
57
] |
def METHOD_NAME(self, pred):
pred = torch.sigmoid(pred)
n_i, n_c = pred.size()
bg_score = pred[:, -1].view(n_i, 1)
if self.test_with_obj:
pred[:, :-1] *= (1 - bg_score)
return pred | [
19,
648
] |
async def METHOD_NAME(received_msg, keychain: Keychain) -> MoneroTransactionFinalAck:
import gc
from trezor import log, utils
from trezor.wire.context import get_context
from apps.monero.signing.state import State
state = State()
mods = utils.unimport_begin()
progress = MoneroTransactionProgress()
# Splitting ctx.call() to write() and read() helps to reduce memory fragmentation
# between calls.
while True:
if __debug__:
log.debug(__name__, "#### F: %s, A: %s", gc.mem_free(), gc.mem_alloc())
gc.collect()
gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())
result_msg, accept_msgs = await _sign_tx_dispatch(
state, received_msg, keychain, progress
)
if accept_msgs is None:
break
ctx = get_context()
await ctx.write(result_msg)
del (result_msg, received_msg)
utils.unimport_end(mods)
received_msg = await ctx.read(accept_msgs)
utils.unimport_end(mods)
return result_msg | [
2452,
2543
] |
def METHOD_NAME(self) -> None:
self.start_time = time() | [
447
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The value for the property is greater than or equal to the specified value.
"""
return pulumi.get(self, "gte") | [
4246
] |
def METHOD_NAME(self):
# bitline pins are not added because they are floating
for bl_name in self.get_bitline_names():
self.add_pin(bl_name, "INOUT")
# bitline pins are not added because they are floating
for wl_name in self.get_wordline_names():
self.add_pin(wl_name, "INPUT")
self.add_pin("vdd", "POWER")
self.add_pin("gnd", "GROUND") | [
238,
3783
] |
def METHOD_NAME(
object_identifier: int, payload: Any, shard_identifier: int, **kwds: Any
):
if (org_member := OrganizationMember.objects.filter(id=object_identifier).last()) is None:
# Delete all identities that may have been associated. This is an implicit cascade.
if payload and payload.get("user_id") is not None:
identity_service.delete_identities(
user_id=payload["user_id"], organization_id=shard_identifier
)
organizationmember_mapping_service.delete(
organizationmember_id=object_identifier,
organization_id=shard_identifier,
)
return
rpc_org_member_update = RpcOrganizationMemberMappingUpdate.from_orm(org_member)
organizationmember_mapping_service.upsert_mapping(
organizationmember_id=org_member.id,
organization_id=shard_identifier,
mapping=rpc_org_member_update,
) | [
356,
1044,
1823,
682
] |
def METHOD_NAME(func): ... | [
2991,
2993,
16141
] |
def METHOD_NAME(self):
linear = nn.Linear(self.source_model.d_hidden, self.pt_tasks)
af = nn.ReLU()
return nn.Sequential(linear, af) | [
474,
474,
7353
] |
def METHOD_NAME(line):
# Splitting on a plain colon would accidentally match inside a
# Windows absolute-path filename, so we must search for a colon
# followed by whitespace to find the divider between LHS and RHS
# of the Makefile rule.
rulesep = ': '
sep_idx = line.find(rulesep)
if sep_idx >= 0:
return line[sep_idx + 2:]
else:
return line | [
188,
5484,
446,
7455
] |
def METHOD_NAME(cls, args, task, embed_tokens):
tgt_dict = task.tgt_dict
from examples.simultaneous_translation.models.transformer_monotonic_attention import (
TransformerMonotonicDecoder,
)
decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder | [
56,
3642
] |
def METHOD_NAME(self, value): ... | [
885,
623
] |
def METHOD_NAME(self): | [
9,
2767,
2640,
1755,
529,
50
] |
def METHOD_NAME(
self) -> Tuple[beam_runner_api_pb2.Pipeline, '_PCollectionPlaceHolder']: | [
56,
1102,
58,
671
] |
def METHOD_NAME(description, keywords, creators, metadata, unset, metadata_source):
"""Edit project metadata."""
from renku.command.project import edit_project_command
if list(creators) == [NO_VALUE]:
creators = NO_VALUE
if list(keywords) == [NO_VALUE]:
keywords = NO_VALUE
if "k" in unset or "keywords" in unset:
if keywords is not NO_VALUE:
raise click.UsageError("Cant use '--keyword' together with unsetting keywords")
keywords = None
if "m" in unset or "metadata" in unset:
if metadata is not NO_VALUE:
raise click.UsageError("Cant use '--metadata' together with unsetting metadata")
metadata = None
if metadata_source is not NO_VALUE and metadata is NO_VALUE:
raise click.UsageError("The '--metadata-source' option can only be used with the '--metadata' flag")
if metadata_source is NO_VALUE and metadata is not NO_VALUE:
metadata_source = "renku"
custom_metadata = metadata
if metadata and metadata is not NO_VALUE:
path = Path(metadata)
if not path.exists():
raise click.UsageError(f"Path {path} does not exist.")
custom_metadata = json.loads(Path(metadata).read_text())
result = (
edit_project_command()
.build()
.execute(
description=description,
creator=creators,
keywords=keywords,
custom_metadata=custom_metadata,
custom_metadata_source=metadata_source,
)
)
updated, no_email_warning = result.output
if not updated:
click.echo("Nothing to update. Check available fields with `renku project edit --help`\n")
else:
click.echo("Successfully updated: {}.".format(", ".join(updated.keys())))
if no_email_warning:
click.echo(ClickCallback.WARNING + f"No email or wrong format for: {no_email_warning}") | [
2004
] |
def METHOD_NAME(self):
config = pnconf_copy()
pn = PubNub(config)
remove_uuid = pn.remove_uuid_metadata()
assert remove_uuid is not None
assert isinstance(remove_uuid, RemoveUuid)
assert isinstance(remove_uuid, Endpoint) | [
9,
188,
4977,
841,
1272
] |
def METHOD_NAME(directory, logger=None, repo_obj=None):
"""
:param directory: Repository directory path.
:type logger: logging.Logger
:param logger: Optional logger instance.
:param repo_obj: Repository object.
"""
logger = logger or default_logger
return RepositoryLoader(directory, logger=logger, repo_obj=repo_obj).load() | [
557,
1230
] |
def METHOD_NAME(self,
point = "const Vector&",
countBoundary = ("const bool", "true"),
tol = ("const double", "1.0e-8")):
"Test if the given point is internal to the polygon."
return "bool" | [
1992
] |
def METHOD_NAME(trainer, model):
model.build()
model.train()
model.to(trainer.device)
trainer.model = model | [
238,
578
] |
def METHOD_NAME(self):
if (self.led == 1):
return
try:
urllib.request.urlopen("http://" + str(self._device_pbip) +
"/settings?led_status_disable=false",
timeout=3)
log.info("Shelly button led on (%d) %s"
% (self.device_nummer, self._device_pbip))
except Exception as e1:
log.warning("Shelly button on (%d) %s Fehlermeldung: %s "
% (self.device_nummer, self._device_pbip, str(e1)))
self.led = 1 | [
-1
] |
def METHOD_NAME():
return func2() | [
8704
] |
def METHOD_NAME(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.get_permutation_importances_df(), pd.DataFrame) | [
9,
1148,
2840,
5588
] |
def METHOD_NAME(orig_smiles, decoded_smiles, output_file=None):
"""
Compare decoded to original SMILES strings and output a table of Tanimoto distances, along with
binary flags for whether the strings are the same and whether the decoded string is valid SMILES.
orig_smiles and decoded_smiles are lists or arrays of strings.
If an output file name is provided, the table will be written to it as a CSV file.
Returns the table as a DataFrame.
"""
res_df = pd.DataFrame(dict(original=orig_smiles, decoded=decoded_smiles))
is_valid = []
is_same = []
tani_dist = []
accuracy = []
count = 0
data_size = len(orig_smiles)
for row in res_df.itertuples():
count = count + 1
#compute char by char accuracy
hit = 0
for x, y in zip(row.original, row.decoded):
if x == y:
hit = hit+1
accuracy.append((hit/len(row.original))*100)
is_same.append(int(row.decoded == row.original))
orig_mol = Chem.MolFromSmiles(row.original)
if orig_mol is None:
print("INVALID AT input ", count, " ", row.original)
# Note: input may be invalid if the original SMILES string is truncated
is_valid.append('x')
tani_dist.append(-1)
continue
dec_mol = Chem.MolFromSmiles(row.decoded)
RDLogger.DisableLog('rdApp.*')
if dec_mol is None:
is_valid.append(0)
tani_dist.append(1)
else:
is_valid.append(1)
orig_fp = AllChem.GetMorganFingerprintAsBitVect(orig_mol, 2, 1024)
dec_fp = AllChem.GetMorganFingerprintAsBitVect(dec_mol, 2, 1024)
tani_sim = DataStructs.FingerprintSimilarity(orig_fp, dec_fp, metric=DataStructs.TanimotoSimilarity)
tani_dist.append(1.0 - tani_sim)
res_df['is_valid'] = is_valid
res_df['is_same'] = is_same
res_df['smile_accuracy'] = accuracy
res_df['tanimoto_distance'] = tani_dist
global_acc = np.mean(np.array(accuracy))
res_df['total_avg_accuracy'] = [global_acc]*len(accuracy)
print("Mean global accuracy % ", global_acc)
print("Validity % ", (is_valid.count(1)/data_size)*100)
print("Same % ", (is_same.count(1)/data_size)*100)
valid_tani_dist = [ t for t in tani_dist if t >= 0 ]
print("Average tanimoto ", np.mean(np.array(valid_tani_dist)))
if output_file is not None:
output_columns = ['original', 'decoded', 'is_valid', 'is_same', 'smile_accuracy','tanimoto_distance','total_avg_accuracy']
res_df.to_csv(output_file, index=False, columns=output_columns)
return(res_df) | [
979,
7079,
24,
1533,
12074
] |
def METHOD_NAME(self) -> set:
variables = set()
clazz = ConfluentServices
for key, value in vars(ConfluentServices).items():
if callable(getattr(clazz, key)) and key.isupper():
func = getattr(clazz, key)
result = func(self)
variables.add(result.group)
return variables | [
19,
75,
846,
83
] |
def METHOD_NAME(self):
non_local_cfg = dict(
sub_sample=True,
use_scale=False,
norm_cfg=dict(type='BN3d', requires_grad=True),
mode='embedded_gaussian')
non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
resnet_tsm_50_full = ResNetTSM(
50,
pretrained='torchvision://resnet50',
non_local=non_local,
non_local_cfg=non_local_cfg,
temporal_pool=True)
resnet_tsm_50_full.init_weights()
input_shape = (16, 3, 32, 32)
imgs = generate_backbone_demo_inputs(input_shape)
feat = resnet_tsm_50_full(imgs)
assert feat.shape == torch.Size([8, 2048, 1, 1]) | [
9,
2215,
14758,
324
] |
def METHOD_NAME(alembic_config: Config, conn: Connection, rev: str = "head") -> None:
with _alembic_lock:
alembic_config.attributes["connection"] = conn
stamp(alembic_config, rev) | [
2418,
8171,
3853
] |
def METHOD_NAME(self, index):
"""
Retrieves the presence status of power supply unit (PSU) defined
by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query status
:return: Boolean, True if PSU is plugged, False if not
"""
if index is None:
raise RuntimeError("index shouldn't be None")
if index > self.MAX_NUM_PSU:
raise RuntimeError("index ({}) shouldn't be greater than {}".format(index, self.MAX_NUM_PSU))
status = self._read_file(self.psu_presence, index)
return status == 1 | [
19,
4060,
4061
] |
def METHOD_NAME(self, x, y=0.0):
return int((x - y + 0.5 * self.step) // self.step) | [
724,
2443
] |
def METHOD_NAME(self, *args, **kwargs):
# type: (Route, *object, **object) -> Any
hub = Hub.current
integration = hub.get_integration(BottleIntegration)
prepared_callback = old_make_callback(self, *args, **kwargs)
if integration is None:
return prepared_callback
# If an integration is there, a client has to be there.
client = hub.client # type: Any
def wrapped_callback(*args, **kwargs):
# type: (*object, **object) -> Any
try:
res = prepared_callback(*args, **kwargs)
except HTTPResponse:
raise
except Exception as exception:
event, hint = event_from_exception(
exception,
client_options=client.options,
mechanism={"type": "bottle", "handled": False},
)
hub.capture_event(event, hint=hint)
raise exception
return res
return wrapped_callback | [
1265,
93,
1076
] |
def METHOD_NAME(protected_item_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProtectedItemResult]:
"""
Gets the details of the protected item.
Azure REST API version: 2021-02-16-preview.
:param str protected_item_name: The protected item name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str vault_name: The vault name.
"""
... | [
19,
814,
1024,
146
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self) -> LabwareParameters:
return self._parameters | [
19,
386
] |
def METHOD_NAME(self, db: _Database) -> None: ... | [
1160
] |
def METHOD_NAME(self, msg_type, handler):
doc = self.session._document
def wrapper(*args, **kwargs):
if msg_type == 'comm_open':
return
with set_curdoc(doc):
state.execute(partial(handler, *args, **kwargs), schedule=True)
return wrapper | [
503,
1519
] |
def METHOD_NAME(i: Expr) -> Expr:
return Extract(Itob(i), Int(7), Int(1)) | [
14406
] |
async def METHOD_NAME():
d = succeed("foo")
res = await d
return res | [
15037
] |
def METHOD_NAME(initial_file: str, output_file: str, include_search_path1: str, include_search_path2: str, discardables: List[Callable] = []) -> None:
embed([
initial_file,
"-I",
include_search_path1,
"-I",
include_search_path2,
"-o",
output_file
], discardables) | [
129,
97,
572,
171
] |
def METHOD_NAME(fname):
schema = QAPISchema(fname)
schema.visit(QAPISchemaTestVisitor())
for doc in schema.docs:
if doc.symbol:
print('doc symbol=%s' % doc.symbol)
else:
print('doc freeform')
print(' body=\n%s' % doc.body.text)
for arg, section in doc.args.items():
print(' arg=%s\n%s' % (arg, section.text))
for feat, section in doc.features.items():
print(' feature=%s\n%s' % (feat, section.text))
for section in doc.sections:
print(' section=%s\n%s' % (section.name, section.text)) | [
9,
1382
] |
def METHOD_NAME(self, data):
stream = self._stream
stream.write(data)
stream.flush() | [
77,
919
] |
def METHOD_NAME(msg, *args, **kwargs):
Log._GLOBAL_LOG.METHOD_NAME(msg, *args, **kwargs) | [
257
] |
def METHOD_NAME(suffix):
"""
Test if giving a wrong optional parameter to
:func:`save_structure()` raises a :class:`TypeError`
"""
array = strucio.load_structure(join(data_dir("structure"), "1l2y.mmtf"))
temp = NamedTemporaryFile("w+", suffix=f".{suffix}")
with pytest.raises(TypeError):
strucio.save_structure(
temp.name, array, answer=42
)
temp.close() | [
9,
6837,
41,
1967,
335
] |
def METHOD_NAME(run_line, go_ep1_id):
"""
confirms --local-user is present in delete dry-run output
"""
load_response_set("cli.get_submission_id")
result = run_line(
f"globus delete -F json --dry-run -r --local-user my-user {go_ep1_id}:/"
)
json_output = json.loads(result.output)
assert json_output["local_user"] == "my-user" | [
9,
34,
125,
21
] |
def METHOD_NAME(self):
peername = self._get_peer_name()
if peername:
return "{} ({})".format(self.peer, peername)
else:
return self.peer | [
19,
502,
147
] |
def METHOD_NAME(cls) -> None:
"""
Prepare Ingredients
"""
User.__table__.create(bind=cls.engine)
# Insert 30 rows
for i in range(10):
data = [
User(
name="John",
fullname="John Doe",
nickname="johnny b goode",
comments="no comments",
age=30,
),
User(
name="Jane",
fullname="Jone Doe",
nickname=None,
comments="maybe some comments",
age=31,
),
User(
name="John",
fullname="John Doe",
nickname=None,
comments=None,
age=None,
),
]
cls.session.add_all(data)
cls.session.commit() | [
0,
1,
2
] |
def METHOD_NAME(self): | [
19,
1461
] |
def METHOD_NAME(self, sample_rate, num_channels, subtype_and_bit_depth):
"""`soundfile_backend.info` can check sph file correctly"""
duration = 1
num_frames = sample_rate * duration
#data = torch.randn(num_frames, num_channels).numpy()
data = paddle.randn(shape=[num_frames, num_channels]).numpy()
path = self.get_temp_path("data.nist")
subtype, bits_per_sample = subtype_and_bit_depth
soundfile.write(path, data, sample_rate, subtype=subtype)
info = soundfile_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == bits_per_sample
assert info.encoding == "PCM_S" | [
9,
7768
] |
def METHOD_NAME():
logger.debug(request)
schain_name = request.json.get('schain_name')
snapshot_from = request.json.get('snapshot_from', '')
result = toggle_schain_repair_mode(
schain_name, snapshot_from=snapshot_from)
if result:
return construct_ok_response()
else:
return construct_err_response(
msg=f'No schain with name {schain_name}'
) | [
4894
] |
def METHOD_NAME(self):
minimum_wavelength = self.get_wavelegth_from_energy(self.maximum_energy)
maximum_wavelength = self.get_wavelegth_from_energy(self.minimum_energy)
return minimum_wavelength, maximum_wavelength | [
19,
8925,
4355
] |
def METHOD_NAME(self):
# Verify that the thread can be started and stopped multiple times.
class LogThread(timed_threads.TimedThread):
def on_interval(self):
logging.info("Thread Run")
log_thread = LogThread(interval=0.1)
for _ in range(2):
self.assertFalse(log_thread.is_alive())
with self.assertLogs(level="INFO") as logs:
log_thread.start()
time.sleep(1)
self.assertTrue(log_thread.is_alive())
log_thread.stop()
self.assertIn("INFO:absl:Thread Run", logs.output)
time.sleep(0.1)
self.assertFalse(log_thread.is_alive()) | [
9,
3516,
600,
1141
] |
def METHOD_NAME(
plotIt=True,
survey_type="dipole-dipole",
rho_background=1e3,
rho_block=1e2,
block_x0=100,
block_dx=10,
block_y0=-10,
block_dy=5,
):
np.random.seed(1)
# Initiate I/O class for DC
IO = DC.IO()
# Obtain ABMN locations
xmin, xmax = 0.0, 200.0
ymin, ymax = 0.0, 0.0
zmin, zmax = 0, 0
endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
# Generate DC survey object
survey = DCutils.generate_dcip_survey(
endl, survey_type=survey_type, dim=2, a=10, b=10, n=10
)
survey = IO.from_abmn_locations_to_survey(
survey.locations_a,
survey.locations_b,
survey.locations_m,
survey.locations_n,
survey_type,
data_dc_type="volt",
)
# Obtain 2D TensorMesh
mesh, actind = IO.set_mesh()
# Flat topography
actind = active_from_xyz(
mesh, np.c_[mesh.cell_centers_x, mesh.cell_centers_x * 0.0]
)
survey.drape_electrodes_on_topography(mesh, actind, option="top")
# Use Exponential Map: m = log(rho)
parametric_block = maps.ParametricBlock(mesh, slopeFact=1e2)
mapping = maps.ExpMap(mesh) * parametric_block
# Set true model
# val_background,val_block, block_x0, block_dx, block_y0, block_dy
mtrue = np.r_[np.log(1e3), np.log(10), 100, 10, -20, 10]
# Set initial model
m0 = np.r_[
np.log(rho_background),
np.log(rho_block),
block_x0,
block_dx,
block_y0,
block_dy,
]
rho = mapping * mtrue
rho0 = mapping * m0
# Show the true conductivity model
fig = plt.figure(figsize=(12, 3))
ax = plt.subplot(111)
temp = rho.copy()
temp[~actind] = np.nan
out = mesh.plot_image(
temp,
grid=False,
ax=ax,
grid_opts={"alpha": 0.2},
pcolor_opts={"cmap": "viridis", "norm": colors.LogNorm(10, 1000)},
)
ax.plot(
survey.unique_electrode_locations[:, 0],
survey.unique_electrode_locations[:, 1],
"k.",
)
ax.set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())
ax.set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())
cb = plt.colorbar(out[0])
cb.set_label("Resistivity (ohm-m)")
ax.set_aspect("equal")
ax.set_title("True resistivity model")
plt.show()
# Show the true conductivity model
fig = plt.figure(figsize=(12, 3))
ax = plt.subplot(111)
temp = rho0.copy()
temp[~actind] = np.nan
out = mesh.plot_image(
temp,
grid=False,
ax=ax,
grid_opts={"alpha": 0.2},
pcolor_opts={"cmap": "viridis", "norm": colors.LogNorm(10, 1000)},
)
ax.plot(
survey.unique_electrode_locations[:, 0],
survey.unique_electrode_locations[:, 1],
"k.",
)
ax.set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())
ax.set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())
cb = plt.colorbar(out[0])
cb.set_label("Resistivity (ohm-m)")
ax.set_aspect("equal")
ax.set_title("Initial resistivity model")
plt.show()
# Generate 2.5D DC problem
# "N" means potential is defined at nodes
prb = DC.Simulation2DNodal(
mesh, survey=survey, rhoMap=mapping, storeJ=True, solver=Solver
)
# Make synthetic DC data with 5% Gaussian noise
data = prb.make_synthetic_data(mtrue, relative_error=0.05, add_noise=True)
# Show apparent resistivity pseudo-section
IO.plotPseudoSection(data=data.dobs / IO.G, data_type="apparent_resistivity")
# Show apparent resistivity histogram
fig = plt.figure()
out = hist(data.dobs / IO.G, bins=20)
plt.show()
# Set standard_deviation
# floor
eps = 10 ** (-3.2)
# percentage
relative = 0.05
dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data)
uncert = abs(data.dobs) * relative + eps
dmisfit.standard_deviation = uncert
# Map for a regularization
mesh_1d = TensorMesh([parametric_block.nP])
# Related to inversion
reg = regularization.WeightedLeastSquares(mesh_1d, alpha_x=0.0)
opt = optimization.InexactGaussNewton(maxIter=10)
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
target = directives.TargetMisfit()
invProb.beta = 0.0
inv = inversion.BaseInversion(invProb, directiveList=[target])
prb.counter = opt.counter = utils.Counter()
opt.LSshorten = 0.5
opt.remember("xc")
# Run inversion
mopt = inv.METHOD_NAME(m0)
# Convert obtained inversion model to resistivity
# rho = M(m), where M(.) is a mapping
rho_est = mapping * mopt
rho_true = rho.copy()
# show recovered conductivity
fig, ax = plt.subplots(2, 1, figsize=(20, 6))
out1 = mesh.plot_image(
rho_true,
pcolor_opts={"cmap": "viridis", "norm": colors.LogNorm(10, 1000)},
ax=ax[0],
)
out2 = mesh.plot_image(
rho_est,
pcolor_opts={"cmap": "viridis", "norm": colors.LogNorm(10, 1000)},
ax=ax[1],
)
out = [out1, out2]
for i in range(2):
ax[i].plot(
survey.unique_electrode_locations[:, 0],
survey.unique_electrode_locations[:, 1],
"kv",
)
ax[i].set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())
ax[i].set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())
cb = plt.colorbar(out[i][0], ax=ax[i])
cb.set_label(r"Resistivity ($\Omega$m)")
ax[i].set_xlabel("Northing (m)")
ax[i].set_ylabel("Elevation (m)")
ax[i].set_aspect("equal")
ax[0].set_title("True resistivity model")
ax[1].set_title("Recovered resistivity model")
plt.tight_layout()
plt.show() | [
22
] |
def METHOD_NAME(
results: BatchBenchmarkResults, test_size: int, batch_size: int
):
assert len(results.model_batch_timings) == test_size
assert len(results.e2e_batch_timings) == test_size
assert results.batch_size == batch_size
assert results.model_batch_seconds > 0.0
assert results.model_batches_per_second > 0.0
assert results.model_item_seconds > 0.0
assert results.model_items_per_second > 0.0
assert results.e2e_batch_seconds > 0.0
assert results.e2e_batches_per_second > 0.0
assert results.e2e_item_seconds > 0.0
assert results.e2e_items_per_second > 0.0 | [
51,
1125,
250
] |
def METHOD_NAME(self):
index = self.load_widget_file("index.html")
parser = InLineScript(self.load_widget_file)
parser.feed(index)
return parser.content | [
557,
724
] |
def METHOD_NAME(self):
actual = self.__class__.example_abook_completer.complete("foo", 3)
expected = [("foo <[email protected]>", 21)]
self.assertListEqual(actual, expected) | [
9,
53,
85,
41,
1866,
156
] |
def METHOD_NAME(self):
r"""Can be implemented to perform some actions before the rest of the
widgets are initialized.
"""
pass | [
1553,
176,
1551
] |
def METHOD_NAME(self, snow_fraction_data: Cube) -> ndarray:
"""Calculates the snow_phase data"""
return np.where(snow_fraction_data >= self.upper_threshold, 1, 0) | [
8526,
3200
] |
def METHOD_NAME(im):
h, w = im.shape
# Get indices ordered by value from high to low
indices = [(i, j) for i in range(h) for j in range(w)]
indices.sort(key=lambda p: get(im, p), reverse=True)
# Maintains the growing sets
uf = UnionFind()
groups0 = {}
def get_comp_birth(p):
return get(im, uf[p])
# Process pixels from high to low
for i, p in enumerate(indices):
v = get(im, p)
ni = [uf[q] for q in iter_neighbors(p, w, h) if q in uf]
nc = sorted([(get_comp_birth(q), q) for q in set(ni)], reverse=True)
if i == 0:
groups0[p] = (v, v, None)
uf.add(p, -i)
if len(nc) > 0:
oldp = nc[0][1]
uf.union(oldp, p)
# Merge all others with oldp
for bl, q in nc[1:]:
if uf[q] not in groups0:
# print(i, ": Merge", uf[q], "with", oldp, "via", p)
groups0[uf[q]] = (bl, bl - v, p)
uf.union(oldp, q)
groups0 = [(k, groups0[k][0], groups0[k][1], groups0[k][2]) for k in groups0]
groups0.sort(key=lambda g: g[2], reverse=True)
return groups0 | [
19,
4808
] |
def METHOD_NAME(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase") | [
3866
] |
def METHOD_NAME(self, exchange_name: str, queue_name: str, routing_key: str):
self.check_connection()
self.channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=routing_key
) | [
287,
651
] |
def METHOD_NAME(upgrader, publication_7):
value = upgrader.upgrade('publication', publication_7, current_version='7', target_version='8')
assert value['schema_version'] == '8'
assert 'Incorrect date_published formatting: 3/30/20' in value['notes']
assert 'date_published' not in value | [
9,
3973,
738,
3141,
1629
] |
def METHOD_NAME(path, mode=0o755):
"""ensure that a directory exists
If it doesn't exist, try to create it and protect against a race condition
if another process is doing the same.
The default permissions are 755, which differ from os.makedirs default of 777.
"""
if not os.path.exists(path):
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno != errno.EEXIST:
raise
elif not os.path.isdir(path):
raise IOError("%r exists but is not a directory" % path) | [
602,
1190,
954
] |
def METHOD_NAME(tmpdir) -> None:
valid_extensions = _make_valid_extensions()
valid_extensions = list(valid_extensions)
fake_extensions = _make_fake_extensions(seed=42)
mock_extensions = valid_extensions + fake_extensions
mock_files = _make_fake_files(mock_extensions, seed=42)
mockdir = _make_mock_dir(tmpdir, mock_files)
message = "Found invalid file extensions"
with pytest.warns(UserWarning, match=message):
filtered = filter_valid_files(mockdir, valid_extensions=valid_extensions)
assert all(i not in fake_extensions for i in filtered) | [
9,
527,
1205,
1537
] |
def METHOD_NAME(tax_benefit_system, test_client):
response = test_client.get("/parameter/benefits")
assert response.status_code == client.OK
parameter = json.loads(response.data)
assert sorted(list(parameter.keys())) == [
"description",
"documentation",
"id",
"metadata",
"source",
"subparams",
]
assert parameter["documentation"] == (
"Government support for the citizens and residents of society."
"\nThey may be provided to people of any income level, as with social security,"
"\nbut usually it is intended to ensure that everyone can meet their basic human needs"
"\nsuch as food and shelter.\n(See https://en.wikipedia.org/wiki/Welfare)"
)
model_benefits = tax_benefit_system.parameters.benefits
assert parameter["subparams"].keys() == model_benefits.children.keys(), parameter[
"subparams"
].keys()
assert "description" in parameter["subparams"]["basic_income"]
assert parameter["subparams"]["basic_income"]["description"] == getattr(
model_benefits.basic_income, "description", None
), parameter["subparams"]["basic_income"]["description"] | [
9,
511,
1716
] |
def METHOD_NAME(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionsForMIPPolicySyncResult]:
"""
Gets the specified private endpoint connection associated with the service.
Azure REST API version: 2021-03-08.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
... | [
19,
547,
841,
560,
43,
10962,
54
] |
def METHOD_NAME(self):
"""
Return the URL of the current site.
"""
return settings.SITE_URL | [
1055,
274
] |
def METHOD_NAME(
extracts: util.StandardExtracts,
num_rows_key: str,
array_to_truncate: np.ndarray,
model_name: Optional[str] = None,
allow_missing_key: Optional[bool] = False,
) -> np.ndarray:
"""Get the array to be truncated by the number of rows.
Args:
extracts: TFMA extracts that stores the keys.
num_rows_key: Number of rows in each column except the paddings. For
multi-dimensional input, it will truncate on the first dimension.
array_to_truncate: the array to be truncated.
model_name: The name of the model for outputs.
allow_missing_key: (Optional) If true, it will do nothing instead of
raising errors when col_names are not found.
Returns:
The array truncated by the number of rows.
Raises:
KeyError: The num_rows_key is not found in extracts.
"""
num_of_rows = None
dict_to_search = collections.ChainMap(
extracts.get_predictions(model_name) or {},
extracts.get_features() or {},
extracts.get_transformed_features(model_name) or {})
if num_rows_key:
if dict_to_search and num_rows_key in dict_to_search:
num_of_rows = dict_to_search[num_rows_key]
if isinstance(num_of_rows, np.ndarray):
num_of_rows = num_of_rows.item()
else:
if not allow_missing_key:
raise KeyError(f"Key {num_rows_key} is not found under predictions, "
"features, or transformed features of the extracts."
"Please set allow_missing_key to True, if you want to "
"skip truncation instead.")
result = array_to_truncate
if num_of_rows and num_of_rows > 0 and len(result) > num_of_rows:
result = result[:num_of_rows]
return result | [
5419,
604,
181,
9408
] |
def METHOD_NAME(application_events):
"""
Test win_event.count
"""
ret = win_event.count("Application")
assert ret == 6 | [
9,
29
] |
def METHOD_NAME(bdf_filename):
"""unimplemented method for splitting elements"""
model = read_bdf(bdf_filename, xref=True)
for eid, elem in model.elements.items():
if elem.type == 'CTRIA3':
#
# 3
# /|\
# / | \
# / | \
# / 4 \
# / / \ \
# / / \ \
# 1-------------2
#
p1, p2, p3 = elem.get_node_positions()
#centroid = (p1 + p2 + p3) / 3.
#
# 3
# /|\
# / | \
# / | \
# / | \
# 1----4----2
#
elif elem.type == 'CQUAD4':
#
#
# 4---------3
# | \ / |
# | \ / |
# | 5 |
# | / \ |
# |/ \|
# 1---------2
#
# the same thing shown in a rotated view
# 4
# /| \
# / | \
# / | \
# / | \
# 1---------5---------3
# \ | /
# \ | /
# \ | /
# \ | /
# 2
#
# max_area, taper_ratio, area_ratio
# 4----7----3
# | | |
# | | |
# 8----9----6
# | | |
# | | |
# 1----4----2
#
# max_interior_angle
# 4---------3
# / \ /
# / \ /
# / \ /
# / \ /
# 1---------2
#
# taper_ratio
# 4--6--3
# / | \
# / | \
# / | \
# 1------5------2
#
# taper_ratio
# 4------3
# / \ / \
# / \ / \
# / \/ \
# 1-------5------2
#
# taper_ratio
# 4------3
# / \ \
# / \ \
# / \ \
# 1-------5------2
pass | [
265,
1532
] |
def METHOD_NAME():
"""Runs diagnostics.
The result of this can be printed using pretty_print.
"""
return [
sys_info(),
ccid_info(),
otp_info(),
fido_info(),
"End of diagnostics",
] | [
19,
7643
] |
def METHOD_NAME(self, original_tag_count, modified_tag_count):
if original_tag_count.get("freeform_tags") != modified_tag_count.get("freeform_tags"):
return True
else:
original_defined_tag = original_tag_count.get("defined_tags")
modified_defined_tag = modified_tag_count.get("defined_tags")
if original_defined_tag:
for namespace in original_defined_tag:
if original_defined_tag.get(namespace) != modified_defined_tag.get(namespace):
return True
return False
else:
return False | [
82,
674,
280,
191
] |
def METHOD_NAME(tax_app):
webhooks = [
Webhook(
name=f"Tax checkout webhook no {i}",
app=tax_app,
target_url=f"https://www.example.com/tax-checkout-{i}",
)
for i in range(3)
]
Webhook.objects.bulk_create(webhooks)
WebhookEvent.objects.bulk_create(
WebhookEvent(
event_type=WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES,
webhook=webhook,
)
for webhook in webhooks
)
return list(
Webhook.objects.filter(
events__event_type=WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES
)
) | [
6131,
2170,
3436
] |
def METHOD_NAME(self):
orig = cupy.ndarray
with pytest.raises(AssertionError):
with testing.AssertFunctionIsCalled('cupy.ndarray'):
pass
assert cupy.ndarray is orig | [
9,
180,
130,
259
] |
def METHOD_NAME(env):
"""This function adds the path to the Phar Lap binaries, includes,
and libraries, if they are not already there."""
ph_path = getPharLapPath()
try:
env_dict = env['ENV']
except KeyError:
env_dict = {}
env['ENV'] = env_dict
SCons.Util.AddPathIfNotExists(env_dict, 'PATH',
os.path.join(ph_path, 'bin'))
SCons.Util.AddPathIfNotExists(env_dict, 'INCLUDE',
os.path.join(ph_path, 'include'))
SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
os.path.join(ph_path, 'lib'))
SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
os.path.join(ph_path, os.path.normpath('lib/vclib')))
env['PHARLAP_PATH'] = getPharLapPath()
env['PHARLAP_VERSION'] = str(getPharLapVersion()) | [
238,
7765,
3609,
3336
] |
def METHOD_NAME(x):
if type(x) is np.ndarray and x.dtype.kind in ('i', 'f'):
return torch.from_numpy(x)
return x | [
24,
3296
] |
def METHOD_NAME(self):
"""
Creates a new AFS token
Raises:
CredentialRenewalError: If the renewal process returns a non-zero value
"""
command = 'kinit && aklog'
process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutdata, stderrdata = process.communicate(getpass('Kerberos password: ').encode())
if process.returncode == 0:
logger.info('AFS token %s created. Valid for %s', self.location, self.time_left())
else:
raise CredentialRenewalError(stderrdata) | [
129
] |
def METHOD_NAME(self, text):
"""
Check if text is offensive using string matcher and classifier.
"""
if text == '':
return False
if (
hasattr(self, 'offensive_string_matcher')
and text in self.offensive_string_matcher
):
return True
if hasattr(self, 'offensive_classifier') and text in self.offensive_classifier:
return True
return False | [
250,
15073
] |
def METHOD_NAME(self): | [
7374,
3150
] |
def METHOD_NAME(self):
exp = set("ACDEFGHIKLMNOPQRSTUVWY")
self.assertEqual(Protein("").definite_chars, exp)
self.assertEqual(Protein.definite_chars, exp) | [
9,
16931,
2107
] |
def METHOD_NAME(self):
value = get_most_specific_notification_setting_value(
notification_settings_by_scope={},
recipient=RpcActor(id=self.user.id, actor_type=ActorType.USER),
parent_id=1,
type=NotificationSettingTypes.WORKFLOW,
)
assert value == NotificationSettingOptionValues.SUBSCRIBE_ONLY | [
9,
19,
759,
3303,
857,
1333,
99
] |
def METHOD_NAME(self, mock_send_email, mock_send_teams_message):
notification_config = NotificationConfig.load(config=TEAMS_NOTIFICATION_CONFIG_NO_ALERT_ON)
sender = NotificationSender(config=notification_config)
pipeline_run = self.__class__.pipeline_run
sender.send_pipeline_run_failure_message(self.__class__.pipeline, pipeline_run)
self.assertEqual(mock_send_email.call_count, 0)
self.assertEqual(mock_send_teams_message.call_count, 0) | [
9,
2941,
69,
830,
35
] |
def METHOD_NAME(self, clientid, groupids):
q1 = """delete from GROUPS where clientid = %s"""
self.c.execute(q1, (clientid,))
if len(groupids) == 0:
return
q2 = """insert into GROUPS (clientid, groupid) values (%s, %s)"""
for id in groupids:
self.c.execute(q2, (clientid, id)) | [
1971,
846
] |
def METHOD_NAME(self):
self.assertIsInstance(
frame.decode_frame(self.CONTENT_HEADER)[1], frame.Header) | [
1268,
572,
896,
89,
9
] |
def METHOD_NAME(
self,
path: str,
labels: typing.Optional[typing.Dict] = None,
*,
default: typing.Optional[float] = None,
) -> float:
"""
Returns a single metric value at specified path. If a dict of labels
is provided, does an exact match of labels (i.e. {} stands for no
labels; {'a': 'b', 'c': 'd'} matches only {'a': 'b', 'c': 'd'} or
{'c': 'd', 'a': 'b'} but neither match {'a': 'b'} nor
{'a': 'b', 'c': 'd', 'e': 'f'}).
@throws AssertionError if not one metric by path
"""
entry = self.get(path, set())
assert (
entry or default is not None
), f'No metrics found by path "{path}"'
if labels is not None:
entry = {x for x in entry if x.labels == labels}
assert (
entry or default is not None
), f'No metrics found by path "{path}" and labels {labels}'
assert len(entry) <= 1, (
f'Multiple metrics found by path "{path}" and labels {labels}:'
f' {entry}'
)
else:
assert (
len(entry) <= 1
), f'Multiple metrics found by path "{path}": {entry}'
if default is not None and not entry:
return default
return next(iter(entry)).value | [
99,
1541
] |
def METHOD_NAME(t: torch.Tensor, prev_version: int) -> None: ... | [
7035,
0,
281,
2469
] |
def METHOD_NAME():
mosaic_grid = EOxSRectifiedGrid(
srid = 3035,
low = (0, 0),
high = (1492, 899),
origin = (4208500.0, 2948000.0),
offsets = ((500.0, 0.0), (0.0, -500.0)),
)
resample(
mosaic_grid.srid, mosaic_grid.origin, mosaic_grid.offsets,
"image2009", "image2009_mosaic_resampled", "*.tiff"
) | [
4182,
-1
] |
def METHOD_NAME():
# image with range [0.9, 0.91]
image = np.random.random((100, 100)) * 0.01 + 0.9
contours = find_contours(image) # use default level
# many contours should be found
assert len(contours) > 1 | [
9,
33,
235
] |
def METHOD_NAME(self):
if evalOnFirstUpdate.value:
self._Setup() | [
69,
865,
86
] |
def METHOD_NAME():
@op(out=Out(typing.Dict[str, str]))
def emit_dict():
return {"foo": 1}
with pytest.raises(DagsterTypeCheckDidNotPass):
wrap_op_in_graph_and_execute(emit_dict) | [
9,
756,
4703,
6973,
2445,
99,
909
] |
def METHOD_NAME(self):
shutil.rmtree(self.tmp_dir)
super().METHOD_NAME() | [
531,
481
] |
def METHOD_NAME(self):
"""
Runs an embeddings upsert operation for previously batched documents.
"""
self.execute("get", "upsert") | [
5592
] |
def METHOD_NAME(self, source, action_type="person-update"):
if not self.edits_made:
# No-op in this case
return self.person
with transaction.atomic():
metadata = self.get_change_metadata_for_bot(source)
self.person.record_version(metadata)
self.person.METHOD_NAME()
existing_action = LoggedAction.objects.filter(
popit_person_new_version=metadata["version_id"]
)
if not existing_action.exists():
LoggedAction.objects.create(
user=self.user,
person=self.person,
action_type=action_type,
ip_address=None,
popit_person_new_version=metadata["version_id"],
source=metadata["information_source"],
)
self.person.invalidate_identifier_cache()
return self.person | [
73
] |
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(StackMemoryStats, dict):
for key, value in self.items():
result[key] = value
return result | [
24,
553
] |
def METHOD_NAME(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
else:
data = self.request.data
if len(data['warehouse_name']) > 45:
raise APIException({"detail": "The warehouse name is set to more than 45 characters"})
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers) | [
86
] |
def METHOD_NAME(self):
"Read/write access the protected node extent field member"
return "Field<%(Dimension)s, Vector>&" | [
1089,
1716,
7862,
101
] |
def METHOD_NAME():
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '4x1',
'PadStart': 'Nx0',
'PadEnd' : 'Nx0',
})
log.debug(conv.printUsage(z))
e= {'n': 1, 'c': 4, 'h': 12, 'w': 8, 'k': 1, 'p': 2 , 'p_': 3}
ec = ConvProblem(e, conv)
assert (ec.zeroPadA == [[1,4, 2, 3]]) | [
9,
2459,
-1
] |
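
Each row above pairs a `text` cell (a code snippet with the target identifier masked as METHOD_NAME) with an `ids` cell (a short list of integers). Below is a minimal sketch of iterating rows of this shape, assuming they are available as plain Python dicts keyed by `text` and `ids`; the function name and the dict-based representation are assumptions for illustration, not part of the dump.

```python
from typing import Any, Dict, Iterable, Iterator, List, Tuple

def iter_masked_methods(rows: Iterable[Dict[str, Any]]) -> Iterator[Tuple[str, List[int]]]:
    """Yield (snippet, ids) pairs from rows shaped like the table above."""
    for row in rows:
        snippet: str = row["text"]   # code with the target name masked as METHOD_NAME
        ids: List[int] = row["ids"]  # short list of integer ids (1 to 7 per row)
        yield snippet, ids

# Example using the first row of the table verbatim:
rows = [{
    "text": "def METHOD_NAME(self) -> None:\n    pass",
    "ids": [0, 2455, 43, 21, 3627, 1228],
}]
for snippet, ids in iter_masked_methods(rows):
    print(len(snippet.splitlines()), ids)
```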