text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
def METHOD_NAME(xml_file):
"""Parse the xml file to create types, scaling factor types, and scales."""
tree = ElementTree()
tree.parse(xml_file)
for param in tree.find("parameters"):
VARIABLES[param.get("name")] = param.get("value")
types_scales = {}
for prod in tree.find("product"):
ascii = (prod.tag in ["mphr", "sphr"])
res = []
for i in prod:
lres = CASES[i.tag](i, ascii)
if lres is not None:
res.append(lres)
types_scales[(prod.tag, int(prod.get("subclass")))] = res
types = {}
stypes = {}
scales = {}
for key, val in types_scales.items():
types[key] = to_dtype(val)
stypes[key] = to_scaled_dtype(val)
scales[key] = to_scales(val)
return types, stypes, scales | [
214,
275
] |
def METHOD_NAME(self):
self.assertRaises(TypeError, copyreg.pickle,
type(1), int, "not a callable") | [
9,
17095,
2821
] |
def METHOD_NAME(self):
store = self.StoreClass()
# Initial value is None
assert store.gettargetlanguage() is None
# sourcelanguage shouldn't change the targetlanguage
store.setsourcelanguage("en")
assert store.gettargetlanguage() is None
# targetlanguage setter works correctly
store.settargetlanguage("de")
assert store.gettargetlanguage() == "de"
# explicit targetlanguage wins over filename
store.filename = "Project/it.lproj/Localizable.stringsdict"
assert store.gettargetlanguage() == "de" | [
9,
17134,
235,
-1
] |
def METHOD_NAME(unit, axis):
"""Set the axis label based on unit
Parameters
----------
unit : Unit object, string, or tuple
This parameter comes from unyt_arrayConverter.default_units() or from
user code such as Axes.plot(), Axis.set_units(), etc. In user code, it
is possible to convert the plotted units by specifying the new unit as
a string, such as "ms", or as a tuple, such as ("J", "thermal")
following the call signature of unyt_array.convert_to_units().
axis : Axis object
Returns
-------
AxisInfo object with the label formatted as in-line math latex
"""
if isinstance(unit, tuple):
unit = unit[0]
unit_obj = unit if isinstance(unit, Unit) else Unit(unit)
name = unyt_arrayConverter._axisnames.get(axis, "")
if unit_obj.is_dimensionless:
label = name
else:
name += " "
unit_str = unit_obj.latex_representation()
if unyt_arrayConverter._labelstyle == "[]":
label = name + "$\\left[" + unit_str + "\\right]$"
elif unyt_arrayConverter._labelstyle == "/":
axsym = "$q_{\\rm" + axis.axis_name + "}$"
name = axsym if name == " " else name
if "/" in unit_str:
label = name + "$\\;/\\;\\left(" + unit_str + "\\right)$"
else:
label = name + "$\\;/\\;" + unit_str + "$"
else:
label = name + "$\\left(" + unit_str + "\\right)$"
return AxisInfo(label=label.strip()) | [
6314
] |
def METHOD_NAME(monkeypatch):
monkeypatch.setenv("GIT_AUTHOR_NAME", "John Doe")
monkeypatch.setenv("GIT_AUTHOR_EMAIL", "[email protected]")
assert info.is_git_configured() | [
9,
1493,
137,
4196,
2499,
485,
1659
] |
def METHOD_NAME(self):
"""Testing a valid file"""
self.assertNotEqual(utils.loadModule(Path("../depthai_sdk/utils.py")), None) | [
9,
557,
16860
] |
def METHOD_NAME(self):
... | [
962,
768
] |
def METHOD_NAME(used_numexpr: bool) -> None:
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr) | [
1308,
9,
1571
] |
def METHOD_NAME(self):
self.build_dir = None
self.scripts = None
self.force = None
self.executable = None | [
15,
1881
] |
def METHOD_NAME(parent_key, parent_chain_code, i):
""" Derives a child key from an existing key, i is current derivation parameter.
Logic adapted from https://github.com/satoshilabs/slips/blob/master/slip-0010/testvectors.py. """
assert len(parent_key) == 32
assert len(parent_chain_code) == 32
k = parent_chain_code
if (i & BIP32_PRIVDEV) != 0:
key = b'\x00' + parent_key
else:
key = derive_public_key(parent_key)
d = key + struct.pack('>L', i)
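# Retry loop (as in SLIP-0010): re-derive with modified data until the candidate key is nonzero and below the curve order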
while True:
h = hmac.new(k, d, hashlib.sha512).digest()
key, chain_code = h[:32], h[32:]
a = int.from_bytes(key, byteorder='big')
b = int.from_bytes(parent_key, byteorder='big')
key = (a + b) % BIP32_CURVE.order
if a < BIP32_CURVE.order and key != 0:
key = int(key).to_bytes(32, byteorder='big')
break
d = b'\x01' + h[32:] + struct.pack('>L', i)
return key, chain_code | [
229,
-1
] |
def METHOD_NAME(packer, es_lkas_state_msg, enabled, visual_alert, left_line, right_line, left_lane_depart, right_lane_depart):
values = {s: es_lkas_state_msg[s] for s in [
"CHECKSUM",
"COUNTER",
"LKAS_Alert_Msg",
"Signal1",
"LKAS_ACTIVE",
"LKAS_Dash_State",
"Signal2",
"Backward_Speed_Limit_Menu",
"LKAS_Left_Line_Enable",
"LKAS_Left_Line_Light_Blink",
"LKAS_Right_Line_Enable",
"LKAS_Right_Line_Light_Blink",
"LKAS_Left_Line_Visible",
"LKAS_Right_Line_Visible",
"LKAS_Alert",
"Signal3",
]}
# Filter the stock LKAS "Keep hands on wheel" alert
if values["LKAS_Alert_Msg"] == 1:
values["LKAS_Alert_Msg"] = 0
# Filter the stock LKAS sending an audible alert when it turns off LKAS
if values["LKAS_Alert"] == 27:
values["LKAS_Alert"] = 0
# Filter the stock LKAS sending an audible alert when "Keep hands on wheel" alert is active (2020+ models)
if values["LKAS_Alert"] == 28 and values["LKAS_Alert_Msg"] == 7:
values["LKAS_Alert"] = 0
# Filter the stock LKAS sending an audible alert when "Keep hands on wheel OFF" alert is active (2020+ models)
if values["LKAS_Alert"] == 30:
values["LKAS_Alert"] = 0
# Filter the stock LKAS sending "Keep hands on wheel OFF" alert (2020+ models)
if values["LKAS_Alert_Msg"] == 7:
values["LKAS_Alert_Msg"] = 0
# Show Keep hands on wheel alert for openpilot steerRequired alert
if visual_alert == VisualAlert.steerRequired:
values["LKAS_Alert_Msg"] = 1
# Ensure we don't overwrite potentially more important alerts from stock (e.g. FCW)
if visual_alert == VisualAlert.ldw and values["LKAS_Alert"] == 0:
if left_lane_depart:
values["LKAS_Alert"] = 12 # Left lane departure dash alert
elif right_lane_depart:
values["LKAS_Alert"] = 11 # Right lane departure dash alert
values["LKAS_ACTIVE"] = 1 # Show LKAS lane lines
values["LKAS_Dash_State"] = 2 if enabled else 0 # Green enabled indicator
values["LKAS_Left_Line_Visible"] = int(left_line)
values["LKAS_Right_Line_Visible"] = int(right_line)
return packer.make_can_msg("ES_LKAS_State", CanBus.main, values) | [
129,
2752,
2542,
551
] |
def METHOD_NAME(data_ds, aug_type, num_aug=10, percent=0.1, aug_base="mlm", example_keys=None):
"""
Extend train dataset with augmentation.
"""
if example_keys is None:
return data_ds
if aug_type is None or aug_type == "None":
return data_ds
if aug_type == "delete":
aug = WordDelete(create_n=num_aug, aug_percent=percent)
elif aug_type == "substitute":
aug = WordSubstitute(aug_base, create_n=num_aug, aug_percent=percent)
elif aug_type == "insert":
aug = WordInsert(aug_base, create_n=num_aug, aug_percent=percent)
elif aug_type == "swap":
aug = WordSwap(create_n=num_aug, aug_percent=percent)
else:
raise ValueError("Unsupported data augment strategy `{}`".format(aug_type))
aug_data = []
for example in data_ds:
for key in example_keys:
text_aug = aug.augment(example[key])
for text in text_aug:
new_example = example.copy()
new_example[key] = text
aug_data.append(new_example)
data_ds = MapDataset([x for x in data_ds] + aug_data)
return data_ds | [
978,
41,
365,
3725
] |
def METHOD_NAME(args):
dicom_dataset = pydicom.read_file(args.input_file, force=True)
new_dicom_dataset = adjust_machine_name(dicom_dataset, args.new_machine_name)
pydicom.write_file(args.output_file, new_dicom_dataset) | [
270,
1600,
156,
615
] |
def METHOD_NAME(self, *args, **options):
migrator = ServiceMigrator()
# make sure we have our new list of services
created_service_list = migrator.create_provider_service_list()
self.stdout.write(
self.style.SUCCESS(
"Created our updated list of services"
f"Created {len(created_service_list)} new services"
)
)
# Migrate these services using the appropriate mappings
for slug in MIGRATION_DICT.keys():
migrator.migrate_service_tags_to_service(slug, MIGRATION_DICT[slug])
# remove previous tags
for service_tag_id in SERVICES_TO_DELETE:
tag_models.Tag.objects.get(id=service_tag_id).delete()
self.stdout.write(self.style.SUCCESS("All done!")) | [
276
] |
def METHOD_NAME(x, w):
new_data = 0
if isinstance(x, SparseVector):
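# Sparse input: accumulate v * w[idx] over the stored (index, value) pairs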
for idx, v in x.get_all_data():
# if idx < len(w):
new_data += v * w[idx]
else:
new_data = np.dot(x, w)
return new_data | [
3203,
1903
] |
def METHOD_NAME(v: type) -> TypeGuard[type[IDTypeSubclass]]:
return IDTypeSubclassHint.is_bearable(v) | [
137,
147,
44,
9260
] |
def METHOD_NAME(self) -> None:
if not self._is_upload_required():
return
datetime_portion = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
s3_folder_location = "{base_directory}/{datetime_portion}".format(
base_directory=self.base_amundsen_data_path,
datetime_portion=datetime_portion,
)
self.upload_files(s3_folder_location)
bulk_upload_response = self.neptune_api_client.load(
s3_object_key=s3_folder_location,
failOnError=self.fail_on_error
)
try:
load_id = bulk_upload_response['payload']['loadId']
except KeyError:
raise Exception("Failed to load csv. Response: {0}".format(str(bulk_upload_response)))
load_status = "LOAD_NOT_STARTED"
all_errors: List[NeptuneBulkLoaderLoadStatusErrorLogEntry] = []
while load_status in ("LOAD_IN_PROGRESS", "LOAD_NOT_STARTED", "LOAD_IN_QUEUE"):
time.sleep(self.status_polling_period)
load_status, errors = self._poll_status(load_id)
all_errors.extend(errors)
for error in all_errors:
exception_message = """
Error Code: {error_code}
Error Message: {error_message}
Failed File: {s3_path}
""".format(
error_code=error.get('errorCode'),
error_message=error.get('errorMessage'),
s3_path=error.get('fileName')
)
LOGGER.exception(exception_message) | [
2411,
2581
] |
def METHOD_NAME(self):
"""The name of the code in human-readable form"""
return self.name.replace('_', ' ').title() | [
156,
10730
] |
def METHOD_NAME(self) -> str:
"""
Status of address validation
"""
return pulumi.get(self, "address_validation_status") | [
85,
437,
452
] |
def METHOD_NAME(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(data.cpu().METHOD_NAME(mean=0.0, std=0.02).to(data.device)) | [
1576
] |
def METHOD_NAME(self):
"""
close the window containing this panel
"""
self.close() | [
69,
1462
] |
def METHOD_NAME(volume, _):
verts, faces, normals, values = marching_cubes_lewiner(volume, 1)
faces = correct_mesh_orientation(volume, verts, faces)
return verts, None, faces | [
10246
] |
def METHOD_NAME(self) -> None: | [
0,
1
] |
def METHOD_NAME(ctx: AnalysisContext, rpm: Artifact, href: str) -> Artifact:
out = ctx.actions.declare_output("xml.json")
ctx.actions.run(
cmd_args(
ctx.attrs.makechunk[RunInfo],
cmd_args(rpm, format = "--rpm={}"),
cmd_args(out.as_output(), format = "--out={}"),
"--href={}".format(href),
),
category = "makexml",
)
return out | [
93,
399
] |
async def METHOD_NAME(
domain_with_response_ids: Domain,
default_tracker: DialogueStateTracker,
mock_nlg_endpoint: MagicMock,
) -> None:
callback_nlg = CallbackNaturalLanguageGenerator(mock_nlg_endpoint)
output_channel = CollectingOutputChannel()
events = await ActionBotResponse("utter_multiple_ids").run(
output_channel, callback_nlg, default_tracker, domain_with_response_ids
)
expected_body = nlg_request_format(
"utter_multiple_ids",
default_tracker,
output_channel.name(),
response_id="2",
)
mock_nlg_endpoint.request.assert_called_once_with(
method="post", json=expected_body, timeout=DEFAULT_REQUEST_TIMEOUT
)
assert len(events) == 1
assert events[0].metadata == {"utter_action": "utter_multiple_ids"} | [
9,
1006,
1227,
17,
1076,
41,
107
] |
def METHOD_NAME():
utils.fix_output_encoding()
settings = ComicTaggerSettings()
# this can only work with files with ComicRack tags
style = MetaDataStyle.CIX
if len(sys.argv) < 2:
print >> sys.stderr, "Usage: {0} [comic_folder]".format(sys.argv[0])
return
filelist = utils.get_recursive_filelist(sys.argv[1:])
# first make a list of all comic archive files
comics_list = []
max_name_len = 2
fmt_str = u"{{0:{0}}}".format(max_name_len)
for filename in filelist:
ca = ComicArchive(filename, settings.rar_exe_path)
if ca.seemsToBeAComicArchive():
# Check the images in the file, see if we need to reduce any
for idx in range(ca.getNumberOfPages()):
in_data = ca.getPage(idx)
if in_data is not None:
try:
im = Image.open(StringIO.StringIO(in_data))
w, h = im.size
if h > max_height:
comics_list.append(ca)
max_name_len = max(max_name_len, len(filename))
fmt_str = u"{{0:{0}}}".format(max_name_len)
print >> sys.stderr, fmt_str.format(filename) + "\r",
sys.stderr.flush()
break
except IOError:
# doesn't appear to be an image
pass
print >> sys.stderr, fmt_str.format("") + "\r",
print "--------------------------------------------------------------------------"
print "Found {0} comics with over-large pages".format(len(comics_list))
print "--------------------------------------------------------------------------"
for item in comics_list:
print item.path
# now actually process those files with over-large pages
for ca in comics_list:
filename = ca.path
curr_folder = os.path.dirname(filename)
curr_subfolder = os.path.join(curr_folder, subfolder_name)
# skip any of our generated subfolders...
if os.path.basename(curr_folder) == subfolder_name:
continue
sys.stdout.write("Processing: " + filename)
# verify that we can write to current folder
if not os.access(filename, os.W_OK):
print "Can't move: {0}: skipped!".format(filename)
continue
if not os.path.exists(curr_subfolder) and not os.access(curr_folder, os.W_OK):
print "Can't create subfolder here: {0}: skipped!".format(filename)
continue
if not os.path.exists(curr_subfolder):
os.mkdir(curr_subfolder)
if not os.access(curr_subfolder, os.W_OK):
print "Can't write to the subfolder here: {0}: skipped!".format(filename)
continue
# generate a new file with temp name
tmp_fd, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
os.close(tmp_fd)
cix_md = None
if ca.hasCIX():
cix_md = ca.readCIX()
try:
zout = zipfile.ZipFile(tmp_name, "w")
# Check the images in the file, see if we want to reduce them
page_count = ca.getNumberOfPages()
for idx in range(ca.getNumberOfPages()):
name = ca.getPageName(idx)
in_data = ca.getPage(idx)
out_data = in_data
if in_data is not None:
try:
im = Image.open(StringIO.StringIO(in_data))
w, h = im.size
if h > max_height:
# resize the image
hpercent = max_height / float(h)
wsize = int((float(w) * float(hpercent)))
size = (wsize, max_height)
im = im.resize(size, Image.ANTIALIAS)
output = StringIO.StringIO()
im.save(output, format="JPEG", quality=85)
out_data = output.getvalue()
output.close()
except IOError:
# doesn't appear to be an image
pass
else:
# page is empty?? nothing to write
out_data = ""
sys.stdout.write(".")
sys.stdout.flush()
# write out the new resized image
zout.writestr(name, out_data)
# preserve the old comment
comment = ca.archiver.getArchiveComment()
if comment is not None:
zout.comment = ca.archiver.getArchiveComment()
except Exception as e:
print "Failure creating new archive: {0}!".format(filename)
print e, sys.exc_info()[0]
zout.close()
os.unlink(tmp_name)
else:
zout.close()
# Success! Now move the files
shutil.move(filename, curr_subfolder)
os.rename(tmp_name, filename)
# TODO: We might have converted a rar to a zip, and should probably change
# the extension, as needed.
print "Done!".format(filename)
# Create a new archive object for the new file, and write the old
# CIX data, w/o page info
if cix_md is not None:
ca = ComicArchive(filename, settings.rar_exe_path)
cix_md.pages = []
ca.writeCIX(cix_md) | [
57
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self, experience): | [
86,
6171,
664
] |
def METHOD_NAME(
algorithm_io_image, settings, django_capture_on_commit_callbacks
):
# Override the celery settings
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
alg = AlgorithmFactory()
image = AlgorithmImageFactory(
algorithm=alg,
is_manifest_valid=True,
image__from_path=algorithm_io_image,
)
assert not image.is_in_registry
with django_capture_on_commit_callbacks(execute=True):
upload_to_registry_and_sagemaker(
pk=image.pk,
app_label=image._meta.app_label,
model_name=image._meta.model_name,
mark_as_desired=False,
)
image = AlgorithmImage.objects.get(pk=image.pk)
assert image.is_in_registry
assert not image.is_desired_version
with django_capture_on_commit_callbacks(execute=True):
upload_to_registry_and_sagemaker(
pk=image.pk,
app_label=image._meta.app_label,
model_name=image._meta.model_name,
mark_as_desired=True,
)
image = AlgorithmImage.objects.get(pk=image.pk)
assert image.is_in_registry
assert image.is_desired_version | [
9,
172,
24,
510,
61,
9638
] |
def METHOD_NAME(assert_pixels):
assert_pixels('''
____
____
____
____
''', '''
<style>
@page { size: 4px 4px }
svg { display: block }
</style>
<svg width="4px" height="4px" xmlns="http://www.w3.org/2000/svg">
<That’s bad!
</svg>
''') | [
9,
660,
10129,
909
] |
def METHOD_NAME(A: dace.int64, B: dace.int64[5, 5]):
return A > B | [
9,
18009
] |
def METHOD_NAME(c):
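# Dump each byte as two-digit hex, space-separated, then end with a newline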
for i in c:
print("%02x" % i8(i), end=" ")
print() | [
278
] |
def METHOD_NAME(checkpoint_path):
sd = torch.load(checkpoint_path, map_location="cpu")
return sd | [
557,
551,
553
] |
def METHOD_NAME(self):
"""
Ensure that the FlowTest can raise an exception via the screen_return_value.
"""
# A generic Exception should be caught by the Controller and routed to the
# UnhandledExceptionView.
self.run_sequence([
FlowStep(MainMenuView, screen_return_value=Exception("Test exception")),
FlowStep(UnhandledExceptionView),
]) | [
9,
241,
442,
2499,
5099,
1413,
99
] |
def METHOD_NAME(self):
for line in self.pipe.stdout:
print(line.decode("utf-8").strip()) | [
22
] |
def METHOD_NAME(self):
self.assertFalse(self.test_table.show_table_style_last_column)
self.test_table.show_table_style_last_column = True
self.assertTrue(self.test_table.show_table_style_last_column)
self.test_table.show_table_style_last_column = False | [
9,
697,
410,
641,
679,
105
] |
def METHOD_NAME(code, filename):
return os.path.join(code, os.path.basename(filename)) | [
3095,
2851,
171
] |
def METHOD_NAME(self):
x = java.util.HashMap()
x.put('a', 1)
x.put('b', 2)
x.put('c', 3)
x.put((1,2), "xyz")
y = dict(x)
self.assertEqual(set(y.items()), set([('a', 1), ('b', 2), ('c', 3), ((1,2), "xyz")])) | [
9,
17871
] |
def METHOD_NAME(self):
meta_info = dict(
img_size=[256, 256],
scale_factor=np.array([1.5, 1.5]),
img_shape=torch.rand(4))
det_data_sample = DetDataSample(metainfo=meta_info)
assert 'img_size' in det_data_sample
assert det_data_sample.img_size == [256, 256]
assert det_data_sample.get('img_size') == [256, 256] | [
9,
176
] |
def METHOD_NAME(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""Validates the configuration for the Expectation.
For `expect_column_values_to_match_like_pattern`
we require that the `configuration.kwargs` contain a `like_pattern_list` key that is either a `list` or `dict`.
Args:
configuration: The ExpectationConfiguration to be validated.
Raises:
InvalidExpectationConfigurationError: The configuration does not contain the values required by the Expectation
"""
super().METHOD_NAME(configuration)
configuration = configuration or self.configuration
try:
assert (
"like_pattern_list" in configuration.kwargs
), "Must provide like_pattern_list"
assert isinstance(
configuration.kwargs.get("like_pattern_list"), (list, dict)
), "like_pattern_list must be a list"
assert isinstance(configuration.kwargs.get("like_pattern_list"), dict) or (
len(configuration.kwargs.get("like_pattern_list")) > 0
), "At least one like_pattern must be supplied in the like_pattern_list."
if isinstance(configuration.kwargs.get("like_pattern_list"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"like_pattern_list"
), 'Evaluation Parameter dict for like_pattern_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e)) | [
187,
830
] |
def METHOD_NAME(self):
result = LookupMethods().property_icontains("Abc", "a")
self.assertTrue(result) | [
9,
8115
] |
def METHOD_NAME(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
} | [
4752,
61,
4753
] |
def METHOD_NAME(self) -> None:
run_result = job_run_result.JobRunResult(stdout='abc', stderr='123')
self.assertEqual(
repr(run_result), 'JobRunResult(stdout="abc", stderr="123")') | [
9,
92
] |
def METHOD_NAME(
self,
num_params=41,
dtypes=[torch.float32, torch.float16, torch.float64],
devices=['cuda', 'cpu'],
max_norm=0.54321,
norm_type=2.0,
rtol=1e-3,
atol=1e-20,
):
"""Make sure PyTorch and Apex gradient clipping produce same results"""
# Construct identical sets of parameters
torch_params, apex_params = make_params(
num_params,
dtypes=dtypes,
devices=devices,
make_copy=True,
)
# Apply gradient clipping
torch_norm = torch.nn.utils.clip_grad_norm_(
torch_params,
max_norm,
norm_type=norm_type,
)
apex_norm = clip_grad_norm_(
apex_params,
max_norm,
norm_type=norm_type,
)
# Make sure PyTorch and Apex get same results
torch.testing.assert_close(
apex_norm, torch_norm,
rtol=rtol,
atol=atol,
check_dtype=False,
)
for torch_p, apex_p in zip(torch_params, apex_params):
torch.testing.assert_close(
apex_p, torch_p,
rtol=0,
atol=0,
) # Params should be unaffected
torch.testing.assert_close(
apex_p.grad, torch_p.grad,
rtol=rtol,
atol=atol,
) | [
9,
855,
3299
] |
def METHOD_NAME(self):
self.fp.METHOD_NAME() | [
1462
] |
def METHOD_NAME(query: str) -> Dict:
data = json.dumps({
'query': query
})
r = requests.post(GH_GQL_ENDPOINT, data, headers=HEADERS)
return r.json() | [
74,
3776
] |
def METHOD_NAME(refresh=False, *args, **kwargs):
"""
The tera base model on 960hr
refresh (bool): whether to download ckpt/config again if existed
"""
return tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1(refresh, *args, **kwargs) | [
6336,
-1
] |
def METHOD_NAME(self, np_window_fn, tf_window_fn): | [
979,
1092,
5685
] |
def METHOD_NAME(self):
tt = self.res.t_test(np.eye(len(self.res.params)))
from scipy import stats
pvalue = stats.norm.sf(np.abs(tt.tvalue)) * 2
assert_almost_equal(tt.tvalue, self.res.tvalues, DEC)
assert_almost_equal(pvalue, self.res.pvalues, DEC) | [
9,
4883
] |
def METHOD_NAME(self):
"""
Clears the screen. All pixels on the screen will be set to Color.WHITE.
"""
... | [
537
] |
def METHOD_NAME(self, webpage, video_id, name, is_live):
window_stores = self._extract_pagestore(webpage, video_id)
movie_stores = [
# extract all three important data (most of data are duplicated each other, but slightly different!)
traverse_obj(window_stores, ('v8', 'state', 'movie'), expected_type=dict),
traverse_obj(window_stores, ('v8', 'movie'), expected_type=dict),
traverse_obj(window_stores, 'movieStore', expected_type=dict),
]
if not any(movie_stores):
raise ExtractorError(f'Failed to extract {name} info')
formats = list(self._expand_media(video_id, get_first(movie_stores, 'media')))
if not formats:
# archived livestreams or subscriber-only videos
cookies = self._get_cookies('https://www.openrec.tv/')
detail = self._download_json(
f'https://apiv5.openrec.tv/api/v5/movies/{video_id}/detail', video_id,
headers={
'Origin': 'https://www.openrec.tv',
'Referer': 'https://www.openrec.tv/',
'access-token': try_get(cookies, lambda x: x.get('access_token').value),
'uuid': try_get(cookies, lambda x: x.get('uuid').value),
})
new_media = traverse_obj(detail, ('data', 'items', ..., 'media'), get_all=False)
formats = list(self._expand_media(video_id, new_media))
is_live = False
return {
'id': video_id,
'title': get_first(movie_stores, 'title'),
'description': get_first(movie_stores, 'introduction'),
'thumbnail': get_first(movie_stores, 'thumbnailUrl'),
'formats': formats,
'uploader': get_first(movie_stores, ('channel', 'user', 'name')),
'uploader_id': get_first(movie_stores, ('channel', 'user', 'id')),
'timestamp': int_or_none(get_first(movie_stores, ['publishedAt', 'time']), scale=1000) or unified_timestamp(get_first(movie_stores, 'publishedAt')),
'is_live': is_live,
} | [
297,
1786
] |
def METHOD_NAME(self, ttype):
"""
Set the type of p grid generator.
"""
if ttype == TYPE_UNIFORM or ttype == TYPE_BIUNIFORM:
self.type = ttype
else:
raise DREAMException("PGrid {}: Unrecognized grid type specified: {}.".format(self.name, ttype)) | [
0,
44
] |
def METHOD_NAME(payload, serialized_event_request):
request = EventRequest.deserialize(serialized_event_request)
metadata = EventMetadata(request=request)
event = InventoryCleanupFinished(metadata, {"cleanup": payload})
event.post() | [
72,
950,
1756,
417
] |
def METHOD_NAME(self, pred, bitmap, dest_width, dest_height):
"""
bitmap: single map with shape (H, W), whose values are binarized as {0, 1}
"""
assert len(bitmap.shape) == 2
height, width = bitmap.shape
contours, _ = cv2.findContours(
(bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
num_contours = min(len(contours), self.max_candidates)
boxes = np.zeros((num_contours, 4, 2), dtype=np.int16)
scores = np.zeros((num_contours,), dtype=np.float32)
for index in range(num_contours):
contour = contours[index].squeeze(1)
points, sside = self.get_mini_boxes(contour)
if sside < self.min_size:
continue
points = np.array(points)
score = self.box_score_fast(pred, contour)
if self.box_thresh > score:
continue
box = self.unclip(
points, unclip_ratio=self.unclip_ratio).reshape(-1, 1, 2)
box, sside = self.get_mini_boxes(box)
if sside < self.min_size + 2:
continue
box = np.array(box)
if not isinstance(dest_width, int):
dest_width = dest_width.item()
dest_height = dest_height.item()
box[:, 0] = np.clip(
np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(
np.round(box[:, 1] / height * dest_height), 0, dest_height)
boxes[index, :, :] = box.astype(np.int16)
scores[index] = score
return boxes, scores | [
2877,
280,
5651
] |
def METHOD_NAME(upgrader, pooled_clone_sequencing_not_control):
value = upgrader.upgrade('functional_characterization_experiment', pooled_clone_sequencing_not_control, current_version='9', target_version='10')
assert value['schema_version'] == '10'
assert value['control_type'] == "control" | [
9,
4167,
2037,
2355,
738,
1136,
24
] |
def METHOD_NAME(self):
"""
:return: rms value of background noise
"""
if self._background_rms is None:
if self._noise_map is None:
raise ValueError(
"rms background value as 'background_rms' not specified!"
)
self._background_rms = np.median(self._noise_map)
return self._background_rms | [
2272,
6984
] |
def METHOD_NAME(self):
"""An ordered list of upcoming events.
Events are named tuples with fields for:
time, priority, action, arguments
"""
# Use heapq to sort the queue rather than using 'sorted(self._queue)'.
# With heapq, two events scheduled at the same time will show in
# the actual order they would be retrieved.
events = self._queue[:]
return map(heapq.heappop, [events]*len(events)) | [
651
] |
def METHOD_NAME(self, task):
"""
Job End notification
"""
print("Job ended")
return | [
202,
1798
] |
def METHOD_NAME(self, lr, initial_period_steps, t_mul, iters): | [
19,
16083,
199
] |
def METHOD_NAME():
b64art = create_artifact("http://sp.example.com/saml.xml", b"aabbccddeeffgghhiijj")
art = base64.b64decode(b64art.encode("ascii"))
assert art[:2] == ARTIFACT_TYPECODE
assert int(art[2:4]) == 0
s = sha1(b"http://sp.example.com/saml.xml")
assert art[4:24] == s.digest() | [
9,
129,
1831
] |
def METHOD_NAME(self, get_components):
DG = nx.path_graph(5, create_using=nx.DiGraph)
G = nx.disjoint_union(DG, DG)
seen = set()
for component in get_components(G):
assert len(seen & component) == 0
seen.update(component)
component.clear() | [
9,
2261,
9901
] |
def METHOD_NAME(self):
logger.debug("Reading discover data from %s.", self.path)
try:
fo = open(self.path, 'rb')
except IOError as e:
logger.debug("Failed to read manifest: %s.", e)
return self.data
with fo:
self.json = fo.METHOD_NAME()
self.etag = self.file_etag = compute_etag(self.json)
try:
data = json.loads(self.json.decode('utf-8'))
except json.JSONDecodeError as e:
raise UserError("Malformed manifest: %s" % e)
if not isinstance(data, dict):
raise UserError("Malformed manifest: not a mapping")
self.data.update(data)
self.mtime = os.stat(self.path).st_mtime
return self.data | [
203
] |
def METHOD_NAME(self):
app = web.Application()
app.on_response_prepare.append(set_default_headers)
app.add_routes([
web.get('/json', self.json),
web.get('/overview', self.overview),
web.get('/', self.index)
])
return app | [
129,
991
] |
def METHOD_NAME(test_case, input_dtype, device):
sorted_sequence_1d = flow.tensor(
np.array([1, 3, 5, 7, 9]), dtype=input_dtype, device=flow.device(device)
)
values = flow.tensor(
np.array([3, 6, 9]), dtype=input_dtype, device=flow.device(device)
)
gt = np.array([1, 3, 4])
output = flow.searchsorted(sorted_sequence_1d, values)
test_case.assertTrue(np.allclose(output.numpy(), gt, 0.0001, 0.0001))
test_case.assertTrue(output.dtype == flow.int64) | [
9,
1070,
1389,
988
] |
def METHOD_NAME(self, datacollection_id):
try:
process_dir = os.path.join(self.xds_directory, "..")
raw_process_dir = os.path.join(self.raw_data_input_file_dir, "..")
for dir in (process_dir, raw_process_dir):
for filename in ("x_geo_corr.cbf.bz2", "y_geo_corr.cbf.bz2"):
dest = os.path.join(dir, filename)
if os.path.exists(dest):
continue
shutil.copyfile(
os.path.join("/data/id29/inhouse/opid291", filename), dest
)
except Exception:
logging.exception("Exception happened while copying geo_corr files")
return ESRFMultiCollect.METHOD_NAME(self, datacollection_id) | [
77,
362,
1537
] |
def METHOD_NAME(opts, flgs):
TMPVECT = []
DEBUG = True if flgs["d"] else False
atexit.register(cleanup, vect=TMPVECT, debug=DEBUG)
# check input maps
rhydro = ["kind_label", "discharge", "id_point", "id_plant"]
rother = ["kind_label", "discharge", "id_point", "id_plant"]
ovwr = overwrite()
try:
hydro = check_required_columns(
opts["hydro"], int(opts["hydro_layer"]), rhydro, "hydro"
)
if opts["other"]:
other = check_required_columns(
opts["other"], opts["other_layer"], rother, "other"
)
else:
other = None
# minflow = check_float_or_raster(opts['minflow'])
except ParameterError as exc:
exception2error(exc)
# start working
hydro.open("r")
el, mset = (
opts["elevation"].split("@")
if "@" in opts["elevation"]
else (opts["elevation"], "")
)
elev = RasterRow(name=el, mapset=mset)
elev.open("r")
# import ipdb; ipdb.set_trace()
plants, skipped = read_plants(
hydro,
elev=elev,
restitution=opts["hydro_kind_turbine"],
intake=opts["hydro_kind_intake"],
)
hydro.close()
rvname, rvmset = (
opts["river"].split("@") if "@" in opts["river"] else (opts["river"], "")
)
vplants = opts["output_plants"] if opts["output_plants"] else "tmpplants"
# FIXME: I tried with tmpplants in my mapset and it doesn't work
if opts["output_plants"] == "":
TMPVECT.append(vplants)
with VectorTopo(rvname, rvmset, mode="r") as river:
write_plants(plants, vplants, river, elev, overwrite=ovwr)
if skipped:
for skip in skipped:
print("Plant: %r, Point: %r, kind: %r" % skip)
elev.close()
# compute a buffer around the plants
buff = vplants + "buff"
v.buffer(input=vplants, type="line", output=buff, distance=0.1, overwrite=ovwr)
TMPVECT.append(buff)
# return all the river segments that are not already with plants
v.overlay(
flags="t",
ainput=opts["river"],
atype="line",
binput=buff,
operator="not",
output=opts["output_streams"],
overwrite=ovwr,
) | [
57
] |
def METHOD_NAME(self, desc, isrange, issolvable, hasprivatekeys):
info = self.nodes[0].getdescriptorinfo(desc)
assert_equal(info, self.nodes[0].getdescriptorinfo(descsum_create(desc)))
assert_equal(info['descriptor'], descsum_create(desc))
assert_equal(info['isrange'], isrange)
assert_equal(info['issolvable'], issolvable)
assert_equal(info['hasprivatekeys'], hasprivatekeys) | [
9,
1966
] |
def METHOD_NAME(self):
for i in range(self.n_servers):
self.start_server(i)
time.sleep(1) | [
447,
3356
] |
def METHOD_NAME(driver, key, settings, env, stdout, stderr, prefix_output_with_timestamp):
def _exec_command(command, slot_info, events):
host = slot_info.hostname
local_rank = slot_info.local_rank
verbose = settings.verbose
result = rsh(driver.addresses(), key, host, command, env, local_rank, verbose,
stdout, stderr, prefix_output_with_timestamp, False, events)
return result, time.time()
return _exec_command | [
1005,
462,
667
] |
def METHOD_NAME(self, offset, magnitude):
"""
Stringify `offset` and `magnitude`.
Expects the string to be shown top/left of the value it refers to.
"""
METHOD_NAME = ""
if offset != 0.:
METHOD_NAME += self.compact_exponential(offset) + " + "
if magnitude != 1.:
METHOD_NAME += self.compact_exponential(magnitude) + " × "
return self.fix_minus(METHOD_NAME) | [
426
] |
def METHOD_NAME(self):
self.assertEqual(simple_marshal(True), ':boolean') | [
9,
6617,
863
] |
def METHOD_NAME(job, *args, in_seconds=0, **kwargs):
"""
Start job async with redis or sync if redis is not connected
:param job: Job function
:param args: Function arguments
:param in_seconds: Job will be delayed for in_seconds
:param kwargs: Function keywords arguments
:return: Job or function result
"""
redis = redis_connected() and kwargs.get('redis', True)
queue_name = kwargs.get("queue_name", "default")
if 'queue_name' in kwargs:
del kwargs['queue_name']
if 'redis' in kwargs:
del kwargs['redis']
job_timeout = None
if 'job_timeout' in kwargs:
job_timeout = kwargs['job_timeout']
del kwargs['job_timeout']
if redis:
logger.info(f"Start async job {job.__name__} on queue {queue_name}.")
queue = django_rq.get_queue(queue_name)
enqueue_method = queue.enqueue
if in_seconds > 0:
enqueue_method = partial(queue.enqueue_in, timedelta(seconds=in_seconds))
job = enqueue_method(
job,
*args,
**kwargs,
job_timeout=job_timeout
)
return job
else:
on_failure = kwargs.pop('on_failure', None)
try:
return job(*args, **kwargs)
except Exception:
exc_info = sys.exc_info()
if on_failure:
on_failure(job, *exc_info)
raise | [
447,
202,
958,
894,
164
] |
def METHOD_NAME(self):
server = ('rotate.aprs.net',
'cwop.aprs.net')[self.fixed_data['passcode'] == '-1']
# give up after 50 seconds as there'll be new data to send instead
timeout = time.time() + 50
try:
while True:
try:
with self.connect(server) as METHOD_NAME:
response = METHOD_NAME.recv(4096).decode('ASCII')
logger.debug('server software: %s', response.strip())
yield METHOD_NAME, 'OK'
break
except socket.timeout:
if time.time() > timeout:
raise
except Exception as ex:
logger.error(str(ex))
yield None, repr(ex) | [
240
] |
def METHOD_NAME(name: Union[nn.Module, str], inplace=None, **kwargs):
act_layer = get_act_layer(name)
if act_layer is None:
return None
if inplace is None:
return act_layer(**kwargs)
try:
return act_layer(inplace=inplace, **kwargs)
except TypeError:
# recover if act layer doesn't have inplace arg
return act_layer(**kwargs) | [
129,
740,
94
] |
def METHOD_NAME(self):
"""
Retrieves the hardware revision of the device
Returns:
string: Revision value of device
"""
return "N/A" | [
19,
71
] |
def METHOD_NAME(name):
"""
Finds the first matching file path within :py:attr:`files_paths`.
:param name: Name of the file
:type name: str
:return: Found file path
:rtype: str or None
"""
return current_actor().METHOD_NAME(name) | [
19,
7675,
171,
157
] |
def METHOD_NAME(self, orc):
old_orc = int(os.environ.get("DRGN_PREFER_ORC_UNWINDER", "0")) != 0
with setenv("DRGN_PREFER_ORC_UNWINDER", "1" if orc else "0"):
if orc == old_orc:
prog = self.prog
else:
prog = Program()
prog.set_kernel()
self._load_debug_info(prog)
self._test_drgn_test_kthread_trace(
prog.stack_trace(prog["drgn_test_kthread"].pid)
) | [
9,
604,
2243
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self):
no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
self.assertIsNone(no_backend)
simple_backend = find_backend(" if not is_tokenizers_available():")
self.assertEqual(simple_backend, "tokenizers")
backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
self.assertEqual(backend_with_underscore, "tensorflow_text")
double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
self.assertEqual(double_backend, "sentencepiece_and_tokenizers")
double_backend_with_underscore = find_backend(
" if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
)
self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
triple_backend = find_backend(
" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
)
self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision") | [
9,
416,
3127
] |
def METHOD_NAME(Run):
# -- List of Primary Datasets:
dbs_cmd = """ dbs search --query='find primds.name
where run=%d and dataset like */RAW' """ % (Run)
rows = subprocess.getoutput(dbs_cmd)
lines = rows.split("\n")
j=0
print("\nThe primary datasets for this run are: \n")
for Line in lines:
j=j+1
if j <=4:
continue
print(Line)
line=Line.split()
Datasets.append(line[0])
print(" ") | [
1379,
4146
] |
def METHOD_NAME(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result | [
146
] |
def METHOD_NAME(type_string):
"""
Return the `Node` sub class that corresponds to the given type string.
:param type_string: the `type` string of the node
:return: a sub class of `Node`
"""
from aiida.orm import Data, Node
from aiida.plugins.entry_point import load_entry_point
if type_string == '':
return Node
if type_string == 'data.Data.':
return Data
if not type_string.endswith('.'):
raise exceptions.DbContentError(f'The type string `{type_string}` is invalid')
try:
base_path = type_string.rsplit('.', 2)[0]
except ValueError:
raise exceptions.EntryPointError from ValueError
# This exception needs to be there to make migrations work that rely on the old type string starting with `node.`
# Since now the type strings no longer have that prefix, we simply strip it and continue with the normal logic.
if base_path.startswith('node.'):
base_path = strip_prefix(base_path, 'node.')
# Data nodes are the only ones with sub classes that are still external, so if the plugin is not available
# we fall back on the base node type
if base_path.startswith('data.'):
entry_point_name = strip_prefix(base_path, 'data.')
try:
return load_entry_point('aiida.data', entry_point_name)
except exceptions.MissingEntryPointError:
return Data
if base_path.startswith('process'):
entry_point_name = strip_prefix(base_path, 'nodes.')
return load_entry_point('aiida.node', entry_point_name)
# At this point we really have an anomalous type string. At some point, storing nodes with unresolvable type strings
# was allowed, for example by creating a sub class in a shell and then storing an instance. Attempting to load the
# node then would fail miserably. This is now no longer allowed, but we need a fallback for existing cases, which
# should be rare. We fallback on `Data` and not `Node` because bare node instances are also not storable and so the
# logic of the ORM is not well defined for a loaded instance of the base `Node` class.
warnings.warn(f'unknown type string `{type_string}`, falling back onto `Data` class') # pylint: disable=no-member
return Data | [
557,
1716,
2
] |
def METHOD_NAME(pg_repo_local):
runner = CliRunner()
# Uncheckout first
result = runner.invoke(checkout_c, [str(pg_repo_local), "-u", "-f"])
assert result.exit_code == 0
assert pg_repo_local.head is None
assert not get_engine().schema_exists(str(pg_repo_local))
result = runner.invoke(checkout_c, [str(pg_repo_local) + ":latest", "-l"])
assert result.exit_code == 0
assert pg_repo_local.head is not None
assert get_engine().schema_exists(str(pg_repo_local))
assert get_engine().get_table_type(str(pg_repo_local), "fruits") == "VIEW"
assert get_engine().get_table_type(str(pg_repo_local), WRITE_LOWER_PREFIX + "fruits") in (
"FOREIGN TABLE",
"FOREIGN",
)
assert (
get_engine().get_table_type(str(pg_repo_local), WRITE_UPPER_PREFIX + "fruits")
== "BASE TABLE"
) | [
9,
4598,
-1,
2170
] |
def METHOD_NAME(original_func, instance, args, kwargs):
# Catching the name of the operation that called make_request()
operation_name = None
# Go up the stack until we get the first non-ddtrace module
# DEV: For `lambda.list_functions()` this should be:
# - ddtrace.contrib.boto.patch
# - ddtrace.vendor.wrapt.wrappers
# - boto.awslambda.layer1 (make_request)
# - boto.awslambda.layer1 (list_functions)
# But can vary depending on Python versions; that's why we use a heuristic
frame = inspect.currentframe().f_back
operation_name = None
while frame:
if frame.f_code.co_name == "make_request":
operation_name = frame.f_back.f_code.co_name
break
frame = frame.f_back
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return original_func(*args, **kwargs)
endpoint_name = instance.host.split(".")[0]
with pin.tracer.trace(
schematize_cloud_api_operation(
"{}.command".format(endpoint_name), cloud_provider="aws", cloud_service=endpoint_name
),
service=schematize_service_name("{}.{}".format(pin.service, endpoint_name)),
span_type=SpanTypes.HTTP,
) as span:
span.set_tag(SPAN_MEASURED_KEY)
if args:
http_method = get_argument_value(args, kwargs, 0, "method")
span.resource = "%s.%s" % (endpoint_name, http_method.lower())
else:
span.resource = endpoint_name
if not config.boto["tag_no_params"] and config.boto["tag_all_params"]:
aws.add_span_arg_tags(span, endpoint_name, args, AWS_AUTH_ARGS_NAME, AWS_AUTH_TRACED_ARGS)
# Obtaining region name
region_name = _get_instance_region_name(instance)
meta = {
aws.AGENT: "boto",
aws.OPERATION: operation_name,
}
if region_name:
meta[aws.REGION] = region_name
meta[aws.AWSREGION] = region_name
span.set_tags(meta)
# Original func returns a boto.connection.HTTPResponse object
result = original_func(*args, **kwargs)
span.set_tag(http.STATUS_CODE, result.status)
span.set_tag_str(http.METHOD, result._method)
# set analytics sample rate
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.boto.get_analytics_sample_rate())
span.set_tag_str(COMPONENT, config.boto.integration_name)
# set span.kind to the type of request being performed
span.set_tag_str(SPAN_KIND, SpanKind.CLIENT)
return result | [
1265,
2433,
377
] |
def METHOD_NAME():
model = PrinterOutputModel(MagicMock())
model.peripheralsChanged = MagicMock()
peripheral = MagicMock(spec=Peripheral)
peripheral.name = "test"
peripheral2 = MagicMock(spec=Peripheral)
peripheral2.name = "test2"
model.addPeripheral(peripheral)
assert model.peripheralsChanged.emit.call_count == 1
model.addPeripheral(peripheral2)
assert model.peripheralsChanged.emit.call_count == 2
assert model.peripherals == "test, test2"
model.removePeripheral(peripheral)
assert model.peripheralsChanged.emit.call_count == 3
assert model.peripherals == "test2" | [
9,
-1
] |
def METHOD_NAME(self, vid, value, subgroup=None, ignore_type=False):
"""
check if vid is in test section of file
"""
newval = EnvBase.METHOD_NAME(self, vid, value, subgroup, ignore_type)
if newval is None:
tnode = self.get_optional_child("test")
if tnode is not None:
newval = self.set_element_text(vid, value, root=tnode)
return newval | [
0,
99
] |
def METHOD_NAME(self):
return len(self.get_all_forecast_override_inputs_elements()) | [
19,
104,
661
] |
def METHOD_NAME(self, work_item): | [
1283,
3160
] |
def METHOD_NAME(self):
return util.safe_load_file(self.state_file) | [
557,
171
] |
def METHOD_NAME(fid, node, matkind, indent=" ", transpose=False):
"""Read named matrix from the given node.
Parameters
----------
fid : file
The opened file descriptor.
node : dict
The node in the tree.
matkind : int
The type of matrix.
transpose : bool
If True, transpose the matrix. Default is False.
%(verbose)s
Returns
-------
mat: dict
The matrix data
"""
# Descend one level if necessary
if node["block"] != FIFF.FIFFB_MNE_NAMED_MATRIX:
for k in range(node["nchild"]):
if node["children"][k]["block"] == FIFF.FIFFB_MNE_NAMED_MATRIX:
if has_tag(node["children"][k], matkind):
node = node["children"][k]
break
else:
logger.info(
indent + "Desired named matrix (kind = %d) not " "available" % matkind
)
return None
else:
if not has_tag(node, matkind):
logger.info(
indent + "Desired named matrix (kind = %d) not " "available" % matkind
)
return None
# Read everything we need
tag = find_tag(fid, node, matkind)
if tag is None:
raise ValueError("Matrix data missing")
else:
data = tag.data
nrow, ncol = data.shape
tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
if tag is not None and tag.data != nrow:
raise ValueError(
"Number of rows in matrix data and FIFF_MNE_NROW " "tag do not match"
)
tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
if tag is not None and tag.data != ncol:
raise ValueError(
"Number of columns in matrix data and " "FIFF_MNE_NCOL tag do not match"
)
tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
row_names = tag.data.split(":") if tag is not None else []
tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
col_names = tag.data.split(":") if tag is not None else []
mat = dict(
nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, data=data
)
if transpose:
_transpose_named_matrix(mat)
return mat | [
203,
1640,
430
] |
def METHOD_NAME(self):
rack = create_object(tutobjects.TutorialWeaponRack, key="rack", location=self.room1)
rack.db.available_weapons = ["sword"]
self.call(tutobjects.CmdGetWeapon(), "", "You find Rusty sword.", obj=rack) | [
9,
-1
] |
def METHOD_NAME(
text: TextData,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS), | [
73,
1227,
526
] |
def METHOD_NAME(monkeypatch, mock_snmp_get, mock_snmp_set) -> Callable[[], MagicMock]:
monkeypatch.setattr("agent_plugins.exploiters.snmp.src.snmp_client.getCmd", mock_snmp_get)
monkeypatch.setattr("agent_plugins.exploiters.snmp.src.snmp_client.setCmd", mock_snmp_set)
return SNMPClient(snmp_request_timeout=0.5, snmp_retries=3) | [
121,
340
] |
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem) | [
297,
365
] |
def METHOD_NAME(self): | [
9,
719,
8669,
272,
928
] |
def METHOD_NAME(self):
"""
Uses QChem log files to test that
molecular degrees of freedom can be properly read.
"""
log = QChemLog(os.path.join(self.data_path, "npropyl.out"))
conformer, unscaled_frequencies = log.load_conformer()
assert conformer.spin_multiplicity == 2
log = QChemLog(os.path.join(self.data_path, "co.out"))
conformer, unscaled_frequencies = log.load_conformer()
assert conformer.spin_multiplicity == 1 | [
9,
5548,
15207,
280,
12534,
390
] |
def METHOD_NAME():
fv3gfs = MockFV3GFS()
mapper = FV3StateMapper(fv3gfs)
with pytest.raises(KeyError):
assert "not in fv3" not in mapper
mapper["not in fv3"] | [
9,
474,
7278,
551,
3782,
45,
59
] |
def METHOD_NAME(
external_actor_id: int, organization: Organization
) -> ExternalActor:
try:
return ExternalActor.objects.get(id=external_actor_id, organization=organization)
except ExternalActor.DoesNotExist:
raise Http404 | [
19,
751,
7675,
894,
2121
] |
def METHOD_NAME():
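# Recursive (IIR) filter step: the output combines the current input with previous input/output samples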
in_img << imgIn[i, j]
in_xm1 << xm1
in_ym1 << ym1
in_ym2 << ym2
out_y1 >> y1[i, j]
out_xm1 >> xm1
out_ym1 >> ym1
out_ym2 >> ym2
out_y1 = a1 * in_img + a2 * in_xm1 + b1 * in_ym1 + b2 * in_ym2
out_xm1 = in_img
out_ym2 = in_ym1
out_ym1 = out_y1 | [
1653,
15987
] |
def METHOD_NAME(self):
return self.fn_wrap or self.fn | [
4243,
667
] |