text stringlengths 15–7.82k | ids sequencelengths 1–7 |
---|---|
def METHOD_NAME(self) -> str:
"""Validate PackageConfig.
Returns:
String that the config is valid.
Raises:
PackitConfigException: when the config is not valid
"""
schema_errors: Optional[Union[List[Any], Dict[Any, Any]]] = None
config = None
try:
config = PackageConfig.get_from_dict(
self.content,
config_file_path=str(self.config_file_path),
search_specfile=get_local_specfile_path,
dir=self.config_file_path.parent,
repo_name=self.project_path.name,
)
except ValidationError as e:
schema_errors = e.messages
specfile_path = self.content.get("specfile_path", None)
if specfile_path and not (self.project_path / specfile_path).is_file():
logger.warning(
f"The spec file you defined ({specfile_path}) is not "
f"present in the repository. If it's being generated "
f"dynamically, you can verify the functionality by "
f"running `packit srpm` to create an SRPM "
f"from the current checkout. If it's not being generated, "
f"please make sure the path is correct and "
f"the file is present."
)
synced_files_errors = []
if config:
for package_config in config.get_package_config_views().values():
synced_files_errors = [
f
for f in iter_srcs(package_config.files_to_sync)
if not (
(self.project_path / package_config.paths[0] / f).exists()
or any(self.project_path.glob(f))
)
] # right now we use just the first path in a monorepo package
output = f"{self.config_file_path.name} does not pass validation:\n"
if schema_errors:
if isinstance(schema_errors, list):
output += "\n".join(map(str, schema_errors))
else:
for field_name, errors in schema_errors.items():
output += self.validate_get_field_output(errors, field_name)
if synced_files_errors:
output += (
"The following {} configured to be synced but "
"{} not present in the repository: {}\n"
).format(
*(
(
"paths are",
"are",
)
if (len(synced_files_errors) > 1)
else ("path is", "is")
),
", ".join(synced_files_errors),
)
if schema_errors or synced_files_errors:
raise PackitConfigException(output)
else:
return f"{self.config_file_path.name} is valid and ready to be used" | [
187
] |
def METHOD_NAME(*args, **kwargs):
return replacement_target(*args, **kwargs, process_group=process_group, ranks=ranks) | [
846,
3729,
1030
] |
def METHOD_NAME(some_point):
assert loads(dumps(some_point, hex=True), hex=True) == some_point | [
9,
5725,
696
] |
def METHOD_NAME(self, subset_name, instance_data, pre_create_data):
# Only allow a single render instance to exist
if self._get_singleton_node():
raise CreatorError("A Render instance already exists - only "
"one can be configured.")
# Apply default project render settings on create
if self.render_settings.get("apply_render_settings"):
lib_rendersettings.RenderSettings().set_default_renderer_settings()
super(CreateRenderlayer, self).METHOD_NAME(subset_name,
instance_data,
pre_create_data) | [
129
] |
def METHOD_NAME(res: list, key: str) -> list:
"""
Unpacks a query result list (i.e., list of dicts) into a list of
unique values for the given dict key.
"""
res_list = list(set([r.get(key) for r in res]))
res_list = [v for v in res_list if v is not None]
return res_list | [
19,
245,
47,
2768,
199
] |
def METHOD_NAME(self, spec):
createfunc = pythonapi.PyModule_FromDefAndSpec2
# c_void_p is standin for PyModuleDef *
createfunc.argtypes = c_void_p, py_object, c_int
createfunc.restype = py_object
module = createfunc(self.module_def, spec, self.api_version)
return module | [
129,
298
] |
def METHOD_NAME( self ) :
9,
343,
195
] |
def METHOD_NAME(self, point):
238,
1669,
217,
3005
] |
def METHOD_NAME(spliced_spec):
"""Given a spliced spec, this function conducts all the rewiring on all
nodes in the DAG of that spec."""
assert spliced_spec.spliced
for spec in spliced_spec.traverse(order="post", root=True):
if not spec.build_spec.installed:
# TODO: May want to change this at least for the root spec...
# spec.build_spec.package.do_install(force=True)
raise PackageNotInstalledError(spliced_spec, spec.build_spec, spec)
if spec.build_spec is not spec and not spec.installed:
explicit = spec is spliced_spec
rewire_node(spec, explicit) | [
4342
] |
def METHOD_NAME(apps, schema_editor):
ConferenceSetting = apps.get_model("conferences", "ConferenceSetting")
objs = ConferenceSetting.objects.all()
for obj in objs:
obj.delete() | [
188,
235,
199
] |
def METHOD_NAME(self):
self.lbf = self.lb_class(**LB_PARAMS, **self.lb_params)
self.system.lb = self.lbf | [
0,
1
] |
def METHOD_NAME(current_actor_context, nfs_fstype, monkeypatch):
monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp=nfs_fstype,
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
current_actor_context.feed(StorageInfo(mount=with_mount_share))
current_actor_context.run()
report_fields = current_actor_context.consume(Report)[0].report
assert is_inhibitor(report_fields) | [
9,
7675,
41,
2844,
834
] |
def METHOD_NAME(file_path: str) -> bool:
"""Run the given Python file; return True if no errors, else False."""
source = Path(file_path).read_text()
try:
exec(compile(source, file_path, "exec"), globals()) # noqa: S102
except Exception as e:
print(e, file=sys.stderr)
return False
finally:
plt.close("all")
return True | [
22,
1441
] |
def METHOD_NAME(self, event):
"""
:param QKeyEvent event:
:return:
"""
key = event.key()
if key == Qt.Key_Tab:
self._symexec_view.switch_to_disassembly_view()
event.accept()
super().METHOD_NAME(event) | [
59,
2971,
417
] |
def METHOD_NAME(node: Node, visited_children: Tuple[Any]) -> Literal:
text = node.text[1:-1]
text = newline_re.sub("\n", text)
match = text.replace("\\'", "'")
return Literal(None, match) | [
716,
5659,
1479
] |
def METHOD_NAME(dtype):
"""Returns the native Python type from a Perspective type"""
mapping = {
t_dtype.DTYPE_BOOL: bool,
t_dtype.DTYPE_FLOAT32: float,
t_dtype.DTYPE_FLOAT64: float,
t_dtype.DTYPE_UINT8: int,
t_dtype.DTYPE_UINT16: int,
t_dtype.DTYPE_UINT32: int,
t_dtype.DTYPE_UINT64: int,
t_dtype.DTYPE_INT8: int,
t_dtype.DTYPE_INT16: int,
t_dtype.DTYPE_INT32: int,
t_dtype.DTYPE_INT64: int,
t_dtype.DTYPE_DATE: date,
t_dtype.DTYPE_TIME: datetime,
t_dtype.DTYPE_STR: str,
}
return _extract_type(dtype, mapping) | [
1249,
24,
16202
] |
def METHOD_NAME(self) -> 'outputs.SubscriptionAliasResponsePropertiesResponse':
"""
Subscription Alias response properties.
"""
return pulumi.get(self, "properties") | [
748
] |
def METHOD_NAME(self):
while True:
try:
yield self._message_backlog.popleft()
except IndexError:
break | [
7165,
3041
] |
def METHOD_NAME(self):
19,
782,
156
] |
def METHOD_NAME(self, content):
try:
def to_directive(x):
name, rest = x
rest = [rest] if rest is not None else []
return Directive(name=name.value.strip(), attrs=rest, lineno=name.lineno, src=self)
def to_section(name, rest):
return Section(name=name.value.strip(), children=rest, lineno=name.lineno, src=self)
def apply_defaults(cfg):
if "DEFAULT" not in cfg:
return cfg
defaults = cfg["DEFAULT"]
not_defaults = cfg[~eq("DEFAULT")]
for c in not_defaults:
for d in defaults.grandchildren:
if d.name not in c:
c.children.append(d)
cfg.children = list(not_defaults)
return cfg
def make_bytes(number, char_multiple):
if char_multiple.lower() == 'k':
return number * 2**10
if char_multiple.lower() == 'm':
return number * 2**20
if char_multiple.lower() == 'g':
return number * 2**30
content = "\n".join(content)
header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ")
sep_chars = set("=")
key_chars = header_chars - sep_chars
value_chars = set(string.printable) - set("\n\r")
On = Literal("on", True, ignore_case=True)
Off = Literal("off", False, ignore_case=True)
Tru = Literal("true", True, ignore_case=True)
Fals = Literal("false", False, ignore_case=True)
Boolean = ((On | Off | Tru | Fals) & (WSChar | LineEnd)) % "Boolean"
Num = Number & (WSChar | LineEnd)
QuoStr = QuotedString & (WSChar | LineEnd)
# Handle php.ini shorthand notation for memory limits: 1G, 8M, 50K
# https://www.php.net/manual/en/faq.using.php#faq.using.shorthandbytes
MemNum = (Lift(make_bytes) * Number * (Char('K') | Char('M') | Char('G'))) & (WSChar | LineEnd)
LeftEnd = (WS + LeftBracket + WS)
RightEnd = (WS + RightBracket + WS)
Header = (LeftEnd >> PosMarker(String(header_chars)) << RightEnd) % "Header"
Key = WS >> PosMarker(String(key_chars)) << WS
Sep = InSet(sep_chars, "Sep")
Value = WS >> (Boolean | MemNum | Num | QuoStr | HangingString(value_chars))
KVPair = WithIndent(Key + Opt(Sep >> Value)) % "KVPair"
Comment = (WS >> (OneLineComment(";")).map(lambda x: None))
Line = Comment | KVPair.map(to_directive)
Sect = Lift(to_section) * Header * Many(Line).map(skip_none)
Doc = Many(Comment | Sect).map(skip_none)
Top = Doc << WS << EOF
res = Entry(children=Top(content), src=self)
return apply_defaults(res)
except SkipComponent:
raise
except:
raise ParseException("Could not parse content: '{0}'".
format(content)) | [
214,
366
] |
def METHOD_NAME(self):
self.job_id = str(uuid.uuid1())
# session = Session.create(0, 0).init_computing("abc").computing
session.init(self.job_id) | [
0,
1
] |
def METHOD_NAME(self, uid: int) -> dict[str, Any]:
"""
Get group entry for gid
"""
for e in self.group:
if uid == e["gr_gid"]:
return e
raise KeyError("getgruid(): uid not found in group file: " + str(uid)) | [
-1
] |
def METHOD_NAME(
floating_point_func: Callable[[np.ndarray], np.ndarray],
input_scale: "relay.Expr",
input_zero_point: "relay.Expr",
output_scale: "relay.Expr",
output_zero_point: "relay.Expr",
in_axis: int = -1,
out_axis: int = -1,
in_dtype: str = "uint8",
out_dtype: str = "uint8",
) -> np.ndarray:
"""
Return a table where each input indexes to the output quantizing the given function.
Note this also supports mapping unsigned and signed integers to each other.
Args:
floating_point_func: The numpy function which this table is to approximate
input_scale: The scale of the quantized input tensor.
input_zero_point: The zero point of the quantized input tensor.
output_scale: The scale of the quantized output tensor.
output_zero_point: The zero point of the quantized output tensor.
in_axis: The axis for multi-channel quantization of the input if applicable.
out_axis: The axis for multi-channel quantization of the output if applicable.
in_dtype: The dtype of the input tensor.
out_dtype: The wanted dtype of the output tensor.
Returns:
A numpy array where values in quantized space will index to the output in quantized space
approximating the given function.
"""
if not np.issubdtype(np.dtype(in_dtype), np.integer) or not np.issubdtype(
np.dtype(out_dtype), np.integer
):
raise ValueError(
f"Only integer dtypes allowed got {in_dtype} and {out_dtype} for in and out dtypes."
)
dtype_info = np.iinfo(in_dtype)
num_bits = dtype_info.bits
# Use TVMs quantization methods via relay to be consistent
# inputs_quantized = np.array(range(dtype_info.min, dtype_info.max + 1)).astype(in_dtype)
# First generate a list of all num_bit integer patterns
inputs_quantized = np.array(range(0, 2**num_bits), dtype=f"uint{num_bits}")
# Reinterpret bits as the real datatype
# Note what we are doing here is a bit tricky, the canonical view of our lookup table
# is using the uintX version. When we run the lookup in the relay graph, we cast the
# bit pattern back into this form.
inputs_quantized = inputs_quantized.view(in_dtype)
inputs_quantized = relay.const(inputs_quantized, dtype=in_dtype)
inputs_dequantized = run_const_expr(
relay.qnn.op.dequantize(
inputs_quantized,
input_scale=input_scale,
input_zero_point=input_zero_point,
axis=in_axis,
)
)
output_dequantized = relay.const(floating_point_func(inputs_dequantized))
output_quantized = run_const_expr(
relay.qnn.op.quantize(
output_dequantized, output_scale, output_zero_point, out_axis, out_dtype
)
)
return output_quantized | [
129,
4143,
1906,
410
] |
def METHOD_NAME(self):
self.smartMT = StartGUI()
self.smartMT.show()
QTest.qWaitForWindowActive(self.smartMT)
print("matplotlib backend: {}".format(matplotlib.get_backend())) | [
0,
1
] |
def METHOD_NAME(mask):
if is_int(mask):
return mask
elif is_int_list(mask):
return int_list_to_int(mask)
elif is_comma_sep_hex(mask):
return comma_sep_hex_to_int(mask)
elif is_comma_sep_list(mask):
return comma_sep_list_to_int(mask)
else:
raise BadMask(mask) | [
24,
962
] |
def METHOD_NAME(widget):
print("action 6") | [
9685
] |
def METHOD_NAME(
user, user_id, team_user_id
):
remoteci = user.post(
"/api/v1/remotecis",
data={"name": "My remoteci", "team_id": team_user_id},
).data["remoteci"]
r = user.post("/api/v1/remotecis/%s/users" % remoteci["id"])
assert r.status_code == 201
r = user.delete(
"/api/v1/remotecis/%s" % remoteci["id"],
headers={"If-match": remoteci["etag"]},
)
assert r.status_code == 204
subscribed_remotecis = user.get("/api/v1/users/%s/remotecis" % user_id).data[
"remotecis"
]
assert len(subscribed_remotecis) == 0 | [
9,
34,
385,
-1,
34,
983,
7522
] |
def METHOD_NAME(self, verbose=False) -> None:
def vprint(txt):
if verbose:
print(txt)
if self.read_uint(16) != 0x8b1f:
raise ValueError("Invalid GZIP magic number")
cmeth = self.read_uint(8)
if cmeth != 8:
raise ValueError(f"Unsupported compression method: {str(cmeth)}")
# reserved flags
flags: int = self.read_uint(8)
if flags & 0xe0 != 0:
vprint("Reserved flags are set")
# modification time
mtime = self.read_uint(32)
if mtime != 0:
dt = datetime.datetime.fromtimestamp(mtime, datetime.timezone.utc)
vprint(f"Last modified: {dt}")
else:
vprint("Last modified: N/A")
# extra flags
extraflags = self.read_uint(8)
if extraflags == 2:
vprint("Extra flags: Maximum compression")
elif extraflags == 4:
vprint("Extra flags: Fastest compression")
else:
vprint(f"Extra flags: Unknown ({extraflags})")
osbyte = self.read_uint(8)
osstr: str = self.operating_system.get(osbyte, "Really unknown")
vprint(f"Operating system: {osstr}")
# handle assorted flags
if flags & 0x01:
vprint("Flag: Text")
if flags & 0x04:
vprint("Flag: Extra")
count: int = self.read_uint(16)
while count > 0: # Skip extra data
self.read_uint(8)
count -= 1
if flags & 0x08:
vprint(f"File name: {self.read_nul_terminated_string()}")
if flags & 0x02:
vprint(f"Header CRC-16: {self.read_uint(16):04X}")
if flags & 0x10:
vprint(f"Comment: {self.read_nul_terminated_string()}") | [
203,
572
] |
def METHOD_NAME(self, watched: QtCore.QObject, event: QtCore.QEvent) -> bool:
"""Tab context menu implementation."""
if watched == self._tab_bar:
if event.type() == QtCore.QEvent.ContextMenu:
pos = event.pos()
tab_index = self._tab_bar.tabAt(pos)
tab_widget = self.tabs.widget(tab_index)
tab_menu = QtWidgets.QMenu(self._tab_bar)
close_action = tab_menu.addAction(
"Close", lambda: self._on_tab_close_requested(tab_index)
)
if len(self._viewers.get(type(tab_widget), [])) == 1:
# Only enable the action if there is more than one viewer of this
# type open.
close_action.setEnabled(False)
tab_menu.popup(self._tab_bar.mapToGlobal(pos))
return True
return False | [
417,
527
] |
def METHOD_NAME(variant_scalar_rgb):
b = mi.load_dict({'type': 'dielectric'})
assert b is not None
assert b.component_count() == 2
assert b.flags(0) == (mi.BSDFFlags.DeltaReflection | mi.BSDFFlags.FrontSide |
mi.BSDFFlags.BackSide)
assert b.flags(1) == (mi.BSDFFlags.DeltaTransmission | mi.BSDFFlags.FrontSide |
mi.BSDFFlags.BackSide | mi.BSDFFlags.NonSymmetric)
assert b.flags() == b.flags(0) | b.flags(1)
# Should not accept negative IORs
with pytest.raises(RuntimeError):
mi.load_dict({'type': 'dielectric', 'int_ior': -0.5}) | [
3243,
129
] |
def METHOD_NAME(self, ts):
try:
ts.dimensions = (
self.dimensions[0]
if self.dimensions.shape[0] == 1
else self.dimensions[ts.frame]
)
except IndexError as e:
raise ValueError(
f"Dimensions array has no data for frame {ts.frame}"
) from e
return ts | [
1053
] |
def METHOD_NAME() -> Dict[str, Any]:
"""
Return the 'logs' attribute of the thread-local variable.
"""
if hasattr(_THREAD, "logs"):
return _THREAD.logs
return {} | [
19,
600,
1099
] |
def METHOD_NAME(self):
root_folder = '../../../resources/none_contains'
check_id = "NetworkACL"
should_pass = []
should_fail = ['azurerm_key_vault.kv']
expected_results = {check_id: {"should_pass": should_pass, "should_fail": should_fail}}
self.run_test(root_folder=root_folder, expected_results=expected_results, check_id=check_id) | [
9,
98,
1228,
1918,
8861
] |
def METHOD_NAME(self, train_dirs):
names = []
curdir = os.path.abspath(os.path.curdir)
for train_dir in train_dirs:
abspath = os.path.abspath(train_dir)
names.append(os.path.basename(abspath) if abspath != curdir else 'current')
data = {}
dirs = [{'name': name, 'path': path} for name, path in zip(names, train_dirs)]
for dir_info in dirs:
path = dir_info.get('path')
content = self._update_data_from_dir(path)
if not content:
continue
data[path] = {
'path': path,
'name': dir_info.get('name'),
'content': content
}
self.data = data | [
557,
365
] |
def METHOD_NAME():
'''
Example vulnerability export request
'''
return {
'first_found': 1635798607,
'last_found': 1635798607,
'indexed_at': 1635798607,
'last_fixed': 1635798607,
'since': 1635798607,
'plugin_family': ['Family Name'],
'plugin_id': [19506, 21745, 66334],
'scan_uuid': '992b7204-bde2-d17c-cabf-1191f2f6f56b7f1dbd59e117463c',
'severity': ['CRITICAL', 'High', 'medium', 'LoW', 'InfO'],
'state': ['OPENED', 'reopened', 'Fixed'],
'vpr_score': {
'eq': [2.0, 3.1],
'neq': [9.9],
'gt': 1,
'gte': 1.1,
'lt': .5,
'lte': .4
},
'tags': [
('test1', 'val1'),
('test2', 'val2'),
('test3', 'val3'),
('test3', 'val4')
],
'network_id': 'f634d639-cc33-4149-a683-5ad6b8f29d9c',
'cidr_range': '192.0.2.0/24',
'include_unlicensed': True,
} | [
4028,
294
] |
def METHOD_NAME(self):
self._setupAsyncioRunner()
super().METHOD_NAME()
self._tearDownAsyncioRunner() | [
290
] |
def METHOD_NAME(junit_test_case, exception):
"""
Helper to log & add junit result details for an unexpected exception encountered
when running a test case.
Should always be called from inside an except: block
"""
traceback.print_exc()
# AssertionError caused by an 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc())) | [
276,
5453,
442
] |
def METHOD_NAME(service):
"""Get reporting details from config for service
Args:
service (str): Service name
"""
_dict = {}
log.info(f"Loading credentials for reporting service '{service}'")
try:
_service = _get_reports()[service]
_dict["url"] = _service["url"]
_dict["svn_repo"] = _service["svn_repo"]
_dict["user"] = _service["user"]
_dict["token"] = _service["token"]
_dict["default_project"] = _service.get("default_project", "CEPH")
_dict["cert_path"] = _service["cert_path"]
return _dict
except KeyError:
raise ConfigError(f"Insufficient config for '{service}'") | [
19,
3378
] |
def METHOD_NAME(graph, signature_def_tensor_names=None,
user_tensor_names=None):
"""Gets the tensors associated with the tensor names.
Either signature_def_tensor_names or user_tensor_names should be provided. If
the user provides tensors, the tensors associated with the user provided
tensor names are provided. Otherwise, the tensors associated with the names in
the SignatureDef are provided.
Args:
graph: GraphDef representing graph.
signature_def_tensor_names: Tensor names stored in either the inputs or
outputs of a SignatureDef. (default None)
user_tensor_names: Tensor names provided by the user. (default None)
Returns:
List of tensors.
Raises:
ValueError:
signature_def_tensors and user_tensor_names are undefined or empty.
user_tensor_names are not valid.
"""
tensors = []
if user_tensor_names:
# Sort the tensor names.
user_tensor_names = sorted(user_tensor_names)
tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)
elif signature_def_tensor_names:
tensors = [
graph.get_tensor_by_name(name)
for name in sorted(signature_def_tensor_names)
]
else:
# Throw ValueError if signature_def_tensors and user_tensor_names are both
# either undefined or empty.
raise ValueError(
"Specify either signature_def_tensor_names or user_tensor_names")
return tensors | [
19,
4261
] |
def METHOD_NAME(opts):
document = '\n'.join(
f"{opt.split('=', 1)[0]}: {opt.split('=', 1)[1]}" for opt in opts
)
return yaml.safe_load(document) | [
1881,
280,
-1
] |
def METHOD_NAME(node, vm, test):
""""
get numa node size from HMP info command
:param node: numa node number
:param vm: vm object
:param test: qemu test object
"""
output = vm.monitor.info("numa").splitlines()
for numa_info in output:
if "node " + str(node) + " size: " in numa_info:
node_size = numa_info.split("node " + str(node) + " size: ").pop().split(" ")[0]
if node_size is None:
test.fail("Error, unexpected numa node size info at node %d" % node)
error_context.context("node %d size: %s" % (node, node_size), test.log.debug)
return int(node_size) | [
19,
1716,
1318
] |
def METHOD_NAME(self, NwkId, Ep):
if self.ControllerLink.loadTransmit() >= 1:
self.ScanDevicesToBeDone.append([NwkId, Ep])
else:
scan_device_for_grp_membership(self, NwkId, Ep) | [
579,
43,
846,
1823,
3944,
-1
] |
def METHOD_NAME(self):
tf_dataset = load(self.test_data_path)
# Extract records into a list.
dataset = list(tf_dataset)
self.assertEqual(len(dataset), 1)
lidar_tensors = next(iter(dataset))
num_boxes = lidar_tensors["label_box"].shape[0]
self.assertEqual(num_boxes, 16)
self.assertNotEqual(lidar_tensors["frame_id"], 0)
self.assertNotEqual(lidar_tensors["timestamp_micros"], 0)
self.assertEqual(lidar_tensors["timestamp_offset"], 0)
self.assertGreater(lidar_tensors["timestamp_micros"], 0)
self.assertAllEqual(
lidar_tensors["label_box_detection_difficulty"],
np.zeros(num_boxes, dtype="int32"),
)
# Laser points.
point_xyz_mean = tf.reduce_mean(lidar_tensors["point_xyz"], axis=0)
self.assertAllClose(
point_xyz_mean, lidar_tensors["pose"][:3, 3], atol=100
)
point_feature_mean = tf.reduce_mean(
lidar_tensors["point_feature"], axis=0
)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_all(lidar_tensors["point_mask"]))
# Laser labels.
self.assertEqual(lidar_tensors["label_box_id"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_meta"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_class"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_density"].shape[0], num_boxes)
self.assertTrue(tf.math.reduce_all(lidar_tensors["label_box_mask"]))
self.assertAllGreater(
tf.math.reduce_max(lidar_tensors["label_point_class"]), 0
)
# Multi-frame tensors for augmentation.
augmented_example = next(
iter(tf_dataset.map(transformer.build_tensors_for_augmentation))
)
self.assertEqual(augmented_example["point_clouds"].shape, [183142, 8])
self.assertEqual(augmented_example["bounding_boxes"].shape, [16, 11]) | [
9,
557,
61,
1053
] |
def METHOD_NAME(file_path, processor):
result = ""
throughput = 0
if processor == "cpu":
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
if "Total img/sec on " in line:
result = line + "\n"
throughput += float(
re.search(r"(CPU\(s\):[ ]*)(?P<throughput>[0-9]+\.?[0-9]+)", line).group(
"throughput"
)
)
elif processor == "gpu":
"""calculate average throughput"""
result_list, throughput_list = [], []
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
if "images/sec: " in line:
result_list.append(line.strip("\n"))
throughput = float(
re.search(r"(images/sec:[ ]*)(?P<throughput>[0-9]+\.?[0-9]+)", line).group(
"throughput"
)
)
throughput_list.append(throughput)
result = "\n".join(result_list[-100:]) + "\n"
if len(throughput_list) == 0:
raise Exception(
"Cannot find throughput lines. Looks like SageMaker job was not run successfully. Please check"
)
# Take average of last 100 throughput lines
throughput = sum(throughput_list[-100:]) / len(throughput_list[-100:])
LOGGER.info(result)
return result, throughput | [
38,
51,
47,
9
] |
def METHOD_NAME(self) -> None:
self.assertEqual(
classroom_models.ClassroomModel.get_by_name('math'),
self.classroom_model)
self.assertEqual(
classroom_models.ClassroomModel.get_by_name('incorrect_name'),
None) | [
9,
19,
578,
604,
156
] |
def METHOD_NAME(
*,
factor: float = 0.1,
jitter_scale: float = 0.001,
) -> Callable[[int], float]:
def _f(i: int) -> float:
delay: int = 2 ** i
return delay * factor + random.randrange(100) * jitter_scale
return _f | [
2962,
4287
] |
def METHOD_NAME(grammar: Grammar) -> List[Derivation]:
def expand_rule_expansion(rule: GrammarRule, expansion: Expansion) -> List[Derivation]:
results: List[Derivation] = [Derivation(value=None, children=[], tags=rule.tags + expansion.tags)]
# Go through each item of the RHS of the rule and expand it, forming
# the cross product as we go.
for item in expansion.children:
# Get list of candidate children
candidates: List[Derivation] = []
category = get_category(item)
if category is None:
# Terminal
candidates = [Derivation(value=item, children=None, tags=[])]
else:
# Non-terminal
candidates = expand_category(category)
# Extend each derivation with each candidate children
new_results: List[Derivation] = []
for derivation in results:
for child in candidates:
assert derivation.children is not None
new_derivation = replace(derivation, children=derivation.children + [child])
new_results.append(new_derivation)
results = new_results
return results
def expand_category(category: str) -> List[Derivation]:
results: List[Derivation] = []
for rule in grammar.category_to_rules[category]:
for expansion in rule.expansions:
results.extend(expand_rule_expansion(rule, expansion))
return results
return expand_category(ROOT_CATEGORY) | [
567,
17770
] |
def METHOD_NAME(self, unit: "Unit") -> None:
"""Handle cleanup for a specific unit"""
pass | [
950,
805
] |
def METHOD_NAME(self, mock_open):
main_win, w = self.createWidget()
mock_open.return_value = "/no_exist", None
QTest.mouseClick(w.choose_exe_button, Qt.LeftButton)
self.assertEqual(w.exe_line.text(), "")
mock_open.return_value = self.test_exe, None
QTest.mouseClick(w.choose_exe_button, Qt.LeftButton)
self.assertEqual(self.path, self.test_exe)
self.assertEqual(self.app_info.valid(), True)
self.assertEqual(w.exe_line.text(), self.test_exe)
cmd, args = w.buildCommand("input.i")
self.assertEqual(cmd, self.test_exe)
self.assertEqual(len(args), 3)
self.assertEqual(args[0], "Outputs/csv=true")
self.assertEqual(args[1], "-i")
self.assertEqual(args[2], "input.i")
QTest.mouseClick(w.mpi_checkbox, Qt.LeftButton)
self.assertEqual(w.mpi_checkbox.isChecked(), True)
cmd, args = w.buildCommand("input.i")
self.assertEqual(cmd, "mpiexec")
self.assertNotIn("--recover", args)
QTest.mouseClick(w.recover_checkbox, Qt.LeftButton)
cmd, args = w.buildCommand("input.i")
self.assertIn("--recover", args)
self.assertNotIn("--n-threads=2", args)
QTest.mouseClick(w.threads_checkbox, Qt.LeftButton)
cmd, args = w.buildCommand("input.i")
self.assertIn("--n-threads=2", args)
cmd, args = w.buildCommand("input.i")
self.assertEqual(cmd, "mpiexec")
self.assertIn(self.test_exe, args)
self.assertIn("-i", args)
self.assertIn("input.i", args)
self.assertIn("-n", args)
self.assertIn("2", args)
self.assertIn("--recover", args)
self.assertIn("Outputs/csv=true", args)
self.assertIn("--n-threads=2", args) | [
9,
7367,
3198
] |
def METHOD_NAME(ids, outfile, prefixed):
"""Writes a dictionary of machine ids to a file
Each dictionary entry is written on a single line. The function does not
print a trailing newline if the dictionary contains only one item, as that is
the common format of one-liners placed in a dump directory.
Keyword arguments:
ids -- a dictionary [generator name: machine ids]
outfile -- output file
prefixed -- use 'generator name=' prefix or not
"""
fmt = '{0}={1}' if prefixed else '{1}'
if len(ids) > 1:
fmt += '\n'
for sd, mid in ids.items():
outfile.write(fmt.format(sd,mid)) | [
38,
1571
] |
def METHOD_NAME(codeeditor):
"""Test toggle comment in a single line."""
editor = codeeditor
text = ("#class a():\n"
"# self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
editor.set_text(text)
# Toggle comment without spaces from the prefix and manually inserted
text = toggle_comment(editor)
assert text == ("class a():\n"
"# self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
# Toggle comment with space insertion
text = toggle_comment(editor)
assert text == ("# class a():\n"
"# self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
# Toggle comment deleting the insert space
text = toggle_comment(editor)
assert text == ("class a():\n"
"# self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
# Toggle comment with space at the right of prefix but manually inserted
text = toggle_comment(editor, start_line=2)
assert text == ("class a():\n"
" self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
# Toggle comment with space insertion
text = toggle_comment(editor, start_line=2)
assert text == ("class a():\n"
" # self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
# Toggle comment deleting inserted space
text = toggle_comment(editor, start_line=2)
assert text == ("class a():\n"
" self.b = 1\n"
" # print(self.b)\n"
"# \n"
)
# Toggle comment with space at the right and left of prefix
# but manually inserted
text = toggle_comment(editor, start_line=3)
assert text == ("class a():\n"
" self.b = 1\n"
" print(self.b)\n"
"# \n"
) | [
9,
97,
534,
1591
] |
def METHOD_NAME(self) -> str:
"""
Resource region.
"""
return pulumi.get(self, "region") | [
1216
] |
def METHOD_NAME(val):
try:
return float(val) > 0.0
except ValueError:
return False | [
214,
1819
] |
def METHOD_NAME(dataset_name, namespace, mp1, mp2):
exec_command = ["/bin/sh",
"-c",
"alluxio fs mount"]
resp = stream(
client.CoreV1Api().connect_get_namespaced_pod_exec, "{}-master-0".format(dataset_name), namespace,
command=exec_command, stderr=True, stdin=False,
stdout=True, tty=False, container='alluxio-master')
print("Response: " + resp)
if mp1 not in resp or mp2 not in resp:
print("checkAlluxioruntimeMountpoint Failed")
return False
return True | [
250,
-1,
8127
] |
def METHOD_NAME(self):
a,b = insertMinimumPadding(self.a, self.b, self.dist)
for aElt,bElt in zip(a,b):
if self.dist(aElt, bElt):
yield aElt,bElt | [
19,
7803
] |
def METHOD_NAME(self):
self.memH_test_template("ompthrcount") | [
9,
14746,
790,
2054,
-1
] |
def METHOD_NAME(self, pred_box: list, resize_factor: float):
cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[
1] + 0.5 * self.state[3]
cx, cy, w, h = pred_box
half_side = 0.5 * self.cfg.TEST.SEARCH_SIZE / resize_factor
cx_real = cx + (cx_prev - half_side)
cy_real = cy + (cy_prev - half_side)
return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h] | [
422,
3521,
318
] |
def METHOD_NAME(self):
# Note: not a setup_class method, so as not to conflict with AbstractTestFixture's setup
rabbit = os.getenv('JORMUNGANDR_BROKER_URL', "pyamqp://guest:guest@localhost:5672")
self._mock_rabbit_connection = BrokerConnection(rabbit)
self._connections = {self._mock_rabbit_connection}
self._exchange = Exchange('navitia', durable=True, delivery_mode=2, type='topic')
self._mock_rabbit_connection.connect()
# wait for the cnx to run the test
self._wait_for_rabbitmq_cnx() | [
0,
1
] |
def METHOD_NAME(run_cli_command, config_with_profile_factory):
"""Test that `global_only` options are only set globally even if the `--global` flag is not set."""
config_with_profile_factory()
option_name = 'autofill.user.email'
option_value = '[email protected]'
options = ['config', 'set', option_name, str(option_value)]
result = run_cli_command(cmd_verdi.verdi, options, use_subprocess=False)
options = ['config', 'get', option_name]
result = run_cli_command(cmd_verdi.verdi, options, use_subprocess=False)
# Check that the current profile name is not in the output
assert option_value in result.output.strip()
assert get_profile().name not in result.output.strip() | [
9,
200,
0,
1335,
285,
246
] |
def METHOD_NAME(self):
named_spec = SageMakerMonitoringScheduleSpec(self.REQUIRED_ARGS)
with patch(
"MonitoringSchedule.src.MonitoringSchedule_component.SageMakerComponent._get_current_namespace"
) as mock_namespace:
mock_namespace.return_value = "test-namespace"
self.component.Do(named_spec)
self.assertEqual("test", self.component.job_name) | [
9,
74,
2757,
156
] |
def METHOD_NAME(self, method):
request = self.make_request(method=method)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_410_GONE
assert resp.data == {"message": "This API no longer exists."}
self.assert_deprecation_metadata(request, resp) | [
638,
4496,
377
] |
def METHOD_NAME(name):
return tf.convert_to_tensor(
[self._private_vars[name]] * batch_dim, dtype=tf.float32) | [
3275,
486
] |
def METHOD_NAME(self) -> str:
arn = self._get_param("ARN")
tags = self._get_param("TagList")
self.opensearch_backend.METHOD_NAME(arn, tags)
return "{}" | [
238,
114
] |
def METHOD_NAME(self):
METHOD_NAME = self.data.get('show_controls', True)
return True if METHOD_NAME is None else METHOD_NAME | [
697,
3566
] |
def METHOD_NAME(self):
self.nfw = CNFW()
self.nfw_e = CNFW_ELLIPSE() | [
102,
103
] |
def METHOD_NAME(self, **kwargs):
"""
Get the changes for a person. By default only the last 24 hours are returned.
You can query up to 14 days in a single query by using the start_date
and end_date query parameters.
Args:
start_date: (optional) Filter the results with a start date.
Expected format is 'YYYY-MM-DD'.
end_date: (optional) Filter the results with a end date.
Expected format is 'YYYY-MM-DD'.
page: (optional) Minimum 1, maximum 1000, default 1.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | [
1103
] |
def METHOD_NAME(self):
# print("<< Set process is called")
data = self.inputs[0].sv_get()
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
obj = get_object(path)
if isinstance(obj, (int, float, bpy_prop_array)):
obj = get_object(path[:-1])
p_type, value = path[-1]
if p_type == "attr":
setattr(obj, value, data[0][0])
else:
obj[value] = data[0][0]
else:
assign_data(obj, data) | [
356
] |
def METHOD_NAME():
for e in os.listdir(CORE_FILE_PATH):
fl = CORE_FILE_PATH + e
if os.path.isfile(fl) and not e.startswith(UPLOAD_PREFIX):
Handler.handle_file(fl) | [
793
] |
def METHOD_NAME(index_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPlaceIndexResult]:
"""
Retrieve information about a Location Service Place Index.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.location.get_place_index(index_name="example")
```
:param str index_name: Name of the place index resource.
:param Mapping[str, str] tags: Key-value map of resource tags for the place index.
"""
... | [
19,
2038,
724,
146
] |
async def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem) | [
297,
365
] |
def METHOD_NAME(self, files, config):
for root, dirs, filenames in os.walk("."):
for filename in filenames:
if filename.endswith(".yaml"):
new_filename = os.path.join(root, filename)
markdown_filename = self.generate_yaml_markdown(
new_filename, config
)
if markdown_filename is not None:
f = File(
markdown_filename,
config["docs_dir"],
config["site_dir"],
False,
)
files.append(f)
mdFile = mdutils.MdUtils(
file_name=config["docs_dir"] + "/" + self.table_filename
)
mdFile.new_header(level=1, title="Tests index")
for table_name, test_table in self.test_tables.items():
mdFile.new_header(level=2, title='<span class="tag">%s</span>' % table_name)
mdFile.new_line("| Name | Description | Scope |")
mdFile.new_line("| --- | --- | --- |")
for row in sorted(test_table, key=lambda item: item["name"]):
mdFile.new_line(
"| %s | %s | %s |"
% (row["name"], row["description"].replace("\n", ""), row["scope"])
)
mdFile.new_line("")
mdFile.create_md_file()
newfile = File(
path=str(self.table_filename) + ".md",
src_dir=config["docs_dir"],
dest_dir=config["site_dir"],
use_directory_urls=False,
)
files.append(newfile)
return files | [
69,
1537
] |
def METHOD_NAME(self):
return {"gcc": "10.3", "clang": "12", "apple-clang": "13", "msvc": "192"} | [
1681,
1775,
281
] |
def METHOD_NAME(regions, min_gap_size):
"""Filter regions, joining those separated by small gaps."""
min_gap_size = min_gap_size or 0
for chrom, rows in regions.by_chromosome():
logging.info("%s: Joining over small gaps", chrom)
coords = iter(zip(rows["start"], rows["end"]))
prev_start, prev_end = next(coords)
for start, end in coords:
gap = start - prev_end
assert gap > 0, (
f"Impossible gap between {chrom} {prev_start}-{prev_end} "
+ f"and {start}-{end} (={gap})"
)
if gap < min_gap_size:
# Join with the previous region
logging.info(
"\tJoining %s %d-%d and %d-%d (gap size %d)",
chrom,
prev_start,
prev_end,
start,
end,
gap,
)
prev_end = end
else:
# Keep the gap; emit the previous region as-is
logging.info(
"\tKeeping gap %s:%d-%d (size %d)", chrom, prev_end, start, gap
)
yield (chrom, prev_start, prev_end)
prev_start, prev_end = start, end
yield (chrom, prev_start, prev_end) | [
2831,
3479
] |
def METHOD_NAME(self):
done = {DONE_TAG: True}
status_file = os.path.join(self._dest_dir, self.model_id, STATUS_FILE)
with open(status_file, "w") as f:
json.dump(done, f, indent=4)
mr = ModelRegisterer(self.model_id, config_json=self.config_json)
mr.register(is_from_dockerhub=False) | [
1434
] |
def METHOD_NAME(filename: str, slug: str) -> str:
"""
Look for a source file location.
If the location is found, return a string location suitable for GDB's
break command. Raise a RuntimeError otherwise.
:param filename: Target source file name.
:param slug: Source file excerpt for the location. For instance, a
specific string that is in a comment.
:return: String location suitable for GDB's break command.
"""
with open(filename, "r") as f:
for i, line in enumerate(f, 1):
if slug in line:
return "{}:{}".format(filename, i)
raise RuntimeError(
"Could not find location in {} for {}".format(filename, slug)
) | [
416,
209
] |
def METHOD_NAME(self):
with temporary_env({"SERVICES": "foobar:1235", "EAGER_SERVICE_LOADING": "1"}):
result = get_enabled_apis()
assert len(result) == 1
assert "foobar" in result | [
9,
343,
237,
445,
623,
3186,
485
] |
def METHOD_NAME(benchmark_config):
250,
6844
] |
def METHOD_NAME(self):
for line in self:
line.repeat = bool(line.repeat_expression) | [
226,
5293
] |
def METHOD_NAME(self):
"""
Test exporting a QA model to ONNX and running inference
"""
path = "distilbert-base-cased-distilled-squad"
# Export model to ONNX
onnx = HFOnnx()
model = onnx(path, "question-answering")
questions = Questions((model, path))
self.assertEqual(questions(["What is the price?"], ["The price is $30"])[0], "$30") | [
9,
1937
] |
def METHOD_NAME(inception_model_pytorch):
"""Test the InceptionBlocks model that WebGME folks provided us."""
galaxy_images_output = torch.zeros((1, 5, 64, 64))
ebv_output = torch.zeros((1,))
# Run the model once to get some ground truth output (from PyTorch)
output = inception_model_pytorch(galaxy_images_output, ebv_output).detach().numpy()
# Convert to MDF
mdf_model, params_dict = pytorch_to_mdf(
model=inception_model_pytorch,
args=(galaxy_images_output, ebv_output),
trace=True,
)
# Get the graph
mdf_graph = mdf_model.graphs[0]
# Add inputs to the parameters dict so we can feed this to the EvaluableGraph for initialization of all
# graph inputs.
params_dict["input1"] = galaxy_images_output.numpy()
params_dict["input2"] = ebv_output.numpy()
eg = EvaluableGraph(graph=mdf_graph, verbose=False)
eg.evaluate(initializer=params_dict)
output_mdf = eg.output_enodes[0].get_output()
assert np.allclose(
output,
output_mdf,
)
mdf_model2 = Model.from_json(mdf_model.to_json()) | [
9,
9180
] |
def METHOD_NAME(file, l):
file.write(l + "\n") | [
77,
534
] |
def METHOD_NAME(strings, extract_re):
"""Extract a group from a regular expression in any of the strings.
"""
for s in strings:
match = re.search(extract_re, s)
if match:
return match.group(1)
return None | [
19,
1571
] |
def METHOD_NAME(query, params):
"""Assemble a Bing-News request."""
sxng_locale = params['searxng_locale']
engine_region = traits.get_region(mkt_alias.get(sxng_locale, sxng_locale), traits.all_locale)
engine_language = traits.get_language(sxng_locale, 'en')
SID = uuid.uuid1().hex.upper()
set_bing_cookies(params, engine_language, engine_region, SID)
# build URL query
#
# example: https://www.bing.com/news/infinitescrollajax?q=london&first=1
query_params = {
# fmt: off
'q': query,
'InfiniteScroll': 1,
# to simplify the page count lets use the default of 10 images per page
'first' : (int(params.get('pageno', 1)) - 1) * 10 + 1,
# fmt: on
}
if params['time_range']:
# qft=interval:"7"
query_params['qft'] = 'qft=interval="%s"' % time_map.get(params['time_range'], '9')
params['url'] = base_url + '?' + urlencode(query_params)
return params | [
377
] |
def METHOD_NAME(self):
ret = libvirt_setup.hypervisor_has_virtio("kvm")
self.assertTrue(ret, "Hypervisor has virtio")
for libvirt_type in ["xen", "hyperv"]:
ret = libvirt_setup.hypervisor_has_virtio(libvirt_type)
self.assertFalse(ret, "Hypervisor has no virtio") | [
9,
7941,
220,
9910
] |
def METHOD_NAME(self):
"""
Lists modules in the directory of this module (if this module is a
package).
"""
names = {}
try:
method = self.py__path__
except AttributeError:
pass
else:
for path in method():
mods = iter_modules([path])
for module_loader, name, is_pkg in mods:
# It's obviously a relative import to the current module.
names[name] = SubModuleName(self, name)
# TODO add something like this in the future, its cleaner than the
# import hacks.
# ``os.path`` is a hardcoded exception, because it's a
# ``sys.modules`` modification.
# if str(self.name) == 'os':
# names.append(Name('path', parent_context=self))
return names | [
1066,
468,
553
] |
def METHOD_NAME(self):
self.admin_sessions[0].assert_icommand('ilsresc --tree', 'STDOUT_SINGLELINE', 'resc') | [
9,
-1,
151
] |
def METHOD_NAME(nsi_lines: t.List[str]) -> t.Tuple[t.List[str], t.List[str]]:
slice = nsi_lines.index(';@INSERT_TRANSLATIONS@\n')
return nsi_lines[:slice], nsi_lines[slice+1:] | [
265,
-1
] |
def METHOD_NAME(self):
""" Initialization scripts should create a default Block object. """
self.assertTrue(Block.objects.filter(name="Default").exists()) | [
9,
235,
573,
152
] |
def METHOD_NAME():
return bytes.fromhex("BFEB1E56FBCD973BB219022430A57843003D5644D21E62B9D4F180E7E6C33941") | [
19,
2229
] |
def METHOD_NAME(self) -> 'outputs.ComponentVersionResponse':
"""
[Required] Additional attributes of the entity.
"""
return pulumi.get(self, "component_version_properties") | [
1007,
281,
748
] |
def METHOD_NAME(self):
self.altCam.setX(self.altCam.getX() + 1) | [
132,
879
] |
def METHOD_NAME(self):
url = reverse("retirement_api:claiming_en")
response = self.client.get(url)
self.assertTrue(response.status_code == 200)
url = reverse("retirement_api:claiming_es")
response = self.client.get(url)
self.assertTrue(response.status_code == 200) | [
9,
414,
1179
] |
def METHOD_NAME(self, gc_mock, requests_mock):
telemetry = Telemetry(url=self.url)
gc_mock.return_value.telemetry_enabled = False
telemetry.emit(self.metric_mock)
requests_mock.post.assert_not_called() | [
9,
1923,
130,
353,
1646,
1158,
1295
] |
def METHOD_NAME(self) -> None:
"""
Closes the database
"""
if isinstance(self.target, h5py._hl.files.File):
self.target.METHOD_NAME() | [
1462
] |
def METHOD_NAME(self) -> Path:
""":return: runtime path tied to the user"""
return Path(self.user_runtime_dir) | [
21,
1888,
157
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--save_dir',
type=str,
default='output',
help="directory to save compressed model.")
parser.add_argument(
'--devices',
type=str,
default='gpu',
help="which device used to compress.")
return parser | [
7220
] |
def METHOD_NAME():
"""Call to order.rank() using hard-coded data written to temp files"""
# candidate dataframe to write to cand file
cand_df = pd.DataFrame(
{
"w": [0.15, 0.15, 0.15, 0.175, 0.175, 0.15, 0.15, 0.15],
"G": [2700, 2500, 2000, 2500, 2500, 1500, 1500, 2000],
"lldg": [0.15, 0.15, 0.25, 0.25, 0.3, 0.15, 0.15, 0.3],
"L": [10039, 9060, 7519, 7358, 6185, 3100, 3454, 8529],
}
)
cand_fn = pl.Path(sys.path[0], "tmp_cand.csv")
df_utils.write(cand_fn, cand_df)
# dist matrix to be written to dmat.npy file
dmat = np.array(
[
[
1.00000000e01,
2.75151884e-02,
5.10153186e-01,
5.40001829e-01,
9.61868543e-01,
1.18523604e00,
1.11693144e00,
7.64581026e-01,
],
[
2.75151884e-02,
1.00000000e01,
3.70385458e-01,
4.64940661e-01,
8.54039200e-01,
8.52817813e-01,
7.94402325e-01,
6.53028010e-01,
],
[
5.10153186e-01,
3.70385458e-01,
1.00000000e01,
2.60486124e-01,
3.48005747e-01,
6.15110612e-01,
5.72261139e-01,
7.70540649e-02,
],
[
5.40001829e-01,
4.64940661e-01,
2.60486124e-01,
1.00000000e01,
8.21307813e-02,
1.02830593e00,
9.87082757e-01,
3.42180198e-01,
],
[
9.61868543e-01,
8.54039200e-01,
3.48005747e-01,
8.21307813e-02,
1.00000000e01,
1.21791690e00,
1.18854249e00,
3.38505599e-01,
],
[
1.18523604e00,
8.52817813e-01,
6.15110612e-01,
1.02830593e00,
1.21791690e00,
1.00000000e01,
1.78792001e-03,
1.06951995e00,
],
[
1.11693144e00,
7.94402325e-01,
5.72261139e-01,
9.87082757e-01,
1.18854249e00,
1.78792001e-03,
1.00000000e01,
1.01646822e00,
],
[
7.64581026e-01,
6.53028010e-01,
7.70540649e-02,
3.42180198e-01,
3.38505599e-01,
1.06951995e00,
1.01646822e00,
1.00000000e01,
],
]
)
dmat_fn = pl.Path(sys.path[0], "tmp_dmat.npy")
np.save(dmat_fn, dmat)
# file name dict to be passed to order.rank()
fnames = {"cand": str(cand_fn), "dmat": str(dmat_fn)}
# Make the actual call
fname_ranked = order.rank(fnames)
# Ranked results as a dataframe
ret_ranked_df = df_utils.load(fname_ranked)
# Expected ranked results as dataframe
ranked_df = pd.DataFrame(
{
"w": [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.175, 0.175],
"G": [2700, 2500, 1500, 1500, 2000, 2000, 2500, 2500],
"lldg": [0.15, 0.15, 0.15, 0.15, 0.25, 0.3, 0.3, 0.25],
"L": [10039, 9060, 3454, 3100, 7519, 8529, 6185, 7358],
}
)
test_results = ret_ranked_df.equals(ranked_df)
# Clean up tmp files
cand_fn.unlink()
dmat_fn.unlink()
pl.Path(fname_ranked).unlink()
assert test_results | [
9,
1499
] |
def METHOD_NAME(tmpdir, gdf):
# Create a geodataset and an ogr compliant version of it
ds = GeoDataset.from_gdf(gdf)
oc = ds.vector.ogr_compliant()
# Assert some ogr compliant stuff
assert oc.ogr_layer_type.upper() == "MULTIPOLYGON"
assert list(oc.dims)[0] == "index"
assert len(oc.Roman) == 2
# Write and load
fn = str(tmpdir.join("dummy_ogr.nc"))
ds.vector.to_netcdf(fn, ogr_compliant=True)
ds1 = GeoDataset.from_netcdf(fn)
assert np.all(ds.vector.geometry == ds1.vector.geometry) | [
9,
6584
] |
def METHOD_NAME(self, request, ex_confs):
ex_confs.sort(key=lambda ex_conf: ex_conf.start_date)
# For each round check separately that it is entirely exclusive
for round in request.contest.round_set.all():
round_not_excl_dates = []
round_excl_end_date = round.start_date
for ex_conf in ex_confs:
# Check if there's a gap before the next excl config
if ex_conf.start_date > round_excl_end_date:
round_not_excl_dates.append(
(round_excl_end_date, ex_conf.start_date)
)
round_excl_end_date = ex_conf.start_date
# Update how much of the round is covered by the next config
if ex_conf.end_date:
round_excl_end_date = max(round_excl_end_date, ex_conf.end_date)
else:
break
# Check if the round was covered entirely
if round.end_date and round_excl_end_date >= round.end_date:
break
else:
round_not_excl_dates.append((round_excl_end_date, round.end_date))
if round_not_excl_dates:
# Default to first date if there are no future dates
first_future_date = round_not_excl_dates[0]
for date in round_not_excl_dates:
if not date[1] or date[1] >= timezone.now():
first_future_date = date
break
if not first_future_date[1]:
msg = _(
"Exclusiveness configs usually cover entire rounds,"
" but currently round \"%s\" is not exclusive from"
" %s! Please verify that your exclusiveness"
" configs are correct."
) % (round.name, first_future_date[0])
else:
msg = _(
"Exclusiveness configs usually cover entire rounds,"
" but currently round \"%s\" is not exclusive from"
" %s to %s! Please verify that your exclusiveness"
" configs are correct."
) % (round.name, first_future_date[0], first_future_date[1])
messages.warning(request, msg) | [
883,
69,
130,
5238,
10346
] |
def METHOD_NAME(self, *args, **kwargs):
"""Report an event."""
if callable(self.event_handler):
return self.event_handler(*args, **kwargs) | [
276,
417
] |