text | ids |
---|---|
def METHOD_NAME():
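# Return False when the overall process limit is reached or the least busy GPU has no free slots; otherwise True.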
if len(processes) >= max_parallel:
return False
if args.experiments_per_gpu > 0:
least_busy_gpu, gpu_available_processes = find_least_busy_gpu()
if gpu_available_processes <= 0:
return False
return True | [
1046,
3822,
3823,
356
] |
async def METHOD_NAME():
gid = relay.GlobalID(type_name="FruitAsync", node_id="1")
fruit = await gid.resolve_node(fake_info)
assert_type(fruit, Optional[relay.Node])
assert isinstance(fruit, FruitAsync)
assert fruit.id == 1
assert fruit.name == "Banana" | [
9,
285,
147,
1014,
1716
] |
def METHOD_NAME(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Render the template to produce a native Python type. If the
result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed
with :func:`ast.literal_eval`, the parsed value is returned.
Otherwise, the string is returned.
"""
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
self.root_render_func(ctx) # type: ignore
)
except Exception:
return self.environment.handle_exception() | [
338
] |
def METHOD_NAME(self) -> None:
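# Raise if the computed code hashes do not match the hashes recorded in the firmware header.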
if self.code_hashes() != self.header.hashes:
raise util.FirmwareIntegrityError("Invalid firmware data.") | [
187,
544,
2012
] |
def METHOD_NAME(self):
config = {
"KEY": 15, # priority=addon
}
settings_dict = {
"ADDONS": {get_addon_cls(config): 1},
}
crawler = get_crawler(settings_dict=settings_dict)
self.assertEqual(crawler.settings.getint("KEY"), 15)
settings = Settings(settings_dict)
settings.set("KEY", 0, priority="default")
runner = CrawlerRunner(settings)
crawler = runner.create_crawler(Spider)
self.assertEqual(crawler.settings.getint("KEY"), 15)
settings_dict = {
"KEY": 20, # priority=project
"ADDONS": {get_addon_cls(config): 1},
}
settings = Settings(settings_dict)
settings.set("KEY", 0, priority="default")
runner = CrawlerRunner(settings)
crawler = runner.create_crawler(Spider)
self.assertEqual(crawler.settings.getint("KEY"), 20) | [
9,
817,
2654
] |
def METHOD_NAME(binary_filepath):
"""
This function uses objdump to get direct dependencies of a given binary.
Totally safe, but requires a manual check of libraries which are not
found on the system.
"""
objdump_command = ("objdump", "-p", str(binary_filepath))
objdump_output = subprocess.check_output(objdump_command,
stderr=subprocess.STDOUT)
lines = objdump_output.decode().split("\n")
libraries = []
for line in lines:
line = line.strip()
if not line:
continue
if not line.startswith("NEEDED"):
continue
lib_name = line[6:].strip()
libraries.append(lib_name)
return libraries | [
19,
2002,
2948,
9520
] |
def METHOD_NAME(self, key, value, instance=0):
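# Store value under key: create a new list, pad with None and append, or insert at the given instance index.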
if key not in self.data:
self.data[key] = [value]
self.keys.append(key)
elif instance >= len(self.data[key]):
extra = instance + 1 - len(self.data[key])
for i in range(len(self.data[key]), instance-1):
self.data[key].append(None)
self.data[key].append(value)
else:
self.data[key].insert(instance, value)
self.changed = True | [
0
] |
def METHOD_NAME(
self,
first: Optional[str],
last: Optional[str],
title: Optional[str],
chat_type: str,
result: str,
):
chat = Chat(id=42, first_name=first, last_name=last, title=title, type=chat_type)
assert chat.full_name == result | [
9,
324,
156
] |
def METHOD_NAME(name, restore=None, interval=0.5):
"""
Instantiate (optionally from a checkpoint if restore is set to the
checkpoint name) the system and run for interval seconds of
simulated time. At the end of the simulation interval, create a
checkpoint and exit.
As this function is intended to run in its own process using the
multiprocessing framework, the exit is a true call to exit which
terminates the process. Exit codes are used to pass information to
the parent.
"""
if restore is not None:
m5.instantiate(restore)
else:
m5.instantiate()
e = m5.simulate(m5.ticks.fromSeconds(interval))
cause = e.getCause()
if cause in _exit_limit:
m5.checkpoint(name)
sys.exit(_exitcode_checkpoint)
elif cause in _exit_normal:
sys.exit(_exitcode_done)
else:
print(f"Test failed: Unknown exit cause: {cause}")
sys.exit(_exitcode_fail) | [
22,
367
] |
def METHOD_NAME(self):
response = self.client.post(
path=self.url(),
data=json.dumps({"override": False, "template_data": "xxx"}),
content_type="application/json",
)
data = json.loads(response.content)
self.assertFalse(data["result"])
self.assertTrue("message" in data) | [
9,
512,
67,
671,
512,
1914,
168
] |
async def METHOD_NAME(host, n_results=100):
results = []
while len(results) < n_results:
batch_results = await asyncio.gather(*[get_prediction(host) for _ in range(20)])
for result in batch_results:
if result:
results.append(result)
data = pd.DataFrame(results).groupby("fn_to_hit").agg({"mean"})
data.columns = data.columns.get_level_values(0)
data = data.reset_index()
data = {"fn_to_hit": data["fn_to_hit"].to_list(), "duration": data["duration"].to_list()}
return data | [
57
] |
def METHOD_NAME(self):
"""Test a successful CRC check
Checks that the PDU ends in the ok port of CRC check
"""
self.common_test_crc_check(matching_crc=True)
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out, self.data)
self.assertEqual(self.dbg_fail.num_messages(), 0) | [
9,
2377,
250
] |
def METHOD_NAME(self):
entries = pwd.getpwall()
for e in entries:
self.assertEqual(len(e), 7)
self.assertEqual(e[0], e.pw_name)
self.assertIsInstance(e.pw_name, str)
self.assertEqual(e[1], e.pw_passwd)
self.assertIsInstance(e.pw_passwd, str)
self.assertEqual(e[2], e.pw_uid)
self.assertIsInstance(e.pw_uid, int)
self.assertEqual(e[3], e.pw_gid)
self.assertIsInstance(e.pw_gid, int)
self.assertEqual(e[4], e.pw_gecos)
self.assertIn(type(e.pw_gecos), (str, type(None)))
self.assertEqual(e[5], e.pw_dir)
self.assertIsInstance(e.pw_dir, str)
self.assertEqual(e[6], e.pw_shell)
self.assertIsInstance(e.pw_shell, str)
# The following won't work, because of duplicate entries
# for one uid
# self.assertEqual(pwd.getpwuid(e.pw_uid), e)
# instead of this collect all entries for one uid
# and check afterwards (done in test_values_extended) | [
9,
199
] |
def METHOD_NAME(self):
"""Create Sphinx app."""
# Avoid warnings about re-registration, see:
# https://github.com/sphinx-doc/sphinx/issues/5038
with self.create_sphinx_app_context() as app:
pass
return app | [
129,
9637,
991
] |
def METHOD_NAME():
"""Get current test case from the module"""
return message_tests.pop(0) | [
19,
1056,
9,
331
] |
def METHOD_NAME(*, string: Optional[str] = "", int: Optional[int] = 0, bool: Optional[bool] = False) -> None:
pass | [
41,
6675,
665,
1618
] |
def METHOD_NAME(self):
s = self.title
if self.body:
s += f"\n{self.body}"
if self.data:
s += f"\nData: {self.data}"
return s | [
24,
526
] |
def METHOD_NAME(self) -> "ObjectPath":
return _ffi_node_api.ObjectPathGetParent(self) | [
935
] |
def METHOD_NAME(self, simple_client, monkeypatch):
call_counter = 0
def mock_call_json(*args, **kwargs):
nonlocal call_counter
call_counter += 1
if call_counter == 3:
raise TencentCloudSDKException()
raise socket.error()
monkeypatch.setattr(AbstractClient, "call_json", mock_call_json)
with pytest.raises(TencentCloudSDKException):
simple_client.execute_query("test", {})
assert call_counter == 3 | [
9,
2052,
442
] |
METHOD_NAME(self): | [
9,
19,
526
] |
def METHOD_NAME():
"""
Get a single salt process environment variable.
"""
assert not environ.get(True)
assert environ.get("key") == "" | [
9,
19
] |
def METHOD_NAME(field_schema, field):
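# Let each validator adjust the field schema; validators without a modify_schema method are skipped.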
for validator in field.validators:
try:
validator.modify_schema(field_schema)
except AttributeError:
pass | [
231,
7085,
11232
] |
def METHOD_NAME(self, x):
"""Probability density function."""
d = self.loc[x.index, x.columns]
pdf_arr = np.exp(-0.5 * ((x.values - d.mu) / d.sigma) ** 2)
pdf_arr = pdf_arr / (d.sigma * np.sqrt(2 * np.pi))
return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns) | [
4555
] |
def METHOD_NAME(self):
path = self._get_path("organization", "get_organization_by_id")
data: Dict[str, Any] = {"args": [], "meta": {}}
response = self._send_post_request(path, data)
assert response.status_code == 400
assert response.data == {
"detail": ErrorDetail(string="Malformed request.", code="parse_error")
} | [
9,
532,
335,
1207
] |
def METHOD_NAME(self):
geom = self.spark.sql(
"""select st_geomFromWKT('LINESTRING (30 10, 10 30, 40 40)')"""
).collect()[0][0]
assert type(geom) == LineString
assert geom.wkt == LineString([(30, 10), (10, 30), (40, 40)]).wkt | [
9,
3145,
13057
] |
METHOD_NAME(self): | [
9,
86,
1589,
2206,
104
] |
def METHOD_NAME(self, notify_exception, add_breadcrumb):
res = self.client.get('/slow_class')
self.assertEqual(res.status_code, 200)
notify_exception.assert_called_once()
add_breadcrumb.assert_not_called() | [
9,
3174,
3378,
3551,
2,
1179
] |
def METHOD_NAME(self):
self.sandbox.fake_execute_data(
True, b"o", "你好".encode("utf-8"), 0.1, 0.5, 1000, "OK")
stats = generic_step(self.sandbox, ONE_COMMAND, "name",
collect_output=False)
# No output collected on stats.
self.assertEqual(
stats, get_stats(0.1, 0.5, 1000 * 1024, Sandbox.EXIT_OK))
# Generic step always redirects stdout and stderr.
self.assertEqual(self.sandbox.stdout_file, "name_stdout_0.txt")
self.assertEqual(self.sandbox.stderr_file, "name_stderr_0.txt") | [
9,
97,
462,
1434,
654,
1444,
146
] |
def METHOD_NAME():
# Test init with no args
F = FrameBuffer()
glir_cmds = F._glir.clear()
assert len(glir_cmds) == 1
assert glir_cmds[0][0] == 'CREATE'
# Activate / deactivate
F.activate()
glir_cmd = F._glir.clear()[-1]
assert glir_cmd[0] == 'FRAMEBUFFER'
assert glir_cmd[2] is True
#
F.deactivate()
glir_cmd = F._glir.clear()[-1]
assert glir_cmd[0] == 'FRAMEBUFFER'
assert glir_cmd[2] is False
#
with F:
pass
glir_cmds = F._glir.clear()
assert len(glir_cmds) == 2
assert glir_cmds[0][0] == 'FRAMEBUFFER'
assert glir_cmds[1][0] == 'FRAMEBUFFER'
assert glir_cmds[0][2] is True and glir_cmds[1][2] is False
# Init with args
R = RenderBuffer((3, 3))
F = FrameBuffer(R)
assert F.color_buffer is R
#
R2 = RenderBuffer((3, 3))
F.color_buffer = R2
assert F.color_buffer is R2
# Wrong buffers
F = FrameBuffer()
assert_raises(TypeError, FrameBuffer.color_buffer.fset, F, 'FOO')
assert_raises(TypeError, FrameBuffer.color_buffer.fset, F, [])
assert_raises(TypeError, FrameBuffer.depth_buffer.fset, F, 'FOO')
assert_raises(TypeError, FrameBuffer.stencil_buffer.fset, F, 'FOO')
color_buffer = RenderBuffer((9, 9), 'color')
assert_raises(ValueError, FrameBuffer.depth_buffer.fset, F, color_buffer)
# But None is allowed!
F.color_buffer = None
# Shape
R1 = RenderBuffer((3, 3))
R2 = RenderBuffer((3, 3))
R3 = RenderBuffer((3, 3))
F = FrameBuffer(R1, R2, R3)
assert F.shape == R1.shape
assert R1.format == 'color'
assert R2.format == 'depth'
assert R3.format == 'stencil'
# Resize
F.resize((10, 10))
assert F.shape == (10, 10)
assert F.shape == R1.shape
assert F.shape == R2.shape
assert F.shape == R3.shape
assert R1.format == 'color'
assert R2.format == 'depth'
assert R3.format == 'stencil'
# Shape from any buffer
F.color_buffer = None
assert F.shape == (10, 10)
F.depth_buffer = None
assert F.shape == (10, 10)
F.stencil_buffer = None
assert_raises(RuntimeError, FrameBuffer.shape.fget, F)
# Also with Texture luminance
T = gloo.Texture2D((20, 30))
R = RenderBuffer(T.shape)
assert T.format == 'luminance'
F = FrameBuffer(T, R)
assert F.shape == T.shape[:2]
assert F.shape == R.shape
assert T.format == 'luminance'
assert R.format == 'depth'
# Resize
F.resize((10, 10))
assert F.shape == (10, 10)
assert T.shape == (10, 10, 1)
assert F.shape == R.shape
assert T.format == 'luminance'
assert R.format == 'depth'
# Also with Texture RGB
T = gloo.Texture2D((20, 30, 3))
R = RenderBuffer(T.shape)
assert T.format == 'rgb'
F = FrameBuffer(T, R)
assert F.shape == T.shape[:2]
assert F.shape == R.shape
assert T.format == 'rgb'
assert R.format == 'depth'
# Resize
F.resize((10, 10))
assert F.shape == (10, 10)
assert T.shape == (10, 10, 3)
assert F.shape == R.shape
assert T.format == 'rgb'
assert R.format == 'depth'
# Also with Texture for depth
T1 = gloo.Texture2D((20, 30, 3))
T2 = gloo.Texture2D((20, 30, 1))
assert T1.format == 'rgb'
assert T2.format == 'luminance'
F = FrameBuffer(T1, T2)
assert F.shape == T1.shape[:2]
assert F.shape == T2.shape[:2]
assert T1.format == 'rgb'
assert T2.format == 'luminance'
# Resize
F.resize((10, 10))
assert F.shape == (10, 10)
assert T1.shape == (10, 10, 3)
assert T2.shape == (10, 10, 1)
assert T1.format == 'rgb'
assert T2.format == 'luminance'
# Wrong shape in resize
assert_raises(ValueError, F.resize, (9, 9, 1))
assert_raises(ValueError, F.resize, (9,))
assert_raises(ValueError, F.resize, 'FOO')
9,
10913
] |
def METHOD_NAME(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
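# Adamax-style reference update: v_t tracks the infinity norm of the gradient rather than its second moment.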
m_t = beta1 * m + (1 - beta1) * g_t
v_t = np.maximum(beta2 * v, np.abs(g_t))
param_t = param - (alpha / (1 - beta1**t)) * (m_t / (v_t + epsilon))
return param_t, m_t, v_t | [
13740,
86,
2028
] |
def METHOD_NAME(
self,
entry_type: Type["ManifestEntryT"],
exclude: Optional[POFiltersType] = None,
include: Optional[POFiltersType] = None,
) -> Generator["ManifestEntryT", None, None]:
for entries in self.graph.objects(self.identifier, MF.entries):
for entry_iri in self.graph.items(entries):
assert isinstance(entry_iri, URIRef)
entry = entry_type(self, entry_iri)
if exclude is not None and entry.check_filters(exclude):
continue
if include is not None and not entry.check_filters(include):
continue
yield entry | [
-1
] |
def METHOD_NAME(self, instrument_code: str, are_you_sure: bool = False):
self.log.label(instrument_code=instrument_code)
if are_you_sure:
if self.is_code_in_data(instrument_code):
self._delete_instrument_data_without_any_warning_be_careful(
instrument_code
)
self.log.info("Deleted instrument object %s" % instrument_code)
else:
# doesn't exist anyway
self.log.warning("Tried to delete non existent instrument")
else:
self.log.error(
"You need to call delete_instrument_data with a flag to be sure"
) | [
34,
2933,
365
] |
def METHOD_NAME(levels=1, final=dict):
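# Build a nested defaultdict that is `levels` deep, with `final` as the innermost default factory.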
return (defaultdict(final) if levels < 2 else
defaultdict(lambda: METHOD_NAME(levels - 1, final))) | [
-1
] |
def METHOD_NAME(val, mapping=None):
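# Escape each element of val and join the results into a single comma-separated string.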
return ','.join([escape_item(x, mapping) for x in val]) | [
4748,
0
] |
def METHOD_NAME():
grid_east = [-2., -1., 0., 1., 2.]
grid_north = [-1.5, 0., 1.5]
# Center of the survey site, puts the grid into reference
center_point = 20., 150.
test = conv._get_gdal_origin(grid_east, 1., center_point[1],
grid_north, 1.5, center_point[0])
# Origin should be upper-left, so western-most point shifted 1/2
# cell west and northern-most point shifted 1/2 cell north.
expected = [147.5, 22.25]
np.testing.assert_array_equal(test, expected) | [
9,
3908,
1788
] |
def METHOD_NAME(url, test_data_subdir='xml_input'):
filename = url.split('/')[-1]
path = TEST_DATA / test_data_subdir / filename
return MockResponse(path.read_bytes()) | [
1413,
9,
5507,
365
] |
def METHOD_NAME(args):
idim = 10
odim = 5
model = E2E(idim, odim, args)
batchsize = 2
ilens = [10, 9]
olens = [3, 4]
n_token = odim - 1
x = torch.randn(batchsize, max(ilens), idim)
y_src = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
y_tgt = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
for i in range(batchsize):
x[i, ilens[i] :] = -1
y_tgt[i, olens[i] :] = model.ignore_id
y_src[i, olens[i] :] = model.ignore_id
data = {}
uttid_list = []
for i in range(batchsize):
data["utt%d" % i] = {
"input": [{"shape": [ilens[i], idim]}],
"output": [{"shape": [olens[i]]}],
}
uttid_list.append("utt%d" % i)
return model, x, torch.tensor(ilens), y_tgt, y_src, data, uttid_list | [
123
] |
def METHOD_NAME(
self,
data_shape,
kernel_size,
data_layout,
kernel_layout,
groups,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single depthwise_conv2d_nchwc operator."""
ishape = data_shape
wshape = (data_shape[1], 1, *kernel_size)
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
groups = groups
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
relay.layout_transform(input0, "NCHW", data_layout),
relay.layout_transform(weight0, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
groups=groups,
out_dtype="",
out_layout="",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
relay.layout_transform(input1, "NCHW", data_layout),
relay.layout_transform(weight1, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
groups=groups,
out_dtype="",
out_layout="",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
) | [
9,
4611,
3385,
8645,
9060
] |
def METHOD_NAME(vit_features, features, use_readout, start_index=1):
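# Choose one readout handler per feature map: slice off, add, or project the leading readout token(s).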
if use_readout == "ignore":
readout_oper = [Slice(start_index)] * len(features)
elif use_readout == "add":
readout_oper = [AddReadout(start_index)] * len(features)
elif use_readout == "project":
readout_oper = [
ProjectReadout(vit_features, start_index) for out_feat in features
]
else:
assert (
False
), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
return readout_oper | [
19,
7484,
10453
] |
def METHOD_NAME(self):
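# Expose every element of self.attrs as a numbered attr_<i> attribute on the instance.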
for i, attr in enumerate(self.attrs):
setattr(self, f"attr_{i}", attr) | [
176,
1685
] |
def METHOD_NAME(self):
worker = FakeWorker('worker1')
yield worker.setServiceParent(self.workers)
worker.parent = self.master
worker.manager = self.workers
worker.botmaster = self.master.botmaster
worker_new = FakeWorker('worker1')
self.new_config.workers = [worker_new]
yield self.workers.reconfigServiceWithBuildbotConfig(self.new_config)
# worker was not replaced..
self.assertIdentical(self.workers.workers['worker1'], worker) | [
9,
10159,
549,
5930,
10159
] |
def METHOD_NAME(self, arch, filename, segments):
binary_path = os.path.join(TESTS_BASE, arch, filename)
ld = cle.Loader(binary_path, auto_load_libs=False)
self.assertEqual(len(ld.main_object.segments), len(segments))
for i, segment in enumerate(ld.main_object.segments):
self.assertEqual(segment.offset, segments[i].offset)
self.assertEqual(segment.vaddr, segments[i].vaddr)
self.assertEqual(segment.memsize, segments[i].memsize)
self.assertEqual(segment.filesize, segments[i].filesize)
# address lookups
self.assertIsNone(ld.main_object.segments.find_region_containing(-1))
# skip all segments that are not mapped into memory
mapped_segments = [segment for segment in segments if segment.vaddr != 0]
for segment in mapped_segments:
self.assertEqual(
ld.main_object.find_segment_containing(segment.vaddr).vaddr,
segment.vaddr,
)
self.assertEqual(
ld.main_object.segments.find_region_containing(segment.vaddr).vaddr,
segment.vaddr,
)
if segment.memsize > 0:
self.assertEqual(
ld.main_object.find_segment_containing(segment.vaddr + 1).vaddr,
segment.vaddr,
)
self.assertEqual(
ld.main_object.segments.find_region_containing(segment.vaddr + 1).vaddr,
segment.vaddr,
)
self.assertEqual(
ld.main_object.find_segment_containing(segment.vaddr + segment.memsize - 1).vaddr,
segment.vaddr,
)
self.assertEqual(
ld.main_object.segments.find_region_containing(segment.vaddr + segment.memsize - 1).vaddr,
segment.vaddr,
)
for i in range(len(mapped_segments) - 1):
seg_a, seg_b = mapped_segments[i], mapped_segments[i + 1]
if seg_a.vaddr + seg_a.memsize < seg_b.vaddr:
# there is a gap between seg_a and seg_b
for j in range(min(seg_b.vaddr - (seg_a.vaddr + seg_a.memsize), 20)):
a = seg_a.vaddr + seg_a.memsize + j
self.assertIsNone(ld.main_object.find_segment_containing(a))
self.assertIsNone(ld.main_object.segments.find_region_containing(a))
self.assertIsNone(ld.main_object.find_segment_containing(0xFFFFFFFF), None) | [
22,
1690
] |
def METHOD_NAME(client, sample_service):
data = {"notification_type": "sms", "days_of_retention": 3}
response = client.post(
"/service/{}/data-retention".format(str(sample_service.id)),
headers=[("Content-Type", "application/json"), create_authorization_header()],
data=json.dumps(data),
)
assert response.status_code == 201
json_resp = json.loads(response.get_data(as_text=True))["result"]
results = ServiceDataRetention.query.all()
assert len(results) == 1
data_retention = results[0]
assert json_resp == data_retention.serialize() | [
9,
129,
549,
365,
3786
] |
def METHOD_NAME(sim=False):
ops_multisig_old = GreatApeSafe(r.badger_wallets.ops_multisig_old)
reward_logger = ops_multisig_old.contract(r.rewardsLogger)
reward_logger.grantRole(DEFAULT_ADMIN_ROLE, dev_msig)
if not sim:
ops_multisig_old.post_safe_tx(call_trace=True) | [
829,
2363,
-1
] |
def METHOD_NAME(n):
conds = []
for i in range(1, n + 1):
region = [{"X": (0, i / n), "Y": (0, (n + 1 - i) / n)}]
subst = {str(i): str(n + 1 - i)}
conds.append((region, subst))
overlaps = overlayFeatureVariations(conds)
assert len(overlaps) == n * (n + 1) // 2, overlaps
return conds, overlaps | [
9,
3380
] |
def METHOD_NAME(self, METHOD_NAME):
METHOD_NAME.parsed_params = {
"resource_link_id": sentinel.resource_link_id,
"lms": {"tool_consumer_instance_guid": sentinel.guid},
"context_id": sentinel.context_id,
"gradingStudentId": sentinel.grading_student_id,
"group_info": sentinel.group_info,
}
METHOD_NAME.user = sentinel.user
return METHOD_NAME | [
5533,
377
] |
def METHOD_NAME(self):
file_name = 'Test_QWG_test_dio_parameters.scpi.txt'
test_path = Path('test_output') / file_name
os.makedirs('test_output', exist_ok=True)
transport = FileTransport(str(test_path))
qwg = QWG('qwg_dio_parameters', transport) # FIXME: names must be unique unless we properly tell QCoDes to remove
qwg.init()
qwg.dio_mode('MASTER')
# dio_is_calibrated
qwg.dio_active_index(0)
transport.close() # to allow access to file
qwg.close() # release QCoDeS instrument | [
9,
12489,
386
] |
def METHOD_NAME(folder, index):
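# Map a folder name and zero-padded index to the matching quesst14 wav filename; unknown folders return None.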
if folder == "Audio":
return f"quesst14_{index:05d}.wav"
elif folder == "dev_queries":
return f"quesst14_dev_{index:04d}.wav"
elif folder == "eval_queries":
return f"quesst14_eval_{index:04d}.wav"
return | [
19,
1147
] |
def METHOD_NAME(self, src_tokens, src_lengths):
encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device))
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"]
return {
"encoder_out": [output],
# This is because, in the original implementation,
# the output didn't consider the last segment as right context.
"encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
} | [
76
] |
async def METHOD_NAME(client):
response = await client.get("wallet", follow_redirects=False)
assert response.status_code == 307, f"{response.url} {response.status_code}"
# determine the next redirect location
request = client.build_request("GET", "wallet")
i = 0
while request is not None:
response = await client.send(request)
request = response.next_request
if i == 0:
# first redirect
assert response.status_code == 307, f"{response.url} {response.status_code}"
elif i == 1:
# then get the actual page
assert response.status_code == 200, f"{response.url} {response.status_code}"
i += 1 | [
9,
19,
2945,
654,
1736
] |
def METHOD_NAME(
workspace_file: StrPath,
workspace_dir: t.Optional[StrPath] = None,
) -> str:
"""
Return the real config path or raise an exception.
If workspace file is a directory, scan for .tmuxp.{yaml,yml,json} in that directory. If
one or more found, it will warn and pick the first.
If workspace file is ".", "./" or None, it will scan current directory.
If workspace file has no path and only a filename, e.g. "my_workspace.yaml", it
will search workspace dir.
If workspace file has no path and no extension, e.g. "my_workspace", it will scan
for file name with yaml, yml and json. If multiple exist, it will warn and pick the
first.
Parameters
----------
workspace_file : str
workspace file, valid examples:
- a file name, my_workspace.yaml
- relative path, ../my_workspace.yaml or ../project
- a period, .
"""
if not workspace_dir:
workspace_dir = get_workspace_dir()
path = os.path
exists, join, isabs = path.exists, path.join, path.isabs
dirname, normpath, splitext = path.dirname, path.normpath, path.splitext
cwd = os.getcwd()
is_name = False
file_error = None
workspace_file = os.path.expanduser(workspace_file)
# if pure name, resolve to config dir
if is_pure_name(workspace_file):
is_name = True
elif (
not isabs(workspace_file)
or len(dirname(workspace_file)) > 1
or workspace_file == "."
or workspace_file == ""
or workspace_file == "./"
): # if relative, fill in full path
workspace_file = normpath(join(cwd, workspace_file))
# no extension, scan
if path.isdir(workspace_file) or not splitext(workspace_file)[1]:
if is_name:
candidates = [
x
for x in [
f"{join(workspace_dir, workspace_file)}{ext}"
for ext in VALID_WORKSPACE_DIR_FILE_EXTENSIONS
]
if exists(x)
]
if not len(candidates):
file_error = (
"workspace-file not found in workspace dir (yaml/yml/json) %s "
"for name" % (workspace_dir)
)
else:
candidates = [
x
for x in [
join(workspace_file, ext)
for ext in [".tmuxp.yaml", ".tmuxp.yml", ".tmuxp.json"]
]
if exists(x)
]
if len(candidates) > 1:
tmuxp_echo(
Fore.RED
+ "Multiple .tmuxp.{yml,yaml,json} workspace_files in %s"
% dirname(workspace_file)
+ Fore.RESET
)
tmuxp_echo(
"This is undefined behavior, use only one. "
"Use file names e.g. myproject.json, coolproject.yaml. "
"You can load them by filename."
)
elif not len(candidates):
file_error = "No tmuxp files found in directory"
if len(candidates):
workspace_file = candidates[0]
elif not exists(workspace_file):
file_error = "file not found"
if file_error:
raise FileNotFoundError(file_error, workspace_file)
return workspace_file | [
416,
1976,
171
] |
def METHOD_NAME(self, request: Request, team: Team) -> Response:
"""
Returns a dict of team projects, and a time-series dict of issue stat breakdowns for each.
If a list of statuses is passed then we return the count of each status and the totals.
Otherwise we the count of reviewed issues and the total count of issues.
"""
if not features.has("organizations:team-insights", team.organization, actor=request.user):
return Response({"detail": "You do not have the insights feature enabled"}, status=400)
start, end = get_date_range_from_params(request.GET)
end = end.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
start = start.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
environments = [e.id for e in get_environments(request, team.organization)]
if "statuses" in request.GET:
statuses = [
STRING_TO_STATUS_LOOKUP[status] for status in request.GET.getlist("statuses")
]
new_format = True
else:
statuses = [GroupHistoryStatus.UNRESOLVED] + ACTIONED_STATUSES
new_format = False
new_issues = []
base_day_format = {"total": 0}
if new_format:
for status in statuses:
base_day_format[STATUS_TO_STRING_LOOKUP[status]] = 0
else:
base_day_format["reviewed"] = 0
if GroupHistoryStatus.NEW in statuses:
group_environment_filter = (
Q(groupenvironment__environment_id=environments[0]) if environments else Q()
)
statuses.remove(GroupHistoryStatus.NEW)
new_issues = list(
Group.objects.filter_to_team(team)
.filter(group_environment_filter, first_seen__gte=start, first_seen__lte=end)
.annotate(bucket=TruncDay("first_seen"))
.order_by("bucket")
.values("project", "bucket")
.annotate(
count=Count("id"),
status=Value(GroupHistoryStatus.NEW, output_field=IntegerField()),
)
)
group_history_environment_filter = (
Q(group__groupenvironment__environment_id=environments[0]) if environments else Q()
)
bucketed_issues = (
GroupHistory.objects.filter_to_team(team)
.filter(
group_history_environment_filter,
status__in=statuses,
date_added__gte=start,
date_added__lte=end,
)
.annotate(bucket=TruncDay("date_added"))
.order_by("bucket")
.values("project", "bucket", "status")
.annotate(count=Count("id"))
)
current_day, date_series_dict = start, {}
while current_day < end:
date_series_dict[current_day.isoformat()] = copy.deepcopy(base_day_format)
current_day += timedelta(days=1)
project_list = Project.objects.get_for_team_ids(team_ids=[team.id])
agg_project_counts = {
project.id: copy.deepcopy(date_series_dict) for project in project_list
}
for r in chain(bucketed_issues, new_issues):
bucket = agg_project_counts[r["project"]][r["bucket"].isoformat()]
bucket["total"] += r["count"]
if not new_format and r["status"] != GroupHistoryStatus.UNRESOLVED:
bucket["reviewed"] += r["count"]
if new_format:
bucket[STATUS_TO_STRING_LOOKUP[r["status"]]] += r["count"]
return Response(agg_project_counts) | [
19
] |
def METHOD_NAME():
"""Testing convert_to basic call works."""
scitype = SCITYPES[0]
from_fixt = get_examples(mtype=MTYPES_SERIES[1], as_scitype=scitype).get(0)
# expectation is that the conversion is to mtype MTYPES_SERIES[0]
exp_fixt = get_examples(mtype=MTYPES_SERIES[0], as_scitype=scitype).get(0)
# carry out the conversion using convert_to
converted = convert_to(from_fixt, to_type=MTYPES_SERIES[0], as_scitype=scitype)
# compare expected output with actual output of convert_to
msg = "convert_to basic call does not seem to work."
assert deep_equals(converted, exp_fixt), msg | [
9,
197,
24,
53
] |
def METHOD_NAME(params: Dict[str, Any], search_space: Dict[str, BaseDistribution]) -> bool:
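# True only if every parameter value falls inside its corresponding distribution in the search space.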
for param_name in params.keys():
param, param_distribution = params[param_name], search_space[param_name]
if not param_distribution._contains(param_distribution.to_internal_repr(param)):
return False
return True | [
137,
8113
] |
def METHOD_NAME(self):
"""Tests for register_view, the view to register items."""
self.login()
# Load the page.
for item_type in ["computer", "calculator", "phone"]:
response = self.client.get(reverse("itemreg_register", kwargs={"item_type": item_type}))
self.assertEqual(200, response.status_code)
# Register a phone.
response = self.client.post(
reverse("itemreg_register", kwargs={"item_type": "phone"}),
data={"manufacturer": "other", "imei": "123456789", "model": "test", "description": "haha"},
follow=True,
)
self.assertEqual(200, response.status_code)
self.assertEqual(1, PhoneRegistration.objects.filter(imei="123456789").count())
# Register a computer.
response = self.client.post(
reverse("itemreg_register", kwargs={"item_type": "computer"}),
data={"manufacturer": "other", "serial": "1234567890", "model": "test", "description": "haha", "screen_size": 14},
follow=True,
)
self.assertEqual(200, response.status_code)
self.assertEqual(1, ComputerRegistration.objects.filter(serial="1234567890").count())
# Register a calculator.
response = self.client.post(
reverse("itemreg_register", kwargs={"item_type": "calculator"}),
data={
"calc_type": "ti84p",
"calc_serial": "987654321",
"calc_id": "test",
},
follow=True,
)
self.assertEqual(200, response.status_code)
self.assertEqual(1, CalculatorRegistration.objects.filter(calc_serial="987654321").count()) | [
9,
372,
1179
] |
def METHOD_NAME(op):
"""Checks CMSIS-NN partitioning when layout is not NHWC"""
model = make_model(pool_op=op, layout="NCHW")
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod) | [
9,
532,
571
] |
def METHOD_NAME(p: pathlib.Path) -> types.ModuleType:
"""
Load a python module from a path.
:param pathlib.Path p: Path to ``<module>.py``
:rtype: Module
"""
import importlib.util
import importlib.abc
if not p.is_file():
raise FileNotFoundError(f"The module {str(p)!r} does not exist")
default_sys_path = sys.path
try:
module_spec = importlib.util.spec_from_file_location(
name=p.stem, location=str(p)
)
module = importlib.util.module_from_spec(module_spec)
if not isinstance(module_spec.loader, importlib.abc.Loader):
raise TypeError(f"Invalid module spec {module_spec!r}")
sys.path = default_sys_path + [str(p.parent)]
module_spec.loader.exec_module(module)
finally:
sys.path = default_sys_path
return module | [
557,
298
] |
def METHOD_NAME(self):
settings.PYTEST_FORCE_CHAHUB = False | [
531,
481
] |
def METHOD_NAME(signal, corrected=False):
"""**Fractal dimension via Normalized Length Density (NLDFD)**
NLDFD is a very simple index corresponding to the average absolute consecutive
differences of the (standardized) signal (``np.mean(np.abs(np.diff(std_signal)))``).
This method was developed for measuring signal complexity of very short durations (< 30
samples), and can be used for instance when continuous signal FD changes (or "running" FD) are
of interest (by computing it on sliding windows, see example).
For methods such as Higuchi's FD, the standard deviation of the window FD increases sharply
when the epoch becomes shorter. The NLD method results in lower standard deviation especially
for shorter epochs, though at the expense of lower accuracy in average window FD.
See Also
--------
fractal_higuchi
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
corrected : bool
If ``True``, will rescale the output value according to the power model estimated by
Kalauzi et al. (2009) to make it more comparable with "true" FD range, as follows:
``FD = 1.9079*((NLD-0.097178)^0.18383)``. Note that this can result in ``np.nan`` if the
result of the difference is negative.
Returns
--------
fd : DataFrame
A dataframe containing the fractal dimension across epochs.
info : dict
A dictionary containing additional information (currently empty, but returned nonetheless for
consistency with other functions).
Examples
----------
**Example 1**: Usage on a short signal
.. ipython:: python
import neurokit2 as nk
# Simulate a short signal with duration of 0.5s
signal = nk.signal_simulate(duration=0.5, frequency=[3, 5])
# Compute Fractal Dimension
fd, _ = nk.fractal_nld(signal, corrected=False)
fd
**Example 2**: Compute FD-NLD on non-overlapping windows
.. ipython:: python
import numpy as np
# Simulate a long signal with duration of 5s
signal = nk.signal_simulate(duration=5, frequency=[3, 5, 10], noise=0.1)
# We want windows of size=100 (0.1s)
n_windows = len(signal) // 100 # How many windows
# Split signal into windows
windows = np.array_split(signal, n_windows)
# Compute FD-NLD on all windows
nld = [nk.fractal_nld(i, corrected=False)[0] for i in windows]
np.mean(nld) # Get average
**Example 3**: Calculate FD-NLD on sliding windows
.. ipython:: python
# Simulate a long signal with duration of 5s
signal = nk.signal_simulate(duration=5, frequency=[3, 5, 10], noise=0.1)
# Add period of noise
signal[1000:3000] = signal[1000:3000] + np.random.normal(0, 1, size=2000)
# Create function-wrapper that only return the NLD value
nld = lambda x: nk.fractal_nld(x, corrected=False)[0]
# Use them in a rolling window of 100 samples (0.1s)
rolling_nld = pd.Series(signal).rolling(100, min_periods = 100, center=True).apply(nld)
@savefig p_nld1.png scale=100%
nk.signal_plot([signal, rolling_nld], subplots=True, labels=["Signal", "FD-NLD"])
@suppress
plt.close()
References
----------
* Kalauzi, A., Bojić, T., & Rakić, L. (2009). Extracting complexity waveforms from
one-dimensional signals. Nonlinear biomedical physics, 3(1), 1-11.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Amplitude normalization
signal = standardize(signal)
# Calculate normalized length density
nld = np.nanmean(np.abs(np.diff(signal)))
if corrected:
# Power model optimal parameters based on analysis of EEG signals (from Kalauzi et al. 2009)
a = 1.9079
k = 0.18383
nld_diff = nld - 0.097178 # NLD - NLD0
if nld_diff < 0:
warn(
"Normalized Length Density of the signal may be too small, retuning `np.nan`.",
category=NeuroKitWarning,
)
nld = np.nan
else:
nld = a * (nld_diff ** k)
# Compute fd
return nld, {} | [
16144,
-1
] |
def METHOD_NAME(self, method, full_url, path, body, duration, status_code=None, response=None, exception=None):
""" Log an unsuccessful API call. """
# do not log 404s on HEAD requests
if method == 'HEAD' and status_code == 404:
return
logger.warning(
'%s %s [status:%s request:%.3fs]', method, full_url,
status_code or 'N/A', duration, exc_info=exception is not None
)
# body has already been serialized to utf-8, deserialize it for logging
# TODO: find a better way to avoid (de)encoding the body back and forth
if body:
try:
body = body.decode('utf-8', 'ignore')
except AttributeError:
pass
logger.debug('> %s', body)
self._log_trace(method, path, body, status_code, response, duration)
if response is not None:
logger.debug('< %s', response) | [
390,
377,
180
] |
def METHOD_NAME(self,event):
DC = wx.PaintDC(self)
#self.PrepareDC(DC)
s = self.GetVirtualSize()
MemBitmap = wx_compat.EmptyBitmap(s.GetWidth(), s.GetHeight())
#del DC
MemDC = wx.MemoryDC()
OldBitmap = MemDC.SelectObject(MemBitmap)
try:
#DC.BeginDrawing()
self.DoPaint(MemDC)
DC.Blit(0, 0, s.GetWidth(), s.GetHeight(), MemDC, 0, 0)
#DC.EndDrawing()
finally:
del MemDC
del MemBitmap | [
69,
5932
] |
def METHOD_NAME(self, **kwargs):
return {
"connection_user": kwargs.get("connection_user") or self.mysql_root_user,
"connection_pass": kwargs.get("connection_pass") or self.mysql_root_passwd,
"connection_db": kwargs.get("connection_db") or "mysql",
"connection_port": kwargs.get("connection_port") or self.mysql_port,
} | [
19,
3568
] |
def METHOD_NAME(self, batch, stage):
"Given an input batch it computes the phoneme probabilities."
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
wavs = self.modules.augmentation(wavs, wav_lens)
feats = self.hparams.compute_features(wavs)
feats = self.modules.normalize(feats, wav_lens)
out = self.modules.model(feats)
out = self.modules.output(out)
pout = self.hparams.log_softmax(out)
return pout, wav_lens | [
226,
76
] |
def METHOD_NAME(test_directory):
proj_dirs = []
if not os.path.isdir(test_directory):
err_exit("test directory does not exist")
for l in os.listdir(test_directory):
if "corpus-" in l and os.path.isdir(os.path.join(test_directory, l)):
proj_dirs.append(l)
for proj_dir in proj_dirs:
check_project_dir(os.path.join(test_directory, proj_dir)) | [
250,
9,
2851
] |
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("OperationsList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem) | [
297,
365
] |
def METHOD_NAME(
asset_with_mock: dict[str, Any]
) -> None:
asset = Asset(**asset_with_mock, mock_is_real=True)
assert asset.mock_is_real
asset.no_mock()
assert not asset.mock_is_real
with pytest.raises(ValidationError):
asset.mock_is_real = True
asset.mock = mock()
asset.mock_is_real = True
assert asset.mock_is_real | [
9,
248,
2659,
130,
1866,
1887,
0
] |
def METHOD_NAME():
tape = get_working_tape()
tape.clear_tape()
mesh = UnitOctahedralSphereMesh(refinement_level=2)
x = SpatialCoordinate(mesh)
mesh.init_cell_orientations(x)
S = mesh.coordinates.function_space()
s = Function(S,name="deform")
mesh.coordinates.assign(mesh.coordinates + s)
f = x[0]*x[1]*x[2]
V = FunctionSpace(mesh, "CG", 1)
u, v = TrialFunction(V), TestFunction(V)
a = inner(grad(u), grad(v))*dx + u*v*dx
l = f*v*dx
u = Function(V)
solve(a==l, u, solver_parameters={'ksp_type':'preonly', 'pc_type':'lu',
"mat_type": "aij",
"pc_factor_mat_solver_type": "mumps"})
J = assemble(u*dx(domain=mesh))
c = Control(s)
Jhat = ReducedFunctional(J, c)
A = 1e-1
h = Function(S,name="V")
h.interpolate(as_vector((A*x[2], A*cos(x[1]), A*x[0])))
# Finite difference
r0 = taylor_test(Jhat, s, h, dJdm=0)
Jhat(s)
assert(r0>0.95)
r1 = taylor_test(Jhat, s, h)
Jhat(s)
assert(r1>1.95)
# First order taylor
s.block_variable.tlm_value = h
tape = get_working_tape()
tape.evaluate_tlm()
r1 = taylor_test(Jhat, s, h, dJdm=J.block_variable.tlm_value)
assert(r1>1.95)
Jhat(s)
# Second order taylor
dJdm = Jhat.derivative().vector().inner(h.vector())
Hm = compute_hessian(J, c, h).vector().inner(h.vector())
r2 = taylor_test(Jhat, s, h, dJdm=dJdm, Hm=Hm)
assert(r2>2.95) | [
9,
2918,
1178,
3889,
7925
] |
def METHOD_NAME(self, env):
self.setup_build_environment(env) | [
102,
22,
1027
] |
def METHOD_NAME(vocab_size):
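# Generate vocab_size tokens from ascii letters, moving to longer letter combinations as needed.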
base = len(string.ascii_letters)
n = math.ceil(math.log(vocab_size, base))
vocab = []
for i in range(1, n + 1):
for item in itertools.product(string.ascii_letters, repeat=i):
if len(vocab) >= vocab_size:
break
vocab.append("".join(item))
return vocab | [
129,
8932
] |
def METHOD_NAME(self):
test_subject_name = "_iblrig_test_mouse"
self.local_dir = tempfile.TemporaryDirectory()
self.remote_dir = tempfile.TemporaryDirectory()
def create_local_session():
local_session_folder = (
Path(self.local_dir.name) / "Subjects" / test_subject_name / "1900-01-01" / "001"
)
local_session_folder.mkdir(parents=True)
return str(local_session_folder)
def create_remote_subject():
remote_subject_dir = Path(self.remote_dir.name) / "Subjects"
remote_subject_dir.mkdir(parents=True)
return str(remote_subject_dir)
def assert_values(previous_session_folders):
self.assertTrue(isinstance(previous_session_folders, list))
if previous_session_folders:
# returned list is not empty and should contain strings
for session_folder in previous_session_folders:
self.assertTrue(isinstance(session_folder, str))
# Test for an existing subject, local does exist and remote does exist
# Create local session and remote subject temp directories
test_local_session_folder = create_local_session()
test_remote_subject_folder = create_remote_subject()
# Call the function
test_previous_session_folders = path_helper.get_previous_session_folders(
test_subject_name,
test_local_session_folder,
remote_subject_folder=test_remote_subject_folder,
)
assert_values(test_previous_session_folders)
# Test for an existing subject, local does exist and remote does NOT exist
self.remote_dir.cleanup()
# Call the function
test_previous_session_folders = path_helper.get_previous_session_folders(
test_subject_name,
test_local_session_folder,
remote_subject_folder=test_remote_subject_folder,
)
assert_values(test_previous_session_folders)
# Test for an existing subject, local does NOT exist and remote does exist
self.local_dir.cleanup()
test_remote_subject_folder = create_remote_subject()
# Call the function
test_previous_session_folders = path_helper.get_previous_session_folders(
test_subject_name,
test_local_session_folder,
remote_subject_folder=test_remote_subject_folder,
)
assert_values(test_previous_session_folders)
# Test for an existing subject, local does NOT exist and remote does NOT exist
self.local_dir.cleanup()
self.remote_dir.cleanup()
# Call the function
test_previous_session_folders = path_helper.get_previous_session_folders(
test_subject_name,
test_local_session_folder,
remote_subject_folder=test_remote_subject_folder,
)
assert_values(test_previous_session_folders)
# Test for a new subject
test_new_subject_name = "_new_iblrig_test_mouse"
test_new_session_folder = (Path(self.local_dir.name) / "Subjects" / test_new_subject_name / "1970-01-01" / "001")
test_previous_session_folders = path_helper.get_previous_session_folders(test_new_subject_name,
str(test_new_session_folder))
self.assertTrue(isinstance(test_previous_session_folders, list))
self.assertTrue(not test_previous_session_folders) # returned list should be empty | [
9,
19,
1511,
240,
2547
] |
def METHOD_NAME(self, origs, names):
return list(map(self.filter, origs, names)) | [
527,
245
] |
def METHOD_NAME():
bool_df = pd.DataFrame(
[
{"bool_col": True, "int_col": 0, "float_col": 3.0},
{"bool_col": False, "int_col": 100, "float_col": 0.7},
]
)
copy_of_df = bool_df.copy()
widget = AutoVizWidget(
df,
encoding,
renderer,
ipywidget_factory,
encoding_widget,
ipython_display,
spark_events=spark_events,
testing=True,
)
result = AutoVizWidget._convert_to_displayable_dataframe(bool_df)
# Ensure original DF not changed
assert_frame_equal(bool_df, copy_of_df)
assert_series_equal(bool_df["int_col"], result["int_col"])
assert_series_equal(bool_df["float_col"], result["float_col"])
assert result.dtypes["bool_col"] == object
assert len(result["bool_col"]) == 2
assert result["bool_col"][0] == "True"
assert result["bool_col"][1] == "False"
spark_events.emit_graph_render_event.assert_called_once_with(encoding.chart_type) | [
9,
197,
24,
-1,
1616
] |
def METHOD_NAME(self, token_config: "TokenConfig", previous_value):
if not self.active:
return previous_value
return METHOD_NAME() | [
19,
340,
466
] |
def METHOD_NAME(self, callback):
messenger.send('SGE_Flash', [self.nodePath])
if not callback:
messenger.send('SGE_madeSelection', [self.nodePath, callback])
else:
messenger.send('SGE_madeSelection', [self.nodePath]) | [
69,
1472
] |
def METHOD_NAME(line):
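# Rewrite ${expr} placeholders in a line as quoted string pieces and str(expr) calls joined with '+'.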
output_parts = []
while "${" in line:
start_pos = line.index("${")
end_pos = line.index("}", start_pos + 2)
if start_pos != 0:
output_parts.append("\"" + line[:start_pos].replace("\"", "\\\"") + "\"")
output_parts.append("str(" + line[start_pos+2:end_pos] + ")")
line = line[end_pos+1:]
if line:
output_parts.append("\"" + line.replace("\"", "\\\"") + "\"")
return " + ".join(output_parts) | [
4748
] |
def METHOD_NAME(
module: torch.nn.Module, dtype: torch.dtype
) -> torch.nn.Module:
qconfig = quant.QConfigDynamic(
activation=quant.PlaceholderObserver,
weight=quant.PlaceholderObserver.with_args(dtype=dtype),
)
return quant.quantize_dynamic(
module,
qconfig_spec={
BatchedFusedEmbeddingBag: qconfig,
BatchedDenseEmbeddingBag: qconfig,
BatchedDenseEmbedding: qconfig,
BatchedFusedEmbedding: qconfig,
},
mapping={
BatchedFusedEmbeddingBag: QuantBatchedEmbeddingBag,
BatchedDenseEmbeddingBag: QuantBatchedEmbeddingBag,
BatchedDenseEmbedding: QuantBatchedEmbedding,
BatchedFusedEmbedding: QuantBatchedEmbedding,
},
inplace=False,
) | [
1429,
5796,
2465
] |
def METHOD_NAME(self): ... | [
277
] |
def METHOD_NAME(self, cd_tmp_path: Path) -> None:
"""Test how path attribute is set."""
assert EnvManager("", "", path=cd_tmp_path).path == cd_tmp_path
assert EnvManager("", "").path == cd_tmp_path | [
9,
157
] |
def METHOD_NAME(seed, amount, nftoken_id, expiration, destination):
"""create_sell_offer""" | [
129,
8667,
6179
] |
def METHOD_NAME(self, resource, datum_ids, datum_kwarg_list):
self._create_datum_index()
return super().METHOD_NAME(resource, datum_ids, datum_kwarg_list) | [
2278,
408,
2030
] |
def METHOD_NAME():
"""
Detect if a new kernel version has been installed but is not running.
Returns True if a new kernel is installed, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.needs_reboot
"""
return LooseVersion(active()) < LooseVersion(latest_installed()) | [
949,
4589
] |
def METHOD_NAME(ceph_cluster, **kw):
"""Test rename of files and directories on NFS mount
Args:
**kw: Key/value pairs of configuration information to be used in the test.
"""
config = kw.get("config")
nfs_nodes = ceph_cluster.get_nodes("nfs")
clients = ceph_cluster.get_nodes("client")
port = config.get("port", "2049")
version = config.get("nfs_version")
num_files = config.get("num_files")
num_dirs = config.get("num_dirs")
no_clients = int(config.get("clients", "3"))
nfs_name = "cephfs-nfs"
nfs_mount = "/mnt/nfs"
nfs_export = "/export"
nfs_server_name = nfs_nodes[0].hostname
fs_name = "cephfs"
# If the setup doesn't have required number of clients, exit.
if no_clients > len(clients):
raise ConfigError("The test requires more clients than available")
clients = clients[:no_clients] # Select only the required number of clients
try:
# Setup nfs cluster
setup_nfs_cluster(
clients,
nfs_server_name,
port,
version,
nfs_name,
nfs_mount,
fs_name,
nfs_export,
fs_name,
)
# Create files from Client 1 and perform lookups and rename from client 2 and client 3
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
create_files_future = executor.submit(
create_rename_files,
nfs_mount,
num_files,
client1=clients[0],
client2=clients[1],
)
create_dirs_future = executor.submit(
create_rename_dirs,
nfs_mount,
num_dirs,
client1=clients[0],
client2=clients[1],
)
perform_lookups_future = executor.submit(
perform_lookups, clients[2], nfs_mount, num_files, num_dirs
)
futures.extend(
[
create_files_future,
create_dirs_future,
perform_lookups_future,
]
)
# Wait for creates and renames to complete
concurrent.futures.wait(
[
create_files_future,
create_dirs_future,
perform_lookups_future,
]
)
log.info("Successfully completed the rename tests for files and dirs")
finally:
log.info("Cleaning up")
cleanup_cluster(clients, nfs_mount, nfs_name, nfs_export)
log.info("Cleaning up successfull")
return 0 | [
22
] |
def METHOD_NAME(
fmaps_in,
kernel_size,
num_fmaps,
num_repetitions,
activation="relu",
name="conv_pass",
):
"""Create a convolution pass::
f_in --> f_1 --> ... --> f_n
where each ``-->`` is a convolution followed by a (non-linear) activation
function, and ``n`` is ``num_repetitions``. Each convolution will decrease the
size of the feature maps by ``kernel_size-1``.
Args:
f_in:
The input tensor of shape ``(batch_size, channels, depth, height,
width)`` or ``(batch_size, channels, height, width)``.
kernel_size:
Size of the kernel. Forwarded to the tensorflow convolution layer.
num_fmaps:
The number of feature maps to produce with each convolution.
num_repetitions:
How many convolutions to apply.
activation:
Which activation to use after a convolution. Accepts the name of any
tensorflow activation function (e.g., ``relu`` for ``tf.nn.relu``).
"""
fmaps = fmaps_in
if activation is not None:
activation = getattr(tf.nn, activation)
conv_layer = getattr(
tf.layers, {2: "conv2d", 3: "conv3d"}[fmaps_in.get_shape().ndims - 2]
)
for i in range(num_repetitions):
fmaps = conv_layer(
inputs=fmaps,
filters=num_fmaps,
kernel_size=kernel_size,
padding="valid",
data_format="channels_first",
activation=activation,
name=name + "_%i" % i,
)
return fmaps | [
1306,
403
] |
def METHOD_NAME(self):
# There are duplicate occurences in a, and the same item is found in b
a = np.array([[1, 3, 3, 1, 7], [3, 3, 2, 3, 0]])
b = np.array([[3, 1, 2, 5, 3], [3, 3, 3, 1, 1]])
ma, ia = setmembership.ismember_rows(a, b)
ma_known = np.array([1, 1, 1, 1, 0], dtype=bool)
ia_known = np.array([1, 0, 2, 1])
self.assertTrue(np.allclose(ma, ma_known))
self.assertTrue(np.allclose(ia, ia_known)) | [
9,
9594,
1346,
2152,
16612,
385,
61
] |
def METHOD_NAME(self):
if not self.context_model.is_modified():
return True
if self.filepath():
filename = os.path.basename(self.filepath())
id_str = "context %r" % filename
title = "Close %s" % filename
else:
id_str = "the context"
title = "Close context"
if self.context_model.is_stale():
ret = QtWidgets.QMessageBox.warning(
self,
title,
"%s is pending a resolve.\n"
"Close and discard changes?\n"
"If you close, your changes will be lost."
% id_str.capitalize(),
QtWidgets.QMessageBox.Discard,
QtWidgets.QMessageBox.Cancel)
return (ret == QtWidgets.QMessageBox.Discard)
else:
ret = QtWidgets.QMessageBox.warning(
self,
title,
"Save the changes to %s before closing?\n"
"If you don't save the context, your changes will be lost."
% id_str,
buttons=(
QtWidgets.QMessageBox.Save
| QtWidgets.QMessageBox.Discard
| QtWidgets.QMessageBox.Cancel
)
)
if ret == QtWidgets.QMessageBox.Save:
if self.is_saveable():
self._save_context()
return True
else:
assert self.is_save_as_able()
return self._save_context_as()
else:
return (ret == QtWidgets.QMessageBox.Discard)
raise RuntimeError("Should never get here") # NOSONAR | [
1046,
1462
] |
def METHOD_NAME(self):
'''Return list of fds inherited from parent process.
This returns None if the current process was not started by fork
server.
'''
return self._inherited_fds | [
19,
8002,
2320
] |
f METHOD_NAME(self, specs): | [
276,
362
] |
def METHOD_NAME(self, mock_get):
mock_get.return_value = self.cache_value
with override_settings(SENTRY_RELEASE_REGISTRY_BASEURL="http://localhost:5000"):
assert get_option_value(self.node_fn, OPTION_VERSION) == "19"
assert get_option_value(self.node_fn, OPTION_LAYER_NAME) == "SentryNodeServerlessSDK"
assert get_option_value(self.node_fn, OPTION_ACCOUNT_NUMBER) == "943013980633"
assert get_option_value(self.python_fn, OPTION_VERSION) == "2"
assert (
get_option_value(self.python_fn, OPTION_LAYER_NAME) == "SentryPythonServerlessSDK"
)
assert get_option_value(self.python_fn, OPTION_ACCOUNT_NUMBER) == "943013980633" | [
9,
41,
596
] |
def METHOD_NAME(self):
"""Initialize before test case execution"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._ha_util = HaUtil() | [
0,
1
] |
def METHOD_NAME(self):
"""
Perform transformer operations.
"""
StudentViewTransformer.collect(self.block_structure)
self.block_structure._collect_requested_xblock_fields() # pylint: disable=protected-access
StudentViewTransformer(['video']).transform(
usage_info=None,
block_structure=self.block_structure,
)
VideoBlockURLTransformer().transform(
usage_info=None,
block_structure=self.block_structure,
) | [
1444,
61,
1053
] |
def METHOD_NAME(self, rollback_registry):
registry = rollback_registry
query = registry.System.Sequence.query()
assert repr(query) == str(query.sql_statement) | [
9,
92
] |
def METHOD_NAME(self):
body_str = """{"vswitch": {"name": "name1",
"rdev": "1234",
"vid": -1}}"""
self.req.body = body_str
self.assertRaises(exception.ValidationError, vswitch.vswitch_create,
self.req) | [
9,
2950,
129,
532,
7478
] |
def METHOD_NAME(step):
"""
Reset the history of the keystone proxy
:param step:
:return:
"""
requests.request('get', 'http://{ks_proxy_ip}:{ks_proxy_port}/reset_history'.format(ks_proxy_ip=world.ks_proxy_ip,
ks_proxy_port=world.ks_proxy_port)) | [
983,
18123,
127,
351,
656
] |
def METHOD_NAME(x):
pass | [
9,
2659,
3570
] |
def METHOD_NAME(self):
return "".join(self._result).strip() | [
19,
1571
] |
def METHOD_NAME():
x = [g.group.cartesian(u) for u in U]
for y in x:
y[:] = 0
if not pure_gauge:
forces = [[g.lattice(y) for y in x] for i in fields]
log.time("fermion forces")
for i in range(len(hasenbusch_ratios)):
forces[i] = action_fermions_s[i].gradient(fields[i], fields[i][0 : len(U)])
log.time()
for i in range(len(hasenbusch_ratios)):
log.gradient(forces[i], f"{hasenbusch_ratios[i][0]}/{hasenbusch_ratios[i][1]} {i}")
for j in range(len(x)):
x[j] += forces[i][j]
return x | [
4497,
1818
] |
def METHOD_NAME(url: PushUrlType) -> dict[str, Any]:
"""Return headers for replace url in browser tab response."""
url = (url if url != "False" else "false") if isinstance(url, str) else "false"
return {HTMXHeaders.REPLACE_URL: url} | [
19,
369,
274,
572
] |
def METHOD_NAME():
query = qb.groups.from_params(request.args)
total = Group.count(query)
paging = Page.from_params(request.args, total)
groups = Group.find_all(query, page=paging.page, page_size=paging.page_size)
if groups:
return jsonify(
status='ok',
page=paging.page,
pageSize=paging.page_size,
pages=paging.pages,
more=paging.has_more,
groups=[group.serialize for group in groups],
total=total
)
else:
return jsonify(
status='ok',
page=paging.page,
pageSize=paging.page_size,
pages=paging.pages,
more=paging.has_more,
message='not found',
groups=[],
total=0
) | [
245,
861
] |
def METHOD_NAME(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False and "Invalid optimizer"
raise ValueError
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer | [
129,
968
] |
def METHOD_NAME(self) -> list[BiosimulationsProject]:
projects: list[BiosimulationsProject]
if os.path.exists(self.projects_ndjson_file):
with open(self.projects_ndjson_file) as f:
projects = [BiosimulationsProject(**json.loads(line)) for line in f.readlines()]
else:
projects = []
return projects | [
203,
2847
] |