text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self):
return self._stack.pop(-1) | [
760,
913
] |
def METHOD_NAME(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
cmd_str = "cd " + workspace + ";"
if set_jemalloc_version_cmd:
cmd_str += set_jemalloc_version_cmd
cmd_str += "LD_PRELOAD=${JEMALLOC_VERSION} "
cmd_str += " ".join(tf_envs) + " $(which python) -u "
cmd_str += tf_script + " " + " ".join(tf_args)
print("run tensorflow command:", cmd_str)
return sp.call(cmd_str, shell=True) | [
22,
1696,
202
] |
def METHOD_NAME(self): | [
9,
19,
1667,
44,
16963
] |
def METHOD_NAME(self,regexp_pattern=None):
assert True, \
"method available is not implemented at this time" | [
1272
] |
def METHOD_NAME(self):
"""The repository variable secured"""
return self.get_data("secured") | [
14657
] |
def METHOD_NAME(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(capfd, package):
"""Test using Amazon Braket to draw using ascii output is of the expected length."""
# pylint: disable=eval-used
circuit_wrapper(eval(f"{package}_bell")).draw(package="braket", output="ascii")
out, err = capfd.readouterr()
assert len(err) == 0
assert len(out) == 67 | [
9,
1240,
1241,
1100
] |
def METHOD_NAME(port: int, share: bool = False, tunnel_id: str = None):
address = '0.0.0.0' if shared.args.listen else '127.0.0.1'
server = ThreadingHTTPServer((address, port), Handler)
def on_start(public_url: str):
print(f'Starting non-streaming server at public url {public_url}/api')
if share:
try:
try_start_cloudflared(port, tunnel_id, max_attempts=3, on_start=on_start)
except Exception:
pass
else:
print(
f'Starting API at http://{address}:{port}/api')
server.serve_forever() | [
22,
163
] |
def METHOD_NAME(self):
self.run_loop.cancel() | [
6578,
5473
] |
def METHOD_NAME(coordinates, expected):
out = irsa.core._parse_coordinates(coordinates)
for a, b in zip(out.split(), expected.split()):
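# Compare token by token: values that parse as floats are compared numerically, anything else by string equality.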
try:
a = float(a)
b = float(b)
np.testing.assert_almost_equal(a, b)
except ValueError:
assert a == b | [
9,
214,
4645
] |
def METHOD_NAME(self, scope, name, key, value, recursive=False, *, session: "Optional[Session]" = None):
"""
Add metadata to data identifier.
:param scope: The scope name.
:param name: The data identifier name.
:param key: the key.
:param value: the value.
:param recursive: Option to propagate the metadata change to content.
:param session: The database session in use.
"""
pass | [
0,
773
] |
def METHOD_NAME(self):
queryset = super().METHOD_NAME()
if self.action == "list":
perm = WebhookPermission.create_scope_list(self.request)
queryset = perm.filter(queryset)
return queryset | [
19,
2386
] |
def METHOD_NAME(self):
runner = CliRunner()
db = Db()
expected_output = """\ | [
9,
2601,
697,
654,
240
] |
def METHOD_NAME(tensor1, tensor2, padding_index=-100):
"""Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
return paddle.concat((tensor1, tensor2), axis=0)
# raise ValueError("Error")
# Let's figure out the new shape
new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tuple(
tensor1.shape[2:]
)
# Now let's fill the result tensor
# result = tensor1.new_full(new_shape, padding_index)
result = paddle.full(new_shape, padding_index, dtype=tensor1.dtype)
result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2
return result | [
3340,
2459,
61,
7595
] |
def METHOD_NAME(self):
# The vote() call is synchronous, which makes it difficult to
# coordinate the action of multiple threads that all call
# vote(). This method sends the vote call, then sets the
# event saying vote was called, then waits for the vote
# response.
future = self.storage._server.call('vote', id(self.trans), wait=False)
self.ready.set()
future.result(9) | [
-1
] |
def METHOD_NAME(version_set):
highest = get_highest(version_set)
assert highest == "1.1.1"
assert get_highest(set(["1.1.1"])) == "1.1.1" | [
9,
19,
1211
] |
def METHOD_NAME(c2d_x, c3d_x):
utility3d.ExpandFunctionTo3d(c2d_x, c3d_x).solve()
assert numpy.allclose(c3d_x.dat.data_ro.min(), 0.0)
assert numpy.allclose(c3d_x.dat.data_ro.max(), 2.0) | [
9,
215,
1085,
101,
24,
1529,
1104
] |
def METHOD_NAME(self):
"""Returns a list of categories that do not contain any recipes"""
return self.repos.categories.get_empty() | [
19,
75,
35
] |
def METHOD_NAME(self, stage):
return self.pipeline.METHOD_NAME(stage) | [
724
] |
def METHOD_NAME(self):
self.assertEqual(self.bezier.npoints, 26)
for x in [0., .25, .5, .75, 1.]:
for y in [0., .25, .5, .75, 1.]:
if x or y:
self.assertIn([x, y], self.bezier.coords.tolist())
self.assertIn([0., .125], self.bezier.coords.tolist())
self.assertIn([.125, 0.], self.bezier.coords.tolist()) | [
9,
182
] |
def METHOD_NAME(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default), "exec"
or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol) | [
296,
462
] |
def METHOD_NAME(pathname, **kwargs):
mime_types = kwargs["mime_types"]
uid = kwargs["uid"]
gid = kwargs["gid"]
destination = kwargs["destination"]
logger = kwargs["logger"] if "logger" in kwargs and kwargs["logger"] else logging
logger.info(f"{scriptName}:\t👓\t{pathname}")
if os.path.isfile(pathname) and os.path.isdir(destination):
time.sleep(0.1)
try:
os.chown(pathname, uid, gid)
# get the file magic mime type
fileMime = magic.from_file(pathname, mime=True)
if fileMime in mime_types:
# looks like this is a compressed file, we're assuming it's a zeek log archive to be processed by filebeat
logger.info(f"{scriptName}:\t🖅\t{pathname} [{fileMime}] to {destination}")
shutil.move(pathname, os.path.join(destination, os.path.basename(pathname)))
else:
# unhandled file type uploaded, delete it
logger.warning(f"{scriptName}:\t🗑\t{pathname} [{fileMime}]")
os.unlink(pathname)
except Exception as genericError:
logger.error(f"{scriptName}:\texception: {genericError}") | [
171,
2422
] |
def METHOD_NAME(self):
"""
Returns the topmost commit id for the current branch.
:return: Commit id.
"""
return self.git_cmd("log --pretty=format:%H -1").stdout.strip() | [
19,
1635,
1160
] |
def METHOD_NAME(self, workspace: Workspace, context: TracimContext) -> None:
... | [
69,
1976,
1108
] |
def METHOD_NAME():
try:
from sklearn.metrics import cohen_kappa_score
return False
except ImportError:
gscript.warning(_(""))
return True | [
557,
-1
] |
def METHOD_NAME(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Args:
inputs: A list of input tensors.
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs) | [
7595
] |
def METHOD_NAME(name, sig=None):
"""
Return the status for a service.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
"""
if sig:
return bool(__salt__["status.pid"](sig))
contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
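# A glob pattern expands to every matching service; a plain name is checked directly.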
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
cmd = "/etc/rc.d/{} onestatus".format(service)
results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True)
if contains_globbing:
return results
return results[name] | [
452
] |
def METHOD_NAME(
self,
table_name: str,
rule_name: str
) -> None:
"""Remove PBH rule from Config DB."""
self.config_db.delete_entry(self.CDB_PBH_RULE, "{}|{}".format(table_name, rule_name)) | [
188,
13435,
446
] |
def METHOD_NAME(self, trid):
return trid in self.__transports | [
954
] |
def METHOD_NAME(self, axis):
# for (1, 1), In cuNumeric, raises error in normalize_axis_tuple
expected_exc = ValueError
ndim = 2
with pytest.raises(expected_exc):
np.linalg.norm(np_arrays[ndim], axis=axis)
with pytest.raises(expected_exc):
num.linalg.norm(num_arrays[ndim], axis=axis) | [
9,
2227,
532,
99
] |
def METHOD_NAME(self):
another_app = self.make_app()
another_app.date_created = None
another_app.save()
self.addCleanup(another_app.delete)
response = self._assert_auth_get_resource(self.list_endpoint, allow_session_auth=True)
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(content["meta"], {
'limit': None, 'next': None, 'offset': 0, 'previous': None,
'total_count': 3
}) | [
9,
19,
245,
1051,
3053
] |
def METHOD_NAME(target_value: str, ranges: list) -> str:
value = parse_target_value(target_value)
value_origin_len = parse_target_value_length(target_value)
if not value:
return target_value.replace("<", "&lt;")
sorted_ranges = sorted(ranges, key=lambda x: x["start"])
for range_ in sorted_ranges:
if range_["start"] > value_origin_len or range_["stop"] > value_origin_len:
return f'<em style="color:red;">{value}</em>'
if sorted_ranges and value and len(value) == value_origin_len:
final_str = []
str_dict = {ind: xss_prevent(str_) for ind, str_ in enumerate(value)}
for range_ in sorted_ranges:
str_dict[range_["start"]] = '<em style="color:red;">' + str_dict[range_["start"]]
str_dict[range_["stop"] - 1] = str_dict[range_["stop"] - 1] + "</em>"
final_str = [x[1] for x in sorted(str_dict.items(), key=lambda kv: kv[0])]
return "".join(final_str)
if len(value) != AGENT_DEFAULT_LENGTH:
return f'<em style="color:red;">{value}</em>'
try:
if sorted_ranges and value and len(value) < value_origin_len:
begin_part_length = ceil((AGENT_DEFAULT_LENGTH - 3) / 2)
end_part_length = int((AGENT_DEFAULT_LENGTH - 3) / 2)
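# The value was truncated by the agent: rebuild index-to-character maps for the kept head and tail so highlight ranges still line up with original offsets.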
str_dict_begin = {ind: xss_prevent(str_) for ind, str_ in enumerate(value[:begin_part_length])}
str_dict_end = {
ind + (value_origin_len - end_part_length) + 3: xss_prevent(str_)
for ind, str_ in enumerate(value[-end_part_length:])
}
str_dict = {}
str_dict.update(str_dict_begin)
str_dict.update(str_dict_end)
str_dict[begin_part_length + 2] = "..."
for range_ in sorted_ranges:
if range_["start"] in str_dict and (range_["stop"] - 1) in str_dict:
str_dict[range_["start"]] = '<em style="color:red;">' + str_dict[range_["start"]]
str_dict[range_["stop"] - 1] = str_dict[range_["stop"] - 1] + "</em>"
if range_["start"] in str_dict and (range_["stop"] - 1) not in str_dict:
str_dict[range_["start"]] = '<em style="color:red;">' + str_dict[range_["start"]]
str_dict[begin_part_length] = "</em>" + str_dict[begin_part_length]
if range_["start"] not in str_dict and (range_["stop"] - 1) in str_dict:
str_dict[value_origin_len - end_part_length] = (
'<em style="color:red;">' + str_dict[value_origin_len - end_part_length]
)
str_dict[range_["stop"] - 1] = str_dict[range_["stop"] - 1] + "</em>"
if range_["start"] not in str_dict or (range_["stop"]) not in str_dict:
str_dict[begin_part_length + 2] = '<em style="color:red;">...</em>'
final_str = [x[1] for x in sorted(str_dict.items(), key=lambda kv: kv[0])]
return "".join(final_str)
except KeyError as e:
logger.warning(e, exc_info=e)
return f'<em style="color:red;">{value}</em>' | [
8186,
1030,
99
] |
def METHOD_NAME(self, jobID):
if not jobID:
raise CaptchaBadJobID("CapSolver: Error bad job id to request task result.")
def _checkRequest(response):
self.checkErrorStatus(response, 'requestJob')
try:
if response.ok and response.json()['status'] == 'ready':
return True
except Exception:
pass
return None
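# Poll getTaskResult every 5 seconds (up to 180 s total) until _checkRequest reports the task as ready.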
response = polling2.poll(
lambda: self.session.post(
f'{self.host}/getTaskResult',
json={
'clientKey': self.api_key,
'taskId': jobID
},
timeout=30
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
try:
rPayload = response.json()['solution']
if 'token' in rPayload:
return rPayload['token']
else:
return rPayload['gRecaptchaResponse']
except Exception:
pass
raise CaptchaTimeout(
"CapSolver: Error failed to solve Captcha."
) | [
377,
202
] |
def METHOD_NAME(spikeThreshold):
TaskTracker.SpikeThreshold = spikeThreshold | [
0,
945,
853
] |
def METHOD_NAME(self) -> Any:
self.wait_window(self)
return self._result | [
697
] |
def METHOD_NAME(func_id, args):
"""Set the maximum number of threads."""
_internal_assert(func_id == "max_num_threads", "This function cannot be directly invoked!")
_internal_assert(args.__len__() <= 1, "At most one argument accepted!")
if args.__len__() == 0:
res = Target.current().METHOD_NAME
else:
_internal_assert(isinstance(args[0], _expr.IntImm), "In tvm bool should be uint")
res = Target.current(args[0].value).METHOD_NAME
return convert(res) | [
232,
181,
1573
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self):
return self.if_axis_available | [
19,
2227,
1272
] |
def METHOD_NAME():
if support.verbose:
sys.stdout.write('stopping HTTPS server\n')
server.stop()
if support.verbose:
sys.stdout.write('joining HTTPS thread\n')
server.join() | [
950
] |
def METHOD_NAME(self) -> str:
"""Returns a string, the captured stderr."""
return self.capsys.readouterr().err | [
3929
] |
def METHOD_NAME(self, action, *args):
def register_formatter_class(formatter_cls):
formatter = formatter_cls()
self.formatters[action] = formatter
self.choices.append((action, formatter.label))
if args:
# register_action has been invoked as register_action(action, label, message); create a LogFormatter
# subclass and register that
label, message = args
formatter_cls = type(
"_LogFormatter", (LogFormatter,), {"label": label, "message": message}
)
register_formatter_class(formatter_cls)
else:
# register_action has been invoked as a @register_action(action) decorator; return the function that
# will register the class
return register_formatter_class | [
372,
1006
] |
def METHOD_NAME(tmp_path):
file_path = tmp_path / "my-attachment.txt"
new_file_path = counter_duplicate_path(file_path)
assert new_file_path == file_path
file_path.write_text("some data")
new_file_path = counter_duplicate_path(file_path)
assert new_file_path != file_path
assert new_file_path.name == "my-attachment-2.txt"
new_file_path.write_text("some data 2")
newest_file_path = counter_duplicate_path(file_path)
assert newest_file_path.name == "my-attachment-3.txt" | [
9,
2469,
1119,
157
] |
def METHOD_NAME(self, text):
"""Tokenize a string."""
char_tokens = []
for s in text:
char_tokens.extend(s)
return char_tokens | [
4022
] |
def METHOD_NAME(self, **kwargs):
# figure out the MIME type
for ext in self.component.mime_types:
if self.office.name.lower().endswith(ext):
content_type = self.component.mime_types[ext]
break
response = HttpResponse(content_type=content_type)
self.sendfile(self.office, response)
return response | [
136,
17
] |
def METHOD_NAME(line, alist, cur=None, sql=None):
"""Update lines using only the boundary"""
to_up = []
bbox = Bbox()
aline = Line()
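# Pair the line with every area whose boundary intersects it or which contains the line's first point.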
for area in alist:
bbox = area.bbox(bbox)
if (intersects(area.get_points(aline), line)) or (
area.contain_pnt(line[0], bbox)
):
to_up.append((line.cat, area.cat))
if (cur is not None) and (sql is not None):
cur.executemany(sql, to_up)
return to_up | [
86,
513
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters | [
539,
386
] |
def METHOD_NAME(self):
'''
Returns last Pose3d.
@return last JdeRobotTypes Pose3d saved
'''
self.lock.acquire()
pose = self.data
self.lock.release()
return pose | [
19,
4803
] |
def METHOD_NAME(self, mock_class): | [
9,
607,
19,
2538,
100
] |
def METHOD_NAME(self):
super(TestTOC, self).METHOD_NAME() | [
531,
481
] |
def METHOD_NAME():
print("> ====== Loading frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, "rb") as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name="")
sess = tf.Session(graph=detection_graph)
print("> ====== Inference graph loaded.")
return detection_graph, sess | [
557,
1748,
303
] |
def METHOD_NAME(length: float):
print(type(length))
return gf.components.straight(length=length) | [
-1,
-1
] |
def METHOD_NAME(clause):
def term2string(t):
if len(t)==0:
return "1"
return "*".join(["x("+str(v) +")" for v in t])
vars=tuple([v for v in clause if v>0])
negated_vars=tuple([-v for v in clause if v<0])
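# A negated literal expands as (1 + x), so each 0/1 combination picks which negated variables join the positive ones in a product term.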
if len(negated_vars)>0:
terms=[tuple([negated_vars[i] for (i,j) in enumerate(combination) if j==1])\
+ vars for combination\
in Cartesian(list(repeat([0,1],len(negated_vars))))]
else:
terms=[vars]
res="+".join([term2string(t) for t in terms])
return res
#add_vars=[negated_var[i] for (i,j) in enumerate(combination) if j==1] | [
370,
9229,
4834
] |
def METHOD_NAME(Name, EnableCustomLogConfiguration = False, CustomLogObjects = None):
init_vars(CustomLogObjects)
return Test(EnableCustomLogConfiguration, CustomLogObjects) | [
9,
9043
] |
def METHOD_NAME(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
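# Dotted key: walk (and create) nested dicts, storing the YAML-parsed value at the leaf.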
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config | [
214,
1671
] |
def METHOD_NAME(self):
bootstrap = Bootstrap()
bootstrap.ztps.set_config_response()
bootstrap.ztps.set_node_check_response()
bootstrap.ztps.set_definition_response(actions=[{"action": "test_action"}])
flash_filename = random_string()
bootstrap.ztps.set_action_response(
"test_action", fail_flash_file_action(bootstrap.flash, flash_filename)
)
with io.open(bootstrap.rc_eos, "w", encoding="utf8") as fd:
fd.write(ensure_text(random_string()))
with io.open(bootstrap.startup_config, "w", encoding="utf8") as fd:
fd.write(ensure_text(random_string()))
with io.open(bootstrap.boot_extensions, "w", encoding="utf8") as fd:
fd.write(ensure_text(random_string()))
os.mkdir(bootstrap.boot_extensions_folder)
with io.open(
os.path.join(bootstrap.boot_extensions_folder, "my_extension"), "w", encoding="utf8"
) as fd:
fd.write(ensure_text(random_string()))
bootstrap.start_test()
try:
self.assertTrue(bootstrap.eapi_node_information_collected())
self.assertTrue(bootstrap.action_failure())
self.assertFalse(bootstrap.error)
self.assertFalse(os.path.isfile(os.path.join(bootstrap.flash, flash_filename)))
self.assertFalse(os.path.isfile(bootstrap.rc_eos))
self.assertFalse(os.path.isfile(bootstrap.startup_config))
self.assertFalse(os.path.isfile(bootstrap.boot_extensions))
self.assertFalse(os.path.isdir(bootstrap.boot_extensions_folder))
except AssertionError as assertion:
print("Output: {}".format(bootstrap.output))
print("Error: {}".format(bootstrap.error))
raise_exception(assertion)
finally:
bootstrap.end_test() | [
9,
1006,
374
] |
def METHOD_NAME(n):
"""
Generates the alternating group on ``n`` elements as a permutation group.
For ``n > 2``, the generators taken are ``(0 1 2), (0 1 2 ... n-1)`` for
``n`` odd
and ``(0 1 2), (1 2 ... n-1)`` for ``n`` even (See [1], p.31, ex.6.9.).
After the group is generated, some of its basic properties are set.
The cases ``n = 1, 2`` are handled separately.
Examples
========
>>> G = AlternatingGroup(4)
>>> G.is_group
True
>>> a = list(G.generate_dimino())
>>> len(a)
12
>>> all(perm.is_even for perm in a)
True
See Also
========
SymmetricGroup, CyclicGroup, DihedralGroup
References
==========
[1] Armstrong, M. "Groups and Symmetry"
"""
# small cases are special
if n in (1, 2):
return PermutationGroup([Permutation([0])])
a = list(range(n))
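# Build the 3-cycle (0 1 2) in array form.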
a[0], a[1], a[2] = a[1], a[2], a[0]
gen1 = a
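# gen2: the n-cycle (0 1 ... n-1) when n is odd, or (1 2 ... n-1) when n is even.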
if n % 2:
a = list(range(1, n))
a.append(0)
gen2 = a
else:
a = list(range(2, n))
a.append(1)
a.insert(0, 0)
gen2 = a
gens = [gen1, gen2]
if gen1 == gen2:
gens = gens[:1]
G = PermutationGroup([_af_new(a) for a in gens], dups=False)
if n < 4:
G._is_abelian = True
G._is_nilpotent = True
else:
G._is_abelian = False
G._is_nilpotent = False
if n < 5:
G._is_solvable = True
else:
G._is_solvable = False
G._degree = n
G._is_transitive = True
G._is_alt = True
return G | [
5263,
846
] |
def METHOD_NAME(self, request, event, data): # pragma: no cover
"""
Hook for after an event is updated through the API.
The given event has been saved already, naturally.
:param request: The request that caused this event to be updated.
:type request: rest_framework.request.Request
:param event: The event that was updated.
:type event: events.models.Event
:param data: The data dict that was used to update the Event
:type data: dict
"""
pass | [
72,
86,
417
] |
def METHOD_NAME(self):
events = []
class Test(unittest.IsolatedAsyncioTestCase):
def setUp(self):
self.assertEqual(events, [])
events.append('setUp')
async def asyncSetUp(self):
self.assertEqual(events, ['setUp'])
events.append('asyncSetUp')
async def test_func(self):
self.assertEqual(events, ['setUp',
'asyncSetUp'])
events.append('test')
self.addAsyncCleanup(self.on_cleanup)
async def asyncTearDown(self):
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test'])
events.append('asyncTearDown')
def tearDown(self):
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test',
'asyncTearDown'])
events.append('tearDown')
async def on_cleanup(self):
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test',
'asyncTearDown',
'tearDown'])
events.append('cleanup')
test = Test("test_func")
test.run()
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test',
'asyncTearDown',
'tearDown',
'cleanup']) | [
9,
324,
3351
] |
def METHOD_NAME(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', testutils.create_report_mocked())
def run_mocked(args):
if args == ['grub2-install', '/dev/vdb']:
raise_call_error(args)
else:
assert args == ['grub2-install', '/dev/vda']
monkeypatch.setattr(updategrubcore, 'run', run_mocked)
devices = ['/dev/vda', '/dev/vdb']
updategrubcore.update_grub_core(devices)
assert reporting.create_report.called
assert UPDATE_FAILED_TITLE == reporting.create_report.reports[0]['title']
summary = reporting.create_report.reports[0]['summary']
assert 'GRUB was successfully updated on the following devices: /dev/vda' in summary
assert 'however GRUB update failed on the following devices: /dev/vdb' in summary | [
9,
86,
7500,
2351,
1434
] |
def METHOD_NAME(self, component_config: Union[Dict,
TeslaBatSetup,
TeslaCounterSetup,
TeslaInverterSetup]) -> None:
if isinstance(component_config, Dict):
component_type = component_config["type"]
else:
component_type = component_config.type
component_config = dataclass_from_dict(COMPONENT_TYPE_TO_MODULE[
component_type].component_descriptor.configuration_factory, component_config)
if component_type in self.COMPONENT_TYPE_TO_CLASS:
self.components["component"+str(component_config.id)] = (self.COMPONENT_TYPE_TO_CLASS[component_type](
component_config))
else:
raise Exception(
"illegal component type " + component_type + ". Allowed values: " +
','.join(self.COMPONENT_TYPE_TO_CLASS.keys())
) | [
238,
1007
] |
def METHOD_NAME(
span, # type: Span
endpoint_name, # type: str
args, # type: Tuple[Any]
args_names, # type: Tuple[str]
args_traced, # type: Set[str]
):
# type: (...) -> None
if endpoint_name not in EXCLUDED_ENDPOINT:
exclude_set = EXCLUDED_ENDPOINT_TAGS.get(endpoint_name, frozenset()) # type: FrozenSet[str]
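# Tag each traced positional argument, skipping excluded tags and any "...Body" payloads.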
set_flattened_tags(
span,
items=((name, value) for (name, value) in zip(args_names, args) if name in args_traced),
exclude_policy=lambda tag: tag in exclude_set or tag.endswith("Body"),
processor=truncate_arg_value,
) | [
238,
1244,
718,
114
] |
def METHOD_NAME(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("location", AAZStrType, ".location")
_builder.set_prop("name", AAZStrType, ".perimeter_name")
_builder.set_prop("tags", AAZDictType, ".tags")
tags = _builder.get(".tags")
if tags is not None:
tags.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value) | [
459
] |
def METHOD_NAME(self):
with tmp_to_root_org():
queryset = self.model.get_user_related_tickets(self.request.user)
return queryset | [
19,
2386
] |
def METHOD_NAME(self):
return self.tgt_dict | [
1030,
2445
] |
def METHOD_NAME(self, key, modifiers):
"""Called whenever a key is pressed."""
if key == arcade.key.UP or key == arcade.key.W:
if self.physics_engine.can_jump():
self.player_sprite.change_y = PLAYER_JUMP_SPEED
elif key == arcade.key.LEFT or key == arcade.key.A:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED | [
69,
59,
2971
] |
def METHOD_NAME():
"""
Test present
"""
table_name = "awl"
name = "baruwa"
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
mock_true = MagicMock(return_value=True)
mock_false = MagicMock(return_value=False)
with patch.dict(
postgres_privileges.__salt__, {"postgres.has_privileges": mock_true}
):
comt = "The requested privilege(s) are already set"
ret.update({"comment": comt, "result": True})
assert postgres_privileges.present(name, table_name, "table") == ret
with patch.dict(
postgres_privileges.__salt__,
{
"postgres.has_privileges": mock_false,
"postgres.privileges_grant": mock_true,
},
):
with patch.dict(postgres_privileges.__opts__, {"test": True}):
comt = "The privilege(s): {} are set to be granted to {}".format(
"ALL", name
)
ret.update({"comment": comt, "result": None})
assert (
postgres_privileges.present(
name, table_name, "table", privileges=["ALL"]
)
== ret
)
with patch.dict(postgres_privileges.__opts__, {"test": False}):
comt = "The privilege(s): {} have been granted to {}".format("ALL", name)
ret.update(
{"comment": comt, "result": True, "changes": {"baruwa": "Present"}}
)
assert (
postgres_privileges.present(
name, table_name, "table", privileges=["ALL"]
)
== ret
) | [
9,
2541,
410
] |
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC | [
200,
1881
] |
def METHOD_NAME(self):
"""
Check if EFI mode is requested
:return: The requested EFI mode or None if no EFI mode requested
:rtype: str
"""
if self.firmware in Defaults.get_efi_capable_firmware_names():
return self.firmware | [
6205,
854
] |
def METHOD_NAME(vm_session, vm_iface):
"""
Check rx and tx package
:param vm_session: An session to VM
:param vm_iface: VM's interface
"""
cmd = "ip -s -json link show %s" % vm_iface
status, stdout = vm_session.cmd_status_output(cmd)
if status or not stdout:
raise exceptions.TestFail("Failed to run cmd - {}, status - {}, "
"output - {}.".format(cmd, status, stdout))
ip_info = eval(stdout.strip())
LOG.debug("VM iface's info: %s.", ip_info)
tx_info = ip_info[0]['stats64']['tx']['packets']
rx_info = ip_info[0]['stats64']['rx']['packets']
if rx_info != tx_info:
raise exceptions.TestFail("The value of rx and tx should be same.") | [
250,
2068,
2543,
2975
] |
def METHOD_NAME(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self):
self.failUnless(u'fmt' in self.riff_1)
self.failUnless(u'data' in self.riff_1)
self.failUnless(u'id3' in self.riff_1)
self.failUnless(u'fmt' in self.riff_2)
self.failUnless(u'data' in self.riff_2) | [
9,
220,
831
] |
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict()) | [
24,
3
] |
def METHOD_NAME(url, expected):
"""Test get_name with different URLs."""
name = spack.cmd.create.get_name(None, url)
assert name == expected | [
9,
19,
156,
2248
] |
def METHOD_NAME():
assert_range(1, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(2, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(3, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(4, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(5, 75, [2, 3, 4, 5, 6, 7, 8])
assert_range(6, 75, [3, 4, 5, 6, 7, 8, 9])
assert_range(8, 75, [5, 6, 7, 8, 9, 10, 11])
assert_range(37, 75, [34, 35, 36, 37, 38, 39, 40])
assert_range(70, 75, [67, 68, 69, 70, 71, 72, 73])
assert_range(71, 75, [68, 69, 70, 71, 72, 73, 74])
assert_range(72, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(73, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(74, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(75, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(1, 8, [1, 2, 3, 4, 5, 6, 7, 8]) | [
9,
1174,
661
] |
def METHOD_NAME(self, X, y):
"""Fit time series regressor to training data.
private _fit containing the core logic, called from fit
Writes to self:
Sets fitted model attributes ending in "_".
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
y : 1D np.array of float, of shape [n_instances] - regression labels for fitting
indices correspond to instance indices in X
Returns
-------
self : Reference to self.
"""
estimator = self._get_delegate()
estimator.fit(X=X, y=y)
return self | [
90
] |
def METHOD_NAME(dev):
file_name = "sg_write_same.sh"
guest_dir = "/tmp/"
deps_dir = virttest_data_dir.get_deps_dir() + "/thin-provision/"
host_file = os.path.join(deps_dir, file_name)
guest_file = guest_dir + file_name
vm.copy_files_to(host_file, guest_dir)
status, output = session.cmd_status_output(
"$SHELL " + guest_file + " " + dev)
if status != 0:
test.fail("run sg_write_same failed:" + output)
test.log.debug(output) | [
22,
8767,
77,
1101
] |
def METHOD_NAME(self) -> None:
pass | [
356,
176
] |
def METHOD_NAME(self, exp_op: OperatorBase) -> Union[list, float]:
r"""
Compute the variance of the expectation estimator. Because Aer takes this expectation
with matrix multiplication, the estimation is exact and the variance is always 0,
but we need to return those values in a way which matches the Operator's structure.
Args:
exp_op: The full expectation value Operator after sampling.
Returns:
The variances or lists thereof (if exp_op contains ListOps) of the expectation value
estimation, equal to 0.
"""
# Need to do this to mimic Op structure
def sum_variance(operator):
if isinstance(operator, ComposedOp):
return 0.0
elif isinstance(operator, ListOp):
return operator.combo_fn([sum_variance(op) for op in operator.oplist])
raise TypeError(f"Variance cannot be computed for {operator.__class__.__name__}.")
return sum_variance(exp_op) | [
226,
2873
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(plugin, item_id, sub_category_url, page, **kwargs):
resp = urlquick.get(sub_category_url + 'page/%s/' % page)
root = resp.parse("section", attrs={"class": "grid-container section-video"})
for video_datas in root.iterfind(".//div"):
if 'single-video' in video_datas.get('class'):
video_title = video_datas.find('.//img').get('title')
video_image = URL_ROOT % item_id + video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
root_change_pages = resp.parse()
if root_change_pages.find(".//a[@class='next page-numbers']") is not None:
yield Listitem.next_page(
item_id=item_id, sub_category_url=sub_category_url, page=str(int(page) + 1)) | [
245,
1343
] |
def METHOD_NAME(self, token):
"""
Apply the Legality Principle in combination with
Onset Maximization to return a list of syllables.
:param token: Single word or token
:type token: str
:return syllable_list: Single word or token broken up into syllables.
:rtype: list(str)
"""
syllables = []
syllable, current_onset = "", ""
vowel, onset = False, False
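# Scan the token right to left, growing the current syllable while its onset stays legal.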
for char in token[::-1]:
char_lower = char.lower()
if not vowel:
syllable += char
vowel = bool(char_lower in self.vowels)
else:
if char_lower + current_onset[::-1] in self.legal_onsets:
syllable += char
current_onset += char_lower
onset = True
elif char_lower in self.vowels and not onset:
syllable += char
current_onset += char_lower
else:
syllables.append(syllable)
syllable = char
current_onset = ""
vowel = bool(char_lower in self.vowels)
syllables.append(syllable)
syllables_ordered = [syllable[::-1] for syllable in syllables][::-1]
return syllables_ordered | [
4022
] |
def METHOD_NAME(request):
"""Basic table with or without index on integer column a"""
T = _get_test_table()
if request.param:
T.add_index("a")
return T | [
11640
] |
def METHOD_NAME(self): | [
9,
2236,
2964,
1541,
1635
] |
def METHOD_NAME(self, blob):
try:
json.loads(blob)
except ValueError:
return False
return True | [
137,
763
] |
def METHOD_NAME(experiment_id: int) -> None:
# We run this in a subprocess to avoid module name collisions
# when performing checkpoint export of different models.
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(
target=_export_and_load_model,
args=(
experiment_id,
conf.make_master_url(),
),
)
p.start()
p.join()
assert p.exitcode == 0, p.exitcode | [
294,
61,
557,
578
] |
def METHOD_NAME(p):
x = p[..., 0]
y = p[..., 1]
val = 0.2-(np.abs(x-0.5)-0.5)*(np.abs(y-0.5)-0.5)
return val | [
10094
] |
def METHOD_NAME():
"""
Returns list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord('!'),
ord('~') + 1)) + list(range(
ord('¡'),
ord('¬') + 1)) + list(range(ord('®'),
ord('ÿ') + 1))
cs = bs[:]
n = 0
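# Remap every byte that is not already printable to an unused code point above 255.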
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs)) | [
321,
24,
774
] |
def METHOD_NAME(cls, expr):
"""Normalizes all tags in an OR expression (and return it as list).
:param expr: OR expression to normalize and split (as string).
:return: Generator of normalized tags (as string)
"""
for tag in expr.strip().split(','):
yield cls.normalize_tag(tag) | [
1568,
114,
280,
894
] |
def METHOD_NAME(
*, db_session, case_severity: CaseSeverity, case_severity_in: CaseSeverityUpdate
) -> CaseSeverity:
"""Updates a case severity."""
case_severity_data = case_severity.dict()
update_data = case_severity_in.dict(skip_defaults=True, exclude={"project", "color"})
for field in case_severity_data:
if field in update_data:
setattr(case_severity, field, update_data[field])
if case_severity_in.color:
case_severity.color = case_severity_in.color.as_hex()
db_session.commit()
return case_severity | [
86
] |
def METHOD_NAME(
db: Session,
datadog_connection_config: ConnectionConfig,
datadog_dataset,
datadog_config,
) -> Generator:
fides_key = datadog_config["fides_key"]
datadog_connection_config.name = fides_key
datadog_connection_config.key = fides_key
datadog_connection_config.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, datadog_dataset)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": datadog_connection_config.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db) | [
7294,
126,
200
] |
def METHOD_NAME(self):
"""Overrides can return actual input device."""
return None | [
19,
362,
398
] |
def METHOD_NAME(frame, word):
if frame < 28:
return False
return True | [
12596
] |
def METHOD_NAME(self): | [
9,
3140,
684,
1591,
189
] |
def METHOD_NAME(self, z): | [
5862,
2079
] |
def METHOD_NAME(self) -> str:
"""
Returns the open lineage dataset name as per
https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md
"""
return urllib.parse.urlsplit(self.path).path | [
7227,
126,
156
] |
def METHOD_NAME(lumen, color):
return lumen * ((1 / MAX_LIGHT_EFFICIENCY_EFFICACY) / srgb_to_luminance(color)) | [
1390,
1928,
1669
] |
def METHOD_NAME(env):
for conn in shardsConnections(env):
allConnected = False
while not allConnected:
res = conn.execute_command('timeseries.INFOCLUSTER')
nodes = res[4]
allConnected = True
for n in nodes:
status = n[17]
if status != b'connected' and status != b'uninitialized':
allConnected = False
if not allConnected:
time.sleep(0.1) | [
1162,
2059,
924
] |
def METHOD_NAME(self) -> int: ... | [
5661
] |
def METHOD_NAME(self, request: Request) -> Response:
"""List system tasks"""
tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)
return Response(TaskSerializer(tasks, many=True).data) | [
245
] |
def METHOD_NAME(self, context, opt):
"usage: quorum <bool>"
if not utils.verify_boolean(opt):
context.fatal_error("%s: bad boolean option" % opt)
return cib_status.set_quorum(utils.is_boolean_true(opt)) | [
74,
2682
] |