text stringlengths 15 to 7.82k | ids sequencelengths 1 to 7 |
---|---|
def METHOD_NAME(self):
class Foo(object):
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return 1
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass") | [
9,
2573,
215
] |
def METHOD_NAME(
state: "State",
host: "Host",
remote_filename,
filename_or_io,
remote_temp_filename=None, # ignored
print_output: bool = False,
print_input: bool = False,
**command_kwargs,
):
"""
Download a local file by copying it to a temporary location and then writing
it to our filename or IO object.
"""
_, temp_filename = mkstemp()
try:
# Copy the file using `cp` such that we support sudo/su
status, _, stderr = run_shell_command(
state,
host,
"cp {0} {1}".format(remote_filename, temp_filename),
print_output=print_output,
print_input=print_input,
**command_kwargs,
)
if not status:
raise IOError("\n".join(stderr))
# Load our file or IO object and write it to the temporary file
with open(temp_filename, encoding="utf-8") as temp_f:
with get_file_io(filename_or_io, "wb") as file_io:
data_bytes: bytes
data = temp_f.read()
if isinstance(data, str):
data_bytes = data.encode()
else:
data_bytes = data
file_io.write(data_bytes)
finally:
os.remove(temp_filename)
if print_output:
click.echo(
"{0}file copied: {1}".format(host.print_prefix, remote_filename),
err=True,
)
return True | [
19,
171
] |
def METHOD_NAME(self):
result = self.test_send_file()
pubnub.download_file().\
channel(CHANNEL).\
file_id(result.file_id).\
file_name(result.name).pn_async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNDownloadFileResult)
assert self.response.data.decode("utf-8") == self.file_upload_test_data["FILE_CONTENT"] | [
9,
353,
61,
136,
171
] |
async def METHOD_NAME(
self,
wallet_id: int,
spend: CoinSpend,
height: uint32,
) -> None:
"""
Appends (or replaces) entries in the DB. The new list must be at least as long as the existing list, and the
parent of the first spend must already be present in the DB. Note that this is not committed to the DB
until db_wrapper.commit() is called. However, it is written to the cache, so it can be fetched with
get_all_state_transitions.
"""
async with self.db_wrapper.writer_maybe_transaction() as conn:
# find the most recent transition in wallet_id
rows = list(
await conn.execute_fetchall(
"SELECT transition_index, height, coin_spend "
"FROM pool_state_transitions "
"WHERE wallet_id=? "
"ORDER BY transition_index DESC "
"LIMIT 1",
(wallet_id,),
)
)
serialized_spend = bytes(spend)
if len(rows) == 0:
transition_index = 0
else:
existing = list(
await conn.execute_fetchall(
"SELECT COUNT(*) "
"FROM pool_state_transitions "
"WHERE wallet_id=? AND height=? AND coin_spend=?",
(wallet_id, height, serialized_spend),
)
)
if existing[0][0] != 0:
# we already have this transition in the DB
return
row = rows[0]
if height < row[1]:
raise ValueError("Height cannot go down")
prev = CoinSpend.from_bytes(row[2])
if spend.coin.parent_coin_info != prev.coin.name():
raise ValueError("New spend does not extend")
transition_index = row[0]
cursor = await conn.execute(
"INSERT OR IGNORE INTO pool_state_transitions VALUES (?, ?, ?, ?)",
(
transition_index + 1,
wallet_id,
height,
serialized_spend,
),
)
await cursor.close() | [
238,
10840
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self):
self.scriptedEffect.setParameter("ObjectScaleMm", self.objectScaleMmSlider.value) | [
86,
5243,
280,
2139
] |
def METHOD_NAME(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
scope_map_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScopeMapResult:
"""
Gets the properties of the specified scope map.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str scope_map_name: The name of the scope map.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['scopeMapName'] = scope_map_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20221201:getScopeMap', __args__, opts=opts, typ=GetScopeMapResult).value
return AwaitableGetScopeMapResult(
actions=pulumi.get(__ret__, 'actions'),
creation_date=pulumi.get(__ret__, 'creation_date'),
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type')) | [
19,
913,
422
] |
def METHOD_NAME(self):
poly = self.setup()
p = np.array([2, 2])
inside = pp.geometry_property_checks.point_in_polygon(poly, p)
self.assertTrue(not inside[0]) | [
9,
261
] |
def METHOD_NAME(topology_st, request):
"""Create a user and make sure ou=pople exists
"""
sys.stdout = io.StringIO()
users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
users.ensure_state(properties=TEST_USER_PROPERTIES)
ou = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
ou.ensure_state(properties={'ou': 'people'}) | [
74,
102
] |
def METHOD_NAME(self, node=None):
"""Check that a user is able to execute `DROP SETTINGS PROFILE` with privileges are granted directly.
"""
user_name = f"user_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"):
Suite(run=drop_settings_profile,
examples=Examples("privilege grant_target_name user_name", [
tuple(list(row)+[user_name,user_name]) for row in drop_settings_profile.examples
], args=Args(name="privilege={privilege}", format_name=True))) | [
7969,
2321,
2322
] |
def METHOD_NAME(self):
return """\
x
Numeric vector, representing the X coordinate for each
vertex.
y
Numeric vector, representing the Y coordinate for each
vertex.
z
Numeric vector, representing the Z coordinate for each
vertex.
""" | [
1302,
1303
] |
def METHOD_NAME(
precision: None | SupportsIndex = ...,
threshold: None | int = ...,
edgeitems: None | int = ...,
linewidth: None | int = ...,
suppress: None | bool = ...,
nanstr: None | str = ...,
infstr: None | str = ...,
formatter: None | _FormatDict = ...,
sign: Literal[None, "-", "+", " "] = ...,
floatmode: None | _FloatMode = ...,
*,
legacy: Literal[None, False, "1.13", "1.21"] = ...
) -> _GeneratorContextManager[_FormatOptions]: ... | [
5228
] |
def METHOD_NAME(basename):
fullpath = join_tmp(basename)
return (
os.path.isdir(fullpath)
and basename.startswith(TMPDIR_PREFIX)
and os.access(fullpath, os.R_OK)
) | [
137,
1205,
9,
4015
] |
def METHOD_NAME(data):
try:
data = validate_format(data)
except Exception as e:
return
labels = data[0]
max_label_len = reduce(lambda x,y: max(x,len(y)), labels, 0)+1
data = data[1:]
subsequent_indent = ""
for i in range(max_label_len+3):
subsequent_indent += " "
fmt = " %-"+str(max_label_len)+"s "
for j, d in enumerate(data):
print("-")
for i, label in enumerate(labels):
val = '\n'.join(wrap(convert(d[i] or ""),
initial_indent = "",
subsequent_indent = subsequent_indent,
width=78
))
try:
print(utilities.render.color.colorize(fmt % (label+":"), utilities.render.color.color.LIGHTBLUE), val)
except UnicodeEncodeError:
print(utilities.render.color.colorize(fmt % (label+":"), utilities.render.color.color.LIGHTBLUE), val.encode("utf-8")) | [
38,
410,
235
] |
def METHOD_NAME(self, apps):
"""
Register non-plugins - i.e. modules that do not have a KolibriPluginBase derived
class in their kolibri_plugin.py module - these cannot be enabled and disabled
by the Kolibri plugin machinery, but may wish to still register Kolibri Hooks
"""
for app in apps:
app = parse_installed_app_entry(app)
if app not in self._apps:
try:
initialize_kolibri_plugin(app)
# Raise an error here because non-plugins should raise a PluginDoesNotExist exception
# if they are properly configured.
raise PluginExistsInApp(
"Django app {} contains a plugin definition".format(app)
)
except MultiplePlugins:
raise PluginExistsInApp(
"Django app {} contains multiple plugin definitions".format(app)
)
except PluginDoesNotExist:
# Register so that we don't do this twice.
self._apps[app] = None | [
372,
256,
1294
] |
def METHOD_NAME(self, js, video_id, timestamp):
data = self._parse_json(js, video_id, transform_source=js_to_json)
title = unescapeHTML(data['title'])
live_starter = try_get(data, lambda x: x['plugins']['liveStarter'], dict)
if live_starter:
data.update(live_starter)
formats = []
for tracks in data.get('tracks', {}).values():
for video in tracks:
video_url = video.get('src')
if not video_url:
continue
video_type = video.get('type')
ext = determine_ext(video_url, mimetype2ext(video_type))
if video_type == 'application/vnd.apple.mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
elif video_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
video_url, video_id, mpd_id='dash', fatal=False))
else:
label = video.get('label')
height = self._search_regex(
r'^(\d+)[pP]', label or '', 'height', default=None)
format_id = ['http']
for f in (ext, label):
if f:
format_id.append(f)
formats.append({
'url': video_url,
'format_id': '-'.join(format_id),
'height': int_or_none(height),
})
self._sort_formats(formats)
return {
'id': data.get('mediaid') or video_id,
'title': title,
'description': data.get('description'),
'thumbnail': data.get('image'),
'duration': int_or_none(data.get('duration')),
'timestamp': int_or_none(timestamp),
'formats': formats
} | [
214,
1781,
773
] |
def METHOD_NAME(self, sheet_name: Any): ... | [
4501,
604,
156
] |
def METHOD_NAME(self):
categories = {'all': _('All'), 'changed': _('Changed')}
for setting, kwargs in self._registry.items():
category_slug = kwargs.get('category_slug', None)
if category_slug is None or category_slug in categories:
continue
if category_slug == 'user':
categories['user'] = _('User')
categories['user-defaults'] = _('User-Defaults')
else:
categories[category_slug] = kwargs.get('category', None) or category_slug
return categories | [
19,
3024,
2065
] |
def METHOD_NAME(self, *arg, **kw): | [
2671,
171
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(mock_agent_event_publisher):
return MSSQLFingerprinter(AGENT_ID, mock_agent_event_publisher) | [
-1
] |
def METHOD_NAME():
c = topology.Layer(thickness=150, stack_index=0)
r = topology.Stack(children=[c])
topology.create_topology([r], np.array([0, 0, 0]), np.array([100, 100, 100]))
return r, c | [
97,
94
] |
def METHOD_NAME(test_file_or_folder):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", "test_results", test_file_or_folder) | [
19,
9,
146
] |
def METHOD_NAME(diff_entry, new_object_hash):
check_call(
[
"git",
"update-index",
"--cacheinfo",
"%s,%s,%s"
% (diff_entry["dst_mode"], new_object_hash, diff_entry["src_path"]),
]
) | [
86,
171,
724
] |
def METHOD_NAME(command_id: str) -> Dict[str, Any]:
return get_task_info("command", command_id) | [
19,
462,
100
] |
def METHOD_NAME(tmp_path, inventory):
file = tmp_path / "foobar.lxd.yml"
file.touch()
assert inventory.verify_file(str(file)) is True | [
9,
1162,
171
] |
def METHOD_NAME(self, p):
"""
Insert input of this op
also maintain the output of the previous op
p: a node or a list of nodes
"""
if isinstance(p, list):
for i in p:
self.prev[i.name] = i
i.ops[self.name] = self
elif isinstance(p, NetDefNode):
self.prev[p.name] = p
p.ops[self.name] = self | [
408,
362
] |
def METHOD_NAME(self):
# HACK: we still want to build modules when forcing static we just ignore them
result = super().METHOD_NAME
if "-static" in result:
result.remove("-static")
return result | [
235,
-1
] |
def METHOD_NAME(self):
"""
Descript. :
"""
return | [
263,
954
] |
def METHOD_NAME(self):
"""test scenario for TrunkedNetwork CRUD operations"""
call_scenario1(self) | [
9,
-1,
7851
] |
def METHOD_NAME(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11]) | [
9,
324,
1345
] |
def METHOD_NAME(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
biases_initializer=tf.constant_initializer(0.1),
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc | [
10981,
820,
718,
913
] |
def METHOD_NAME():
i: i32
i = isqrt(15)
assert i == 3 | [
9,
17199
] |
def METHOD_NAME(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self, comp):
self.__target_comp = comp | [
0,
1030,
1653
] |
def METHOD_NAME(self) -> CDLL:
self.session._setSessionId() # noqa
return self._libraryInstance.lib # type: ignore | [
124
] |
def METHOD_NAME():
# all have short alias
all_alias = []
for field in _ContentWithShortNames.__fields__.values():
assert field.alias
assert field.alias not in all_alias
all_alias.append(field.alias) | [
9,
75,
4568,
342,
384,
1707,
61
] |
def METHOD_NAME(
user_api_client, checkout_with_items
):
checkout = checkout_with_items
line = checkout.lines.first()
second_line = checkout.lines.last()
first_line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
second_line_id = graphene.Node.to_global_id("CheckoutLine", second_line.pk)
lines_list = [first_line_id, second_line_id]
variables = {
"id": graphene.Node.to_global_id("Checkout", checkout.pk),
"linesIds": lines_list,
}
checkout.delete()
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, variables)
content = get_graphql_content(response)
errors = content["data"]["checkoutLinesDelete"]["errors"][0]
assert errors["code"] == CheckoutErrorCode.NOT_FOUND.name | [
9,
2170,
513,
34,
532,
2170,
147
] |
def METHOD_NAME(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsEntityExtractor
"""
return super().METHOD_NAME | [
1461
] |
def METHOD_NAME(self):
"""Run for an appropriate number of steps to improve the current value.
If self.full is True, will run until no further improvements can
be found.
"""
if self.short_circuit():
return
if self.full:
prev = -1
while self.changes != prev:
prev = self.changes
self.run_step()
else:
self.run_step()
self.debug("COMPLETE") | [
22
] |
def METHOD_NAME() -> Gdk.RGBA:
"""
Generate a new random color. Alpha is always 1.
"""
return RGBA(random.uniform(0.0, 1.0),
random.uniform(0.0, 1.0),
random.uniform(0.0, 1.0),
1.0) | [
236,
36
] |
def METHOD_NAME(self) -> None:
self.assertEqual(job_run_result.JobRunResult.accumulate([]), []) | [
9,
6294,
41,
35,
245
] |
def METHOD_NAME(self):
assert hasattr(self, "_onedal_estimator")
self.labels_ = self._onedal_estimator.labels_
self.core_sample_indices_ = self._onedal_estimator.core_sample_indices_
self.components_ = self._onedal_estimator.components_
self.n_features_in_ = self._onedal_estimator.n_features_in_ | [
73,
177
] |
def METHOD_NAME(self): | [
567,
795
] |
def METHOD_NAME(self):
self.tb = gr.top_block() | [
0,
1
] |
def METHOD_NAME(self, get_nodes_plugin):
return [get_nodes_plugin, GetNodesPlugin(project_name="test2")] | [
19,
480,
1294
] |
def METHOD_NAME(self):
f = gen.tools.generate_base_points
pts = f(20, domain_size=[1, 1, 1], reflect=False)
assert pts.shape == (20, 3)
pts = f(20, domain_size=[1, 1, 1], reflect=True)
assert pts.shape == (140, 3)
pts = f(20, domain_size=[1, 1, 0], reflect=False)
assert pts.shape == (20, 3)
pts = f(20, domain_size=[1, 1, 0], reflect=True)
assert pts.shape == (100, 3)
pts = f(20, domain_size=[1, 1], reflect=False)
assert pts.shape == (20, 3)
pts = f(20, domain_size=[1, 1], reflect=True)
assert pts.shape == (120, 3)
pts = f(20, domain_size=[1, 0], reflect=False)
assert pts.shape == (20, 3)
pts = f(20, domain_size=[1, 0], reflect=True)
assert pts.shape == (40, 3)
pts = f(20, domain_size=[1], reflect=False)
assert pts.shape == (20, 3)
pts = f(20, domain_size=[1], reflect=True)
assert pts.shape == (40, 3) | [
9,
567,
414,
182,
61,
6044
] |
def METHOD_NAME(function, factory,
mesh, bc=None, bc_zero=None, name=None,
ops=None, coeffs=None,
negative=False, functional=False):
def assemble_op(f, name):
coeff, params = f.to_fenics(mesh)
return FenicsMatrixBasedOperator(factory(coeff), params,
bc=bc, bc_zero=bc_zero, functional=functional, name=name)
if isinstance(function, LincombFunction):
operators = [assemble_op(f, f'{name}_{i}') for i, f in enumerate(function.functions)]
cfs = [-c if negative else c for c in function.coefficients]
if ops is not None:
ops.extend(operators)
coeffs.extend(cfs)
else:
return LincombOperator(operators, cfs, name=name)
elif function is not None:
operator = assemble_op(function, name)
if ops is not None:
ops.append(operator)
coeffs.append(-1 if negative else 1.)
else:
return -operator if negative else operator | [
1893,
837
] |
def METHOD_NAME(self):
eio_1 = EnkelvoudigInformatieObjectFactory.create()
eio_1_url = f"http://example.com{reverse(eio_1)}"
eio_2 = EnkelvoudigInformatieObjectFactory.create()
eio_2_url = f"http://example.com{reverse(eio_2)}"
GebruiksrechtenCMISFactory(informatieobject=eio_1_url)
GebruiksrechtenCMISFactory(informatieobject=eio_2_url)
response = self.client.get(reverse("gebruiksrechten-list"))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertTrue(
eio_1_url == response.data[0]["informatieobject"]
or eio_1_url == response.data[1]["informatieobject"]
)
self.assertTrue(
eio_2_url == response.data[0]["informatieobject"]
or eio_2_url == response.data[1]["informatieobject"]
) | [
9,
404,
107,
16909
] |
def METHOD_NAME(xml_list):
"""
Finds the difference between topology XML files, ignoring ordering and names in "connection" tags
Iterate through root tag elements
Create a dictionary with file_dict[tag] = [list of tag objects]
"""
if len(xml_list) < 2:
print("Less than two XML files were specified. Exiting.")
return
master_tag_dict = {}
for xml_path in xml_list:
# Create etree object
fd = open(xml_path, "r")
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
fd.close()
# Internal Parsing
xml_dict = recursive_xml_parse(element_tree.getroot())
for tag_obj in xml_dict[2]:
if tag_obj[0] == "connection":
tag_obj[1] = []
pickled_obj = pickle.dumps(tag_obj)
if pickled_obj not in master_tag_dict:
# del master_tag_dict[pickled_obj]
master_tag_dict[pickled_obj] = []
master_tag_dict[pickled_obj].append(xml_path)
# Separate by XML path
tag_to_object = {} # tag_to_object[xml_path] = [obj]
for pickled_obj in master_tag_dict:
if len(master_tag_dict[pickled_obj]) == 1:
if master_tag_dict[pickled_obj][0] not in tag_to_object:
tag_to_object[master_tag_dict[pickled_obj][0]] = []
tag_to_object[master_tag_dict[pickled_obj][0]].append(pickled_obj)
for xml_path in tag_to_object:
print(xml_path + "\n")
# sort pickled obj lists
tag_to_object[xml_path].sort()
for pickled_obj in tag_to_object[xml_path]:
tag_obj = pickle.loads(pickled_obj)
print(tag_object_to_string(tag_obj))
print("\n")
print("\n") | [
2443,
1537
] |
def METHOD_NAME(version_info, in_develop=True):
version = '.'.join(str(v) for v in version_info[:3])
if not in_develop and version_info[3] != 'final':
version += version_info[3][0] + str(version_info[4])
return version | [
7810,
281
] |
def METHOD_NAME(ast_lifted):
'''
Recursively resolve all setting references to concrete values.
There's not a lot of type safety in this resolution process because the
attributes themselves inherently aren't strongly typed. We rely on the type
checking done when freeze is called on an assembly.
'''
to_resolve = []
assembly = ast_lifted.assembly
new_settings = []
for s in assembly.configuration.settings:
if isinstance(s.value, AttributeReference):
to_resolve.append(s)
else:
new_settings.append(s)
def sub_resolve(setting, depth):
'''
Recursive function for resolving setting references. If a setting
is a reference to another setting we try and resolve the next one (depth first). If there
is a circular reference we error out. If any setting doesn't have
a setting to resolve to, but has a default attribute value we use that.
If there is no default and no setting then we 'forget' the alias so that we
can fall back to defaults that other attributes have. An attribute that doesn't
get a setting or a default will not generate a symbol in the generated template.
Thus if the attribute requires a setting the code compiler should generate an error later.
'''
if setting.value.reference in depth:
errstring = ""
for value in depth:
errstring += value + "<-"
raise ParseError('Loop detected in attribute references: %s<-...'
% (errstring+setting.value.reference), setting.location)
# TODO Refactor AttributeReference to handle namespacing better than a
# string containing '.' characters.
instance_name, attribute_name = setting.value.reference.rsplit('.', 1)
referents = [x for x in assembly.configuration.settings
if x.instance == instance_name and
x.attribute == attribute_name]
if len(referents) == 0:
# No existing settings for the attribute that our current attribute
# refers to. Check if it has a default value and use that.
attribute = assembly.get_attribute(instance_name, attribute_name)
if attribute is not None and attribute.default is not None:
setting.value = attribute.default
return True
# If we didn't find a default, then try and use our own default if we have one
attribute = assembly.get_attribute(setting.instance, setting.attribute)
if attribute is not None and attribute.default is not None:
setting.value = attribute.default
return True
setting.value = None
return False
elif len(referents) > 1:
raise ParseError('setting refers to an attribute that '
'is set multiple times', setting.location)
if isinstance(referents[0].value, AttributeReference):
if not sub_resolve(referents[0], depth + [setting.value.reference]):
setting.value = None
return False
setting.value = referents[0].value
elif setting.value.dict_lookup and isinstance(referents[0].value, dict):
value = referents[0].value
for key in setting.value.dict_lookup.lookup:
value = value[key]
setting.value = value
else:
setting.value = referents[0].value
return True
# Iterate through each setting we need to resolve
for setting in to_resolve:
if isinstance(setting.value, AttributeReference):
if sub_resolve(setting, []):
new_settings.append(setting)
elif setting.value != None:
# Already resolved
new_settings.append(setting)
assembly.configuration.settings = new_settings
assembly.claim_children() | [
1014
] |
def METHOD_NAME(self, issue):
self.data = (type(self).__name__,) + tuple(map(lambda t: str(t.children[0]), self.grammar.METHOD_NAME(issue.title).children))
self.body = issue.body | [
214
] |
def METHOD_NAME(x): return 1 | [
10027
] |
def METHOD_NAME(self, node: AsyncFunctionDef) -> None:
self.visit_FunctionDef(node) | [
716,
958,
559,
2483
] |
def METHOD_NAME(self, path):
"""Return full path to an Info.plist if 'path' is actually a bundle,
otherwise None."""
bundle_info_path = None
if os.path.isdir(path):
test_info_path = os.path.join(path, "Contents/Info.plist")
if os.path.exists(test_info_path):
try:
with open(test_info_path, "rb") as f:
plist = plistlib.load(f)
except Exception:
raise ProcessorError(
f"File {path} looks like a bundle, but its "
"'Contents/Info.plist' file cannot be parsed."
)
if plist:
bundle_info_path = test_info_path
return bundle_info_path | [
19,
1727,
100,
157
] |
def METHOD_NAME(self):
# We don't know if the task in question already exists, so we try a few times. Checking
# beforehand would not be an option because a concurrent unit test could potentially
# create the same task and make this unit test fail (i.e. getting a dataset and creating
# a task for it is not atomic).
compatible_datasets = self._get_compatible_rand_dataset()
for i in range(100):
try:
dataset_id = compatible_datasets[i % len(compatible_datasets)]
# TODO consider implementing on the diff task types.
task = create_task(
task_type=self.task_type,
dataset_id=dataset_id,
target_name=self._get_random_feature(dataset_id),
estimation_procedure_id=self.estimation_procedure,
)
task.publish()
TestBase._mark_entity_for_removal("task", task.id)
TestBase.logger.info(
"collected from {}: {}".format(__file__.split("/")[-1], task.id)
)
# success
break
except OpenMLServerException as e:
# Error code for 'task already exists'
# Should be 533 according to the docs
# (# https://www.openml.org/api_docs#!/task/post_task)
if e.code == 614:
continue
else:
raise e
else:
raise ValueError(
"Could not create a valid task for task type ID {}".format(self.task_type)
) | [
9,
172,
758
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(self, old, new):
if new > old:
for i in range(old, new):
self._add_row(i)
else:
for i in range(new, old):
self._remove_row(i)
self.request_redraw() | [
1346,
1180
] |
def METHOD_NAME(t_block, reference_run):
"""
Test that the ``voltmeter`` and ``spike_recorder`` yield identical results independent simulation time blocking.
"""
srs, vms = build_net()
with nest.RunManager():
while nest.biological_time < total_sim_time:
nest.Run(t_block)
srs_times = srs.get("events", "times")
vms_recs = vms.get("events", "V_m")
srs_reference, vms_reference = reference_run
# Test that recorders give identical results independent of simulation time blocking
nptest.assert_array_equal(srs_reference, srs_times)
nptest.assert_array_equal(vms_reference, vms_recs) | [
9,
944,
61,
2457,
3117,
1101,
146
] |
def METHOD_NAME(self,
out_dir,
filename_tmpl='epoch_{}.pth',
save_optimizer=True,
meta=None,
create_symlink=True):
"""Save the checkpoint.
Args:
out_dir (str): The directory that checkpoints are saved.
filename_tmpl (str, optional): The checkpoint filename template,
which contains a placeholder for the epoch number.
Defaults to 'epoch_{}.pth'.
save_optimizer (bool, optional): Whether to save the optimizer to
the checkpoint. Defaults to True.
meta (dict, optional): The meta information to be saved in the
checkpoint. Defaults to None.
create_symlink (bool, optional): Whether to create a symlink
"latest.pth" to point to the latest checkpoint.
Defaults to True.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
# Note: meta.update(self.meta) should be done before
# meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
# there will be problems with resumed checkpoints.
# More details in https://github.com/open-mmlab/mmcv/pull/1108
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
METHOD_NAME(self.model, filepath, optimizer=optimizer, meta=meta)
# in some environments, `os.symlink` is not supported, you may need to
# set `create_symlink` to False
if create_symlink:
dst_file = osp.join(out_dir, 'latest.pth')
if platform.system() != 'Windows':
mmcv.symlink(filename, dst_file)
else:
shutil.copy(filepath, dst_file) | [
73,
1830
] |
def METHOD_NAME(inputs: Iterable[Tuple[str, pd.DataFrame]]) -> str:
"""
Returns a data profiling string over input data frame.
:param inputs: Either a single "glimpse" DataFrame that contains the statistics, or a
collection of (title, DataFrame) pairs where each pair names a separate "glimpse"
and they are all visualized in comparison mode.
:return: a data profiling string such as Pandas profiling ProfileReport.
"""
truncated_input = [truncate_pandas_data_profile(*input) for input in inputs]
return pandas_renderer.get_html(truncated_input) | [
19,
2842,
365,
1348
] |
def METHOD_NAME(self):
"""
L{fdesc.setNonBlocking} sets a file description to non-blocking.
"""
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
self.assertFalse(fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK)
fdesc.setNonBlocking(r)
self.assertTrue(fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK) | [
9,
0,
256,
5999
] |
def METHOD_NAME(key, shape):
import numpy as np
x, y = np.ogrid[:shape[0], :shape[1]]
x = 2* x.astype('f')/shape[0] - 1
y = 2* y.astype('f')/shape[1] - 1
x = x - x.mean()
y = y - y.mean()
c = x + 1j*y
r = np.abs(c)
theta = np.angle(c)
return zernike(key, r, theta)*(r<1) | [
17403,
4033
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag") | [
431
] |
def METHOD_NAME(self) -> Dict[str, Any]:
responses = [message for (method, message) in self.transcript]
self.ensure_unique_response(responses)
return responses[0] | [
2768,
17
] |
def METHOD_NAME(self): | [
9,
2346,
99,
168,
2927,
3875
] |
def METHOD_NAME(routes: RouteTableDef) -> str:
fh = io.StringIO()
print(routes, file=fh)
for r in routes:
print(" ", r, file=fh)
return fh.getvalue() | [
19,
3968,
1179
] |
def METHOD_NAME(self):
for license in ("LICENSE", "NOTICE"):
copy(self, license, src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "share"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "cmake")) | [
360
] |
def METHOD_NAME(self):
tc = CMakeToolchain(self)
defs = self._public_defines
defs["TRACE"] = self.options.trace
# TODO: add as options and set libraries correctly
defs["LAPACK"] = False
defs["MKL"] = False
# Set default values
defs["SVR_REUSEADDR"] = True # reuse tcp server address
defs["NOCALLOC"] = False # use calloc for zero matrix
defs["CPUTIME_IN_GPST"] = False # cputime operated in gpst
defs["RRCENA"] = False # enable rrc correction
defs["OUTSTAT_AMB"] = False # output ambiguity parameters to solution status
defs["IERS_MODEL"] = False # use IERS tide model
if is_apple_os(self):
# Add baud rates missing from termios.h for stream.c
defs["B460800"] = 460800
defs["B921600"] = 921600
for k, v in defs.items():
if type(v) in (str, int):
tc.preprocessor_definitions[k] = v
elif v:
tc.preprocessor_definitions[k] = ""
tc.METHOD_NAME()
tc = CMakeDeps(self)
tc.METHOD_NAME() | [
567
] |
def METHOD_NAME(self):
pass | [
3160
] |
def METHOD_NAME(self):
pass | [
72,
479
] |
def METHOD_NAME(self, name: str, item: Any) -> None:
"""
Store the :code:`item` in this dataset with the name :code:`name` so it can be used in
:code:`__getitem__`. That is, you can retrieve the :code:`item` with the :code:`takes` argument
of :obj:`add_dynamic_item`.
.. code-block:: python
def tokenize_func(tokenizer, text):
return tokenizer(text)
self.add_tool("tokenizer", tokenizer)
self.add_dynamic_item(tokenize_func, takes=["tokenizer", "text"], provides="tokenized_ids")
You can also later retrieve this tool by :obj:`get_tool` or :obj:`all_tools`
"""
self._tools[name] = item
self.add_dynamic_item(
partial(self._dynamic_tools, name=name), takes="id", provides=name
) | [
238,
3081
] |
def METHOD_NAME(self):
self.check('posprec',[1,2],[0,0],
"(int,long) (long,int)") | [
9,
-1
] |
def METHOD_NAME(self, cfg_transform):
scale = 1.
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
for item in cfg_transform:
if 'NormalizeImage' in item:
mean = np.array(
item['NormalizeImage']['mean'], dtype=np.float32)
std = np.array(item['NormalizeImage']['std'], dtype=np.float32)
if item['NormalizeImage'].get('is_scale', True):
scale = 1. / 255.
break
if self.data_format == 'NHWC':
self.scale = paddle.to_tensor(scale / std).reshape((1, 1, 1, 3))
self.bias = paddle.to_tensor(-mean / std).reshape((1, 1, 1, 3))
else:
self.scale = paddle.to_tensor(scale / std).reshape((1, 3, 1, 1))
self.bias = paddle.to_tensor(-mean / std).reshape((1, 3, 1, 1)) | [
557,
18140
] |
def METHOD_NAME(self, body: bytes | None = None, set_content_md5: bool = False) -> None: ... | [
4179,
431
] |
def METHOD_NAME():
target = Basket(fruits__banana=Fruit())
target = target.bind(request=None)
with pytest.raises(InvalidEndpointPathException) as e:
perform_post_dispatch(root=target, path='/banana', value='')
assert str(e.value) == "Target <tests.helpers.Fruit banana (bound) path:'banana'> has no registered post_handler" | [
9,
407,
72,
2506,
168,
277
] |
def METHOD_NAME(self):
# failure
with self.assertRaises(AssertionError):
self.test_instance.with_img_type(1234)
# success
result = self.test_instance.with_img_type(ImageTypeEnum.PNG)
self.assertEqual(8, len(result))
result = self.test_instance.with_img_type(ImageTypeEnum.GEOTIFF)
self.assertEqual(4, len(result)) | [
9,
41,
2029,
44
] |
def METHOD_NAME(self):
return self.PORT_START | [
237,
447
] |
def METHOD_NAME() -> torch.utils.data.Dataset:
training_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
training_data = torch.Tensor(training_data)
training_labels = np.array([0, 1, 1, 0], dtype=np.float32)
training_labels = torch.Tensor(training_labels)
return torch.utils.data.TensorDataset(training_data, training_labels) | [
93,
126
] |
def METHOD_NAME(self) -> _CommandResults: ... | [
8570
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Path to the module containing this hard fork.
"""
return getattr(self.mod, "__path__", None) | [
157
] |
def METHOD_NAME(s):
return '\x1b[33m{}\x1b[0m'.format(s) | [
16334
] |
def METHOD_NAME(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [
dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10),
]
self.scales = [
dict(
name="x",
domain=props["xlim"],
type="linear",
range="width",
),
dict(
name="y",
domain=props["ylim"],
type="linear",
range="height",
),
] | [
1452,
7272
] |
def METHOD_NAME(self):
"""
A method executed at the end of each model initialization, to execute code that needs the model's
modules properly initialized (such as weight initialization).
"""
self.init_weights() | [
72,
176
] |
def METHOD_NAME():
forwarded_for = request.headers.getlist("X-Forwarded-For")
if len(forwarded_for):
return forwarded_for[0]
return request.remote_addr | [
19,
2437,
1213
] |
def METHOD_NAME(self):
origin_query_text = "SELECT * FROM fake WHERE id IN (SELECT id FROM fake_2 LIMIT 200) LIMIT 200"
query_text = self.query_runner.apply_auto_limit(origin_query_text, True)
self.assertEqual(origin_query_text, query_text) | [
9,
231,
803,
1467,
1788,
384,
1467
] |
def METHOD_NAME(layer, func):
for name, child in layer.named_children():
func(child)
METHOD_NAME(child, func) | [
231
] |
def METHOD_NAME(**kwargs) -> None:
"""Prints kwargs that have same cell."""
first_letters = [join_first_letters(k) for k in kwargs]
if len(set(first_letters)) != len(first_letters):
print(
f"Possible name collision! {kwargs.keys()} "
f"repeats first letters {first_letters}"
"you can separate your arguments with underscores"
" (delta_length -> DL, delta_width -> DW"
) | [
38,
865,
6503,
3437
] |
async def METHOD_NAME(
concurrency_limit: schemas.actions.ConcurrencyLimitCreate,
response: Response,
db: PrefectDBInterface = Depends(provide_database_interface), | [
129,
6050,
1467
] |
def METHOD_NAME(coverage_obj) -> str:
report_data = io.StringIO()
coverage_obj.report(file=report_data)
return report_data.getvalue() | [
19,
339,
3
] |
def METHOD_NAME(self, hit):
if hasattr(hit.meta, "highlight"):
for key in hit.meta.highlight:
yield from hit.meta.highlight[key] | [
19,
8186
] |
def METHOD_NAME(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.event_hub_authorization_rule_id = AAZStrType(
serialized_name="eventHubAuthorizationRuleId",
)
properties.event_hub_name = AAZStrType(
serialized_name="eventHubName",
)
properties.logs = AAZListType()
properties.service_bus_rule_id = AAZStrType(
serialized_name="serviceBusRuleId",
)
properties.storage_account_id = AAZStrType(
serialized_name="storageAccountId",
)
properties.workspace_id = AAZStrType(
serialized_name="workspaceId",
)
logs = cls._schema_on_200.properties.logs
logs.Element = AAZObjectType()
_element = cls._schema_on_200.properties.logs.Element
_element.category = AAZStrType()
_element.enabled = AAZBoolType(
flags={"required": True},
)
return cls._schema_on_200 | [
56,
135,
69,
1072
] |
def METHOD_NAME(self):
"""
tests desispec.scripts.submit_night.get_completed_tiles(testfile)
to ensure that the tile selection logic matches the expectations.
The test varies ZDONE, SURVEY, EFFTIME_SPEC, and FAPRGRM.
"""
from desispec.scripts.submit_night import get_completed_tiles
rows = []
tiles_truth = []
row_names = ['ZDONE', 'SURVEY', 'EFFTIME_SPEC', 'GOALTIME',
'MINTFRAC', 'FAPRGRM', 'TILEID']
## Test zdone always gives true
## nominal dark
rows.append(['true', 'main', 1000., 1000., 0.85, 'dark', 1]) # pass
rows.append(['true', 'sv', 1000., 1000., 0.85, 'dark', 2]) # pass
## med efftime backup
rows.append(['true', 'main', 500., 1000., 0.85, 'dark', 3]) # pass
rows.append(['true', 'sv', 500., 1000., 0.85, 'dark', 4]) # pass
## low efftime dark
rows.append(['true', 'main', 10., 1000., 0.85, 'dark', 5]) # pass
rows.append(['true', 'sv', 10., 1000., 0.85, 'dark', 6]) # pass
## nominal bright
rows.append(['true', 'main', 180., 180., 0.85, 'bright', 7]) # pass
rows.append(['true', 'sv', 180., 180., 0.85, 'bright', 8]) # pass
## med efftime backup
rows.append(['true', 'main', 90., 180., 0.85, 'bright', 9]) # pass
rows.append(['true', 'sv', 90., 180., 0.85, 'bright', 10]) # pass
## low efftime bright
rows.append(['true', 'main', 10., 180., 0.85, 'bright', 11]) # pass
rows.append(['true', 'sv', 10., 180., 0.85, 'bright', 12]) # pass
## nominal backup
rows.append(['true', 'main', 60., 60., 0.85, 'backup', 13]) # pass
rows.append(['true', 'sv', 60., 60., 0.85, 'backup', 14]) # pass
## med efftime backup
rows.append(['true', 'main', 30., 60., 0.85, 'backup', 15]) # pass
rows.append(['true', 'sv', 30., 60., 0.85, 'backup', 16]) # pass
## low efftime backup
rows.append(['true', 'main', 3., 60., 0.85, 'backup', 17]) # pass
rows.append(['true', 'sv', 3., 60., 0.85, 'backup', 18]) # pass
tiles_truth.extend(list(range(1,19)))
## Test other criteria when zdone false
## nominal dark
rows.append(['false', 'main', 1000., 1000., 0.85, 'dark', 21]) # fail
rows.append(['false', 'sv', 1000., 1000., 0.85, 'dark', 22]) # pass
## med efftime backup
rows.append(['false', 'main', 500., 1000., 0.85, 'dark', 23]) # fail
rows.append(['false', 'sv', 500., 1000., 0.85, 'dark', 24]) # pass
## low efftime dark
rows.append(['false', 'main', 10., 1000., 0.85, 'dark', 25]) # fail
rows.append(['false', 'sv', 10., 1000., 0.85, 'dark', 26]) # fail
## nominal bright
rows.append(['false', 'main', 180., 180., 0.85, 'bright', 27]) # fail
rows.append(['false', 'sv', 180., 180., 0.85, 'bright', 28]) # pass
## med efftime backup
rows.append(['false', 'main', 90., 180., 0.85, 'bright', 29]) # fail
rows.append(['false', 'sv', 90., 180., 0.85, 'bright', 30]) # pass
## low efftime bright
rows.append(['false', 'main', 10., 180., 0.85, 'bright', 31]) # fail
rows.append(['false', 'sv', 10., 180., 0.85, 'bright', 32]) # fail
## nominal backup
rows.append(['false', 'main', 60., 60., 0.85, 'backup', 33]) # pass
rows.append(['false', 'sv', 60., 60., 0.85, 'backup', 34]) # pass
## med efftime backup
rows.append(['false', 'main', 30., 60., 0.85, 'backup', 35]) # pass
rows.append(['false', 'sv', 30., 60., 0.85, 'backup', 36]) # pass
## low efftime backup
rows.append(['false', 'main', 3., 60., 0.85, 'backup', 37]) # fail
rows.append(['false', 'sv', 3., 60., 0.85, 'backup', 38]) # fail
tiles_truth.extend([22, 24, 28, 30, 33, 34, 35, 36])
test_table = Table(names=row_names, rows=rows)
testfile = f'test-{uuid4().hex}.ecsv'
test_table.write(testfile, overwrite=True)
tiles_test = list(get_completed_tiles(testfile))
if os.path.exists(testfile):
os.remove(testfile)
self.assertListEqual(tiles_truth, tiles_test) | [
9,
19,
3097,
299
] |
def METHOD_NAME(self) -> None:
with self.assertRaisesRegex(
Exception,
(
"manifest file test section 'dependencies' has "
"'foo = bar' but this section doesn't allow "
"specifying values for its entries"
),
):
ManifestParser(
"test",
""" | [
9,
99,
623,
2410,
1287
] |
def METHOD_NAME(self):
addon = addon_factory(summary='')
item = DiscoveryItem.objects.create(addon=addon)
assert self.serialize(item)['description_text'] is None
with override_settings(DRF_API_GATES={'v5': ('l10n_flat_input_output',)}):
assert self.serialize(item)['description_text'] == ''
# with a lang specified
with self.activate('fr'):
item.reload()
assert self.serialize(item, 'fr')['description_text'] is None
with override_settings(DRF_API_GATES={'v5': ('l10n_flat_input_output',)}):
assert self.serialize(item, 'fr')['description_text'] == '' | [
9,
1067,
526,
35
] |
def METHOD_NAME(self):
"""Ensure handlers initialize fine without real input/output files"""
for handler in [AbortHandler, FrozenJobErrorHandler, NumericalPrecisionHandler, UnconvergedScfErrorHandler]:
handler() | [
9,
1519,
11279
] |
def METHOD_NAME(url: str, dest: str = "."):
subprocess.Popen(f"wget -N {url} -P /tmp".split()).wait()
file_name = url.split("/")[-1]
file_path = f"/tmp/{file_name}"
assert os.path.exists(file_path)
bin_path = None
if file_name.endswith(".bin"):
os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IEXEC)
subprocess.Popen(f"{file_path} -d {dest}".split()).wait()
bin_path = f"{dest}/bin"
elif file_name.endswith(".tgz"):
with tarfile.open(f"/tmp/{file_name}") as tar:
tar.extractall(dest)
rel_bin_path = next(
member.name for member in tar.getmembers() if "bin" in member.name)
bin_path = os.path.join(dest, rel_bin_path)
elif file_name.endswith(".jar"):
shutil.copyfile(file_path, os.path.join(dest, file_name))
if bin_path is not None:
append_bashrc(f"export PATH:$PATH:{bin_path}") | [
1047,
893
] |
def METHOD_NAME(self):
self.login()
params = {"ses": self.ses}
response = self.request("progress", params)
return response | [
19,
3064
] |
def METHOD_NAME(delete_node, dqlite_ep):
if len(delete_node) > 0 and "127.0.0.1" not in delete_node[0]:
for ep in dqlite_ep:
try:
cmd = (
"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
"-k {dbdir}/cluster.key -f json k8s".format(
snappath=snap_path, dbdir=cluster_dir
).split()
)
cmd.append(".remove {}".format(delete_node[0]))
subprocess.check_output(cmd)
break
except Exception as err:
print("Contacting node {} failed. Error:".format(ep))
print(repr(err))
exit(2) | [
34,
14822,
1716
] |