text (stringlengths 15–7.82k) | ids (listlengths 1–7)
---|---|
def METHOD_NAME(model_id):
subprocess.Popen("heroku apps:destroy %s --confirm %s" % (model_id, model_id)) | [
2656
]
|
def METHOD_NAME(self):
# @TODO support eval here
expressions = {
# Unpacking non-sequence
'a, *b = 7': (TypeError, 'cannot unpack non-iterable int object'),
# Unpacking sequence too short
'a, *b, c, d, e = range(3)': (ValueError, 'not enough values to unpack (expected at least 4, got 3)'),
# Unpacking sequence too short and target appears last
'a, b, c, d, *e = range(3)' : (ValueError, 'not enough values to unpack (expected at least 4, got 3)'),
# Unpacking a sequence where the test for too long raises a different kind of error
'a, *b, c, d, e = BadSeq()' : (BozoError, ''),
# general tests all fail
'a, *b, c, *d, e = range(10)' : (SyntaxError, 'multiple starred expressions in assignment'),
'[*b, *c] = range(10)': (SyntaxError, 'multiple starred expressions in assignment'),
'a,*b,*c,*d = range(4)': (SyntaxError, 'multiple starred expressions in assignment'),
'*a = range(10)': (SyntaxError, 'starred assignment target must be in a list or tuple'),
'*a' : (SyntaxError, 'can\'t use starred expression here'),
'*1': (SyntaxError, 'can\'t use starred expression here'),
'x = *a': (SyntaxError, 'can\'t use starred expression here'),
}
jseval("Sk.retainGlobals = true") # use globals from this module
# use this to test syntax errors and their respective messages
eval_alt = "Sk.importMainWithBody('test_unpack', false, '{0}', true)"
for expr, (error, msg) in expressions.items():
try:
jseval(eval_alt.format(expr))
except error as e:
self.assertIn(msg, str(e))
else:
self.fail(f'{error} not raised for {expr}')
jseval("Sk.retainGlobals = false") | [
9,
789,
216
]
|
def METHOD_NAME(gridfile):
"""
Run grdview by passing in a grid and setting a perspective viewpoint with
an azimuth from the SouthEast and an elevation angle 15 degrees from the
z-plane.
"""
fig = Figure()
fig.grdview(grid=gridfile, projection="Q15c+", perspective=[135, 15], frame=True)
return fig | [
9,
7052,
41,
6161
]
|
METHOD_NAME(self, exit_on_release): | [
0,
538,
69,
586
]
|
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.link_name = AAZStrArg(
options=["-n", "--name", "--link-name"],
help="The name of the NSP link.",
required=True,
id_part="child_name_1",
)
_args_schema.perimeter_name = AAZStrArg(
options=["--perimeter-name"],
help="The name of the network security perimeter.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema | [
56,
134,
135
]
|
def METHOD_NAME(env):
cppdefines = env.get('CPPDEFINES', {})
if cppdefines is None:
return {}
if SCons.Util.is_Sequence(cppdefines):
result = {}
for c in cppdefines:
if SCons.Util.is_Sequence(c):
result[c[0]] = c[1]
else:
result[c] = None
return result
if not SCons.Util.is_Dict(cppdefines):
return {cppdefines : None}
return cppdefines | [
16518,
16519
]
|
def METHOD_NAME(self, tpc_phase, transaction_id, transaction):
"""
Undo a transaction identified by transaction_id.
transaction_id is the base 64 encoding of an 8 byte tid. Undo
by writing new data that reverses the action taken by the
transaction.
"""
# This is called directly from code in DB.py on a new instance
# (created either by new_instance() or a special
# undo_instance()). That new instance is never asked to load
# anything, or poll invalidations, so our storage cache is ineffective
# (unless we had loaded persistent state files)
#
# TODO: Implement 'undo_instance' to make this clear.
#
# A regular Connection going through two-phase commit will
# call tpc_begin(), do a bunch of store() from its commit(),
# then tpc_vote(), tpc_finish().
#
# During undo, we get a tpc_begin(), then a bunch of undo() from
# ZODB.DB.TransactionalUndo.commit(), then tpc_vote() and tpc_finish().
tpc_phase.METHOD_NAME(transaction_id, transaction) | [
2796
]
|
def METHOD_NAME(cls, source):
"""Get record by online harvested source.
:param source: the record source
:return: Organisation record or None.
"""
results = OrganisationsSearch().filter(
'term', online_harvested_source=source).scan()
try:
return Organisation.get_record_by_pid(next(results).pid)
except StopIteration:
return None | [
19,
148,
604,
6779,
15566,
1458
]
|
def METHOD_NAME(resize_input=True, normalize_input=True):
r"""InceptionV3 network for the evaluation of Inception Score (IS).
Args:
resize_input: whether or not to resize the input to (299, 299).
normalize_input: whether or not to normalize the input from range (0, 1) to range(-1, 1).
"""
from artist.models import InceptionV3
return InceptionV3(
output_blocks=(4, ),
resize_input=resize_input,
normalize_input=normalize_input,
requires_grad=False,
use_fid_inception=False).eval().requires_grad_(False) | [
19,
137,
819
]
|
def METHOD_NAME(self):
db = ZODB.DB(self.storage)
cn = db.open()
rt = cn.root()
# Create a bunch of objects; the Data.fs is about 100KB.
for i in range(50):
d = rt[i] = PersistentMapping()
transaction.commit()
for j in range(50):
d[j] = "a" * j
transaction.commit() | [
3914
]
|
def METHOD_NAME(logical=True):
import psutil
return psutil.cpu_count(logical=logical) | [
19,
2265,
29,
10274
]
|
def METHOD_NAME(self):
values = list(self.statistic.values())
self.avg = np.average(values)
self.std = np.std(values) | [
1407,
1576,
11528,
1260
]
|
def METHOD_NAME(df, col):
"""True if no duplicate values"""
return ~df.duplicated(subset=[col]).any() | [
654,
2968
]
|
def METHOD_NAME(values):
"""Encodes values using 4 bits per sample, packing the result into bytes.
"""
b = encode_4bit_base(values).reshape(-1, 2)
b <<= shift04
return b[:, 0] | b[:, 1] | [
421,
16662
]
|
def METHOD_NAME(self, include_optional):
"""Test V2beta1ListRunsResponse
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfp_server_api.models.v2beta1_list_runs_response.V2beta1ListRunsResponse() # noqa: E501
if include_optional :
return V2beta1ListRunsResponse(
runs = [
kfp_server_api.models.v2beta1_run.v2beta1Run(
experiment_id = '0',
run_id = '0',
display_name = '0',
storage_state = 'STORAGE_STATE_UNSPECIFIED',
description = '0',
pipeline_version_id = '0',
pipeline_spec = kfp_server_api.models.pipeline_spec.pipeline_spec(),
pipeline_version_reference = kfp_server_api.models.v2beta1_pipeline_version_reference.v2beta1PipelineVersionReference(
pipeline_id = '0',
pipeline_version_id = '0', ),
runtime_config = kfp_server_api.models.v2beta1_runtime_config.v2beta1RuntimeConfig(
parameters = {
'key' : None
},
pipeline_root = '0', ),
service_account = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
scheduled_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
state = 'RUNTIME_STATE_UNSPECIFIED',
error = kfp_server_api.models.googlerpc_status.googlerpcStatus(
code = 56,
message = '0',
details = [
kfp_server_api.models.protobuf_any.protobufAny(
type_url = '0',
value = 'YQ==', )
], ),
run_details = kfp_server_api.models.v2beta1_run_details.v2beta1RunDetails(
pipeline_context_id = '0',
pipeline_run_context_id = '0',
task_details = [
kfp_server_api.models.v2beta1_pipeline_task_detail.v2beta1PipelineTaskDetail(
run_id = '0',
task_id = '0',
display_name = '0',
create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
start_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
end_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
executor_detail = kfp_server_api.models.v2beta1_pipeline_task_executor_detail.v2beta1PipelineTaskExecutorDetail(
main_job = '0',
pre_caching_check_job = '0',
failed_main_jobs = [
'0'
],
failed_pre_caching_check_jobs = [
'0'
], ),
execution_id = '0',
inputs = {
'key' : kfp_server_api.models.v2beta1_artifact_list.v2beta1ArtifactList(
artifact_ids = [
'0'
], )
},
outputs = {
'key' : kfp_server_api.models.v2beta1_artifact_list.v2beta1ArtifactList()
},
parent_task_id = '0',
state_history = [
kfp_server_api.models.v2beta1_runtime_status.v2beta1RuntimeStatus(
update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
pod_name = '0',
child_tasks = [
kfp_server_api.models.pipeline_task_detail_child_task.PipelineTaskDetailChildTask(
task_id = '0',
pod_name = '0', )
], )
], ),
recurring_run_id = '0',
state_history = [
kfp_server_api.models.v2beta1_runtime_status.v2beta1RuntimeStatus(
update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
], )
],
total_size = 56,
next_page_token = '0'
)
else :
return V2beta1ListRunsResponse(
) | [
93,
89
]
|
def METHOD_NAME(argv):
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description="Set surface climatology fields.")
parser.add_argument(
"-p",
"--path-to-defns",
dest="path_to_defns",
required=True,
help="Path to var_defns file.",
)
parser.add_argument('-d', '--debug', action='store_true',
help='Script will be run in debug mode with more verbose output')
return parser.METHOD_NAME(argv) | [
214,
335
]
|
def METHOD_NAME(severity, label, message):
logger.warning(
"severity: %s, label: %s, message: %s.", severity, label, message
) | [
339,
417
]
|
def METHOD_NAME(email_address, sub_type, num_records=1):
bounced_email = BouncedEmail.objects.create(email=email_address)
for rec_num in range(0, num_records):
PermanentBounceMeta.objects.create(
bounced_email=bounced_email,
timestamp=datetime.datetime.utcnow(),
sub_type=sub_type,
) | [
129,
1377,
1094
]
|
def METHOD_NAME(self, inputs):
"""Shallow Copy the inputs.
The Variable copied is different from the original once, but the actual data and grad
are different.
"""
inps_cp = []
for inp in inputs:
i = inp.get_unlinked_variable(need_grad=inp.need_grad)
inps_cp.append(i)
return inps_cp | [
215,
1461
]
|
def METHOD_NAME(
fp,
proc_num,
proc_index,
index,
header_row,
offset,
chunk_size,
read_block_delimiter,
writer,
):
try:
total_size = fp.size()
except TypeError:
total_size = fp.size
part_size = (total_size - offset) // proc_num
begin = part_size * proc_index + offset
end = total_size if proc_index == proc_num - 1 else begin + part_size
# See Note [Semantic of read_block with delimiter].
if index == 0 and proc_index == 0:
begin -= int(header_row)
first_chunk = True
while begin < end:
buffer = read_block(
fp,
begin,
min(chunk_size, end - begin),
delimiter=read_block_delimiter,
)
if first_chunk:
# strip the UTF-8 BOM
if buffer[0:3] == b'\xef\xbb\xbf':
buffer = buffer[3:]
first_chunk = False
size = len(buffer)
if size <= 0:
break
begin += size
if size > 0:
chunk = writer.next(size)
vineyard.memory_copy(chunk, 0, buffer) | [
203,
1544,
37
]
|
def METHOD_NAME(pair):
"""
Returns True if a depends on b.
"""
a, b = pair
return any(bout in a.inputs for bout in b.outputs) or any(
METHOD_NAME((ainp.owner, b)) for ainp in a.inputs if ainp.owner
) | [
5253
]
|
def METHOD_NAME(d, config, rename_keys_prefix=rename_keys_prefix):
new_d = OrderedDict()
new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
new_key = key
for name_pair in rename_keys_prefix:
new_key = new_key.replace(name_pair[0], name_pair[1])
new_d[new_key] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
return new_d | [
19,
80,
553
]
|
def METHOD_NAME(self):
url = 'https://kinozal.tv/details.php?id=1496310'
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 20, 1, 30)).astimezone(pytz.utc)
server_now = datetime(2017, 1, 20, 12, 0, 0, tzinfo=pytz.utc)
MockDatetime.mock_now = server_now
with patch('monitorrent.plugins.trackers.kinozal.datetime.datetime', MockDatetime):
assert self.tracker.get_last_torrent_update(url) == expected | [
9,
19,
679,
3564,
86,
43,
3758
]
|
def METHOD_NAME(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListAccountKeysResult:
"""
Get the keys to use with the Maps APIs. A key is used to authenticate and authorize access to the Maps REST APIs. Only one key is needed at a time; two are given to provide seamless key regeneration.
:param str account_name: The name of the Maps Account.
:param str resource_group_name: The name of the Azure Resource Group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:maps/v20180501:listAccountKeys', __args__, opts=opts, typ=ListAccountKeysResult).value
return AwaitableListAccountKeysResult(
id=pulumi.get(__ret__, 'id'),
primary_key=pulumi.get(__ret__, 'primary_key'),
secondary_key=pulumi.get(__ret__, 'secondary_key')) | [
245,
598,
219
]
|
def METHOD_NAME(self):
test_edge = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2])
t1 = np.all(self.mesh2.edge_lengths == test_edge)
self.assertTrue(t1) | [
9,
540,
988,
227
]
|
def METHOD_NAME():
return datetime.utcnow() | [
4607
]
|
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
]
|
def METHOD_NAME(ctx, param, value):
"""Print version information."""
if not value or ctx.resilient_parsing:
return
v = packaging.version.Version(molecule.__version__)
color = "bright_yellow" if v.is_prerelease else "green"
msg = f"molecule [{color}]{v}[/] using python [repr.number]{sys.version_info[0]}.{sys.version_info[1]}[/] \n"
msg += f" [repr.attrib_name]ansible[/][dim]:[/][repr.number]{app.runtime.version}[/]"
for driver in drivers():
msg += f"\n [repr.attrib_name]{driver!s}[/][dim]:[/][repr.number]{driver.version}[/][dim] from {driver.module}"
if driver.required_collections:
msg += " requiring collections:"
for name, version in driver.required_collections.items():
msg += f" [repr.attrib_name]{name}[/]>=[repr.number]{version}[/]"
msg += "[/]"
console.print(msg, highlight=False)
ctx.exit() | [
38,
281
]
|
def METHOD_NAME(self, input: Any) -> Any:
pass | [
90,
1542
]
|
def METHOD_NAME():
"""Main method."""
# Set the values of the client ID, tenant ID, and client secret of the AAD application as
# environment variables:
# AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, CONFIDENTIALLEDGER_ENDPOINT
try:
ledger_endpoint = os.environ["CONFIDENTIALLEDGER_ENDPOINT"]
except KeyError:
LOG.error(
"Missing environment variable 'CONFIDENTIALLEDGER_ENDPOINT' - "
"please set it before running the example"
)
sys.exit(1)
# Under the current URI format, the ledger id is the first part of the ledger endpoint.
# i.e. https://<ledger id>.confidential-ledger.azure.com
ledger_id = ledger_endpoint.replace("https://", "").split(".")[0]
identity_service_client = ConfidentialLedgerCertificateClient() # type: ignore[call-arg]
ledger_certificate = identity_service_client.get_ledger_identity(ledger_id)
# The Confidential Ledger's TLS certificate must be written to a file to be used by the
# ConfidentialLedgerClient. Here, we write it to a temporary file so that it is cleaned up
# automatically when the program exits.
with tempfile.TemporaryDirectory() as tempdir:
ledger_cert_file = os.path.join(tempdir, f"{ledger_id}.pem")
with open(ledger_cert_file, "w") as outfile:
outfile.write(ledger_certificate["ledgerTlsCertificate"])
print(
f"Ledger certificate has been written to {ledger_cert_file}. "
"It will be deleted when the script completes."
)
# Build a client through AAD
ledger_client = ConfidentialLedgerClient(
ledger_endpoint,
credential=DefaultAzureCredential(),
ledger_certificate_path=ledger_cert_file,
)
# Write a ledger entry and wait for the transaction to be committed.
try:
entry_contents = "Hello world!"
post_poller = ledger_client.begin_create_ledger_entry( # type: ignore[attr-defined]
{"contents": entry_contents}
)
post_entry_result = post_poller.result()
transaction_id = post_entry_result["transactionId"]
print(
f"Wrote '{entry_contents}' to the ledger at transaction {transaction_id}."
)
except HttpResponseError as e:
if e.response != None:
print("Request failed: {}".format(e.response.json())) # type: ignore[union-attr]
else:
print("No response found")
raise
# Get a receipt for a ledger entry.
# A receipt can be retrieved for any transaction id to provide cryptographic proof of the
# contents of the transaction.
try:
print(
f"Retrieving a receipt for {transaction_id}. The receipt may be used to "
"cryptographically verify the contents of the transaction."
)
print(
"For more information about receipts, please see "
"https://microsoft.github.io/CCF/main/audit/receipts.html#receipts"
)
get_receipt_poller = ledger_client.begin_get_receipt(transaction_id) # type: ignore[attr-defined]
get_receipt_result = get_receipt_poller.result()
print(f"Receipt for transaction id {transaction_id}: {get_receipt_result}")
except HttpResponseError as e:
if e.response != None:
print("Request failed: {}".format(e.response.json())) # type: ignore[union-attr]
else:
print("No response found")
raise
# Read content of service certificate file saved in previous step.
with open(ledger_cert_file, "r") as service_cert_file:
service_cert_content = service_cert_file.read()
# Optionally read application claims, if any
application_claims = get_receipt_result.get("applicationClaims", None)
try:
# Verify the contents of the receipt.
verify_receipt(
get_receipt_result["receipt"],
service_cert_content,
application_claims=application_claims,
)
print(f"Receipt for transaction id {transaction_id} successfully verified")
except ValueError:
print(f"Receipt verification for transaction id {transaction_id} failed")
raise | [
57
]
|
def METHOD_NAME(self, routerName: str) -> BgpLookingGlassServer:
"""!
@brief add looking glass node on the router identified by given name.
@param routerName name of the router
@returns self, for chaining API calls.
"""
self.__routers.add(routerName)
return self
645
]
|
def METHOD_NAME(self, path, qref):
self.qrefs[path] = qref | [
372,
-1
]
|
def METHOD_NAME(self):
index = self.start_index
while index < self.blob_end:
yield index
index += self.chunk_size | [
19,
464,
2964
]
|
def METHOD_NAME(self):
if self.tb_writer is not None:
self.tb_writer.close() | [
1843
]
|
def METHOD_NAME(*, session: "Session"):
"""
Make a snapshot of current counters
:param session: Database session in use.
"""
select_counters_stmt = select(
models.AccountUsage.rse_id,
models.AccountUsage.account,
models.AccountUsage.files,
models.AccountUsage.bytes,
literal(datetime.datetime.utcnow()),
)
stmt = insert(
models.AccountUsageHistory
).from_select(
['rse_id', 'account', 'files', 'bytes', 'updated_at'],
select_counters_stmt
)
session.execute(stmt) | [
1917,
598,
2469,
351,
410
]
|
def METHOD_NAME(self):
# Verify that decode() will refuse to overwrite an existing file
f = None
try:
f = io.BytesIO(encodedtextwrapped(0o644, self.tmpout))
f = open(self.tmpin, 'rb')
uu.decode(f)
f.close()
f = open(self.tmpin, 'rb')
self.assertRaises(uu.Error, uu.decode, f)
f.close()
finally:
self._kill(f) | [
9,
5510
]
|
def METHOD_NAME(self, key_event):
ret = super(PyDMDateTimeEdit, self).METHOD_NAME(key_event)
if key_event.key() in [QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter]:
self.returnPressed.emit()
return ret | [
59,
2971,
417
]
|
def METHOD_NAME(caplog):
class T(m.Product):
x = m.Bit
y = m.Bit
class U(m.Product):
x = T
y = T
class Foo(m.Circuit):
io = m.IO(A=m.Out(U), B=m.In(T), C=m.Out(m.Bit))
io.A.x @= io.B
io.C @= 0
with pytest.raises(Exception) as e:
m.compile("build/Foo", Foo)
assert caplog.messages[0] == "Foo.A not driven"
assert caplog.messages[1] == "Foo.A"
assert caplog.messages[2] == " Foo.A.x: Connected"
assert caplog.messages[3] == " Foo.A.y: Unconnected" | [
9,
1188,
2351,
612,
5955
]
|
def METHOD_NAME(filename):
print(f'checking if {filename} exists')
if filename == valid:
return True
return False | [
921
]
|
def METHOD_NAME(node):
""" instead this could be composed of multiple calls to 'get_indices_from_groupnum' function """
socket_index = 1
vis_dict = {}
vis_dict[0] = True # selector always visible, first socket
for group_index in range(node.max_groups):
vis_dict[socket_index] = group_index < node.num_visible_groups
socket_index += 1
for set_item in range(node.max_items_per_group):
vis_dict[socket_index] = (group_index < node.num_visible_groups and set_item < node.num_items_per_group)
socket_index += 1
# g = "".join(["01"[k] for k in node.values()])
# print(g)
return vis_dict | [
19,
1894,
3800,
427,
673,
2999
]
|
def METHOD_NAME(self):
from stig.client.ttypes import TorrentFilePriority
self.check_filter(FileFilter,
filter_names=('priority', 'prio'),
items=({'id': 1, 'priority': TorrentFilePriority(-2)}, # off
{'id': 2, 'priority': TorrentFilePriority(-1)}, # low
{'id': 3, 'priority': TorrentFilePriority(0)}, # normal
{'id': 4, 'priority': TorrentFilePriority(1)}), # high
test_cases=(('{name}', (1, 2, 4)),
('!{name}', (3,)),
('{name}=low', (2,)),
('{name}<low', (1,)),
('{name}<=low', (1, 2)),
('{name}>low', (3, 4)),
('{name}>=low', (2, 3, 4)))) | [
9,
2654
]
|
def METHOD_NAME(field, value):
return get_default_field_value(field, value) | [
89,
4560,
15699
]
|
async def METHOD_NAME(self, *tileables, **kwargs):
extra_config = kwargs.pop("extra_config", dict())
if kwargs:
unexpected_keys = ", ".join(list(kwargs.keys()))
raise TypeError(f"`fetch` got unexpected arguments: {unexpected_keys}")
self._check_options = self._extract_check_options(extra_config)
results = await super().METHOD_NAME(*tileables)
return results | [
1047
]
|
def METHOD_NAME(code, otherdata):
logger.info('#' * 75)
logger.info('Actual Rule: %s', code)
inputstream = InputStream(code)
lexer = comparatorLexer(inputstream)
stream = CommonTokenStream(lexer)
parser = comparatorParser(stream)
tree = parser.expression()
print('#' * 50)
print(tree.toStringTree(recog=parser))
children = []
for child in tree.getChildren():
children.append((child.getText()))
logger.info('*' * 50)
logger.debug("All the parsed tokens: %s", children)
r_i = RuleInterpreter(children, **otherdata)
print(r_i.compare()) | [
57,
2403
]
|
def METHOD_NAME(duthosts, rand_one_dut_hostname):
"""
Setup/teardown fixture for ntp server config
Args:
duthosts: list of DUTs.
rand_selected_dut: The fixture returns a randomly selected DuT.
"""
duthost = duthosts[rand_one_dut_hostname]
create_checkpoint(duthost)
init_ntp_servers = running_ntp_servers(duthost)
yield
try:
logger.info("Rolled back to original checkpoint")
rollback_or_reload(duthost)
cur_ntp_servers = running_ntp_servers(duthost)
pytest_assert(cur_ntp_servers == init_ntp_servers,
"ntp servers {} do not match {}.".format(cur_ntp_servers, init_ntp_servers))
finally:
delete_checkpoint(duthost) | [
102,
485
]
|
def METHOD_NAME(handler: Handler):
@functools.wraps(handler)
async def wrapper(request: web.Request) -> web.StreamResponse:
try:
return await handler(request)
except TagNotFoundError as exc:
raise web.HTTPNotFound(reason=f"{exc}") from exc
except TagOperationNotAllowed as exc:
raise web.HTTPUnauthorized(reason=f"{exc}") from exc
return wrapper | [
276,
114,
504
]
|
async def METHOD_NAME(producer):
sequence_body = [b'message', 123.456, True]
footer = {'footer_key': 'footer_value'}
properties = {"subject": "sequence"}
application_properties = {"body_type": "sequence"}
sequence_message = AmqpAnnotatedMessage(
sequence_body=sequence_body,
footer=footer,
properties=properties,
application_properties=application_properties
)
await producer.send_batch([sequence_message])
print("Message of sequence body sent.") | [
353,
771,
277
]
|
def METHOD_NAME(self):
"""Test with function calls"""
CODE_FUNCTION_CALL = """ | [
9,
559,
128,
925
]
|
def METHOD_NAME(self, locs):
"""Compute density field at locations.
"""
(npts, dim) = locs.shape
METHOD_NAME = p_density * numpy.ones((1, npts, 1), dtype=numpy.float64)
return METHOD_NAME | [
2915
]
|
def METHOD_NAME(self, bucket):
self._buckets[bucket.name] = bucket.id_ | [
73,
2538
]
|
def METHOD_NAME(self, comp):
comp.set_solar_like(Z=0.02)
molar = comp.get_molar()
assert molar[Nucleus("he4")] == approx((0.3-0.02)/4.0) | [
9,
19,
3851
]
|
def METHOD_NAME(linter):
linter.register_checker(RedundantAssignmentChecker(linter)) | [
372
]
|
def METHOD_NAME(self, name: str) -> str:
"""Convert the given module name to safe snake case."""
return self.filters.METHOD_NAME(name) | [
298,
156
]
|
def METHOD_NAME(self, _manager: Manager, path: str) -> None:
logging.info(f"Device removed {path}")
for plugin in self.Plugins.get_loaded_plugins(AppletPlugin):
plugin.METHOD_NAME(path) | [
69,
398,
674
]
|
def METHOD_NAME(buffers, g=None):
"""reconstruct an object serialized by serialize_object from data buffers.
Parameters
----------
buffers : list of buffers/bytes
g : globals to be used when uncanning
Returns
-------
(newobj, bufs) : unpacked object, and the list of remaining unused buffers.
"""
bufs = list(buffers)
pobj = bufs.pop(0)
canned = pickle.loads(pobj) # noqa
if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
for c in canned:
_restore_buffers(c, bufs)
newobj = uncan_sequence(canned, g)
elif istype(canned, dict) and len(canned) < MAX_ITEMS:
newobj = {}
for k in sorted(canned):
c = canned[k]
_restore_buffers(c, bufs)
newobj[k] = uncan(c, g)
else:
_restore_buffers(canned, bufs)
newobj = uncan(canned, g)
return newobj, bufs | [
2696,
279
]
|
def METHOD_NAME(
cls: type[T],
data: bytes,
expected_identifier: int | None = None,
) -> T:
"""
Unpacks a series of packed bytes representing an application packet using
:meth:`struct.Struct.unpack`, which produces the packet identifier and array of data.
These values are then used to produce the new instance of the class.
Args:
data (bytes): The packed packet.
expected_identifier (Optional[int]): The identifier that is expected to
result from the packet. If ``None``, then the identifier is not verified.
If this value is passed and the identifiers do not match, then the below
error is thrown.
Raises:
ApplicationPacketWrongIdentifierException: If the ``expected_identifier``
does not match the identifier found in the packet, then this is raised.
Returns:
ApplicationPacket: The data represented as an application packet.
"""
payload_len = len(data) - 1
packet = cls(*struct.unpack(f"B{payload_len}s", data))
if expected_identifier is not None and expected_identifier != packet.identifier:
raise ApplicationPacketWrongIdentifierException(
packet.identifier,
expected_identifier,
)
return packet | [
280,
321
]
|
def METHOD_NAME(self):
self.run_until_breakpoint("lookupObjMethod")
try:
s_func_vec = utils.Global("HPHP::Func::s_funcVec")
except Exception:
# lowptr builds don't have a funcVec
return
smart_ptr = utils.get(s_func_vec, "m_vals")
self.assertEqual(utils.template_type(smart_ptr.type), "std::unique_ptr")
raw_ptr = utils.rawptr(smart_ptr)
self.assertEqual(utils.template_type(raw_ptr.type), "HPHP::detail::LowPtrImpl") | [
9,
9315,
1396,
2768,
2980
]
|
def METHOD_NAME(self):
return getattr(self, "settings_build", self.settings) | [
817,
56
]
|
def METHOD_NAME(self):
"""Fulfill quest step A"""
EvAdventureObject.create(
key="quest obj", location=self.character, tags=(("QuestA", "quests"),)
) | [
9459,
385
]
|
def METHOD_NAME():
url = f"{server_url}/api/v1/dataverses/sdsc-test-dataverse/contents"
try:
response = requests.get(url=url, headers=headers)
except errors.RequestError:
warnings.warn("Cannot clean up Dataverse datasets")
return
if response.status_code != 200:
warnings.warn("Cannot clean up Dataverse datasets")
return
datasets = response.json().get("data", [])
for dataset in datasets:
id = dataset.get("id")
if id is not None:
url = f"https://demo.dataverse.org/api/v1/datasets/{id}"
try:
requests.delete(url=url, headers=headers)
except errors.RequestError:
pass | [
188,
4146
]
|
def METHOD_NAME(self, ip, data):
try:
if self._data[ip][-1][26] == data[26]:
logging.warning("Skip this data item, duplicate of previous data item.")
if self._data[ip][-1] != data:
raise ValueError("Duplicate ID, but not duplicate data!")
return
# Every iCOM data item has an index, this check here is
# making sure that the currently received data item is
# incremented by exactly one compared to the most recently
# received data item.
if (self._data[ip][-1][26] + 1) % 256 != data[26]:
raise ValueError("Data stream appears to be arriving out of order")
self._data[ip].append(data)
except KeyError:
self._data[ip] = [data]
timestamp = data[8:26].decode()
shrunk_data, patient_id = extract.extract(data, "Patient ID")
shrunk_data, patient_name = extract.extract(shrunk_data, "Patient Name")
shrunk_data, machine_id = extract.extract(shrunk_data, "Machine ID")
logging.info( # pylint: disable = logging-fstring-interpolation
f"IP: {ip} | Timestamp: {timestamp} | "
f"Patient ID: {patient_id} | "
f"Patient Name: {patient_name} | Machine ID: {machine_id}"
)
try:
usage_start = self._usage_start[ip]
except KeyError:
usage_start = None
if patient_id is not None:
if usage_start is None:
self._current_patient_data[ip] = []
timestamp = data[8:26].decode()
iso_timestamp = f"{timestamp[0:10]}T{timestamp[10::]}"
self._usage_start[ip] = iso_timestamp
logging.debug(
"Starting data collection for patient id %(patient_id)s. "
"Recording started at %(usage_start)s.",
{"usage_start": self._usage_start[ip], "patient_id": patient_id},
)
self._current_patient_data[ip].append(data)
logging.debug(
"iCOM stream appended to the data being collected for "
"patient id %(patient_id)s.",
{"patient_id": patient_id},
)
elif not usage_start is None:
logging.debug(
"Delivery that started at %(usage_start)s appears to "
"have completed.",
{"usage_start": usage_start},
)
save_patient_data(
usage_start, self._current_patient_data[ip], self._output_dir
)
self._current_patient_data[ip] = None
self._usage_start[ip] = None
else:
logging.debug("No delivery is currently being recorded.") | [
86,
365
]
|
def METHOD_NAME(iToken, lTokens):
for iIndex in range(iToken, len(lTokens)):
if isinstance(lTokens[iIndex], parser.carriage_return):
return iIndex
return None | [
416,
15824,
1413
]
|
def METHOD_NAME(self, data, **kwargs):
method = data["method"]
method_callable = getattr(
BeliefsDataFrame, method, None
) # what if the object which is applied to is not a BeliefsDataFrame...
if not callable(method_callable):
raise ValidationError(
f"method {method} is not a valid BeliefsDataFrame method."
)
method_signature = signature(method_callable)
try:
args = data.get("args", []).copy()
_kwargs = data.get("kwargs", {}).copy()
args.insert(0, BeliefsDataFrame)
method_signature.bind(*args, **_kwargs)
except TypeError:
raise ValidationError(
f"Bad arguments or keyword arguments for method {method}"
) | [
187,
103,
128
]
|
def METHOD_NAME(e):
obj = e.get_target()
cont_a = lv.area_t()
obj.get_coords(cont_a)
#Add the faded area before the lines are drawn
dsc = e.get_draw_part_dsc()
if dsc.part == lv.PART.ITEMS:
if not dsc.p1 or not dsc.p2:
return
# Add a line mask that keeps the area below the line
line_mask_param = lv.draw_mask_line_param_t()
line_mask_param.points_init(dsc.p1.x, dsc.p1.y, dsc.p2.x, dsc.p2.y, lv.DRAW_MASK_LINE_SIDE.BOTTOM)
line_mask_id = lv.draw_mask_add(line_mask_param, None)
#Draw a rectangle that will be affected by the mask
draw_rect_dsc = lv.draw_rect_dsc_t()
draw_rect_dsc.init()
draw_rect_dsc.bg_opa = lv.OPA.COVER
draw_rect_dsc.bg_color = dsc.line_dsc.color
a = lv.area_t()
a.x1 = dsc.p1.x
a.x2 = dsc.p2.x
a.y1 = min(dsc.p1.y, dsc.p2.y)
a.y2 = cont_a.y2 - 13 # -13 cuts off where the rectangle draws over the chart margin. Without this an area of 0 doesn't look like 0
dsc.draw_ctx.rect(draw_rect_dsc, a)
# Remove the mask
lv.draw_mask_free_param(line_mask_param)
lv.draw_mask_remove_id(line_mask_id) | [
1100,
417,
905
]
|
def METHOD_NAME(self):
return {
"list": InviteListSerializer,
"retrieve": InviteListSerializer,
"create": InviteSerializer,
}.get(self.action, InviteListSerializer) | [
19,
1386,
2
]
|
def METHOD_NAME(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[3.0, 4.0, 0.0]], requires_grad=True)
self._internal_influence_test_assert(net, net.relu, inp, [[1.0, 1.0, 1.0, 1.0]]) | [
9,
53,
3680,
2026,
1962
]
|
def METHOD_NAME():
_fake_race_eth_data = [
['ethnicity', 'race'],
['Hispanic/Latino', 'American Indian/Alaska Native'],
['Hispanic/Latino', 'Asian'],
['Hispanic/Latino', 'Multiple/Other'],
['Hispanic/Latino', 'White'],
['Hispanic/Latino', 'Native Hawaiian/Other Pacific Islander'],
['Hispanic/Latino', 'Black'],
['Hispanic/Latino', 'Unknown'],
['Hispanic/Latino', 'Missing'],
['Non-Hispanic/Latino', 'American Indian/Alaska Native'],
['Non-Hispanic/Latino', 'Asian'],
['Non-Hispanic/Latino', 'Multiple/Other'],
['Non-Hispanic/Latino', 'White'],
['Non-Hispanic/Latino', 'Native Hawaiian/Other Pacific Islander'],
['Non-Hispanic/Latino', 'Black'],
['Non-Hispanic/Latino', 'Unknown'],
['Non-Hispanic/Latino', 'Missing'],
['Unknown', 'American Indian/Alaska Native'],
['Unknown', 'Asian'],
['Unknown', 'Multiple/Other'],
['Unknown', 'White'],
['Unknown', 'Native Hawaiian/Other Pacific Islander'],
['Unknown', 'Black'],
['Unknown', 'Unknown'],
['Unknown', 'Missing'],
['Missing', 'American Indian/Alaska Native'],
['Missing', 'Asian'],
['Missing', 'Multiple/Other'],
['Missing', 'White'],
['Missing', 'Native Hawaiian/Other Pacific Islander'],
['Missing', 'Black'],
['Missing', 'Unknown'],
['Missing', 'Missing'],
]
_expected_race_eth_combined_data = [
['race_ethnicity_combined'],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.HISP.value],
[std_col.Race.AIAN_NH.value],
[std_col.Race.ASIAN_NH.value],
[std_col.Race.MULTI_OR_OTHER_STANDARD_NH.value],
[std_col.Race.WHITE_NH.value],
[std_col.Race.NHPI_NH.value],
[std_col.Race.BLACK_NH.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
[std_col.Race.UNKNOWN.value],
]
df = gcs_to_bq_util.values_json_to_df(json.dumps(
_fake_race_eth_data), dtype=str).reset_index(drop=True)
expected_df = gcs_to_bq_util.values_json_to_df(
json.dumps(_expected_race_eth_combined_data), dtype=str).reset_index(drop=True)
df = cdc.combine_race_eth(df)
assert_frame_equal(df, expected_df, check_like=True) | [
9,
2003,
8579,
7801
]
|
async def METHOD_NAME(
self,
selector: type[Widget] | str | None | None = None,
offset: Offset = Offset(), | [
1935
]
|
def METHOD_NAME(file_name, content, repo, image, upstream_version):
"""Method to update for the provided file
along with file set lock to avoid race condition.
Args:
file_name: Name of the file to update
content: things to update in file
"""
with open(file_name, "w") as yaml_file:
try:
# To lock the file to avoid race condition
set_lock(file_name)
# update the file with write operation
yaml_file.write(yaml.dump(content, default_flow_style=False))
finally:
# unlock it so other processed can use it
unset_lock(file_name)
LOG.info(
f"Updated build info: \n repo:{repo} \n image:{image} \n ceph version:{upstream_version} in {file_name}"
) | [
86,
3315
]
|
def METHOD_NAME(self, model_instance, add):
# get fields to populate from and slug field to set
if not isinstance(self._populate_from, (list, tuple)):
self._populate_from = (self._populate_from,)
slug_field = model_instance._meta.get_field(self.attname)
# only set slug if empty and first-time save, or when overwrite=True
if add and not getattr(model_instance, self.attname) or self.overwrite:
# slugify the original field content and set next step to 2
def slug_for_field(field):
return self.slugify_func(getattr(model_instance, field))
slug = self.separator.join(map(slug_for_field, self._populate_from))
next_num = 2
else:
# get slug from the current model instance
slug = getattr(model_instance, self.attname)
# model_instance is being modified, and overwrite is False,
# so instead of doing anything, just return the current slug
return slug
# strip slug depending on max_length attribute of the slug field
# and clean-up
slug_len = slug_field.max_length
if slug_len:
slug = slug[:slug_len]
slug = self._slug_strip(slug)
if self.uppercase:
slug = slug.upper()
original_slug = slug
if self.allow_duplicates:
return slug
# exclude the current model instance from the queryset used in finding
# the next valid slug
queryset = self.get_queryset(model_instance.__class__, slug_field)
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
# form a kwarg dict used to implement any unique_together constraints
kwargs = {}
for params in model_instance._meta.unique_together:
if self.attname in params:
for param in params:
kwargs[param] = getattr(model_instance, param, None)
kwargs[self.attname] = slug
# increases the number while searching for the next valid slug
# depending on the given slug, clean-up
while not slug or queryset.filter(**kwargs):
slug = original_slug
end = "%s%s" % (self.separator, next_num)
end_len = len(end)
if slug_len and len(slug) + end_len > slug_len:
slug = slug[: slug_len - end_len]
slug = self._slug_strip(slug)
slug = "%s%s" % (slug, end)
kwargs[self.attname] = slug
next_num += 1
return slug | [
129,
1231
]
|
def METHOD_NAME(
sd_range=[2, 58], target_security_levels=[128], name="default_name"
):
"""
The top level function which we use to run the experiment
:param sd_range: a tuple (sd_min, sd_max) giving the values of sd for which to generate parameters
:param target_security_levels: a list of the target number of bits of security, 128 is default
:param name: a name to save the file
"""
if __name__ == "__main__":
D = ND.DiscreteGaussian
vals = range(sd_range[0], sd_range[1])
pool = multiprocessing.Pool(2)
init_params = LWE.Parameters(
n=1024, q=2 ** 64, Xs=D(0.50, -0.50), Xe=D(2 ** 55), m=oo, tag="params"
)
inputs = [
(init_params, (val, val), target_security_levels, name) for val in vals
]
_res = pool.starmap(generate_parameter_matrix, inputs)
return "done" | [
567,
-1,
-1
]
|
def METHOD_NAME(build_path, arguments):
build_cmd = ['cmake', '--build', build_path, '--config',
arguments.build_type, '-j', str(arguments.jobs)]
subprocess.run(build_cmd, check=True) | [
56,
155
]
|
def METHOD_NAME(tolx: "double") -> "void":
r"""
setXTolerance(double tolx)
Sets the termination threshold for the change in x.
"""
return _rootfind.METHOD_NAME(tolx) | [
0,
1104,
3926
]
|
def METHOD_NAME(self):
self._import("iris.analysis._area_weighted") | [
104,
689,
690,
691
]
|
def METHOD_NAME(x: TType) -> TType:
#? Type()
x
return x | [
3499,
4432,
44,
2273,
2147,
1680,
5504
]
|
def METHOD_NAME(fabric, *args: Any, **kwargs: Any):
kwargs["devices"] = nprocs
kwargs["num_nodes"] = num_nodes
if any(acc.is_available() for acc in mps_accelerators):
old_acc_value = kwargs.get("accelerator", "auto")
kwargs["accelerator"] = "cpu"
if old_acc_value != kwargs["accelerator"]:
warnings.warn("Forcing `accelerator=cpu` as MPS does not support distributed training.")
else:
kwargs["accelerator"] = "auto"
strategy = kwargs.get("strategy", None)
if strategy:
if isinstance(strategy, str):
if strategy == "ddp_spawn":
strategy = "ddp"
elif strategy == "ddp_sharded_spawn":
strategy = "ddp_sharded"
elif isinstance(strategy, tuple(strategies)) and strategy._start_method in ("spawn", "fork"):
raise ValueError("DDP Spawned strategies aren't supported yet.")
kwargs["strategy"] = strategy
return {}, args, kwargs | [
709,
667
]
|
def METHOD_NAME(self, speed):
"""
Sets the fan speed
Args:
speed: An integer, the percentage of full fan speed to set fan to,
in the range 0 (off) to 100 (full speed)
Returns:
A boolean, True if speed is set successfully, False if not
"""
print("Setting Fan speed is not allowed")
return False | [
0,
1942
]
|
def METHOD_NAME(self, conn_string):
storage = MySQLEventLogStorage.create_clean_storage(conn_string)
assert storage
try:
yield storage
finally:
storage.dispose() | [
417,
390,
948
]
|
def METHOD_NAME(self):
self.fresh_env = self.all_files = False
self.source_dir = self.build_dir = None
self.builder = 'html'
self.project = ''
self.version = ''
self.release = ''
self.today = ''
self.config_dir = None
self.link_index = False
self.copyright = '' | [
15,
1881
]
|
def METHOD_NAME(self) -> None:
with Loading(self.main_area):
if self.ebands_kpath is None or self.ebands_ibz is None: return
# SKW interpolation
r = self.ebands_ibz.interpolate(lpratio=self.lpratio, filter_params=None)
# Build plotter.
plotter = self.ebands_kpath.get_plotter_with("Ab-initio", "SKW interp", r.ebands_kpath)
mpl_pane = mpl(plotter.combiplot(**self.mpl_kwargs))
col = pn.Column(mpl_pane, sizing_mode="stretch_width")
self.main_area.objects = [col] | [
86,
57,
690
]
|
def METHOD_NAME(self):
if stdin_ready():
self.timer.Stop()
self.evtloop.Exit() | [
250,
2195
]
|
def METHOD_NAME(self, inputs):
test_dir = os.path.dirname(inputs['casmvs_inp_dir'])
scene = os.path.basename(inputs['casmvs_inp_dir'])
logger.info('depth fusion start')
pcd = pcd_depth_filter(
scene, test_dir, inputs['casmvs_res_dir'], thres_view=4)
logger.info('depth fusion end')
return pcd | [
1710
]
|
def METHOD_NAME(self):
data = np.empty((4, 2), dtype=object)
for iy, ix in np.ndindex(data.shape):
data[iy, ix] = np.array([[5, 10], [10, 5]])
self.vs = hs.signals.BaseSignal(data, ragged=True) | [
102,
103
]
|
def METHOD_NAME(self, database, table_name): | [
19,
410
]
|
async def METHOD_NAME() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
ts = [loop.create_task(pr._drain_helper()) for _ in range(5)]
assert not (await asyncio.wait(ts, timeout=0.5))[
0
], "All draining tasks must be pending"
assert pr._drain_waiter is not None
pr.resume_writing()
await asyncio.gather(*ts)
assert pr._drain_waiter is None | [
9,
1498,
7165,
8579,
405
]
|
def METHOD_NAME(self) -> Optional[Sequence[str]]:
"""
Array of favorite lab resource ids
"""
return pulumi.get(self, "favorite_lab_resource_ids") | [
8065,
4293,
191,
308
]
|
def METHOD_NAME(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True) | [
1458
]
|
def METHOD_NAME(request, job_id):
"""
Callback endpoint for jobs to update status.
"""
try:
job = TethysJob.objects.filter(id=job_id)[0]
job.status
json = {"success": True}
except Exception:
json = {"success": False}
return JsonResponse(json) | [
86,
202,
452
]
|
def METHOD_NAME(self, stage):
if stage == 'fit':
nni.report_final_result(self.trainer.callback_metrics['val_accuracy'].item()) | [
1843
]
|
def METHOD_NAME(name, auth=None, **kwargs):
"""
Ensure a network exists and is up-to-date
name
Name of the network
provider
A dict of network provider options.
shared
Set the network as shared.
external
Whether this network is externally accessible.
admin_state_up
Set the network administrative state to up.
vlan
Vlan ID. Alias for provider
- physical_network: provider
- network_type: vlan
- segmentation_id: (vlan id)
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
kwargs = __utils__["args.clean_kwargs"](**kwargs)
__salt__["neutronng.setup_clouds"](auth)
kwargs["name"] = name
network = __salt__["neutronng.network_get"](name=name)
if network is None:
if __opts__["test"] is True:
ret["result"] = None
ret["changes"] = kwargs
ret["comment"] = "Network will be created."
return ret
if "vlan" in kwargs:
kwargs["provider"] = {
"physical_network": "provider",
"network_type": "vlan",
"segmentation_id": kwargs["vlan"],
}
del kwargs["vlan"]
if "project" in kwargs:
projectname = kwargs["project"]
project = __salt__["keystoneng.project_get"](name=projectname)
if project:
kwargs["project_id"] = project.id
del kwargs["project"]
else:
ret["result"] = False
ret["comment"] = "Project:{} not found.".format(projectname)
return ret
network = __salt__["neutronng.network_create"](**kwargs)
ret["changes"] = network
ret["comment"] = "Created network"
return ret
changes = __salt__["neutronng.compare_changes"](network, **kwargs)
# there's no method for network update in shade right now;
# can only delete and recreate
if changes:
if __opts__["test"] is True:
ret["result"] = None
ret["changes"] = changes
ret["comment"] = "Project will be updated."
return ret
__salt__["neutronng.network_delete"](name=network)
__salt__["neutronng.network_create"](**kwargs)
ret["changes"].update(changes)
ret["comment"] = "Updated network"
return ret | [
2541
]
|
def METHOD_NAME(self):
log.debug("---- %s ----" % self._testMethodName) | [
0,
1
]
|
def METHOD_NAME(sender, old_email=None, **kwargs):
# All subs with the new email
subs = Subscriber.objects.filter(email=sender.email, user__isnull=True)
for sub in subs:
# Find existing user subs on that newsletter
exists = Subscriber.objects.filter(
user=sender, newsletter=sub.newsletter
).exists()
if exists:
# Delete email sub in favor of existing user sub
sub.delete()
else:
# Change email sub to user sub
sub.email = None
sub.name = ""
sub.user = sender
sub.save() | [
21,
487,
1180
]
|
def METHOD_NAME(fh, field, data):
r"""Save a YTArray to an open hdf5 file or group.
Save a YTArray to an open hdf5 file or group, and save the
units to a "units" attribute.
Parameters
----------
fh : an open hdf5 file or hdf5 group
The hdf5 file or group to which the data will be written.
field : str
The name of the field to be saved.
data : YTArray
The data array to be saved.
Returns
-------
dataset : hdf5 dataset
The created hdf5 dataset.
"""
dataset = fh.create_dataset(str(field), data=data)
units = ""
if isinstance(data, YTArray):
units = str(data.units)
dataset.attrs["units"] = units
return dataset | [
15333,
877,
6734
]
|
def METHOD_NAME(self):
export_conandata_patches(self) | [
294,
505
]
|
async def METHOD_NAME(self, async_client: "AsyncClient") -> None:
response = await async_client.get("/api/v1/me/workspaces")
assert response.status_code == 401 | [
9,
245,
5826,
1273,
529,
1970
]
|
def METHOD_NAME():
def rally_process(p):
return (
p.name() == "esrally"
or p.name() == "rally"
or (
p.name().lower().startswith("python")
and any("esrally" in e for e in p.cmdline())
and not any("esrallyd" in e for e in p.cmdline())
)
)
kill_all(rally_process) | [
643,
1340,
12526,
2553
]
|
def METHOD_NAME(self, func, expected):
s = StringIO.StringIO()
save_stdout = sys.stdout
sys.stdout = s
dis.dis(func)
sys.stdout = save_stdout
got = s.getvalue()
# Trim trailing blanks (if any).
lines = got.split('\n')
lines = [line.rstrip() for line in lines]
expected = expected.split("\n")
import difflib
if expected != lines:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff(expected,
lines))) | [
74,
16949,
9
]
|
def METHOD_NAME(self) -> Optional[str]:
return pulumi.get(self, "project") | [
155
]
|
def METHOD_NAME(self):
a = A()
dict_trait = a.traits()["adict"]
self.assertEqual(dict_trait.default_kind, "dict") | [
9,
2551,
553,
235,
1253
]
|
def METHOD_NAME():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
ft1 = relay.FuncType([b], c, [a])
assert_vars_match(all_type_vars(ft1), [a, b, c])
ft2 = relay.FuncType([], relay.TupleType([a, b, c]), [])
assert_vars_match(all_type_vars(ft2), [a, b, c])
w = relay.Var("w")
x = relay.Var("x", a)
y = relay.Var("y", b)
z = relay.Var("z", c)
f1 = relay.Function([x], y, b, [a])
assert_vars_match(all_type_vars(f1), [a, b])
f2 = relay.Function([x], relay.Let(y, x, z))
assert_vars_match(all_type_vars(f2), [a, b, c])
f3 = relay.Function([], relay.Tuple([x, y, z]), ret_type=relay.TupleType([a, b, c]))
assert_vars_match(all_type_vars(f3), [a, b, c])
f4 = relay.Function([w], relay.Tuple([]), type_params=[a, b, c])
assert_vars_match(all_type_vars(f4), [a, b, c])
f5 = relay.Function([w], w)
assert len(all_type_vars(f5)) == 0 | [
9,
75,
44,
1659
]
|