text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(config: OpenMetadataConnection) -> AuthenticationProvider:
warn_auth_deprecation(config.authProvider)
return AzureAuthenticationProvider.create(config) | [
1507,
2433,
176
] |
def METHOD_NAME(self, request, user, data):
return data | [
1356
] |
def METHOD_NAME(self) -> str:
"""
The provisioning state of the route table resource.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME(self, ctx:STIXPatternParser.OrderableLiteralContext):
return self.visitChildren(ctx) | [
716,
15914,
1479
] |
def METHOD_NAME(test, params, env):
"""
Executes dd with defined parameters and checks the return code and output
Test steps:
1). wait guest boot up
2). run dd command in guest with special params (e.g. oflag, bs and so on)
3). check command exit status and output
"""
def _get_file(filename, select, test=test):
""" Picks the actual file based on select value """
if filename == "NULL":
return "/dev/null"
elif filename == "ZERO":
return "/dev/zero"
elif filename == "RANDOM":
return "/dev/random"
elif filename == "URANDOM":
return "/dev/urandom"
elif filename in params.objects("images"):
drive_id = params["blk_extra_params_%s" % filename].split("=")[1]
drive_path = utils_misc.get_linux_drive_path(session, drive_id)
if drive_path:
return drive_path
test.error("Failed to get '%s' drive path" % filename)
else:
# get all matching filenames
try:
disks = sorted(session.cmd("ls -1d %s" % filename).split('\n'))
except aexpect.ShellCmdError: # No matching file (creating new?)
disks = [filename]
if disks[-1] == '':
disks = disks[:-1]
try:
return disks[select]
except IndexError:
err = ("Incorrect cfg: dd_select out of the range (disks=%s,"
" select=%s)" % (disks, select))
test.log.error(err)
test.error(err)
def _check_disk_partitions_number():
""" Check the data disk partitions number. """
del partitions[:]
partitions.extend(re.findall(
r'%s\d+' % dev_id, ' '.join(utils_disk.get_linux_disks(session, True))))
return len(partitions) == bs_count
vm = env.get_vm(params['main_vm'])
timeout = int(params.get("login_timeout", 360))
error_context.context("Wait guest boot up", test.log.info)
session = vm.wait_for_login(timeout=timeout)
dd_keys = ['dd_if', 'dd_of', 'dd_bs', 'dd_count', 'dd_iflag',
'dd_oflag', 'dd_skip', 'dd_seek']
dd_params = {key: params.get(key, None) for key in dd_keys}
if dd_params['dd_bs'] is None:
dd_params['dd_bs'] = '512'
dd_params['dd_bs'] = dd_params['dd_bs'].split()
bs_count = len(dd_params['dd_bs'])
dd_timeout = int(params.get("dd_timeout", 180))
dd_output = params.get("dd_output", "")
dd_stat = int(params.get("dd_stat", 0))
dev_partitioned = []
for arg in ['dd_if', 'dd_of']:
filename = dd_params[arg]
path = _get_file(filename,
int(params.get('%s_select' % arg, '-1')))
if (bs_count > 1
and filename in params.objects('images')):
psize = float(
utils_numeric.normalize_data_size(
params.get("partition_size", '2G')
)
)
start = 0.0
dev_id = os.path.split(path)[-1]
dev_partitioned.append(dev_id)
utils_disk.create_partition_table_linux(session, dev_id, 'gpt')
for i in range(bs_count):
utils_disk.create_partition_linux(session, dev_id,
'%fM' % psize,
'%fM' % start)
start += psize
partitions = []
if not utils_misc.wait_for(_check_disk_partitions_number, 30, step=3.0):
test.error('Failed to get %d partitions on %s.' % (bs_count, dev_id))
partitions.sort()
dd_params[arg] = [path.replace(dev_id, part)
for part in partitions]
else:
dd_params[arg] = [path]
if bs_count > 1 and not dev_partitioned:
test.error('with multiple bs, either dd_if or '
'dd_of must be a block device')
dd_cmd = ['dd']
for key in dd_keys:
value = dd_params[key]
if value is None:
continue
arg = key.split('_')[-1]
if key in ['dd_if', 'dd_of', 'dd_bs']:
part = '%s=%s' % (arg, '{}')
else:
part = '%s=%s' % (arg, value)
dd_cmd.append(part)
dd_cmd = ' '.join(dd_cmd)
remaining = [dd_params[key] for key in ['dd_if', 'dd_of', 'dd_bs']]
if len(dd_params['dd_if']) != bs_count:
fillvalue = dd_params['dd_if'][-1]
else:
fillvalue = dd_params['dd_of'][-1]
cmd = [dd_cmd.format(*t) for t in
zip_longest(*remaining, fillvalue=fillvalue)]
cmd = ' & '.join(cmd)
test.log.info("Using '%s' cmd", cmd)
try:
error_context.context("Execute dd in guest", test.log.info)
try:
(stat, out) = session.cmd_status_output(cmd, timeout=dd_timeout)
except aexpect.ShellTimeoutError:
err = ("dd command timed-out (cmd='%s', timeout=%d)"
% (cmd, dd_timeout))
test.fail(err)
except aexpect.ShellCmdError as details:
stat = details.status
out = details.output
error_context.context("Check command exit status and output",
test.log.info)
test.log.debug("Returned dd_status: %s\nReturned output:\n%s",
stat, out)
if stat != dd_stat:
err = ("Return code doesn't match (expected=%s, actual=%s)\n"
"Output:\n%s" % (dd_stat, stat, out))
test.fail(err)
if dd_output not in out:
err = ("Output doesn't match:\nExpected:\n%s\nActual:\n%s"
% (dd_output, out))
test.fail(err)
test.log.info("dd test succeeded.")
finally:
for dev_id in dev_partitioned:
utils_disk.clean_partition_linux(session, dev_id)
session.close() | [
22
] |
async def METHOD_NAME(cc_targets: AllCCTargets) -> CCFilesMapping:
stripped_file_per_target = await MultiGet(
Get(StrippedFileName, StrippedFileNameRequest(tgt[CCSourceField].file_path))
for tgt in cc_targets
)
stripped_files_to_addresses: dict[str, Address] = {}
stripped_files_with_multiple_owners: DefaultDict[str, set[Address]] = defaultdict(set)
for tgt, stripped_file in zip(cc_targets, stripped_file_per_target):
if stripped_file.value in stripped_files_to_addresses:
stripped_files_with_multiple_owners[stripped_file.value].update(
{stripped_files_to_addresses[stripped_file.value], tgt.address}
)
else:
stripped_files_to_addresses[stripped_file.value] = tgt.address
# Remove files with ambiguous owners.
for ambiguous_stripped_f in stripped_files_with_multiple_owners:
stripped_files_to_addresses.pop(ambiguous_stripped_f)
mapping_not_stripped = {tgt[CCSourceField].file_path: tgt.address for tgt in cc_targets}
return CCFilesMapping(
mapping=FrozenDict(sorted(stripped_files_to_addresses.items())),
ambiguous_files=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(stripped_files_with_multiple_owners.items())
),
mapping_not_stripped=FrozenDict(mapping_not_stripped),
) | [
422,
1298,
1537
] |
def METHOD_NAME(self):
"""
_tearDownCouch_
call this in tearDown to erase all evidence of your couch misdemeanours
"""
for database in self.databases:
self.couch.drop(database) | [
531,
481,
4400
] |
def METHOD_NAME(
self, mock_set_form_data, mock_update_job
):
app._got_first_request = False
async_query_manager.init_app(app)
user = security_manager.find_user("gamma")
form_data = {}
job_metadata = {
"channel_id": str(uuid4()),
"job_id": str(uuid4()),
"user_id": user.id,
"status": "pending",
"errors": [],
}
with pytest.raises(SupersetException):
load_explore_json_into_cache(job_metadata, form_data)
mock_set_form_data.assert_called_once_with(form_data)
errors = ["The dataset associated with this chart no longer exists"]
mock_update_job.assert_called_once_with(job_metadata, "error", errors=errors) | [
9,
557,
1176,
763,
409,
596,
168
] |
def METHOD_NAME(self):
FecDataWriter.METHOD_NAME(self)
self._set_min_delay(1) | [
248
] |
def METHOD_NAME(cli, ckan_config):
"""Config file not ignored when displaying usage.
"""
result = cli.invoke(ckan, [u'-c', ckan_config[u'__file__'], u'-h'])
assert not result.exit_code | [
9,
668,
200,
41,
40
] |
def METHOD_NAME(cls, key, catalog):
assert key
assert catalog
return catalog.key + '/' + key | [
56,
157
] |
def METHOD_NAME(widths):
"""Given a list of glyph widths, or dictionary mapping glyph width to number of
glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
This algorithm is linear in UPEM+numGlyphs."""
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
keys = sorted(widths.keys())
minw, maxw = keys[0], keys[-1]
domain = list(range(minw, maxw + 1))
# Cumulative sum/max forward/backward.
cumFrqU = cumSum(widths, op=add)
cumMaxU = cumSum(widths, op=max)
cumFrqD = cumSum(widths, op=add, decreasing=True)
cumMaxD = cumSum(widths, op=max, decreasing=True)
# Cost per nominal choice, without default consideration.
nomnCostU = missingdict(
lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
)
nomnCostD = missingdict(
lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
)
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
# Cost-saving per nominal choice, by best default choice.
dfltCostU = missingdict(
lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
)
dfltCostD = missingdict(
lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
)
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
# Combined cost per nominal choice.
bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
# Best nominal.
nominal = min(domain, key=lambda x: bestCost[x])
# Work back the best default.
bestC = bestCost[nominal]
dfltC = nomnCost[nominal] - bestCost[nominal]
ends = []
if dfltC == dfltCostU[nominal]:
starts = [nominal, nominal - 108, nominal - 1132]
for start in starts:
while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
start -= 1
ends.append(start)
else:
starts = [nominal, nominal + 108, nominal + 1132]
for start in starts:
while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
start += 1
ends.append(start)
default = min(ends, key=lambda default: byteCost(widths, default, nominal))
return default, nominal | [
5107,
6265
] |
def METHOD_NAME():
return {
salt_monitor: {
"__salt__": {
"test.ping": mock_test_ping,
"test.version": mock_test_version,
"cmd.run": mock_test,
"test.false": mock_test_false,
}
}
} | [
111,
467,
468
] |
def METHOD_NAME(x, bins):
"""Pearson's Chi-squared test."""
x = np.ravel(x)
n = len(x)
histogram, _ = np.histogram(x, bins=bins, range=(0, 1))
expected = n / float(bins)
return np.sum(np.square(histogram - expected) / expected) | [
10356,
9702
] |
def METHOD_NAME(gtest):
with pytest.raises(
AssertionError,
match=r"gTest error, mismatched tests. Expected id.suite_name.test0 but got None",
):
process_logs(
gtest,
[
SAMPLE_GTEST_START,
SAMPLE_GTEST_FMT.format(
state=GTEST_START_STATE, suite="suite_name", test="test0"
),
SAMPLE_GTEST_FMT.format(
state=GTEST_PASS_STATE, suite="suite_name", test="test1"
),
],
) | [
9,
6227,
4030,
1571
] |
def METHOD_NAME(port, http_request):
request = http_request("POST", "http://localhost:{}/basic/string".format(port))
with RequestsTransport() as sender:
response = sender.send(request)
auto_headers = response.internal_response.request.headers
assert "Content-Type" not in auto_headers | [
9,
311,
803,
2131
] |
def METHOD_NAME(X: Series) -> bool:
type_family = get_type_family_raw(X.dtype)
if type_family != "object":
return False
if len(X) > 5000:
# Sample to speed-up type inference
X = X.sample(n=5000, random_state=0)
X_unique = X.unique()
num_unique = len(X_unique)
num_rows = len(X)
unique_ratio = num_unique / num_rows
if unique_ratio <= 0.01:
return False
try:
avg_words = Series(X_unique).str.split().str.len().mean()
except AttributeError:
return False
if avg_words < 3:
return False
return True | [
250,
217,
7138,
964
] |
def METHOD_NAME(self, ins: t.Any = None) -> t.Callable:
func = self.func
if ins is not None:
func = functools.partial(func, ins)
return func | [
1179,
717
] |
def METHOD_NAME():
net = pn.case30()
pp.runpp(net)
assert net.converged
_ppc_element_test(net, 30, 41, 6, True) | [
9,
-1
] |
def METHOD_NAME(self):
""" See :meth:`AdaptiveCovarianceMC._generate_proposal()`. """
proposed = np.random.multivariate_normal(
self._current, np.exp(self._log_lambda) *
self._sigma[self._proposal_count])
return proposed | [
567,
4229
] |
def METHOD_NAME(rebulk: Rebulk,
config: dict,
options: dict = None):
"""
Load patterns defined in given config.
:param rebulk: Rebulk builder to use.
:param config: dict containing pattern definition.
:param options: Additional pattern options to use.
:type options: Dict[Dict[str, str]] A dict where key is the pattern type (regex, string, functional) and value is
the default kwargs options to pass.
:return:
"""
if options is None:
options = {}
for value, raw_entries in config.items():
entries = raw_entries if isinstance(raw_entries, list) else [raw_entries]
for entry in entries:
if isinstance(entry, dict) and "callable" in entry.keys():
_process_callable_entry(entry.pop("callable"), rebulk, entry)
continue
entry_decl = _build_entry_decl(entry, options, value)
for pattern_type in _pattern_types:
patterns = entry_decl.get(pattern_type)
if not patterns:
continue
if not isinstance(patterns, list):
patterns = [patterns]
patterns_entry_decl = dict(entry_decl)
for pattern_type_to_remove in _pattern_types:
patterns_entry_decl.pop(pattern_type_to_remove, None)
current_pattern_options = dict(options)
current_pattern_options[None] = patterns_entry_decl
load_patterns(rebulk, pattern_type, patterns, current_pattern_options) | [
557,
200,
5230
] |
def METHOD_NAME(self, command):
"""send command via serial"""
self.serial_needed()
self.log.debug("Send serial command: "+command)
serial_str=command+"\n"
self._serial.write(bytes(serial_str, 'ascii'))
-1
] |
def METHOD_NAME(self, pyramid_request):
return AdminOrganizationViews(pyramid_request) | [
4632
] |
def METHOD_NAME(cls, filename: Union[os.PathLike, str]):
"""Load the default configuration."""
new = cls()
mapping = _read_config_file(filename)
# Add defaults that are not available in esmvalcore/config-user.yml
mapping['check_level'] = CheckLevels.DEFAULT
mapping['config_file'] = filename
mapping['diagnostics'] = None
mapping['extra_facets_dir'] = tuple()
mapping['max_datasets'] = None
mapping['max_years'] = None
mapping['resume_from'] = []
mapping['run_diagnostic'] = True
mapping['skip_nonexistent'] = False
new.update(mapping)
return new | [
557,
235,
200
] |
def METHOD_NAME(self):
self.zoom_x_limits = self.ax.get_xlim()
self.zoom_y_limits = self.ax.get_ylim() | [
2560,
2093
] |
def METHOD_NAME(self, grad, var): | [
231,
3829
] |
async def METHOD_NAME(self) -> None:
for container_name in list(self._log_processor_tasks.keys()):
await self.stop_log_fetching(container_name) | [
631,
7715
] |
def METHOD_NAME(self, es_results):
results = []
time_change = datetime.timedelta(days=92)
for x in es_results.aggs.to_dict().get("time_period", {}).get("buckets", []):
date = datetime.datetime.strptime(x.get("key_as_string"), "%Y-%m-%d")
date = date + time_change
if self.group == "month":
time_period = {"fiscal_year": f"{date.year}", self.group: f"{date.month}"}
elif self.group == "quarter":
time_period = {"fiscal_year": f"{date.year}", self.group: f"{int(date.month/3) + (date.month % 3>0)}"}
else:
time_period = {"fiscal_year": f"{date.year}"}
results.append(
{
"new_award_count_in_period": x.get("award_count", {}).get("value", 0),
"time_period": time_period,
}
)
results = self.complete_missing_periods(results)
return results | [
275,
51
] |
def METHOD_NAME(rise_times, test=False):
"""
duplicate the plot from Figure 2.7 of Kris Vorren's thesis.
need to fit the e_ftp peak to the HPGe peakshape function (same as in
calibration.py) and plot the resulting FWHM^2 vs. the ramp time.
"""
out_dir = "~/Data/cage"
opt_file = f"{out_dir}/cage_ds3_optimize.h5"
print("input file:", opt_file)
# match keys to settings; should maybe do this in prev function as attrs.
with pd.HDFStore(opt_file, 'r') as store:
keys = [key[1:] for key in store.keys()] # remove leading '/'
settings = {keys[i] : rise_times[i] for i in range(len(keys))}
# loop over the keys and fit each e_ftp spectrum to the peakshape function
fwhms = {}
for key, rt in settings.items():
t2df = pd.read_hdf(opt_file, key=key)
# histogram spectrum near the uncalibrated peak -- have to be careful here
xlo, xhi, xpb = 2550, 2660, 1
hE, xE, vE = ph.get_hist(t2df["e_ftp"], range=(xlo, xhi), dx=xpb, trim=False)
# set initial guesses for the peakshape function. most are pretty rough
mu = xE[np.argmax(hE)]
sigma = 5
hstep = 0.001
htail = 0.5
tau = 10
bg0 = np.mean(hE[:20])
amp = np.sum(hE)
x0 = [mu, sigma, hstep, htail, tau, bg0, amp]
xF, xF_cov = pf.fit_hist(pf.radford_peak, hE, xE, var=vE, guess=x0)
fwhms[key] = xF[1] * 2.355
if test:
plt.cla()
# peakshape function
plt.plot(xE, pf.radford_peak(xE, *x0), c='orange', label='guess')
plt.plot(xE, pf.radford_peak(xE, *xF), c='r', label='peakshape')
plt.axvline(mu, c='g')
# plot individual components
# tail_hi, gaus, bg, step, tail_lo = pf.radford_peak(xE, *xF, components=True)
# gaus = np.array(gaus)
# step = np.array(step)
# tail_lo = np.array(tail_lo)
# plt.plot(xE, gaus * tail_hi, ls="--", lw=2, c='g', label="gaus+hi_tail")
# plt.plot(xE, step + bg, ls='--', lw=2, c='m', label='step + bg')
# plt.plot(xE, tail_lo, ls='--', lw=2, c='k', label='tail_lo')
plt.plot(xE[1:], hE, ls='steps', lw=1, c='b', label="data")
plt.plot(np.nan, np.nan, c='w', label=f"fwhm = {fwhms[key]:.2f} uncal.")
plt.xlabel("Energy (uncal.)", ha='right', x=1)
plt.ylabel("Counts", ha='right', y=1)
plt.legend(loc=2)
plt.show() | [
5107,
5770
] |
def METHOD_NAME(self, step: int, epoch: int, train_metrics: Dict) -> None:
if len(jax.devices()) > 1:
train_metrics = jax_utils.unreplicate(train_metrics)
metrics = {"train/loss": train_metrics["loss"], "epoch": epoch}
wandb.log(metrics, step=step + 1) | [
390,
2277
] |
def METHOD_NAME(
self, angle_list: List[float], q_controls: Sequence[QubitSpecifier], q_target: QubitSpecifier
):
r"""Attach a uniformly controlled (also called multiplexed) Ry rotation gate to a circuit.
The decomposition is based on https://arxiv.org/pdf/quant-ph/0406176.pdf by Shende et al.
Args:
angle_list (List[float]): list of (real) rotation angles :math:`[a_0,...,a_{2^k-1}]`
q_controls (Sequence[QubitSpecifier]): list of k control qubits
(or empty list if no controls). The control qubits are ordered according to their
significance in increasing order: For example if ``q_controls=[q[0],q[1]]``
(with ``q = QuantumRegister(2)``), the rotation ``Ry(a_0)`` is performed if ``q[0]``
and ``q[1]`` are in the state zero, the rotation ``Ry(a_1)`` is performed if ``q[0]``
is in the state one and ``q[1]`` is in the state zero, and so on
q_target (QubitSpecifier): target qubit, where we act on with
the single-qubit rotation gates
Returns:
QuantumCircuit: the uniformly controlled rotation gate is attached to the circuit.
Raises:
QiskitError: if the list number of control qubits does not correspond to the provided
number of single-qubit unitaries; if an input is of the wrong type
"""
if isinstance(q_controls, QuantumRegister):
q_controls = q_controls[:]
if isinstance(q_target, QuantumRegister):
q_target = q_target[:]
if len(q_target) == 1:
q_target = q_target[0]
else:
raise QiskitError(
"The target qubit is a QuantumRegister containing more than one qubit."
)
# Check if q_controls has type "list"
if not isinstance(angle_list, list):
raise QiskitError("The angles must be provided as a list.")
num_contr = math.log2(len(angle_list))
if num_contr < 0 or not num_contr.is_integer():
raise QiskitError(
"The number of controlled rotation gates is not a non-negative power of 2."
)
# Check if number of control qubits does correspond to the number of rotations
if num_contr != len(q_controls):
raise QiskitError(
"Number of controlled rotations does not correspond to the number of control-qubits."
)
return self.append(UCRYGate(angle_list), [q_target] + q_controls, []) | [
-1
] |
def METHOD_NAME(self, request, format=None):
# List invitations sent by user.
username = request.user.username
invitations = []
for e in Invitation.objects.get_by_inviter(username):
invitations.append(e.to_dict())
return Response(invitations) | [
19
] |
def METHOD_NAME(self, title, message, sound=None, userKey=None, apiKey=None, force=False):
"""
Sends a pushover notification based on the provided info or SC config
title: The title of the notification to send
message: The message string to send
sound: The notification sound to use
userKey: The userKey to send the notification to
apiKey: The apiKey to use to send the notification
force: Enforce sending, for instance for testing
"""
if not settings.USE_PUSHOVER and not force:
logger.debug("Notification for Pushover not enabled, skipping this notification")
return False
logger.debug("Sending notification for " + message)
return self._send_pushover(message, title, sound=sound, userKey=userKey, apiKey=apiKey) | [
959,
7004
] |
async def METHOD_NAME(self, app, prefix_message_text):
handler = self.make_default_handler(
self.callback_regex2, filters=filters.Regex("one") & filters.Regex("two")
)
await self._test_context_args_or_regex(app, handler, prefix_message_text) | [
9,
198,
107,
211
] |
def METHOD_NAME(self):
energy_list = []
with open(os.path.join(output_dir, "{}_energy.dat".format(header))) as f:
lines = f.readlines()
for line in lines:
words = line.split()
if len(words) != 0 and words[0] == "Energy":
energy_list.append(float(words[1]))
self.energy_list = energy_list
self.ene_min = energy_list[0]
self.ene_max = energy_list[len(energy_list)-1]
for T in T_list:
eta_ene = np.exp(-(self.ene_max-self.ene_min)/T)
print("T = {}: exp[-beta(ene_max-ene_mix)] = {}".format(T, eta_ene))
if eta_ene > eta:
print("Warning: At T = {}, eta_ene is larger than eta.".format(T))
return energy_list | [
19,
1811
] |
def METHOD_NAME(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Gets a private endpoint connection.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:dbformariadb/v20180601privatepreview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
type=pulumi.get(__ret__, 'type')) | [
19,
547,
841,
550
] |
def METHOD_NAME(self):
return ["{}.envs".format(x) for x in self.backends] | [
219
] |
def METHOD_NAME(self, name: str, *args: Any) -> None:
self._bus.METHOD_NAME(None, self._path, self._interface_name, name,
self._prepare_arguments(self._signals[name], args)) | [
2648,
900
] |
async def METHOD_NAME(api: OT3API, sig_name: str) -> None:
assert sig_name in SIGNAL_TEST_NAMES
if sig_name == "nsync":
stop = MoveStopCondition.sync_line
else:
stop = MoveStopCondition.none
_move_group_nsync = _build_move_group(MOVING_DISTANCE, MOVING_SPEED, stop)
runner = MoveGroupRunner(move_groups=[[_move_group_nsync]])
if api.is_simulator:
# test that the required functionality exists
assert runner.run
else:
backend: OT3Controller = api._backend # type: ignore[assignment]
messenger = backend._messenger
if sig_name == "nsync":
engage = api._backend.release_sync # type: ignore[union-attr]
release = api._backend.engage_sync # type: ignore[union-attr]
elif sig_name == "estop":
engage = api._backend.engage_estop # type: ignore[union-attr]
release = api._backend.release_estop # type: ignore[union-attr]
async def _sleep_then_activate_stop_signal() -> None:
if "external" in sig_name:
print("waiting for EXTERNAL E-Stop button")
return
pause_seconds = MOVE_SECONDS / 2
print(
f"pausing {round(pause_seconds, 1)} second before activating {sig_name}"
)
await asyncio.sleep(pause_seconds)
try:
print(f"activating {sig_name}")
await engage()
print(f"pausing 1 second before deactivating {sig_name}")
await asyncio.sleep(1)
finally:
print(f"deactivating {sig_name}")
await release()
await asyncio.sleep(0.5)
async def _do_the_moving() -> None:
print(f"moving {MOVING_DISTANCE} at speed {MOVING_SPEED}")
try:
await runner.run(can_messenger=messenger)
except MotionFailedError:
print("caught MotionFailedError from estop")
await asyncio.sleep(0.25) # what is this doing?
move_coro = _do_the_moving()
stop_coro = _sleep_then_activate_stop_signal()
await asyncio.gather(stop_coro, move_coro) | [
132,
61,
1771,
41,
900
] |
def METHOD_NAME(self, X, y, queue=None):
return super()._fit(X, y, _backend.linear_model.regression, queue) | [
90
] |
def METHOD_NAME(fn):
'''Indicate that a function is final and cannot be overridden.'''
fn._rfm_final = True
return fn | [
2316
] |
def METHOD_NAME(help: dict, scope: str) -> tuple[frozenset[str], frozenset[str]]:
"""scope represents the goal or subsystem of interest Returns a tuple containing the scoped
options, followed by unscoped."""
scoped_help_info = help["scope_to_help_info"][scope]
scoped_options = []
unscoped_options = []
for option in scoped_help_info["basic"] + scoped_help_info["advanced"]:
scoped_options.extend(option["scoped_cmd_line_args"])
unscoped_options.extend(option["unscoped_cmd_line_args"])
return (
frozenset(scoped_options),
frozenset(unscoped_options),
) | [
214,
40,
43,
1881
] |
def METHOD_NAME(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Infer the attacked feature.
:param x: Input to attack. Includes all features except the attacked feature.
:param y: Not used.
:param values: Possible values for attacked feature.
:type values: list
:param priors: Prior distributions of attacked feature values. Same size array as `values`.
:type priors: list
:return: The inferred feature values.
:rtype: `np.ndarray`
"""
priors: Optional[list] = kwargs.get("priors")
values: Optional[list] = kwargs.get("values")
# Checks:
if self.estimator.input_shape[0] != x.shape[1] + 1: # pragma: no cover
raise ValueError("Number of features in x + 1 does not match input_shape of classifier")
if priors is None or values is None: # pragma: no cover
raise ValueError("`priors` and `values` are required as inputs.")
if len(priors) != len(values): # pragma: no cover
raise ValueError("Number of priors does not match number of values")
if self.attack_feature >= x.shape[1]: # pragma: no cover
raise ValueError("attack_feature must be a valid index to a feature in x")
n_samples = x.shape[0]
# Calculate phi for each possible value of the attacked feature
# phi is the total number of samples in all tree leaves corresponding to this value
phi = self._calculate_phi(x, values, n_samples)
# Will contain the probability of each value
prob_values = []
for i, value in enumerate(values):
# prepare data with the given value in the attacked feature
v_full = np.full((n_samples, 1), value).astype(x.dtype)
x_value = np.concatenate((x[:, : self.attack_feature], v_full), axis=1)
x_value = np.concatenate((x_value, x[:, self.attack_feature :]), axis=1)
# find the relative probability of this value for all samples being attacked
prob_value = [
(
(self.estimator.get_samples_at_node(self.estimator.get_decision_path([row])[-1]) / n_samples)
* priors[i]
/ phi[i]
)
for row in x_value
]
prob_values.append(prob_value)
# Choose the value with highest probability for each sample
return np.array([values[np.argmax(list(prob))] for prob in zip(*prob_values)]) | [
1852
] |
def METHOD_NAME():
key = "SYSTEM\\CurrentControlSet\\Services\\NTDS\\Parameters"
vname = "LdapEnforceChannelBinding"
setting = "Always"
win_reg.delete_value(hive="HKLM", key=key, vname=vname)
try:
win_lgpo.set_computer_policy(name=vname, setting=setting)
result = win_reg.read_value(hive="HKLM", key=key, vname=vname)
assert result["vdata"] == 2
finally:
win_reg.delete_value(hive="HKLM", key=key, vname=vname) | [
9,
5817,
307,
4320,
2659
] |
def METHOD_NAME(self):
"""Check with path from str and database directory as Path"""
path = "does/not/exist"
location_name = "test_location_A"
mapset_name = "test_mapset_1"
full_path = Path(path) / location_name / mapset_name
mapset_path = MapsetPath(
path=str(full_path),
directory=Path(path),
location=location_name,
mapset=mapset_name,
)
# Paths are currently stored as is (not resolved).
self.assertEqual(mapset_path.directory, path)
self.assertEqual(mapset_path.location, location_name)
self.assertEqual(mapset_path.mapset, mapset_name)
self.assertEqual(mapset_path.path, Path(path) / location_name / mapset_name) | [
9,
3934,
280,
3
] |
def METHOD_NAME(file, data, type):
if type not in TypeNames.keys():
raise ValueError(
f"Trying to write number with unknown dataformat.\n"
f"Input: {type}\n"
f"Supported types: {list(TypeNames.keys())}"
)
np.array(data).astype(TypeNames[type]).tofile(file) | [
77,
5455
] |
def METHOD_NAME(self):
try:
self.ws.close()
except websocket.WebSocketConnectionClosedException:
pass | [
602,
4703
] |
def METHOD_NAME(client: Client):
res = btc.verify_message(
client,
"Bitcoin",
"1KzXE97kV7DrpxCViCN3HbGbiKhzzPM7TQ",
bytes.fromhex(
"1cc694f0f23901dfe3603789142f36a3fc582d0d5c0ec7215cf2ccd641e4e37228504f3d4dc3eea28bbdbf5da27c49d4635c097004d9f228750ccd836a8e1460c0"
),
"\u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy",
)
assert res is True | [
9,
1162,
7364
] |
def METHOD_NAME(self) -> bytes:
return (
self.ground_level.METHOD_NAME(2, "little", signed=True)
+ self.dungeon_id.METHOD_NAME(1, "little", signed=False)
+ self.unk2.METHOD_NAME(1, "little", signed=False)
+ self.unk3.METHOD_NAME(4, "little", signed=False)
) | [
24,
321
] |
def METHOD_NAME(opts):
"""
Return a unique ID for this proxy minion. This ID MUST NOT CHANGE.
If it changes while the proxy is running, the salt-master will get
really confused and may stop talking to this minion
"""
r = salt.utils.http.query(
opts["proxy"]["url"] + "id", decode_type="json", decode=True
)
return r["dict"]["id"].encode("ascii", "ignore") | [
147
] |
def METHOD_NAME(process, network, timestamp=True):
DPT = DESCRIPTOR_PIPELINE_TEMPLATE
return DPT.format(process=process, network=network) + (
"connect from images.timestamp to descriptors.timestamp\n"
if timestamp else ''
) | [
56,
2701,
1148
] |
def METHOD_NAME(self) -> typing.Dict[str, ROIExtractionProfile]:
warnings.warn("segmentation_profiles is deprecated, use roi_profiles", DeprecationWarning, stacklevel=2)
return self.roi_profiles | [
2738,
1348
] |
def METHOD_NAME(self):
# Invalid JAnalyzer, make sure we get an exception.
with self.assertRaises(TypeError):
Analyzer('str') | [
9,
532,
2224,
291
] |
def METHOD_NAME(data):
"""deserialize_data"""
return pyarrow.deserialize(data, context=context) | [
2696,
365
] |
def METHOD_NAME(self):
self.conf.fill(0) | [
656
] |
def METHOD_NAME():
circuits = [circ, 3 * circ]
true_values = [-2.0, -2.0]
num_qubits = len(qreg)
p0 = 0.2
p1 = 0.1
inverse_confusion_matrix = generate_inverse_confusion_matrix(
num_qubits, p0=p0, p1=p1
)
@rem_decorator(inverse_confusion_matrix=inverse_confusion_matrix)
def noisy_readout_batched(circuits) -> List[MeasurementResult]:
return [noisy_readout_executor(c, p0=p0, p1=p1) for c in circuits]
noisy_executor = partial(noisy_readout_executor, p0=p0, p1=p1)
base_values = [
raw_execute(c, noisy_executor, observable) for c in circuits
]
rem_values = Executor(noisy_readout_batched).evaluate(circuits, observable)
for true_val, base, rem_val in zip(true_values, base_values, rem_values):
assert abs(true_val - rem_val) < abs(true_val - base)
assert np.isclose(true_val, rem_val, atol=0.05) | [
9,
10771,
972,
3781
] |
def METHOD_NAME():
register_env("my_custom_multi_env_v1", make_custom_multi_env_func)
global_model_factory().register_encoder_factory(make_custom_encoder) | [
372,
343,
811
] |
def METHOD_NAME(self, nvt_pre_eq_job):
name = 'npt'
job = AMSNPTJob.restart_from(
nvt_pre_eq_job,
name=name,
use_prerun=True,
settings=self.settings,
nsteps=self.nsteps[name],
**self.kwargs,
)
#@add_to_instance(job)
#def prerun(self): # noqa F811
#self.get_velocities_from(nvt_pre_eq_job, update_molecule=True)
self.children[name] = job
return self.children[name] | [
129,
14160,
202
] |
def METHOD_NAME(node): | [
7810,
2189
] |
def METHOD_NAME(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"name": datasets.Value("string"),
"page_no": datasets.Value("int32"),
"text": datasets.features.Sequence(datasets.Value("string")),
"bbox": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32"))),
"segment_bbox": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32"))),
"segment_id": datasets.features.Sequence(datasets.Value("int32")),
"image": datasets.Value("string"),
"width": datasets.Value("int32"),
"height": datasets.Value("int32"),
"md5sum": datasets.Value("string"),
"qas": datasets.features.Sequence(
{
"question_id": datasets.Value("int32"),
"question": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
"answer_end": datasets.Value("int32"),
}
),
}
),
}
),
supervised_keys=None,
homepage="https://adamharley.com/rvl-cdip/",
license=_LICENSE,
citation=_CITATION,
) | [
100
] |
def METHOD_NAME(self) -> None:
self.proto.lineReceived(b"echo test_`echo test`_test_`echo test`_test")
self.assertEqual(self.tr.value(), b"test_test_test_test_test\n" + PROMPT) | [
9,
1605,
462,
-1
] |
def METHOD_NAME(self, size):
a = c_char * size | [
9,
1953,
877
] |
def METHOD_NAME(self, layer, event):
pos = self._get_mouse_coordinates(event)
dims_displayed = self._dims_displayed
if event.button == 1: # left mouse click
orig_pos = pos.copy()
# recenter the point in the center of the image pixel
pos[dims_displayed] = np.floor(pos[dims_displayed]) + 0.5
if not self.overlay.points:
self._first_point_pos = np.array(event.pos)
prev_point = (
self.overlay.points[-2] if self._num_points > 1 else None
)
# Add a new point only if it differs from the previous one
if prev_point is None or np.linalg.norm(pos - prev_point) > 0:
self.overlay.points = self.overlay.points[:-1] + [
pos.tolist(),
# add some epsilon to avoid points duplication,
# the latest point is used only for visualization of the cursor
(orig_pos + 1e-3).tolist(),
]
elif event.button == 2 and self._num_points > 0: # right mouse click
if self._num_points < 3:
self.overlay.points = []
else:
self.overlay.points = self.overlay.points[:-2] + [pos.tolist()] | [
69,
2571,
2971
] |
def METHOD_NAME(self):
self.assertEqual("Sweets", self.harvester_class.category()) | [
9,
253
] |
def METHOD_NAME(stations_dict: dict, param_dict: dict) -> pl.DataFrame:
data = [
pl.DataFrame(
schema={
Columns.DATE.value: pl.Datetime(time_zone="UTC"),
Columns.PARAMETER.value: pl.Utf8,
Columns.VALUE.value: pl.Float64,
Columns.DISTANCE.value: pl.Float64,
Columns.STATION_ID.value: pl.Utf8,
}
)
]
for parameter, param_data in param_dict.items():
param_df = pl.DataFrame({Columns.DATE.value: param_data.values.get_column(Columns.DATE.value)})
results = []
for row in param_data.values.select(pl.all().exclude("date")).iter_rows(named=True):
results.append(apply_summary(row, stations_dict, parameter))
results = pl.DataFrame(
results,
schema={
Columns.PARAMETER.value: pl.Utf8,
Columns.VALUE.value: pl.Float64,
Columns.DISTANCE.value: pl.Float64,
Columns.STATION_ID.value: pl.Utf8,
},
)
param_df = pl.concat([param_df, results], how="horizontal")
data.append(param_df)
df = pl.concat(data)
df = df.with_columns(pl.col(Columns.VALUE.value).round(2), pl.col(Columns.DISTANCE.value).round(2))
return df.sort(
by=[
Columns.PARAMETER.value,
Columns.DATE.value,
]
) | [
1593,
2718
] |
def METHOD_NAME(self, message: bytes) -> Signature:
"""
Signs the given message and returns a signature.
:param message: The message to sign
:return: Signature
"""
return Signer(self._privkey).METHOD_NAME(message) | [
2452
] |
def METHOD_NAME(self, *args):
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = True
module.case_details.short.lookup_action = "callout.commcarehq.org.dummycallout.LAUNCH"
module.case_details.short.lookup_extras = [
{'key': 'action_0', 'value': 'com.biometrac.core.SCAN'},
{'key': "action_1", 'value': "com.biometrac.core.IDENTIFY"},
]
module.case_details.short.lookup_responses = [
{"key": "match_id_0"},
{"key": "match_id_1"},
]
expected = """
<partial>
<lookup action="callout.commcarehq.org.dummycallout.LAUNCH">
<extra key="action_0" value="com.biometrac.core.SCAN"/>
<extra key="action_1" value="com.biometrac.core.IDENTIFY"/>
<response key="match_id_0"/>
<response key="match_id_1"/>
</lookup>
</partial>
"""
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
) | [
9,
331,
245,
1906,
2284,
6735,
61
] |
def METHOD_NAME(self):
return NodeStates | [
1716,
4085
] |
def METHOD_NAME(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalMAP,
metric_functional=retrieval_average_precision,
) | [
9,
1582,
2265
] |
def METHOD_NAME(self):
dask_array = da.random.random((4, 4, 10, 15), chunks=(1, 1, 10, 15))
s = LazyCorrelation2D(dask_array)
s.compute()
assert s.__class__ == Correlation2D
assert dask_array.shape == s.data.shape | [
9,
6089,
365,
226
] |
def METHOD_NAME(state):
start_batch = state.batch
# Horovod: adjust number of steps based on number of GPUs.
for batch_idx, (images, labels) in enumerate(dataset.skip(state.batch).take(100 // hvd.size())):
state.batch = start_batch + batch_idx
loss_value = training_step(images, labels)
if state.batch % 10 == 0 and hvd.rank() == 0:
print('Step #%d\tLoss: %.6f' % (state.batch, loss_value))
# Horovod: commit state at the end of each batch
state.commit() | [
849
] |
def METHOD_NAME(opt):
"""
Selection function for available networks.
Args:
opt: argparse.Namespace, contains all training-specific training parameters.
Returns:
Network of choice
"""
if opt.arch == "resnet50":
network = ResNet50(opt)
else:
raise Exception("Network {} not available!".format(opt.arch))
if opt.resume:
weights = torch.load(os.path.join(opt.save_path, opt.resume))
weights_state_dict = weights["state_dict"]
if torch.cuda.device_count() > 1:
encoder_state_dict = OrderedDict()
for k, v in weights_state_dict.items():
k = k.replace("module.", "")
encoder_state_dict[k] = v
network.load_state_dict(encoder_state_dict)
else:
network.load_state_dict(weights_state_dict)
# print("=================== network =======================")
# for parameter in network.parameters():
# parameter.requires_grad = False
# for parameter in network.layer_blocks[-1].parameters():
# parameter.requires_grad = True
return network | [
-1
] |
def METHOD_NAME(string):
_reader.putString(string) | [
408,
526
] |
def METHOD_NAME(test_file):
"""
Test setting the owner of a file with test=True
"""
expected = {
"comment": "",
"changes": {"owner": "Backup Operators"},
"name": str(test_file),
"result": None,
}
with patch.dict(win_dacl.__opts__, {"test": True}):
result = win_file.check_perms(
path=str(test_file), owner="Backup Operators", inheritance=None
)
assert result == expected | [
9,
250,
5363,
0,
2013,
9,
2019
] |
def METHOD_NAME(key, obj):
value = obj.value.value_()
return key - value | [
2014
] |
def METHOD_NAME(self, node, results):
one = results["while"]
one.replace(Name(u"True", prefix=one.prefix)) | [
1053,
795
] |
def METHOD_NAME():
"""
Get already existing authors in the file system
"""
global contest_to_authors
default_value = {
"normal": {},
"gym": {}
}
contest_to_authors = default_value
try:
file_obj = open(get_metadata_filename(), "r")
except IOError:
return
contest_to_authors = json.loads(file_obj.read())
file_obj.close() | [
19,
2471,
3654
] |
def METHOD_NAME(iterable):
for x in iterable:
x.a = x.a + x.b | [
148,
84,
2124,
9677
] |
def METHOD_NAME():
del os.environ["DJANGO_SUPERUSER_PASSWORD"] | [
656,
485
] |
def METHOD_NAME():
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".jpg")
os.close(fh)
commands = [
'set theFile to (open for access POSIX file "'
+ filepath
+ '" with write permission)',
"try",
" write (the clipboard as JPEG picture) to theFile",
"end try",
"close access theFile",
]
script = ["osascript"]
for command in commands:
script += ["-e", command]
subprocess.call(script)
im = None
if os.stat(filepath).st_size != 0:
im = Image.open(filepath)
im.load()
os.unlink(filepath)
return im
elif sys.platform == "win32":
fmt, data = Image.core.grabclipboard_win32()
if fmt == "file": # CF_HDROP
import struct
o = struct.unpack_from("I", data)[0]
if data[16] != 0:
files = data[o:].decode("utf-16le").split("\0")
else:
files = data[o:].decode("mbcs").split("\0")
return files[: files.index("")]
if isinstance(data, bytes):
import io
data = io.BytesIO(data)
if fmt == "png":
from . import PngImagePlugin
return PngImagePlugin.PngImageFile(data)
elif fmt == "DIB":
from . import BmpImagePlugin
return BmpImagePlugin.DibImageFile(data)
return None
else:
if shutil.which("wl-paste"):
args = ["wl-paste"]
elif shutil.which("xclip"):
args = ["xclip", "-selection", "clipboard", "-t", "image/png", "-o"]
else:
msg = "wl-paste or xclip is required for ImageGrab.grabclipboard() on Linux"
raise NotImplementedError(msg)
fh, filepath = tempfile.mkstemp()
subprocess.call(args, stdout=fh)
os.close(fh)
im = Image.open(filepath)
im.load()
os.unlink(filepath)
return im | [
11867
] |
def METHOD_NAME(self, id_: str) -> "StepBuilder":
super().METHOD_NAME(id_)
return self | [
0,
147
] |
def METHOD_NAME(self, ticket, count=1, observed=False): | [
238,
374
] |
def METHOD_NAME(self):
return self._port_to_i2cbus_mapping | [
237,
24,
13347,
445
] |
def METHOD_NAME(self):
# Simulates a multi-instance test
component = "foo"
orig_contents1 = "bar = 42"
orig_contents2 = "bar = 17"
contents_to_append = "baz = 101"
# Setup
filename1 = self.write_user_nl_file(component, orig_contents1, suffix="_0001")
filename2 = self.write_user_nl_file(component, orig_contents2, suffix="_0002")
# Exercise
user_nl_utils.append_to_user_nl_files(
caseroot=self._caseroot, component=component, contents=contents_to_append
)
# Verify
expected_contents1 = orig_contents1 + "\n" + contents_to_append + "\n"
expected_contents2 = orig_contents2 + "\n" + contents_to_append + "\n"
self.assertFileContentsEqual(
expected_contents1, os.path.join(self._caseroot, filename1)
)
self.assertFileContentsEqual(
expected_contents2, os.path.join(self._caseroot, filename2)
) | [
9,
1459,
107,
1537
] |
def METHOD_NAME(self, *args):
"""Show popup dialog above the mouse cursor position."""
pos = QCursor().pos() # mouse position
szhint = self.sizeHint()
pos -= QPoint(szhint.width() // 2, szhint.height() + 14)
self.move(pos)
self.show() | [
697,
4927,
2571
] |
def METHOD_NAME(self):
# Attempting to clear an executing frame is forbidden.
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear() | [
9,
537,
4445
] |
def METHOD_NAME(self, data):
"""
The shape of data is [batch_size, mem_size], and the content is the id of each word
"""
q = np.ndarray([self.batch_size, self.edim], dtype=np.float32)
q.fill(self.init_hid)
q = paddle.to_tensor(q)
time = np.ndarray([self.batch_size, self.mem_size], dtype=np.int64)
for i in range(self.mem_size):
time[:, i] = i
time = paddle.to_tensor(time)
for hop in range(self.nhop):
A_in_c = self.A(data) # [batch_size, mem_size, edim]
A_in_t = self.T_A(time) # [batch_size, mem_size, edim]
A_in = paddle.add(A_in_c, A_in_t) # [batch_size, mem_size, edim]
q_in = q.reshape([-1, 1, self.edim]) # [batch, 1, edim]
A_out3d = paddle.matmul(q_in, A_in, transpose_y=True) # [batch, 1, mem_size]
A_out2d = A_out3d.reshape([-1, self.mem_size])
p = nn.functional.softmax(A_out2d) # [batch, mem_size]
C_in_c = self.C(data)
C_in_t = self.T_C(time)
C_in = paddle.add(C_in_c, C_in_t) # [batch_size, mem_size, edim]
p_3d = p.reshape([-1, 1, self.mem_size]) # [batch, 1, mem_size]
C_out3d = paddle.matmul(p_3d, C_in) # [batch, 1, edim]
C_out2d = C_out3d.reshape([-1, self.edim]) # [batch, edim]
# Linear mapping and addition
q_mapped = self.H(q)
q_out = paddle.add(C_out2d, q_mapped)
if self.lindim == self.edim:
q = q_out
elif self.lindim == 0:
q = nn.functional.relu(q_out)
else:
F = q_out[:, : self.lindim]
G = q_out[:, self.lindim :]
K = nn.functional.relu(G)
q = paddle.concat([F, K], axis=-1)
predict = self.W(q)
return predict | [
76
] |
def METHOD_NAME(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return self.form_id | [
157
] |
def METHOD_NAME():
assert background_colour_string("#123456") == "\x1b[48:2::18:52:86m \x1b[49m"
assert background_colour_string("#123") == "\x1b[48:2::1:2:3m \x1b[49m" | [
9,
2272,
7604,
144
] |
def METHOD_NAME(tmpdir):
"""``verify_task_schema`` raises if the task doesn't match the schema."""
path = os.path.join(tmpdir, "schema.json")
with open(path, "w") as fh:
fh.write(json.dumps(FAKE_SCHEMA))
config = {"foo": {"bar": path}}
client.verify_task_schema(config, {"list-of-strings": ["a"]}, "foo.bar")
with pytest.raises(TaskVerificationError):
client.verify_task_schema(config, {"list-of-strings": ["a", "a"]}, "foo.bar")
with pytest.raises(TaskVerificationError):
client.verify_task_schema(config, {"list-of-strings": ["a", "a"]}, "nonexistent_path") | [
9,
1162,
758,
135
] |
def METHOD_NAME(self, info_buckets: List[dict]) -> List[dict]:
temp_results = {}
child_results = []
for bucket in info_buckets:
child = self._build_child_json_result(bucket)
child_results.append(child)
for child in child_results:
result = self._build_json_result(child)
child.pop("parent_data")
if result["id"] in temp_results.keys():
temp_results[result["id"]] = {
"id": int(result["id"]),
"code": result["code"],
"description": result["description"],
"award_count": temp_results[result["id"]]["award_count"] + result["award_count"],
# the count of distinct awards contributing to the totals
"obligation": temp_results[result["id"]]["obligation"] + result["obligation"],
"outlay": temp_results[result["id"]]["outlay"] + result["outlay"],
"face_value_of_loan": bucket["count_awards_by_dim"]["sum_loan_value"]["value"],
"children": temp_results[result["id"]]["children"] + result["children"],
}
else:
temp_results[result["id"]] = result
results = [x for x in temp_results.values()]
return results | [
56,
5811,
1571
] |
def METHOD_NAME(path: str,
epoch: int,
model: torch.nn.Module,
optimizer: Optional[ColossalaiOptimizer] = None,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
torch_load_kwargs: Optional[Dict] = None,
load_state_dict_kwargs: Optional[Dict] = None):
"""load_checkpoint
load a model, whose parameters are `ColoTensor`s.
Args:
path (str): directory to save the checkpoint files.
epoch (int): the number of epoch
model (torch.nn.Module): a torch module initialized by ColoInitContext
optimizer (ColossalaiOptimizer, optional): optimizers. Defaults to None.
lr_scheduler (torch.optim.lr_scheduler._LRScheduler, optional): lr schedule. Defaults to None.
torch_load_kwargs: (dict, optional): The kwargs of torch.load inside the function
load_state_dict_kwargs (dict, optional): The kwargs of load_state_dict inside the function
"""
# initialize the default parameters
if not torch_load_kwargs:
torch_load_kwargs = dict()
if not load_state_dict_kwargs:
load_state_dict_kwargs = dict()
rank = dist.get_rank()
mapping = dict()
for n, p in model.named_parameters():
if isinstance(p, ColoTensor):
mapping[n] = p.dist_spec
gather_tensor(p)
if rank == 0:
load_state = torch.load(path + '/epoch_{}_model.pth'.format(epoch), **torch_load_kwargs)
model.load_state_dict(load_state['model'], **load_state_dict_kwargs)
dist.barrier()
# scatter loaded parameters
for n, p in model.named_parameters():
if isinstance(p, ColoTensor):
scatter_tensor(p, mapping[n])
if rank == 0:
assert hasattr(p, 'save_ready')
delattr(p, 'save_ready')
del mapping
if optimizer is not None:
mapping = dict()
for k, v in optimizer.state_dict()['state'].items():
for n, t in v.items():
if isinstance(t, ColoTensor):
mapping[(k, n)] = t.dist_spec
gather_tensor(t)
if rank == 0:
colo_checkpoint = torch.load(path + '/epoch_{}_optim.pth'.format(epoch), **torch_load_kwargs)
optimizer.load_state_dict(colo_checkpoint['optim'], **load_state_dict_kwargs)
dist.barrier()
for k, v in optimizer.state_dict()['state'].items():
for n, t in v.items():
if isinstance(t, ColoTensor):
scatter_tensor(t, mapping[(k, n)])
del mapping | [
557,
1830
] |
def METHOD_NAME(O, event): | [
69,
86
] |
def METHOD_NAME(msg):
print("\nINFO: {}".format(msg)) | [
100
] |
def METHOD_NAME(caught_warnings, category: Type[Warning], message: str) -> bool:
return any(issubclass(w.category, category) and str(w.message) == message for w in caught_warnings) | [
1992,
3437
] |
def METHOD_NAME(tmpdir):
# GIVEN
output = Path(tmpdir) / "test.bin"
allocator = MemoryAllocator()
# WHEN
with Tracker(output):
with Pool(3) as p:
p.map(multiproc_func, [1, 10, 100, 1000, 2000, 3000, 4000, 5000])
allocator.valloc(1234)
allocator.free()
relevant_records = list(
filter_relevant_allocations(FileReader(output).get_allocation_records())
)
assert len(relevant_records) == 2
vallocs = [
record
for record in relevant_records
if record.allocator == AllocatorType.VALLOC
]
assert len(vallocs) == 1
(valloc,) = vallocs
assert valloc.size == 1234
frees = [
record for record in relevant_records if record.allocator == AllocatorType.FREE
]
assert len(frees) == 1
# No files created by child processes
child_files = Path(tmpdir).glob("test.bin.*")
assert list(child_files) == [] | [
9,
8784,
41,
8785
] |
def METHOD_NAME(self) -> None:
self.layout.paint()
ui.refresh() | [
5932
] |
def METHOD_NAME(self, obj):
return format_html('<a href="{0}" target="_blank">{0}</a>', obj.path) | [
4643,
157
] |
def METHOD_NAME(alias: str) -> int:
"""
We perform the calculation in a different way to verify correctness
"""
offset_dict = dict(zip(["A", "B", "C", "D", "R"], range(0, 32, 6)))
return offset_dict[alias[0]] + int(alias[1:]) | [
1407,
307,
6166
] |
def METHOD_NAME():
bf = field.Boolean()
assert not bf.deserialize("false")
assert not bf.deserialize(False)
assert not bf.deserialize("")
assert not bf.deserialize(0)
assert bf.deserialize(True)
assert bf.deserialize("true")
assert bf.deserialize(1) | [
9,
201,
13057
] |
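
Each row above pairs a `text` snippet (Python source with the target method name masked as `METHOD_NAME`) with an `ids` list of 1–7 integers. Below is a minimal sketch of consuming rows with this schema; it assumes the dump corresponds to a Hugging Face `datasets` dataset, and `"path/to/dataset"` is a hypothetical placeholder rather than the real identifier.

```python
# Sketch only: "path/to/dataset" is a placeholder; the real dataset id is not
# given in this dump. Assumes columns named "text" and "ids" as in the header.
from datasets import load_dataset

ds = load_dataset("path/to/dataset", split="train")

for row in ds.select(range(3)):      # peek at the first three rows
    snippet = row["text"]            # Python source, target name masked as METHOD_NAME
    ids = row["ids"]                 # list of 1-7 integers paired with the snippet
    print(len(snippet), ids)
```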