text (stringlengths: 15 to 7.82k) | ids (sequencelengths: 1 to 7) |
---|---|
def METHOD_NAME(cluster):
    with pytest.raises(AssertionError):
        importers.build_gcp_query(cluster, service_id="not-a-service-id") | [
9,
4212,
539,
348,
532,
549
] |
def METHOD_NAME(self, args=None):
    return self._parse_args_impl(args, True) | [
214,
3478,
335
] |
def METHOD_NAME(self):
    test_loss = AverageMeter()
    test_acc = AccuracyMeter()
    self.net.eval()
    with torch.no_grad():
        for data, label in self.test_loader:
            data = data.to(self.device)
            label = label.to(self.device)
            output = self.net(data)
            loss = F.cross_entropy(output, label)
            test_loss.update(loss.item(), data.size(0))
            test_acc.update(output, label)
    return test_loss.average, test_acc.accuracy | [
1195
] |
def METHOD_NAME(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response | [
276,
532,
558
] |
def METHOD_NAME(self, name) -> Optional[date]:
    """
    Add Chinese New Year's Day Four.

    https://en.wikipedia.org/wiki/Chinese_New_Year
    """
    return self._add_chinese_calendar_holiday(
        name, self._chinese_calendar.lunar_new_year_date(self._year), days_delta=+3
    ) | [
238,
1183,
80,
1895,
1724,
6497
] |
def METHOD_NAME(subcommand_callback):
    @functools.wraps(subcommand_callback)
    @click.pass_context
    def wrapped_subcommand_callback(ctx, *args, **kwargs):
        if ctx.find_root().command_path != 'bugwarrior':
            old_command = ctx.command_path
            new_command = ctx.command_path.replace('-', ' ')
            log.warning(
                f'Deprecation Warning: `{old_command}` is deprecated and will '
                'be removed in a future version of bugwarrior. Please use '
                f'`{new_command}` instead.')
        return ctx.invoke(subcommand_callback, *args, **kwargs)
    return wrapped_subcommand_callback | [
3116,
615,
3527,
3437
] |
def METHOD_NAME(inst, idnum):
    inst.add_s(Entry(
        ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), {
            'objectClass': 'top account posixAccount'.split(' '),
            'cn': 'user',
            'uid': 'user%s' % idnum,
            'homeDirectory': '/home/user%s' % idnum,
            'loginShell': '/bin/nologin',
            'gidNumber': '-1',
            'uidNumber': '-1',
        })
    )) | [
129,
21
] |
def METHOD_NAME(statement: str, sql_bind_parameters: SqlBindParameters) -> str:
    message = f"Running query:\n\n{indent_log_line(statement)}"
    if len(sql_bind_parameters.param_dict) > 0:
        message += (
            f"\n"
            f"\n"
            f"with parameters:\n"
            f"\n"
            f"{indent_log_line(pformat_big_objects(sql_bind_parameters.param_dict))}"
        )
    return message | [
275,
22,
539,
390,
277
] |
def METHOD_NAME() -> SchemaItem:
    return SchemaItem(
        kw=ExtJobKeys.MAX_RUNNING_MINUTES,
        type_map=[SchemaItemType.INT],
        required_set=False,
    ) | [
232,
1340,
3710,
2069
] |
def METHOD_NAME(languages):
    if not languages:
        return
    if not isinstance(languages, list):
        languages = [languages]
    all_scripts = []
    for language in languages:
        if language in language2scripts:
            METHOD_NAME = language2scripts[language]
        elif '_' in language:
            l = language.split('_')[0]
            METHOD_NAME = language2scripts[l]
        if not METHOD_NAME:
            return
        all_scripts += METHOD_NAME
    return list(set(all_scripts)) | [
2942
] |
def METHOD_NAME (self):
    fcisolvers = [fci.solver (molsym, symm=True, singlet=False) for i in range (2)]
    fcisolvers[0].nroots = fcisolvers[1].nroots = 2
    fcisolvers[0].wfnsym = 'A1'
    fcisolvers[1].wfnsym = 'B1'
    mc = mcscf.addons.state_average_mix (mcscf.CASSCF (msym, 4, 4), fcisolvers, [0.25,]*4)
    mo = mc.sort_mo([4,5,6,10], base=1)
    mc.kernel(mo)
    self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
    for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
        self.assertAlmostEqual (e1, e0, 5) | [
9,
1403,
1404
] |
def METHOD_NAME(self):
    return self._material_resource.get_int_property('F_ALPHA_TEST', 0) | [
1139,
9
] |
def METHOD_NAME(collect_trial_profiles: Callable[[int], None]) -> None:
    config = conf.load_config(conf.gan_examples_path("dcgan_tf_keras/const.yaml"))
    config = conf.set_max_length(config, {"batches": 200})
    config = conf.set_min_validation_period(config, {"batches": 200})
    config = conf.set_slots_per_trial(config, 8)
    config = conf.set_tf2_image(config)
    config = conf.set_profiling_enabled(config)

    exp_id = exp.run_basic_test_with_temp_config(
        config, conf.gan_examples_path("dcgan_tf_keras"), 1
    )
    trial_id = exp.experiment_trials(exp_id)[0].trial.id
    collect_trial_profiles(trial_id) | [
22,
554,
4098,
14514,
1441
] |
def METHOD_NAME():
    utils.pmdastatsd_remove()
    utils.setup_dbpmdarc()
    command = '(sleep 8;' + composed_command + '; cat) | sudo valgrind --trace-children=yes --leak-check=full --log-file=' + valgrind_out_path + ' dbpmda -e -q 60 -i 2>&1 >>' + dbpmda_out_path
    for config in testconfigs:
        utils.print_test_section_separator()
        utils.set_config(config)
        p = Popen(command, cwd=utils.pmdastatsd_dir, stdout=PIPE, stdin=PIPE, bufsize=1, text=True, close_fds=ON_POSIX, shell=True)
        time.sleep(4)
        # get pmdastatsd pid
        pmdastatsd_pid = utils.get_pmdastatsd_pids_ran_by_dbpmda()[0]
        # send payloads
        for payload in payloads:
            sock.sendto(payload.encode("utf-8"), (ip, port))
        # wait to make sure the agent handles the payloads AND dbpmda gets delayed echo statements
        time.sleep(8)
        # trigger cleanup in agent by sending SIGINT
        utils.send_INT_to_pid(pmdastatsd_pid)
        # again, wait for cleanup
        time.sleep(5)
        valgrind_pmdastatsd_output = valgrind_out_path.replace("%p", pmdastatsd_pid)
        f = open(valgrind_pmdastatsd_output, "r")
        show_next_line = 0
        for line in f:
            if 'LEAK SUMMARY' in line:
                sys.stdout.write(line.replace("=={}==".format(pmdastatsd_pid), ""))
                show_next_line = 1
            elif show_next_line:
                sys.stdout.write(line.replace("=={}==".format(pmdastatsd_pid), ""))
                show_next_line = 0
        # sometimes the agent hangs, probably due to dbpmda exit? Doesn't happen when it's './Remove'd
        p.kill()
        # don't clean up valgrind output files ... leave that to the
        # QA test so we have a chance to triage in the event of failure
        utils.restore_config() | [
22,
9
] |
def METHOD_NAME(tokens, candidates):
    """Merge in the reverse binary tree."""
    best_id = tf.argmin(candidates, output_type=tf.int32)
    # Perform the merge at position best_id.
    tokens = tf.concat(
        [tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]],
        axis=0)
    # Recompute the merge candidates.
    # Only the neighbors of best_id need to be recomputed.
    empty = tf.zeros([0], dtype=candidates.dtype)

    def _MergeLeft():
        return tf.concat(
            [candidates[:best_id - 1],
             _MergeOneToken(tokens, best_id - 1)],
            axis=0)

    left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft)

    def _MergeRight():
        return tf.concat(
            [_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0)

    right_candidates = tf.cond(
        tf.greater_equal(best_id,
                         tf.size(tokens) - 1), lambda: empty, _MergeRight)

    candidates = tf.concat([left_candidates, right_candidates], axis=0)
    return tokens, candidates | [
411,
2217
] |
def METHOD_NAME():
    test1()
    test2()
    test3()
    test4()
    test5() | [
57
] |
def METHOD_NAME(pytester):
    pytester.makepyfile(myscript="""
        from openeo.internal.warnings import test_warnings
        test_warnings()
        test_warnings(2)
    """)
    result = pytester.runpython("myscript.py")
    stderr = "\n".join(result.errlines)
    assert "This is a UserDeprecationWarning (stacklevel 1)" in stderr
    assert "myscript.py:3: UserDeprecationWarning: This is a UserDeprecationWarning (stacklevel 2)" in stderr | [
9,
21,
3527,
3437
] |
def METHOD_NAME() -> None:
    def objective(trial: optuna.trial.Trial) -> float:
        callback = PyTorchLightningPruningCallback(trial, monitor="accuracy")
        trainer = pl.Trainer(
            max_epochs=2,
            accelerator="cpu",
            enable_checkpointing=False,
            callbacks=[callback],
        )
        model = Model()
        trainer.fit(model)
        return 1.0

    study = optuna.create_study(pruner=DeterministicPruner(True))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.PRUNED

    study = optuna.create_study(pruner=DeterministicPruner(False))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert study.trials[0].value == 1.0 | [
9,
3299,
10437,
2421,
1076
] |
def METHOD_NAME(key: str) -> Any: ... | [
198
] |
def METHOD_NAME(self):
    struct = []
    for attr in self.attributes:
        struct.append((attr.name, *attr.get_numpy_type()))
    return np.dtype(struct) | [
567,
2028,
1249
] |
def METHOD_NAME(self):
    """
    Test the value of the atomic mass unit.
    """
    amu = 1.660538921e-27
    assert round(abs(constants.amu / amu - 1.0), 6) == 0, "{0} != {1}".format(constants.amu, amu) | [
9,
956,
2858,
805
] |
def METHOD_NAME(self, event=None):
    SearchDialogBase.METHOD_NAME(self, event)
    self.text.tag_remove("hit", "1.0", "end") | [
1462
] |
def METHOD_NAME(self):
    if self.options.get_safe("shared") or self.options.header_only:
        self.options.rm_safe("fPIC")
    if self.options.header_only:
        self.options.rm_safe("shared") | [
111
] |
def METHOD_NAME(self, count: int) -> tuple[list[bytes], bool]:
    """ Get transactions or blocks from the newest to the oldest

    :param count: Number of transactions or blocks to be returned
    :return: List of tx hashes and a boolean indicating if there are more txs
    """
    raise NotImplementedError | [
19,
4017
] |
def METHOD_NAME(
    self,
    epoch: int,
    shuffle: bool = None,
) -> Iterator[Tuple[List[str], Dict[str, torch.Tensor]]]:
    per_sample_loader = self.per_sample_iter_factory.METHOD_NAME(epoch, shuffle)

    if shuffle is None:
        shuffle = self.shuffle
    state = np.random.RandomState(epoch + self.seed)

    # NOTE(kamo):
    # This iterator supports multiple chunk lengths and
    # keeps chunks of each length here until the specified number is collected.
    cache_chunks_dict = {}
    cache_id_list_dict = {}
    for ids, batch in per_sample_loader:
        # Must be a per-sample loader
        assert len(ids) == 1, f"Must be per-sample-loader: {len(ids)}"
        assert all(len(x) == 1 for x in batch.values())

        # Get keys of sequence data
        sequence_keys = []
        for key in batch:
            if key + "_lengths" in batch:
                sequence_keys.append(key)
        # Remove lengths data and get the first sample
        batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
        id_ = ids[0]

        for key in sequence_keys:
            if len(batch[key]) != len(batch[sequence_keys[0]]):
                raise RuntimeError(
                    f"All sequences must have the same length: "
                    f"{len(batch[key])} != {len(batch[sequence_keys[0]])}"
                )
        L = len(batch[sequence_keys[0]])
        # Select chunk length
        chunk_lengths = [lg for lg in self.chunk_lengths if lg < L]
        if len(chunk_lengths) == 0:
            logging.warning(
                f"The length of '{id_}' is {L}, but it is shorter than "
                f"any candidates of chunk-length: {self.chunk_lengths}"
            )
            continue

        W = int(state.choice(chunk_lengths, 1))
        cache_id_list = cache_id_list_dict.setdefault(W, [])
        cache_chunks = cache_chunks_dict.setdefault(W, {})

        # Shift width to the next chunk
        S = int(W * self.chunk_shift_ratio)
        # Number of chunks
        N = (L - W) // S + 1
        if shuffle:
            Z = state.randint(0, (L - W) % S + 1)
        else:
            Z = 0

        # Split a sequence into chunks.
        # Note that the marginal frames divided by chunk length are discarded
        for k, v in batch.items():
            if k not in cache_chunks:
                cache_chunks[k] = []
            if k in sequence_keys:
                # Shift chunks with overlapped length for data augmentation
                cache_chunks[k] += [v[Z + i * S : Z + i * S + W] for i in range(N)]
            else:
                # If not sequence, use whole data instead of chunk
                cache_chunks[k] += [v for _ in range(N)]
        cache_id_list += [id_ for _ in range(N)]

        if len(cache_id_list) > self.num_cache_chunks:
            cache_id_list, cache_chunks = yield from self._generate_mini_batches(
                cache_id_list,
                cache_chunks,
                shuffle,
                state,
            )
        cache_id_list_dict[W] = cache_id_list
        cache_chunks_dict[W] = cache_chunks
    else:
        for W in cache_id_list_dict:
            cache_id_list = cache_id_list_dict.setdefault(W, [])
            cache_chunks = cache_chunks_dict.setdefault(W, {})
            yield from self._generate_mini_batches(
                cache_id_list,
                cache_chunks,
                shuffle,
                state,
            ) | [
56,
84
] |
def METHOD_NAME(
    self: Display | resource.Resource, deviceid: int, property: int, type: int, offset: int, length: int, delete: bool = False
) -> XIGetProperty: ... | [
19,
398,
1042
] |
def METHOD_NAME(self, *args, **kw):
    """Set blob expiration

    See `metadata.MetaDB.expire` for more details
    """
    self.metadb.METHOD_NAME(*args, **kw) | [
7740
] |
def METHOD_NAME(self):
    client = Client()
    url = reverse("oidc_provider_token_endpoint")
    request = dict(
        client_assertion=self.ca_jws,
        client_assertion_type="urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
        grant_type="authorization_code",
        code="code",
        code_verifier="code_verifier"
    )
    res = client.post(url, request)
    self.assertTrue(res.status_code == 400)
    self.assertTrue("invalid_request" == res.json().get("error")) | [
9,
466,
841,
654,
340,
147
] |
def METHOD_NAME(self, other: Self) -> bool: ... | [
1782,
47
] |
def METHOD_NAME(cls, x):
    "A class method defined in A." | [
385,
11486
] |
def METHOD_NAME(self, subnet_id):
    """Delete data from etcd for a subnet that is no longer wanted."""
    LOG.info("Deleting subnet %s", subnet_id)

    # Delete the etcd key for this subnet.
    key = datamodel_v2.key_for_subnet(subnet_id, self.region_string)
    if not self.delete_from_etcd(key):
        # Already gone, treat as success.
        LOG.debug("Key %s, which we were deleting, disappeared", key) | [
1782,
1108
] |
def METHOD_NAME(db, client, username, password, project_id):
    client.login(username=username, password=password)

    url = reverse(urlnames['list'], args=[project_id])
    response = client.get(url)

    if project_id in view_membership_permission_map.get(username, []):
        assert response.status_code == 200
        if username == 'user':
            assert sorted([item['id'] for item in response.json()]) == []
        else:
            values_list = Membership.objects.filter(project_id=project_id) \
                .order_by('id').values_list('id', flat=True)
            assert sorted([item['id'] for item in response.json()]) == list(values_list)
    else:
        assert response.status_code == 404 | [
9,
245
] |
def METHOD_NAME(self, thread):
    return reverse("misago:api:thread-post-list", kwargs={"thread_pk": thread.pk}) | [
19,
600,
10177,
58,
274
] |
def METHOD_NAME(self):
    """Return the pose's orientation.

    Returns
    -------
    float
        The angle of the pose

    """
    return self[2] | [
5354
] |
def METHOD_NAME(K):
    setattr(K, "deprecated", True)
    return K | [
291
] |
def METHOD_NAME(self) -> float:
    """
    Return the calculated score of the hit record; for now, the score is equal to the distance.

    :return float:
        The score of the hit record.
    """
    return self._hit.METHOD_NAME | [
747
] |
def METHOD_NAME():
    try:
        METHOD_NAME = request.json.get('username') or request.json['email']
        password = request.json['password']
    except KeyError:
        raise ApiError("must supply 'username' and 'password'", 401)
    if not password:
        raise ApiError('password not allowed to be empty', 401)

    try:
        if '\\' in METHOD_NAME:
            domain, username = METHOD_NAME.split('\\')
        else:
            username, domain = METHOD_NAME.split('@')
    except ValueError:
        if current_app.config['LDAP_DEFAULT_DOMAIN']:
            username = METHOD_NAME
            domain = current_app.config['LDAP_DEFAULT_DOMAIN']
        else:
            raise ApiError('expected username with domain', 401)

    # Validate LDAP domain
    if (domain not in current_app.config['ALLOWED_EMAIL_DOMAINS']
            and domain not in current_app.config['LDAP_DOMAINS']):
        raise ApiError('unauthorized domain', 403)

    # LDAP certificate settings
    if current_app.config['LDAP_CACERT']:
        ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
        ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, current_app.config['LDAP_CACERT'])

    # Allow LDAP server to use a self-signed certificate
    if current_app.config['LDAP_ALLOW_SELF_SIGNED_CERT']:
        ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)

    # Set LDAP timeout
    if current_app.config['LDAP_TIMEOUT']:
        ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, current_app.config['LDAP_TIMEOUT'])

    # Set custom config options
    for k, v in current_app.config['LDAP_CONFIG'].items():
        ldap.set_option(getattr(ldap, k), v)

    # Initialise LDAP connection
    try:
        trace_level = 2 if current_app.debug else 0  # XXX - do not set in production environments
        ldap_connection = ldap.initialize(current_app.config['LDAP_URL'], trace_level=trace_level)
    except Exception as e:
        raise ApiError(str(e), 500)

    # Bind user credentials
    ldap_bind_username = current_app.config['LDAP_BIND_USERNAME']
    ldap_bind_password = current_app.config['LDAP_BIND_PASSWORD']
    if ldap_bind_username:
        try:
            ldap_connection.simple_bind_s(ldap_bind_username, ldap_bind_password)
        except ldap.INVALID_CREDENTIALS:
            raise ApiError('invalid ldap bind credentials', 500)

    # Set default base DN for user and group search
    base_dn = current_app.config['LDAP_BASEDN']

    # If a user search filter exists:
    #   Search for the user with the provided user search filter in the current domain.
    #   If exactly one user is found:
    #     Set the DN to the one found, and use the email retrieved from AD.
    #   If more than one user is found:
    #     The search query is badly defined.
    # Else:
    #   Set the DN to the one found in the LDAP_DOMAINS variable.
    user_filter = current_app.config['LDAP_USER_FILTER']
    user_base_dn = current_app.config['LDAP_USER_BASEDN']
    user_attrs = [
        current_app.config['LDAP_USER_NAME_ATTR'],
        current_app.config['LDAP_USER_EMAIL_ATTR']
    ]
    if user_filter:
        result = [r for r in ldap_connection.search_s(
            base=user_base_dn or base_dn,
            scope=ldap.SCOPE_SUBTREE,
            filterstr=user_filter.format(username=username),
            attrlist=user_attrs
        ) if None not in r]
        if len(result) > 1:
            raise ApiError(f'invalid search query for domain "{domain}"', 500)
        elif len(result) == 0:
            raise ApiError('invalid username or password', 401)
        user_dn = result[0][0]
        name = result[0][1][current_app.config['LDAP_USER_NAME_ATTR']][0].decode('utf-8', 'ignore')
        email = result[0][1][current_app.config['LDAP_USER_EMAIL_ATTR']][0].decode('utf-8', 'ignore')
        email_verified = bool(email)
    else:
        if '%' in current_app.config['LDAP_DOMAINS'][domain]:
            user_dn = current_app.config['LDAP_DOMAINS'][domain] % username
        else:
            user_dn = current_app.config['LDAP_DOMAINS'][domain].format(username)
        name = username
        email = f'{username}@{domain}'
        email_verified = False

    # Authenticate the user logging in
    try:
        ldap_connection.simple_bind_s(user_dn, password)
    except ldap.INVALID_CREDENTIALS:
        raise ApiError('invalid username or password', 401)

    METHOD_NAME = email or username
    user = User.find_by_username(username=METHOD_NAME)
    if not user:
        user = User(name=name, METHOD_NAME=METHOD_NAME, password='', email=email,
                    roles=current_app.config['USER_ROLES'], text='LDAP user', email_verified=email_verified)
        user = user.create()
    else:
        user.update(METHOD_NAME=METHOD_NAME, email=email, email_verified=email_verified)

    if ldap_bind_username:
        try:
            ldap_connection.simple_bind_s(ldap_bind_username, ldap_bind_password)
        except ldap.INVALID_CREDENTIALS:
            raise ApiError('invalid ldap bind credentials', 500)

    # Assign customers & update last login time
    group_filter = current_app.config['LDAP_GROUP_FILTER']
    group_base_dn = current_app.config['LDAP_GROUP_BASEDN']
    groups = list()
    if group_filter:
        result = ldap_connection.search_s(
            base=group_base_dn or base_dn,
            scope=ldap.SCOPE_SUBTREE,
            filterstr=group_filter.format(username=username, email=email, userdn=user_dn),
            attrlist=[current_app.config['LDAP_GROUP_NAME_ATTR']]
        )
        for group_dn, group_attrs in result:
            if current_app.config['LDAP_GROUP_NAME_ATTR'] in group_attrs.keys():
                groups.extend([g.decode('utf-8', 'ignore') for g in group_attrs[current_app.config['LDAP_GROUP_NAME_ATTR']]])
            else:
                groups.append(group_dn)

    # Check user is active
    if user.status != 'active':
        raise ApiError(f'User {METHOD_NAME} not active', 403)
    if not_authorized('ALLOWED_LDAP_GROUPS', groups):
        raise ApiError(f'User {METHOD_NAME} is not authorized', 403)
    user.update_last_login()

    scopes = Permission.lookup(METHOD_NAME=METHOD_NAME, roles=user.roles + groups)
    customers = get_customers(METHOD_NAME=METHOD_NAME, groups=groups + ([user.domain] if user.domain else []))

    auth_audit_trail.send(current_app._get_current_object(), event='basic-ldap-login', message='user login via LDAP',
                          user=METHOD_NAME, customers=customers, scopes=scopes, roles=user.roles, groups=groups,
                          resource_id=user.id, type='user', request=request)

    # Generate token
    token = create_token(user_id=user.id, name=user.name, METHOD_NAME=user.email, provider='ldap',
                         customers=customers, scopes=scopes, roles=user.roles, groups=groups,
                         email=user.email, email_verified=user.email_verified)
    return jsonify(token=token.tokenize()) | [
273
] |
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_request(
            resource_group_name=resource_group_name,
            job_name=job_name,
            subscription_id=self._config.subscription_id,
            filter=filter,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request | [
123,
377
] |
def METHOD_NAME(self, t):
    return t.size()[1] * t.size()[2] * t.size()[3] | [
768,
1318
] |
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.name = AAZStrArg(
        options=["-n", "--name"],
        help="The name of the public IP address.",
        required=True,
        id_part="name",
    )
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    return cls._args_schema | [
56,
134,
135
] |
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    """
    return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(self):
    res = self.call(self.filename, self.blank_file)[0]
    self.failIf(res)
    original_id3 = ID3(self.filename)
    copied_id3 = ID3(self.blank_file)
    self.assertEqual(copied_id3.version, (2, 3, 0))

    # XXX: the v2.3 frame contains duplicate TPE1 frames which get merged
    # when saving to v2.3 again
    frame = copied_id3["TPE1"]
    frame.text = frame.text[0].split("/")

    self.failUnlessEqual(original_id3, copied_id3)
    for key in original_id3:
        # Go through every tag in the original file, and check that it's
        # present and correct in the copy
        self.failUnless(key in copied_id3)
        self.failUnlessEqual(copied_id3[key], original_id3[key]) | [
9,
215
] |
def METHOD_NAME(self):
    """
    Generates a random color in hex using the format #ABC123
    """
    # Zero-pad each channel to two hex digits; hex(...).lstrip("0x") would
    # drop leading zeros (and hex digits) and produce malformed colors.
    red = "{:02x}".format(random.randint(0, 255))
    green = "{:02x}".format(random.randint(0, 255))
    blue = "{:02x}".format(random.randint(0, 255))
    return "#" + red + green + blue | [
236,
696,
36
] |
def METHOD_NAME(self, **attributes):
    attributes.setdefault('name', 'thing')
    return type('ThingResourceType', (base.ResourceType,), attributes) | [
93,
191,
44,
9260
] |
def METHOD_NAME(self):
    gt_instances_data = dict(
        bboxes=torch.rand(4, 4),
        labels=torch.rand(4),
    )
    kie_data_sample = KIEDataSample()
    gt_instances = InstanceData(data=gt_instances_data)

    kie_data_sample.gt_instances = gt_instances
    assert 'gt_instances' in kie_data_sample
    del kie_data_sample.gt_instances
    assert 'gt_instances' not in kie_data_sample

    kie_data_sample.pred_instances = gt_instances
    assert 'pred_instances' in kie_data_sample
    del kie_data_sample.pred_instances
    assert 'pred_instances' not in kie_data_sample | [
9,
9989
] |
def METHOD_NAME(cert_path, issuer_chain_path):
    """
    Verify a certificate using OCSP and CRL

    :param cert_path:
    :param issuer_chain_path:
    :return: True if valid, False otherwise
    """
    with open(cert_path, "rt") as c:
        try:
            cert = parse_certificate(c.read())
        except ValueError as e:
            current_app.logger.error(e)
            return None

    # OCSP is our main source of truth, in a lot of cases CRLs
    # have been deprecated and are no longer updated
    verify_result = None
    ocsp_err = 0
    crl_err = 0
    try:
        verify_result = ocsp_verify(cert, cert_path, issuer_chain_path)
    except Exception as e:
        capture_exception()
        current_app.logger.warning(e)
        ocsp_err = 1

    if verify_result is None:
        try:
            verify_result = crl_verify(cert, cert_path)
        except Exception as e:
            capture_exception()
            current_app.logger.warning(e)
            crl_err = 1

    if verify_result is None:
        current_app.logger.warning("Failed to verify {}".format(cert.serial_number))

    return verify_result, ocsp_err, crl_err | [
1162
] |
async def METHOD_NAME(self):
    self.settings = await self.config.all() | [
6578,
557
] |
def METHOD_NAME(self): | [
9,
126,
6412
] |
def METHOD_NAME():
    print('usage: %s [--verify]' % sys.argv[0], file=sys.stderr)
    sys.exit(2) | [
38,
558,
61,
538
] |
def METHOD_NAME():
    try:
        args = parse_commandline()
        args.cmd(vars(args))
        return os.EX_OK
    except ExternalTestNotFound as exception:
        print(f"Error: {exception}", file=sys.stderr)
        return os.EX_NOINPUT
    except RuntimeError as exception:
        print(f"Error: {exception}", file=sys.stderr)
        return 1 | [
57
] |
def METHOD_NAME(N, arr):
    arr[:int(N)] -= 1 | [
14086,
5367
] |
def METHOD_NAME(ptype, value):
    # ensure types
    retval = None
    if ptype == "double":
        try:
            retval = float(value)
        except ValueError:
            raise RuntimeError("Parameter of type double with invalid value \"%s\"" % str(value))
    elif ptype == "int":
        try:
            retval = int(value)
        except ValueError:
            raise RuntimeError("Parameter of type int with invalid value \"%s\"" % str(value))
    elif ptype == "bool":
        if value is True:
            retval = value
        elif value is False:
            retval = value
        elif value == "true":
            retval = True
        elif value == "True":
            retval = True
        elif value == "TRUE":
            retval = True
        elif value == "false":
            retval = False
        elif value == "False":
            retval = False
        elif value == "FALSE":
            retval = False
        else:
            raise RuntimeError("Parameter of type bool with invalid value \"%s\"" % str(value))
    elif ptype == "string":
        try:
            assert type(value) is str
        except AssertionError:
            raise RuntimeError("Parameter of type string with invalid value \"%s\"" % str(value))
        else:
            retval = str(value)
    return retval | [
1205,
511,
280,
144
] |
def METHOD_NAME(self, p, k, m, zvec, nzvec):
    """
    Count the bad-typeII solutions of `Q(x) = m` (mod `p^k`) satisfying
    the additional congruence conditions described in
    :meth:`QuadraticForm.count_congruence_solutions_as_vector`.

    INPUT:

    - ``p`` -- prime number > 0

    - ``k`` -- an integer > 0

    - ``m`` -- an integer (depending only on mod `p^k`)

    - ``zvec``, ``nzvec`` -- lists of integers up to dim(`Q`)

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,2,3])
        sage: Q.count_congruence_solutions__bad_type_II(3, 1, 0, None, None)
        2
    """
    return CountAllLocalTypesNaive(self, p, k, m, zvec, nzvec)[5] | [
29,
15451,
4766,
1068,
44,
2490
] |
def METHOD_NAME(session, filter_data=None):
    """Generate query based on filter data

    :param filter_data: Filter data
    :type filter_data: `dict`
    :return:
    :rtype:
    """
    if filter_data is None:
        filter_data = {}
    query = session.query(Session)
    # it doesn't make sense to search in a boolean column :P
    if filter_data.get("active", None):
        if isinstance(filter_data.get("active"), list):
            filter_data["active"] = filter_data["active"][0]
        query = query.filter_by(active=str2bool(filter_data["active"]))
    return query.order_by(Session.id) | [
240,
567,
539
] |
def METHOD_NAME(self):
    pass | [
72,
710
] |
def METHOD_NAME(self, bitstream):
    polarity = 1
    for has_edge in bitstream:
        if has_edge:
            polarity *= -1
        yield polarity | [
3902
] |
def METHOD_NAME(self, *k, **kw): | [
128
] |
def METHOD_NAME(self, controller_name, model_name, model_uuid):
    self.__controller_name = controller_name
    self.__model_name = model_name
    self.__model_uuid = model_uuid | [
0,
578
] |
def METHOD_NAME(self):
    """
    Return ``False`` because ``self`` is not finite.

    EXAMPLES::

        sage: CartanType(['A', NN]).is_finite()
        False
        sage: CartanType(['A', ZZ]).is_finite()
        False
    """
    return False | [
137,
4516
] |
def METHOD_NAME(self):
    expected_message = (
        f"ABC is an invalid project type option value, the value should be one "
        f"of the following {[ptype.value for ptype in ProjectTypes]} "
    )
    context_map = {"project_type": "ABC"}
    context = SamCliContext(
        command_options_map=context_map,
        sam_command_name="",
        is_guided=False,
        is_debugging=False,
        profile={},
        region="",
    )
    iac_factory = IaCFactory(context)
    with self.assertRaises(InvalidProjectTypeException) as ctx:
        iac_factory.get_iac()
    self.assertEqual(str(ctx.exception), expected_message) | [
9,
532,
155,
44
] |
def METHOD_NAME(yamlFile):
    # pretty print scenario name
    scenarioName = os.path.splitext(os.path.basename(yamlFile))[0]
    print("Testing Scenario " + scenarioName)
    yamlFile = os.path.abspath(yamlFile)
    # parse what configuration we expect to be optimal
    expected = getStringFromFile(yamlFile, '.* [eE]xpect.*({.*})')
    # build and execute command for simulation
    # append tuningArg list (if nothing is set this is empty)
    command = [simulation, "--log-level", logLevel, "--no-end-config", "--yaml-filename", yamlFile] + tuningArg
    print(" ".join(command))
    outputFile = os.path.join(outputDir, scenarioName + '.out')
    with open(outputFile, 'w+') as outputLocation:
        subprocess.call(command, stdout=outputLocation, shell=False)
    # scenario might not include expectation
    if expected:
        # parse time of expected config
        expectedTime = int(getStringFromFile(outputFile, expected + ".* Reduced value: ([0-9]+)"))
    else:
        expectedTime = -1
    # parse time of selected config
    selected = getStringFromFile(outputFile, '.* Selected Configuration +({.*})')
    selectedTime = int(getStringFromFile(outputFile, selected + ".* Reduced value: ([0-9]+)"))
    # print result
    if not expected:
        print(RED + "Scenario did not contain expected configuration!" + ENDCOLOR)
        expected = "{ }"
    elif expected == selected:
        print(GREEN + "Expected Configuration selected!" + ENDCOLOR)
    elif expectedTime > selectedTime:
        print(YELLOW + "Selected configuration faster than expected! Estimates may be invalid for this hardware!" + ENDCOLOR)
    elif (expectedTime - selectedTime) < (expectedTime / 100):
        print(YELLOW + "Selected configuration less than 1% slower than expected one!" + ENDCOLOR)
    else:
        print(RED + "Inefficient configuration selected!" + ENDCOLOR)
    print("Selected: " + selected + " : " + str(selectedTime))
    print("Expected: " + expected + " : " + str(expectedTime))
    print() | [
9,
7061
] |
def METHOD_NAME(self, project):
    select_snapshots(project) | [
9,
1472,
5570
] |
def METHOD_NAME(self, context):
    for cats in _node_categories.values():
        cats[1](self, context) | [
1100,
1716,
2065,
2470
] |
def METHOD_NAME(self):
    self.assertRaises(ValueError, uvindex.UVIndex, -1234567,
                      self.__test_location,
                      self.__test_uv_intensity,
                      self.__test_reception_time) | [
9,
176,
216,
1646,
272,
104,
137
] |
def METHOD_NAME(func: ProfileFunction | None) -> None: ... | [
7054
] |
def METHOD_NAME(self, src_path: Text, dst_path: Text, create: bool = ...) -> None: ... | [
-1
] |
def METHOD_NAME(self, now):
    if now > self.last_tick > 0:
        elapsed = now - self.last_tick
        gameobject = self.gameobject_instance
        if gameobject:
            if not gameobject.is_spawned and gameobject.initialized:
                self._update_respawn(elapsed)
        else:
            self._update_respawn(elapsed)
    self.last_tick = now | [
86
] |
def METHOD_NAME():
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(
        (radar,),
        (3, 9, 10),
        ((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
        roi_func="constant",
        constant_roi=30.0,
    )
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE) | [
9,
422,
24,
753,
928,
65
] |
def METHOD_NAME(
    a,
    axis=None,
    dtype=UNSUPPORTED,
    out=UNSUPPORTED,
    keepdims=False,
    *,
    where=UNSUPPORTED,
):
    return mean(a, axis=axis, keepdims=keepdims) | [
7955,
7956,
2581,
314
] |
def METHOD_NAME(img, tx, ty):
    if tx == args.divide * width - 1 or ty == args.divide * width - 1 or tx == 0 or ty == 0:
        return img
    img[tx, ty] = (img[tx, ty] + img[tx + 1, ty] + img[tx, ty + 1] + img[tx - 1, ty] + img[tx, ty - 1] + img[tx + 1, ty - 1] + img[tx - 1, ty + 1] + img[tx - 1, ty - 1] + img[tx + 1, ty + 1]) / 9
    return img | [
3772,
9791
] |
def METHOD_NAME(app):
    with app.app_context():
        db = get_db()

    test_entities = {
        "users": [
            make_user(1, "user1", "wallet1"),
            make_user(2, "user2", "wallet2"),
            make_user(3, "user3", "wallet3"),
            make_user(4, "user4", "wallet4"),
            make_user(5, "user5", "wallet5"),
            make_user(
                6,
                "user6",
                "wallet6",
                profile_picture="Qm0123456789abcdef0123456789abcdef0123456789ab",
            ),
            make_user(
                7,
                "user7",
                "wallet7",
                profile_picture_sizes="Qm0123456789abcdef0123456789abcdef0123456789ab",
            ),
            make_user(
                8,
                "user8",
                "wallet8",
                cover_photo="Qm0123456789abcdef0123456789abcdef0123456789ab",
            ),
            make_user(
                9,
                "user9",
                "wallet9",
                cover_photo_sizes="Qm0123456789abcdef0123456789abcdef0123456789ab",
            ),
            make_user(
                10,
                "user10",
                "wallet10",
                profile_picture="Qm0123456789abcdef0123456789abcdef0123456789ab",
                cover_photo="Qm0123456789abcdef0123456789abcdef0123456789cd",
            ),
        ],
        "follows": [
            make_follow(2, 1),
            make_follow(3, 1),
            make_follow(5, 1),
            make_follow(1, 5),
            make_follow(2, 6),
            make_follow(3, 7),
            make_follow(4, 8),
            make_follow(5, 9),
            make_follow(10, 4),
        ],
    }
    populate_mock_db(db, test_entities)

    with db.scoped_session() as session:
        user_signals = _get_user_signals(session, "user1")
        assert user_signals["num_followers"] == 3
        assert user_signals["num_following"] == 1
        assert user_signals["has_profile_picture"] == False
        assert user_signals["has_cover_photo"] == False
        assert user_signals["wallet"] == "wallet1"

        user_signals = _get_user_signals(session, "user6")
        assert user_signals["num_followers"] == 1
        assert user_signals["num_following"] == 0
        assert user_signals["has_profile_picture"] == True
        assert user_signals["has_cover_photo"] == False
        assert user_signals["wallet"] == "wallet6"

        user_signals = _get_user_signals(session, "user7")
        assert user_signals["num_followers"] == 1
        assert user_signals["num_following"] == 0
        assert user_signals["has_profile_picture"] == True
        assert user_signals["has_cover_photo"] == False
        assert user_signals["wallet"] == "wallet7"

        user_signals = _get_user_signals(session, "user8")
        assert user_signals["num_followers"] == 1
        assert user_signals["num_following"] == 0
        assert user_signals["has_profile_picture"] == False
        assert user_signals["has_cover_photo"] == True
        assert user_signals["wallet"] == "wallet8"

        user_signals = _get_user_signals(session, "user9")
        assert user_signals["num_followers"] == 1
        assert user_signals["num_following"] == 0
        assert user_signals["has_profile_picture"] == False
        assert user_signals["has_cover_photo"] == True
        assert user_signals["wallet"] == "wallet9"

        user_signals = _get_user_signals(session, "user10")
        assert user_signals["num_followers"] == 0
        assert user_signals["num_following"] == 1
        assert user_signals["has_profile_picture"] == True
        assert user_signals["has_cover_photo"] == True
        assert user_signals["wallet"] == "wallet10" | [
9,
19,
21,
7958
] |
def METHOD_NAME(tools_handler, mock_session):
    tool_used_subquery = MagicMock()
    agent_count_subquery = MagicMock()
    total_usage_subquery = MagicMock()

    tool_used_subquery.c.tool_name = 'Tool1'
    tool_used_subquery.c.agent_id = 1
    agent_count_subquery.c.tool_name = 'Tool1'
    agent_count_subquery.c.unique_agents = 1
    total_usage_subquery.c.tool_name = 'Tool1'
    total_usage_subquery.c.total_usage = 5

    tools_handler.get_tool_and_toolkit = MagicMock()
    tools_handler.get_tool_and_toolkit.return_value = {'Tool1': 'Toolkit1'}

    mock_session.query().filter_by().subquery.return_value = tool_used_subquery
    mock_session.query().group_by().subquery.return_value = agent_count_subquery
    mock_session.query().group_by().subquery.return_value = total_usage_subquery

    result_obj = MagicMock()
    result_obj.tool_name = 'Tool1'
    result_obj.unique_agents = 1
    result_obj.total_usage = 5
    mock_session.query().join().all.return_value = [result_obj]

    result = tools_handler.calculate_tool_usage()
    assert isinstance(result, list)
    expected_output = [{'tool_name': 'Tool1', 'unique_agents': 1, 'total_usage': 5, 'toolkit': 'Toolkit1'}]
    assert result == expected_output | [
9,
1593,
3081,
558
] |
def METHOD_NAME(self):
    structure = self.project.create.structure.ase.bulk("Cu", cubic=True).repeat(5)
    self.job.potential = "2001--Mishin-Y--Cu-1--LAMMPS--ipr1"
    self.job.structure = structure
    self.job.write_structure(structure, "test.dump", ".")
    self.assertEqual(os.path.exists("test.dump"), True)
    self.assertEqual(self.job._number_of_structures(), 2) | [
9,
77,
1011
] |
def METHOD_NAME():
    assert str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "EMode.EFirstMode"
    assert str(m.ClassWithUnscopedEnum.EFirstMode) == "EMode.EFirstMode"

    f = m.ClassWithUnscopedEnum.test_function
    first = m.ClassWithUnscopedEnum.EFirstMode
    second = m.ClassWithUnscopedEnum.ESecondMode

    assert f(first) == 1
    assert f(first) == f(first)
    assert not f(first) != f(first)

    assert f(first) != f(second)
    assert not f(first) == f(second)

    assert f(first) == int(f(first))
    assert not f(first) != int(f(first))

    assert f(first) != int(f(second))
    assert not f(first) == int(f(second))

    # noinspection PyDictCreation
    x = {f(first): 1, f(second): 2}
    x[f(first)] = 3
    x[f(second)] = 4

    # Hashing test
    assert str(x) == "{EMode.EFirstMode: 3, EMode.ESecondMode: 4}" | [
9,
2273,
1719
] |
def METHOD_NAME(self):
    for port in self.all_ports:
        bl_pin = self.cell_inst[0].get_pin(self.cell.get_bl_name(port))
        self.add_layout_pin(text="bl_{0}_{1}".format(port, 0),
                            layer=bl_pin.layer,
                            offset=bl_pin.ll().scale(1, 0),
                            width=bl_pin.width(),
                            height=self.height)
        bl_pin = self.cell_inst[0].get_pin(self.cell.get_br_name(port))
        self.add_layout_pin(text="br_{0}_{1}".format(port, 0),
                            layer=bl_pin.layer,
                            offset=bl_pin.ll().scale(1, 0),
                            width=bl_pin.width(),
                            height=self.height)

    for port in self.all_ports:
        for row in range(self.total_size):
            wl_pin = self.cell_inst[row].get_pin(self.cell.get_wl_name(port))
            self.add_layout_pin(text="wl_{0}_{1}".format(port, row),
                                layer=wl_pin.layer,
                                offset=wl_pin.ll().scale(0, 1),
                                width=self.width,
                                height=wl_pin.height()) | [
238,
571,
3783
] |
def METHOD_NAME(self, lst):
    """
    insert self between each element of the list `lst`

    Parameter
    ---------
    lst : list
        the list to insert self between its elements

    Example
    -------
    >>> a = [
    ...     CMacro("m"),
    ...     CStringExpression(LiteralString("the macro is: ")),
    ...     LiteralString("."),
    ... ]
    >>> b = CStringExpression("?").join(a)
    ...
    ... # is the same as:
    ...
    >>> b = CStringExpression(
    ...     CMacro("m"),
    ...     CStringExpression("?"),
    ...     CStringExpression(LiteralString("the macro is: ")),
    ...     CStringExpression("?"),
    ...     LiteralString("."),
    ... )
    """
    result = CStringExpression()
    if not lst:
        return result
    result += lst[0]
    for elm in lst[1:]:
        result += self
        result += elm
    return result | [
2831
] |
def METHOD_NAME(self):
    """Checks the duration when mixing an echo longer than the capture."""
    mix_filepath = input_mixer.ApmInputMixer.Mix(
        self._tmp_path, self._audio_tracks['capture']['filepath'],
        self._audio_tracks['longer']['filepath'])
    self.assertTrue(os.path.exists(mix_filepath))

    mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
    self.assertEqual(
        self._audio_tracks['capture']['num_samples'],
        signal_processing.SignalProcessingUtils.CountSamples(mix)) | [
9,
250,
2374,
2205,
41,
5062,
1605
] |
def METHOD_NAME(flow_with_http, test_img_1, test_img_2):
    with flow_with_http:
        time.sleep(0.5)

        r = requests.post(
            f'http://localhost:{flow_with_http.port}/index',
            json={'data': {'docs': [{'uri': test_img_1}, {'uri': test_img_2}]}},
        )
        assert r.status_code == 200
        resp = r.json()
        assert 'data' in resp
        assert len(resp['data']) == 2
        assert resp['data'][0]['uri'] == test_img_1 | [
9,
14,
724
] |
def METHOD_NAME(argv):
###############################################################################
    parser = argparse.ArgumentParser()

    setup_standard_logging_options(parser)

    parser.add_argument("caseroot", default=os.getcwd(), help="Case directory")

    args = parse_args_and_handle_standard_logging_options(argv, parser)

    return args.caseroot | [
214,
362
] |
def METHOD_NAME(self):
    payload = uuid4().hex
    response = "123"
    task_service = TestService("testservice")
    task_id = self.workflow.add_task(self.dialog_id, task_service, payload, 1)
    workflow_record, task = self.workflow.complete_task(task_id, response)

    self.assertTrue(isinstance(task, dict))
    self.assertTrue(isinstance(workflow_record, dict))
    self.assertEqual(task["service"].name, task_service.name)
    self.assertEqual(task["dialog"], workflow_record["dialog"].id) | [
9,
676,
758
] |
def METHOD_NAME(model_path):
    max_ckpt = 0
    for filename in os.listdir(model_path):
        if filename.endswith('.pdz'):
            files = filename[:-4]
            a1, a2, it = files.split("_")
            if int(it) > max_ckpt:
                max_ckpt = int(it)
    return max_ckpt | [
416,
232,
737
] |
def METHOD_NAME(sender, instance, created, **kwargs):  # pylint: disable=unused-argument
    """
    Add site configuration changes to site configuration history.

    Recording history on updates and deletes can be skipped by first setting
    the `skip_history_when_saving` attribute on the instance, e.g.:

        site_config.skip_history_when_saving = True
        site_config.save()

    Args:
        sender: sender of the signal i.e. SiteConfiguration model
        instance: SiteConfiguration instance associated with the current signal
        created (bool): True if a new record was created.
        **kwargs: extra key word arguments
    """
    # Skip writing history when asked by the caller. This skip feature only
    # works for non-creates.
    if created or not hasattr(instance, "skip_history_when_saving"):
        SiteConfigurationHistory.objects.create(
            site=instance.site,
            site_values=instance.site_values,
            enabled=instance.enabled,
        ) | [
86,
1055,
830,
351
] |
def METHOD_NAME():
    assert default_version in supported_versions | [
9,
235,
623,
616,
295
] |
def METHOD_NAME():
    assert DeviceState.Idle == playstate(PLAY_STATE_IDLE)
    assert DeviceState.Loading == playstate(PLAY_STATE_LOADING)
    assert DeviceState.Stopped == playstate(PLAY_STATE_STOPPED)
    assert DeviceState.Paused == playstate(PLAY_STATE_PAUSED)
    assert DeviceState.Playing == playstate(PLAY_STATE_PLAYING)
    assert DeviceState.Seeking == playstate(PLAY_STATE_FORWARD)
    assert DeviceState.Seeking == playstate(PLAY_STATE_BACKWARD) | [
9,
2448,
-1
] |
def METHOD_NAME(name: Optional[str] = None,
                registry_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                version: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryDataVersionResult:
    """
    Azure Resource Manager resource envelope.
    Azure REST API version: 2023-04-01.

    :param str name: Container name.
    :param str registry_name: Name of Azure Machine Learning registry. This is case-insensitive
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Version identifier.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['registryName'] = registry_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['version'] = version
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices:getRegistryDataVersion', __args__, opts=opts, typ=GetRegistryDataVersionResult).value

    return AwaitableGetRegistryDataVersionResult(
        data_version_base_properties=pulumi.get(__ret__, 'data_version_base_properties'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type')) | [
19,
510,
365,
281
] |
def METHOD_NAME(self, space):
    typ = Type(self)
    if typ == 'Object':
        return self
    elif typ == 'Boolean':  # Unsure ... todo check here
        return space.Boolean.create((self, ), space)
    elif typ == 'Number':  # ?
        return space.Number.create((self, ), space)
    elif typ == 'String':  # ?
        return space.String.create((self, ), space)
    elif typ == 'Null' or typ == 'Undefined':
        raise MakeError('TypeError',
                        'undefined or null can\'t be converted to object')
    else:
        raise RuntimeError() | [
24,
279
] |
def METHOD_NAME(enqueue_mask, device):
    if enqueue_mask is None:
        return enqueue_mask
    enqueue_mask = c_f.to_device(enqueue_mask, device=device)
    return torch.cat([enqueue_mask, all_gather(enqueue_mask)], dim=0) | [
1432,
419,
361
] |
def METHOD_NAME(model):
    res = model.metadata.result_info[2]
    assert res.name == "acceleration"
    assert res.n_components == 3
    assert res.dimensionality == "vector"
    assert res.homogeneity == "acceleration"
    assert res.unit == "m/s^2"
    assert res.name == "acceleration"
    assert res.qualifiers == [] | [
9,
19,
1571,
15017,
280,
724
] |
def METHOD_NAME(is_file, artifact, artifact_type, ext, tmp_path, cm_fn_tuple):
    if is_file:
        artifact_representation = tmp_path / f"test.{ext}"
        artifact(artifact_representation)
    else:
        artifact_representation = artifact

    inferred_from_path, inferred_type, inferred_ext = _infer_artifact_type_and_ext(
        f"{ext}_{artifact_type.__name__}_artifact", artifact_representation, cm_fn_tuple
    )
    assert not is_file ^ inferred_from_path
    assert inferred_type is artifact_type
    assert inferred_ext == f".{ext}" | [
9,
1852,
1831,
44,
61,
1661
] |
def METHOD_NAME(self, device, dtypes, shape_strides):
    a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0])
    b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1])
    a = a_buffer.as_strided(*shape_strides[0])
    b = b_buffer.as_strided(*shape_strides[1])
    expected = ref_fn(a, b)
    result = jitted_fn(a, b)
    self.assertEqual(expected, result) | [
9,
75,
1249,
9007
] |
def METHOD_NAME(message):
    return result((message, ), (), 1) | [
883
] |
def METHOD_NAME(self):
    return self.block << 40 | self.txnum << 16 | self.outnum | [
24,
962
] |
def METHOD_NAME(self):
    db_path = os.path.join(TESTFN, 'test.msi')
    with self.assertRaises(msilib.MSIError) as cm:
        msilib.OpenDatabase(db_path, msilib.MSIDBOPEN_CREATE)
    self.assertEqual(str(cm.exception), 'create failed') | [
9,
463,
129,
1423
] |
def METHOD_NAME(self, rectangle):
    assert isinstance(rectangle, Rectangle)
    self._rectangle = rectangle  # Should make a copy?? | [
0,
5928
] |
def METHOD_NAME(self):
    previous_time = datetime.now()
    # An infinite loop
    while not self.exit_signal.is_set():
        # Sleep for 2 seconds
        time.sleep(2)

        # Measure the current time and subtract from the previous time to get real time interval
        current_time = datetime.now()
        dt = current_time - previous_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        previous_time = current_time

        # Get the time period
        try:
            # Division by zero
            self.ideal_cycle.add(ms / self.iteration_counter)
        except:
            self.ideal_cycle.add(0)

        # Reset the counter
        self.iteration_counter = 0 | [
599,
3831
] |
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
                project: Optional[pulumi.Input[Optional[str]]] = None,
                region: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegionDiskIamPolicyResult]:
    """
    Retrieves the current IAM policy data for regiondisk

    ## example

    ```python
    import pulumi
    import pulumi_gcp as gcp

    policy = gcp.compute.get_region_disk_iam_policy(project=google_compute_region_disk["regiondisk"]["project"],
        region=google_compute_region_disk["regiondisk"]["region"],
        name=google_compute_region_disk["regiondisk"]["name"])
    ```

    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    :param str region: A reference to the region where the disk resides. Used to find the parent resource to bind the IAM policy to. If not specified,
           the value will be parsed from the identifier of the parent resource. If no region is provided in the parent identifier and no
           region is specified, it is taken from the provider configuration.
    """
    ... | [
19,
1216,
113,
1694,
54,
146
] |
def METHOD_NAME():
    """ Main function """
    window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    window.setup()
    arcade.run() | [
57
] |
def METHOD_NAME(self):
    self._session.close() | [
1602
] |
def METHOD_NAME(self):
    pass | [
709,
710
] |
def METHOD_NAME(self):
    # These environment variables will be propagated to all the
    # processes that nose.run creates.
    if self.no_data:
        print("Running tests in no-data mode...")
        import iris.config
        iris.config.TEST_DATA_DIR = None
    if self.create_missing:
        os.environ["IRIS_TEST_CREATE_MISSING"] = "true"

    tests = []
    if self.unit_tests:
        tests.append("unit")
    if self.default_tests:
        tests.append("default")
    if self.integration_tests:
        tests.append("integration")
    if not tests:
        tests.append("default")
    print("Running test suite(s): {}".format(", ".join(tests)))
    if self.stop:
        print("Stopping tests after the first error or failure")
    if self.num_processors is None:
        # Choose a magic number that works reasonably well for the default
        # number of processes.
        self.num_processors = (multiprocessing.cpu_count() + 1) // 4 + 1
    else:
        self.num_processors = int(self.num_processors) | [
977,
1881
] |