text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7) |
---|---|
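Each row below pairs a `text` cell holding a Python snippet (with its method name masked as `METHOD_NAME`) with an `ids` cell holding a short list of integers. A minimal sketch of reading such a two-column dataset with the `datasets` library is shown here; the dataset name `org/method-name-corpus` is a hypothetical placeholder, not the real identifier.

```python
# Minimal sketch, assuming the rows below come from a Hugging Face dataset
# with columns "text" (code snippet) and "ids" (list of integers).
# "org/method-name-corpus" is a placeholder name, not the actual dataset id.
from datasets import load_dataset

ds = load_dataset("org/method-name-corpus", split="train")
for row in ds.select(range(3)):
    snippet, ids = row["text"], row["ids"]
    print(len(snippet), ids)
```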
def METHOD_NAME(
ctx: click.Context,
log_level: str,
log_filepath: Optional[Path],
config_filepath: Optional[Path],
):
"""
A gateway between Liquidsoap and the API.
"""
setup_logger(log_level, log_filepath, rotate=False)
ctx.obj = App(config=Config(config_filepath)) | [
615
] |
def METHOD_NAME(self, **args: str) -> None: ... | [
0,
4610
] |
def METHOD_NAME(self):
if len(self.context_X) == 0 or len(self.question_X) == 0 or len(self.Y) == 0:
train_size = 0
test_size = 0
for train_dataset in self.train_file_name:
label = train_dataset.split(".")[0]
train_size += self.process_data_file(
os.path.join(self.data_path, train_dataset), label, True
)
for test_dataset in self.test_file_name:
label = test_dataset.split("-")[0]
test_size += self.process_data_file(
os.path.join(self.data_path, test_dataset), label, False
)
self.attributes["train_index_list"] = [i for i in range(train_size)]
self.attributes["test_index_list"] = [
i for i in range(train_size, train_size + test_size)
]
self.attributes["index_list"] = (
self.attributes["train_index_list"] + self.attributes["test_index_list"]
)
assert len(self.attributes["index_list"]) == len(
self.attributes["label_index_list"]
)
print(len(self.attributes["train_index_list"]))
print(len(self.attributes["test_index_list"])) | [
557,
365
] |
def METHOD_NAME(self):
self.assertEqual(0b1010, 10)
self.assertEqual(str(0b1010), '10')
self.assertEqual(-0b1010, -10)
self.assertEqual(str(-0b1010), '-10')
self.assertEqual(0, -0) | [
9,
808,
1479
] |
def METHOD_NAME(cobj, p):
"""Build Lennard Jones parameters and add callback for viscosity collision integral"""
ChapmanEnskogLennardJones.build_lennard_jones_parameters(cobj)
if not hasattr(cobj, "viscosity_collision_integral_callback"):
cobj.viscosity_collision_integral_callback = (
collision_integral_neufeld_callback
) | [
56,
386
] |
def METHOD_NAME(self) -> str:
"""
The provisioning state of the private dns zone group resource.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME(test_class):
np.random.seed(129873)
ref_class = BruteFlatNeighborSearch
n_particle = 1000
positions = np.random.rand(n_particle*3).reshape(3, n_particle)
ref_instance = ref_class(inter_dist_vert=0.3, inter_dist_horiz=0.3)
test_instance = test_class(inter_dist_vert=0.3, inter_dist_horiz=0.3)
ref_instance.rebuild(positions)
test_instance.rebuild(positions)
for particle_idx in np.random.choice(positions.shape[1], 100, replace=False):
ref_result, _ = ref_instance.find_neighbors_by_idx(particle_idx)
compare_results_by_idx(test_instance, particle_idx, ref_result) | [
9,
2301,
1874
] |
def METHOD_NAME(
api_client, data_fixture
):
data_fixture.create_password_provider()
response = api_client.get(reverse("api:settings:get"))
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert response_json["show_admin_signup_page"] is True
# create the admin user
response = api_client.post(
reverse("api:user:index"),
{
"name": "admin",
"email": "[email protected]",
"password": "admin1234",
"language": "en",
"authenticate": True,
},
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
token = response_json["access_token"]
response = api_client.get(
reverse("api:settings:get"),
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["show_admin_signup_page"] is False | [
9,
208,
865,
2870,
21,
137,
1168
] |
def METHOD_NAME(self):
return [
self.InputItem(
name=_("HTTP 请求方法"),
key="bk_http_request_method",
type="string",
schema=StringItemSchema(description=_("HTTP 请求方法")),
),
self.InputItem(
name=_("HTTP 请求目标地址"),
key="bk_http_request_url",
type="string",
schema=StringItemSchema(description=_("HTTP 请求目标地址")),
),
self.InputItem(
name=_("HTTP 请求 body"),
key="bk_http_request_body",
type="string",
schema=StringItemSchema(description=_("HTTP 请求 body")),
),
] | [
1461,
275
] |
def METHOD_NAME(in_iter: dict, search_keys: Any) -> Any:
out = in_iter
for search_key in search_keys:
out = out[search_key]
return out | [
1070,
43,
612,
219
] |
def METHOD_NAME(self, func):
"""Applies a filter on all ignored items, and remove all matches where func(first,second)
doesn't return True.
"""
filtered = IgnoreList()
for first, second in self:
if func(first, second):
filtered.ignore(first, second)
self._ignored = filtered._ignored
self._count = filtered._count | [
527
] |
def METHOD_NAME():
assert cirq.dot(2) == 2
assert cirq.dot(2.5, 2.5) == 6.25
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
assert cirq.dot(a) is not a
np.testing.assert_allclose(cirq.dot(a), a, atol=1e-8)
np.testing.assert_allclose(cirq.dot(a, b), np.dot(a, b), atol=1e-8)
np.testing.assert_allclose(cirq.dot(a, b, a), np.dot(np.dot(a, b), a), atol=1e-8)
# Invalid use
with pytest.raises(ValueError):
cirq.dot() | [
9,
1903
] |
def METHOD_NAME(self, get_service_config_value_mock):
"""Test if mconfig is used if control_proxy.yml is empty.
"""
get_service_config_value_mock.side_effect = ['', 0.5, ["Excluded"]]
sentry_mconfig = mconfigs_pb2.SharedSentryConfig()
sentry_mconfig.dsn_python = 'https://test.me'
sentry_mconfig.sample_rate = 1
sentry_mconfig.exclusion_patterns.append("another error")
self.assertEqual(
SharedSentryConfig('https://test.me', 1, ["another error"]),
_get_shared_sentry_config(sentry_mconfig),
) | [
9,
19,
1644,
1063,
200,
280,
10144
] |
def METHOD_NAME(
self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs
) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]:
"""
Transformation of an image with randomly sampled contrast.
:param x: Input samples.
:param y: Label of the samples `x`.
:return: Transformed samples and labels.
"""
import tensorflow as tf
contrast_factor_i = np.random.uniform(low=self.contrast_factor_range[0], high=self.contrast_factor_range[1])
if x.shape[3] == 3:
red, green, blue = x[:, :, :, 0], x[:, :, :, 1], x[:, :, :, 2]
x_gray = 0.2989 * red + 0.587 * green + 0.114 * blue
elif x.shape[3] == 1:
x_gray = x[:, :, :, 0]
else: # pragma: no cover
raise ValueError("Number of color channels is not 1 or 3 in input `x` of format HWC.")
mean = tf.math.reduce_mean(x_gray, axis=None)
return (
tf.clip_by_value(
contrast_factor_i * x + (1.0 - contrast_factor_i) * mean,
clip_value_min=self.clip_values[0],
clip_value_max=self.clip_values[1],
),
y,
) | [
1053
] |
def METHOD_NAME(self):
NAME = 'noVM' # no special meaning, any error is fine
MESSAGE = 'we want a specific message here'
res = response.error(NAME, MESSAGE)
template = errCode[NAME]
self.assertEqual(res["status"]["code"], template["status"]["code"])
self.assertEqual(res["status"]["message"], MESSAGE) | [
9,
168,
41,
277
] |
def METHOD_NAME():
from_pt = Point(20, 20, 40)
to_pt = Point(0, 0, 10)
arc = plan_arc(from_pt, to_pt, 50, extra_waypoints=[(5, 10), (20, 30)])
check_arc_basic([a[0] for a in arc], from_pt, to_pt)
assert arc[1][0].x == 5
assert arc[1][0].y == 10
assert arc[1][0].z == 50
assert arc[2][0].x == 20
assert arc[2][0].y == 30
assert arc[2][0].z == 50 | [
9,
3268,
41,
4599
] |
def METHOD_NAME(dataset: LegacyTUDataset, mode="concat", save=True):
"""
Description
-----------
Add node labels to graph node features dict
Parameters
----------
dataset : LegacyTUDataset
The dataset object
    mode : str, optional
How to add node label to the graph. Valid options are "add",
"replace" and "concat".
- "add": Directly add node_label to graph node feature dict.
- "concat": Concatenate "feat" and "node_label"
- "replace": Use "node_label" as "feat"
Default: :obj:`"concat"`
save : bool, optional
Save the result dataset.
Default: :obj:`True`
"""
# check if node label is not available
if (
not os.path.exists(dataset._file_path("node_labels"))
or len(dataset) == 0
):
logging.warning("No Node Label Data")
return dataset
# check if has cached value
check_mark_name = "node_label_as_feature"
check_mark_path = os.path.join(
dataset.save_path, "info_{}_{}.json".format(dataset.name, dataset.hash)
)
check_mark = _load_check_mark(check_mark_path)
if (
check_mark_name in check_mark
and check_mark[check_mark_name]
and not dataset._force_reload
):
logging.warning("Using cached value in node_label_as_feature")
return dataset
logging.warning(
"Adding node labels into node features..., mode={}".format(mode)
)
# check if graph has "feat"
if "feat" not in dataset[0][0].ndata:
logging.warning("Dataset has no node feature 'feat'")
if mode.lower() == "concat":
mode = "replace"
# first read node labels
DS_node_labels = dataset._idx_from_zero(
np.loadtxt(dataset._file_path("node_labels"), dtype=int)
)
one_hot_node_labels = dataset._to_onehot(DS_node_labels)
# read graph idx
DS_indicator = dataset._idx_from_zero(
np.genfromtxt(dataset._file_path("graph_indicator"), dtype=int)
)
node_idx_list = []
for idx in range(np.max(DS_indicator) + 1):
node_idx = np.where(DS_indicator == idx)
node_idx_list.append(node_idx[0])
# add to node feature dict
for idx, g in zip(node_idx_list, dataset.graph_lists):
node_labels_tensor = torch.tensor(one_hot_node_labels[idx, :])
if mode.lower() == "concat":
g.ndata["feat"] = torch.cat(
(g.ndata["feat"], node_labels_tensor), dim=1
)
elif mode.lower() == "add":
g.ndata["node_label"] = node_labels_tensor
else: # replace
g.ndata["feat"] = node_labels_tensor
if save:
check_mark[check_mark_name] = True
_save_check_mark(check_mark_path, check_mark)
dataset.save()
return dataset | [
1716,
636,
947,
964
] |
def METHOD_NAME(self, idx):
if idx is None:
return None
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return None
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_mfr_id")
if not output:
return None
mfr = output['status']
return mfr.rstrip('\n') | [
19,
2180,
147
] |
def METHOD_NAME(n, delta_k=None, delta_r=0.0):
G = MNA(n)
G.add_d(n-1, 1)
t0 = time.perf_counter_ns()
for i in range(n):
G.add_r(i-1, i, 1+delta_r if delta_k is not None and i == delta_k else 1)
G.add_c(i, 1)
t1 = time.perf_counter_ns()
print(f'build: {(t1-t0)/1e9}')
G.semantic()
return G | [
370,
357,
3264
] |
def METHOD_NAME(key, uid, nonce):
state = 0
for i in range(32, 48):
state = (state << 1) | ((key >> i) & 1)
for i in range(0, 32):
state = (state << 1) | ((uid >> i) & 1)
#print '%012x' % state
#print '%012x' % (int("{0:048b}".format(state)[::-1],2))
for i in range(0, 32):
nonce_bit = (f20(state) ^ ((nonce >> (31-i)) & 1))
#print nonce_bit
state = (state >> 1) | (((nonce_bit ^ (key >> (31-i))) & 1) << 47)
#print '%012x' % state
#print '%012x' % (int("{0:048b}".format(state)[::-1],2))
return state | [
-1,
176
] |
def METHOD_NAME(toks):
return CharacterRangeEmitter(srange(toks[0])) | [
276,
661
] |
def METHOD_NAME(filters: Optional[pulumi.Input[Optional[pulumi.InputType['GetReleaseLabelsFiltersArgs']]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetReleaseLabelsResult]:
"""
Retrieve information about EMR Release Labels.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.emr.get_release_labels(filters=aws.emr.GetReleaseLabelsFiltersArgs(
application="[email protected]",
prefix="emr-5",
))
```
:param pulumi.InputType['GetReleaseLabelsFiltersArgs'] filters: Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return. See Filters.
"""
... | [
19,
586,
415,
146
] |
def METHOD_NAME(self):
assert (
hasattr(self.clf, "decision_scores_")
and self.clf.decision_scores_ is not None
)
assert hasattr(self.clf, "labels_") and self.clf.labels_ is not None
assert hasattr(self.clf, "threshold_") and self.clf.threshold_ is not None
assert hasattr(self.clf, "_mu") and self.clf._mu is not None
assert hasattr(self.clf, "_sigma") and self.clf._sigma is not None | [
9,
386
] |
def METHOD_NAME(self):
self.cmdIn() | [
1452
] |
def METHOD_NAME():
"""Enable one breakpoint in the debugger""" | [
3237,
1317,
5116
] |
def METHOD_NAME(self):
return self.type == const.ReplayStorageType.ceph.value | [
44,
8037
] |
def METHOD_NAME(self):
r"""Test of the genOutSpec method (empty filename).
Method: This checks that the compiled outspec dictionary
is correct and complete. No file is written.
"""
# the with clause allows the chatter on stdout during
# object creation to be suppressed
with RedirectStreams(stdout=self.dev_null):
sim = self.fixture(SimpleScript)
# compile the output spec
outspec_orig = sim.genOutSpec(None)
# ensure output spec is OK
self.validate_outspec(outspec_orig, sim) | [
9,
17329,
17154
] |
def METHOD_NAME(tmp_path: Path, context: CfnginContext) -> None:
"""Test interactive create bad dir."""
key_dir = tmp_path / "missing"
lines = ("create", str(key_dir))
with mock_input(lines):
result = ensure_keypair_exists(context, keypair=KEY_PAIR_NAME)
assert result == {} | [
9,
1204,
129,
1068,
1190
] |
def METHOD_NAME(desc: current.RunDescriber) -> str:
"""
Serialize the given RunDescriber to YAML as a RunDescriber of the
version for storage
"""
import ruamel.yaml # lazy import
yaml = ruamel.yaml.YAML()
with io.StringIO() as stream:
yaml.dump(to_dict_for_storage(desc), stream=stream)
output = stream.getvalue()
return output | [
24,
406,
43,
948
] |
def METHOD_NAME(save_path, image_groundtruth, groundtruth,
image_predict, predict, legend):
tail = save_path.split(".")[-1]
save_path = (save_path[:-len(tail)] + "png")
import matplotlib.patches as mpatches
from matplotlib import use
use('Agg')
if image_groundtruth is not None:
image_groundtruth = image_groundtruth[..., ::-1]
image_predict = image_predict[..., ::-1]
if groundtruth is not None:
groundtruth = groundtruth[..., ::-1]
predict = predict[..., ::-1]
fig = plt.figure()
red_patches = []
for key, value in legend.items():
red_patch = mpatches.Patch(
color=[x / 255.0 for x in value[::-1]], label=key)
red_patches.append(red_patch)
plt.legend(
handles=red_patches, bbox_to_anchor=(1.05, 0), loc=3, borderaxespad=0)
plt.axis('off')
if image_groundtruth is not None and \
groundtruth is not None:
left, bottom, width, height = 0.02, 0.51, 0.38, 0.38
fig.add_axes([left, bottom, width, height])
plt.imshow(image_groundtruth)
plt.axis('off')
plt.title("Ground Truth", loc='left')
left, bottom, width, height = 0.52, 0.51, 0.38, 0.38
fig.add_axes([left, bottom, width, height])
plt.imshow(groundtruth)
plt.axis('off')
left, bottom, width, height = 0.01, 0.5, 0.9, 0.45
fig.add_axes([left, bottom, width, height])
currentAxis = plt.gca()
rect = patches.Rectangle(
(0.0, 0.0), 1.0, 1.0, linewidth=1, edgecolor='k', facecolor='none')
currentAxis.add_patch(rect)
plt.axis('off')
left, bottom, width, height = 0.02, 0.06, 0.38, 0.38
fig.add_axes([left, bottom, width, height])
plt.imshow(image_predict)
plt.axis('off')
plt.title("Prediction", loc='left')
left, bottom, width, height = 0.52, 0.06, 0.38, 0.38
fig.add_axes([left, bottom, width, height])
plt.imshow(predict)
plt.axis('off')
left, bottom, width, height = 0.01, 0.05, 0.9, 0.45
fig.add_axes([left, bottom, width, height])
currentAxis = plt.gca()
rect = patches.Rectangle(
(0.0, 0.0), 1.0, 1.0, linewidth=1, edgecolor='k', facecolor='none')
currentAxis.add_patch(rect)
plt.axis('off')
else:
plt.subplot(1, 2, 1)
plt.imshow(image_predict)
plt.axis('off')
plt.title("Combination ", y=-0.12)
plt.subplot(1, 2, 2)
plt.imshow(predict)
plt.axis('off')
plt.title("Prediction", y=-0.12)
plt.savefig(save_path, dpi=200, bbox_inches='tight')
plt.close() | [
3701,
16347,
1571
] |
def METHOD_NAME(self, input):
return self.net(input, is_infer=True) | [
1852,
819
] |
def METHOD_NAME():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.command(
"settings.py",
"runserver",
None,
None,
None,
None,
"noinput",
"somethingelse",
)
mock.assert_called_once_with(
"django-admin.py runserver --settings=settings.py "
"--noinput --somethingelse",
python_shell=False,
env=None,
runas=None,
) | [
9,
5063,
2870,
615,
462,
41,
335
] |
def METHOD_NAME(f, enable_abspath_if_through_root):
import os
_, currentdir = os.path.split(os.getcwd())
def check(args, r_expected, ra_expected=None):
result = f(*args)
assert result == r_expected
if (enable_abspath_if_through_root and ra_expected is not None):
result = f(*(args), **{"enable_abspath_if_through_root": True})
assert result == ra_expected
check(["a"], 'a')
check([os.path.abspath("a")], 'a')
check(["a/b"], 'a\\b')
check(["../a/b"], '..\\a\\b')
check(["a", "../b"], '..\\'+currentdir+'\\a')
check(["a/b", "../c"], '..\\'+currentdir+'\\a\\b')
check(["a", "b/c"], '..\\..\\a')
check(["//conky/mountpoint/a", "//conky/mountpoint/b/c"], '..\\..\\a',
"\\\\conky\\mountpoint\\a")
check(["a", "a"], '.')
check(["c:/foo/bar/bat", "c:/x/y/z"], '..\\..\\..\\foo\\bar\\bat',
"c:\\foo\\bar\\bat")
check(["c:/foo/bar/bat", "c:/foo/bar"], 'bat', 'bat')
check(["c:/foo/bar/bat", "c:/"], 'foo\\bar\\bat', "c:\\foo\\bar\\bat")
check(["c:/", "c:/foo/bar/bat"], '..\\..\\..', "c:\\")
check(["c:/foo/bar/bat", "c:/x"], '..\\foo\\bar\\bat', "c:\\foo\\bar\\bat")
check(["c:/x", "c:/foo/bar/bat"], '..\\..\\..\\x', "c:\\x")
check(["c:/", "c:/"], '.', "c:\\")
check(["/a", "/a"], '.')
check(["/a/b", "/a/b"], '.')
check(["c:/foo", "C:/FOO"], '.', '.')
check(["c:/aa", "C:/cccc"], '..\\aa', 'c:\\aa')
check(["c:/aa/bbb", "C:/cccc/ddddd"], '..\\..\\aa\\bbb', 'c:\\aa\\bbb')
#
if (enable_abspath_if_through_root):
assert f("c:\\foo", "d:\\foo", True) == "c:\\foo"
assert f("//m/d", "//n/d", True) == "\\\\m\\d"
assert f("d:\\foo", "//n/d", True) == "d:\\foo"
assert f("//n/d", "d:\\foo", True) == "\\\\n\\d" | [
3446,
9646,
10363
] |
def METHOD_NAME(self):
return Channel(self) | [
307
] |
def METHOD_NAME(self):
pass | [
9,
697,
2009
] |
def METHOD_NAME(a, b):
return a | b | [
2147,
2003
] |
async def METHOD_NAME(self): | [
9,
45,
171,
997,
954,
168,
41
] |
def METHOD_NAME(self, fix_y_pred_diff_len, fix_y_true):
with pytest.raises(ValueError, match="Inputs have mismatched dimensions"):
self.objective.score(fix_y_true, fix_y_pred_diff_len) | [
955,
362,
9210
] |
def METHOD_NAME(self):
ns = Namespace(system_assigned="enAble")
validate_app_force_set_system_identity_or_warning(ns)
self.assertTrue("enable", ns.system_assigned) | [
9,
1818,
0,
112,
2989,
1205,
362
] |
def METHOD_NAME(async_client):
return AsyncYamlRunner(async_client) | [
958,
1102
] |
def METHOD_NAME(build_dir):
target_dir=os.path.join(build_dir,'target')
allinone_jar=__find_genomicsdb_jar(target_dir,'genomicsdb-*allinone-spark.jar')
examples_jar=__find_genomicsdb_jar(target_dir,'genomicsdb-*examples.jar')
if 'CLASSPATH' in os.environ:
classpath=os.environ['CLASSPATH']
else:
classpath=''
if (len(classpath) > 0):
environ["CLASSPATH"] = allinone_jar+os.pathsep+examples_jar
else:
environ["CLASSPATH"] = allinone_jar+os.pathsep+examples_jar+os.pathsep+classpath | [
102,
14567
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The link to the next page of FeaturesetJob objects. If null, there are no additional pages.
"""
return pulumi.get(self, "next_link") | [
243,
548
] |
def METHOD_NAME(self, globs: Tuple[str, ...], poll: bool) -> Optional[Snapshot]:
"""Returns a Snapshot of the input globs.
If poll=True, will wait for up to INVALIDATION_POLL_INTERVAL for the globs to have changed,
and will return None if they have not changed.
"""
timeout = self.INVALIDATION_POLL_INTERVAL if poll else None
try:
snapshot = self._scheduler_session.product_request(
Snapshot,
subjects=[PathGlobs(globs)],
poll=poll,
timeout=timeout,
)[0]
return cast(Snapshot, snapshot)
except ExecutionTimeoutError:
if poll:
return None
raise | [
19,
394
] |
def METHOD_NAME(self):
# Callback stub for the DUT's main.
self._bazel_subprocess_actions.append("wait") | [
14034,
618
] |
async def METHOD_NAME(self, subscription_id: str):
try:
client = self.get_client(subscription_id)
results = []
try:
compliance_standards = await run_concurrently(
lambda: list(client.regulatory_compliance_standards.list())
)
except Exception as e:
if 'as it has no standard pricing bundle' in str(e):
print_debug(f'Failed to retrieve regulatory compliance standards: {e}')
else:
print_exception(f'Failed to retrieve regulatory compliance standards: {e}')
return {}
else:
for standard in compliance_standards:
try:
compliance_controls = await run_concurrently(
lambda standard=standard: list(client.regulatory_compliance_controls.list(
regulatory_compliance_standard_name=standard.name))
)
for control in compliance_controls:
control.standard_name = standard.name
results.append(control)
except Exception as e:
print_exception(f'Failed to retrieve compliance controls: {e}')
finally:
return results
except Exception as e:
print_exception(f'Failed to retrieve regulatory compliance results: {e}')
return [] | [
19,
14663,
4495,
51
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(filename):
with open(filename, 'rb') as f:
magic_bytes = f.read(4)
return magic_bytes in [
'\xfe\xed\xfa\xce', # MH_MAGIC
'\xce\xfa\xed\xfe', # MH_CIGAM
'\xfe\xed\xfa\xcf', # MH_MAGIC_64
'\xcf\xfa\xed\xfe', # MH_CIGAM_64
'\xca\xfe\xba\xbe', # FAT_MAGIC
'\xbe\xba\xfe\xca' # FAT_CIGAM
] | [
137,
14342,
1857
] |
def METHOD_NAME():
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=1SU')
)
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYMONTHDAY=1')
)
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TH;BYSETPOS=1')
)
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TU,TH;BYSETPOS=1')
)
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;INTERVAL=2;BYDAY=MO,TU,WE,TH,FR,SA,SU;BYSETPOS=1')
)
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;INTERVAL=2;BYDAY=WE,SU,MO,TH,FR,TU,SA;BYSETPOS=1')
)
assert RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;INTERVAL=2;BYDAY=WE,MO,TH,FR,TU,SA;BYSETPOS=1')
)
assert not RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=-1SU')
)
assert not RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TH;BYMONTHDAY=1,2,3,4,5,6,7')
)
assert not RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TH;BYMONTHDAY=-1')
)
assert not RecurrenceEditor.check_understood_rrule(
icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TH;BYSETPOS=3')
) | [
9,
250,
11085,
11086
] |
def METHOD_NAME(client, resource_group, service, app, deployment):
try:
return client.deployments.get(resource_group, service, app, deployment)
except CloudError:
raise InvalidArgumentValueError('Deployment {} not found under app {}'.format(deployment, app)) | [
602,
1503,
1985
] |
def METHOD_NAME(self):
"""
Related to #89.
This does not test #89 but tests the fix for it.
We want to make sure the worker threads can be used multiple times
        and with different time gaps between executions.
"""
@vectorize('float64(float64, float64)', target='parallel')
def fnv(a, b):
return a + b
sleep_time = 1 # 1 second
while sleep_time > 0.00001: # 10us
time.sleep(sleep_time)
a = b = np.arange(10**5)
np.testing.assert_equal(a + b, fnv(a, b))
# Reduce sleep time
sleep_time /= 2 | [
9,
600,
17
] |
def METHOD_NAME(self):
"""
Tests that weight entries are automatically created or updated
"""
self.user_login('test')
user = User.objects.get(username=self.current_user)
# Existing weight entry is old, a new one is created
entry1 = WeightEntry.objects.filter(user=user).latest()
response = self.client.post(
reverse('nutrition:bmi:calculate'), {
'height': 180,
'weight': 80
}
)
self.assertEqual(response.status_code, 200)
entry2 = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry1.weight, 83)
self.assertEqual(entry2.weight, 80)
# Existing weight entry is from today, is updated
entry2.delete()
entry1.date = datetime.date.today()
entry1.save()
response = self.client.post(
reverse('nutrition:bmi:calculate'), {
'height': 180,
'weight': 80
}
)
self.assertEqual(response.status_code, 200)
entry2 = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry1.pk, entry2.pk)
self.assertEqual(entry2.weight, 80)
# No existing entries
WeightEntry.objects.filter(user=user).delete()
response = self.client.post(
reverse('nutrition:bmi:calculate'), {
'height': 180,
'weight': 80
}
)
self.assertEqual(response.status_code, 200)
entry = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry.weight, 80)
self.assertEqual(entry.date, datetime.date.today()) | [
9,
4005,
1336,
475
] |
def METHOD_NAME(self):
"""Post Object.
Returns
-------
:class:`pyaedt.modules.PostProcessor.CircuitPostProcessor`
"""
if self._post is None: # pragma: no cover
from pyaedt.modules.PostProcessor import CircuitPostProcessor
self._post = CircuitPostProcessor(self)
return self._post | [
72
] |
def METHOD_NAME(self, info_state_key, action_idx, amount): | [
238,
12549
] |
async def METHOD_NAME():
sample = AuthSamplesAsync()
await sample.auth_connection_string_async()
await sample.auth_active_directory_async()
await sample.auth_shared_access_signature_async()
await sample.auth_blob_url_async()
await sample.auth_default_azure_credential() | [
57
] |
def METHOD_NAME(self):
instance_trait = self.trait_under_test
class FooContainer(HasTraits):
not_adapting_foo = instance_trait(Foo)
adapting_foo = instance_trait(Foo, adapt="yes")
adapting_foo_permissive = instance_trait(Foo, adapt="default")
adapting_foo_dynamic_default = instance_trait(
Foo,
adapt="default",
factory=default_foo,
)
not_adapting_foo_list = List(Foo)
adapting_foo_list = List(instance_trait(Foo, adapt="yes"))
return FooContainer() | [
129,
2563,
224
] |
def METHOD_NAME(self, batch, stage):
"Given an input batch it computes the enhanced signal"
batch = batch.to(self.device)
wavs, lens = batch.sig
noisy = self.hparams.add_noise(wavs, lens).unsqueeze(-1)
enhanced = self.modules.generator(noisy)
return enhanced | [
226,
76
] |
def METHOD_NAME(self) -> bool:
"""
    Returns True if the device contains no firmware.
"""
# We check by comparing the device reported firmware hash.
# If erased, the firmware is all '\xFF'.
firmware_v, _ = self.versions()
empty_firmware = struct.pack("<I", firmware_v) + b"\xFF" * MAX_FIRMWARE_SIZE
empty_firmware_hash = hashlib.sha256(hashlib.sha256(empty_firmware).digest()).digest()
reported_firmware_hash, _ = self.get_hashes()
return empty_firmware_hash == reported_firmware_hash | [
10504
] |
def METHOD_NAME(func1, func2, objpath1, objpath2):
return f"""ValueError: StructuralEqual check failed, caused by lhs at {objpath1}: | [
391,
1571
] |
def METHOD_NAME(notebook_name, dataproc_name, bucket_name, ssh_user, key_path):
logging.info('Terminating Dataproc cluster and cleaning Dataproc config from bucket')
try:
cluster = GCPMeta.get_list_cluster_statuses([dataproc_name])
if cluster[0]['status'] == 'running':
computational_name = GCPMeta.get_cluster(dataproc_name).get('labels').get('computational_name')
GCPActions.bucket_cleanup(bucket_name, dataproc_conf['project_name'], dataproc_name)
logging.info('The bucket {} has been cleaned successfully'.format(bucket_name))
GCPActions.delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
logging.info('The Dataproc cluster {} has been terminated successfully'.format(dataproc_name))
GCPActions.remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
key_path, computational_name)
else:
logging.info("There are no Dataproc clusters to terminate.")
except Exception as err:
datalab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
sys.exit(1) | [
1602,
15434,
2059
] |
def METHOD_NAME(model: BertModel, ckpt_dir: str, model_name: str):
"""
Args:
model: BertModel Pytorch model instance to be converted
ckpt_dir: Tensorflow model directory
model_name: model name
Currently supported HF models:
- Y BertModel
- N BertForMaskedLM
- N BertForPreTraining
- N BertForMultipleChoice
- N BertForNextSentencePrediction
- N BertForSequenceClassification
- N BertForQuestionAnswering
"""
tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
var_map = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(ckpt_dir):
os.makedirs(ckpt_dir)
state_dict = model.state_dict()
def to_tf_var_name(name: str):
for patt, repl in iter(var_map):
name = name.replace(patt, repl)
return f"bert/{name}"
def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
session.run(tf.variables_initializer([tf_var]))
session.run(tf_var)
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
tf_name = to_tf_var_name(var_name)
torch_tensor = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose):
torch_tensor = torch_tensor.T
tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
tf.keras.backend.set_value(tf_var, torch_tensor)
tf_weight = session.run(tf_var)
print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
saver = tf.train.Saver(tf.trainable_variables())
saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt")) | [
197,
3299,
1830,
24,
554
] |
def METHOD_NAME(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset | [
250,
539
] |
def METHOD_NAME(self) -> str:
"""
Field of the response containing data.
By default the value self.name will be used if this property is empty or None
"""
return None | [
365,
101
] |
def METHOD_NAME(self) -> str:
"""
    A base64-encoded 256-bit secondary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key") | [
3288,
59
] |
def METHOD_NAME(directory, row):
document_id, kind, story_url = row['document_id'], row['kind'], row['story_url']
story_path = os.path.join(directory, document_id + '.content')
actual_story_size = 0
if os.path.exists(story_path):
with PathManager.open(story_path, 'rb') as f:
actual_story_size = len(f.read())
if actual_story_size <= 19000:
if kind == 'gutenberg':
time.sleep(2)
build_data.download(story_url, directory, document_id + '.content')
else:
return True
file_type = subprocess.check_output(['file', '-b', story_path])
file_type = file_type.decode('utf-8')
if 'gzip compressed' in file_type:
gz_path = os.path.join(directory, document_id + '.content.gz')
shutil.move(story_path, gz_path)
build_data.untar(gz_path)
return False | [
1365,
3794
] |
def METHOD_NAME(
cls,
workspace: "Workspace",
scope: SerializationProcessorScope,
import_export_config: ImportExportConfig,
) -> dict[str, Any]:
"""
Exports the `role_assignments` in `scope` when it is being exported
by an application type `export_serialized`.
"""
if not import_export_config.include_permission_data:
# We cannot yet export RBAC roles to another workspace as we would also need
# to export all subjects to the new workspace also or somehow allow to user
# to choose how to map subjects.
return
# Do not export anything if the workspace doesn't have RBAC enabled.
if not LicenseHandler.workspace_has_feature(RBAC, workspace):
return {}
# Application subclass scopes can't be passed to
# the role assignment handler. See #1624.
if isinstance(scope, Application):
scope = getattr(scope, "application_ptr", scope)
serialized_role_assignments = []
role_assignments = RoleAssignmentHandler().get_role_assignments(
workspace, scope
)
for role_assignment in role_assignments:
serialized_role_assignments.append(
EnterpriseExportSerializedStructure.role_assignment(
subject_id=role_assignment.subject_id,
subject_type_id=role_assignment.subject_type_id,
role_id=role_assignment.role.id,
)
)
return {"role_assignments": serialized_role_assignments} | [
294,
5913
] |
def METHOD_NAME(sysroot):
syslinuxpath = sysroot + '/boot/syslinux/syslinux.cfg'
entries = parse_loader_configs(sysroot)
syslinux_entries = []
# Parse SYSLINUX config
with open(syslinuxpath) as f:
syslinux_entry = None
for line in f:
try:
k, v = line.strip().split(" ", 1)
except ValueError:
continue
if k == 'DEFAULT':
if syslinux_entry is not None:
syslinux_default = v
elif k == 'LABEL':
if syslinux_entry is not None:
syslinux_entries.append(syslinux_entry)
syslinux_entry = {}
syslinux_entry['title'] = v
elif k == 'KERNEL':
syslinux_entry['linux'] = v
elif k == 'INITRD':
syslinux_entry['initrd'] = v
elif k == 'APPEND':
syslinux_entry['options'] = v
if syslinux_entry is not None:
syslinux_entries.append(syslinux_entry)
if len(entries) != len(syslinux_entries):
fatal("Found {0} loader entries, but {1} SYSLINUX entries\n".format(
len(entries), len(syslinux_entries)))
def assert_key_same_file(a, b, key):
aval = a[key]
bval = b[key]
sys.stderr.write("aval: %r\nbval: %r\n" % (aval, bval))
# Paths in entries are always relative to /boot
entry = os.stat(sysroot + "/boot" + aval)
# Syslinux entries can be relative to /boot (if it's on another filesystem)
# or relative to / if /boot is on /.
s1 = os.stat(sysroot + bval)
s2 = os.stat(sysroot + "/boot" + bval)
# A symlink ensures that no matter what they point at the same file
assert_eq(entry, s1)
assert_eq(entry, s2)
for i, (entry, syslinuxentry) in enumerate(zip(entries, syslinux_entries)):
assert_key_same_file(entry, syslinuxentry, 'linux')
assert_key_same_file(entry, syslinuxentry, 'initrd')
entry_ostree = get_ostree_option(entry['options'])
syslinux_ostree = get_ostree_option(syslinuxentry['options'])
if entry_ostree != syslinux_ostree:
fatal("Mismatch on ostree option: {0} != {1}".format(
entry_ostree, syslinux_ostree))
sys.stdout.write('SYSLINUX configuration validated\n')
return 0 | [
187,
9841
] |
def METHOD_NAME(t):
for s in ignore_tokens:
if s != '' and s in t:
return False
return True | [
2947,
466
] |
def METHOD_NAME(self):
return self.xml_with_pre.METHOD_NAME | [
252,
1768
] |
def METHOD_NAME(fn):
if isinstance(fn, str):
fn = open(fn)
for row in csv.DictReader(fn):
given = row['First']
family = row['Last']
party = row['Party'].decode('utf-8')
party = PARTY_YNMP_TO_TWFY.get(party, party)
cons = row['Constituency'].decode('utf-8')
person_id = None
if row['parlparse_id']:
person_id = 'uk.org.publicwhip/person/{0}'.format(row['parlparse_id'])
yield {'given_name': given, 'family_name': family}, party, cons, person_id | [
-1,
732,
781
] |
def METHOD_NAME(self):
proj = Project.objects.get(slug="pip-fr")
url = core_tags.make_document_url(proj, "abc", "xyz")
self.assertEqual(url, self.pip_abc_xyz_fr_page_url)
url = core_tags.make_document_url(proj, "abc", "index", "index.html")
self.assertEqual(url, self.pip_abc_fr_url_index) | [
9,
2518,
155,
61,
281,
61,
1174
] |
def METHOD_NAME(
end_time: datetime, start_months_prior: int, end_months_prior: int
) -> str:
"""Generate YYYY-MM stings for months specified.
For all months inclusive, generate YYYY-MM strings starting at the
month of the end_time, and ending start_months_prior before the end_time.
Parameters
----------
end_time : datetime
The time for the last month desired
start_months_prior : int
Number of months before the end_time to end
string generation.
Yields
------
month_str : str
month string in the YYYY-MM format
"""
start = end_time - relativedelta(months=start_months_prior)
first_month = start.replace(day=1)
last_month = (
end_time + relativedelta(day=31) - relativedelta(months=end_months_prior)
)
reverse_months = sorted(
rrule.rrule(rrule.MONTHLY, dtstart=first_month, until=last_month), reverse=True
)
for date in reverse_months:
yield f"{date.year:04}-{date.month:02}" | [
842,
1485,
370
] |
def METHOD_NAME(input_dict):
return {key: value for (key, value) in input_dict.items() if key != "id"} | [
188,
147
] |
def METHOD_NAME(
clickhouse_query: Union[Query, CompositeQuery[Table]],
query_settings: QuerySettings,
reader: Reader,
) -> QueryResult:
nonlocal runner_call_count
nonlocal runner_settings
runner_call_count += 1
runner_settings.append(query_settings)
return query_result | [
539,
1102
] |
def METHOD_NAME(cls) -> bytes:
return bytes.fromhex(bitcoin.rev_hex(cls.GENESIS)) | [
3853,
3842,
321
] |
def METHOD_NAME():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
with servicebus_client:
with servicebus_client.get_queue_sender(queue_name=QUEUE_NAME) as sender:
sender.send_messages(ServiceBusMessage("message"))
with AutoLockRenewer() as renewer:
# For this sample we're going to set the renewal recurrence of the autolockrenewer to greater than the
# service side message lock duration, to demonstrate failure. Normally, this should not be adjusted.
renewer._sleep_time = 40
with servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME, prefetch_count=10) as receiver:
def on_lock_renew_failure_callback(renewable, error):
# If auto-lock-renewal fails, this function will be called.
# If failure is due to an error, the second argument will be populated, otherwise
# it will default to `None`.
# This callback can be an ideal location to log the failure, or take action to safely
# handle any processing on the message or session that was in progress.
print("Intentionally failed to renew lock on {} due to {}".format(renewable, error))
received_msgs = receiver.receive_messages(max_message_count=1, max_wait_time=5)
for msg in received_msgs:
# automatically renew the lock on each message for 120 seconds
renewer.register(receiver,
msg,
max_lock_renewal_duration=90,
on_lock_renew_failure=on_lock_renew_failure_callback)
print('Register messages into AutoLockRenewer done.')
# Cause the messages and autorenewal to time out.
# Other reasons for renew failure could include a network or service outage.
time.sleep(80)
try:
for msg in received_msgs:
receiver.complete_message(msg)
except ServiceBusError as e:
print('Messages cannot be settled if they have timed out. (This is expected)')
print('Lock renew failure demonstration complete.') | [
10627,
625,
41,
625,
15627,
374,
1076
] |
def METHOD_NAME(name, **fields):
if fields:
return collections.namedtuple(
name,
fields.keys(),
)(*z3.Ints(' '.join(fields.values())))
else:
return z3.Int(name) | [
567,
486
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(entity, future_features):
"""Returns the AST and source code of given entity.
Args:
entity: Any, Python function/method/class
future_features: Iterable[Text], future features to use (e.g.
'print_statement'). See
https://docs.python.org/2/reference/simple_stmts.html#future
Returns:
gast.AST, Text: the parsed AST node; the source code that was parsed to
generate the AST (including any prefixes that this function may have added).
"""
try:
original_source = inspect_utils.getimmediatesource(entity)
except (IOError, OSError) as e:
raise ValueError(
'Unable to locate the source code of {}. Note that functions defined'
' in certain environments, like the interactive Python shell do not'
' expose their source code. If that is the case, you should to define'
' them in a .py source file. If you are certain the code is'
' graph-compatible, wrap the call using'
' @tf.autograph.do_not_convert. Original error: {}'.format(entity, e))
def raise_parse_failure(comment):
raise ValueError(
'Failed to parse source code of {}, which Python reported as:\n{}\n'
'{}'.format(entity, original_source, comment))
# Comments and multiline strings can appear at arbitrary indentation levels,
# causing textwrap.dedent to not correctly dedent source code.
# TODO(b/115884650): Automatic handling of comments/multiline strings.
source = textwrap.dedent(original_source)
future_statements = tuple(
'from __future__ import {}'.format(name) for name in future_features)
source = '\n'.join(future_statements + (source,))
try:
return parse_str(source, preamble_len=len(future_features)), source
except IndentationError:
# The text below lists the causes of this error known to us. There may
# be more.
raise_parse_failure(
'This may be caused by multiline strings or comments not indented at'
' the same level as the code.')
except SyntaxError as e:
if not tf_inspect.isfunction(entity) or entity.__name__ != '<lambda>':
raise
# Certain entities, like lambdas, only hold the raw code lines which defined
# them, which may include surrounding tokens and may be syntactically
# invalid out of context. For example:
#
# l = (
# lambda x: x,)[0]
#
# will have the dedented source "lambda x: x,)[0]"
  # Here we make an attempt to strip away the garbage by looking at the
# information in the syntax error.
lines = source.split('\n')
lineno, offset = e.lineno, e.offset # 1-based
# Give up if there's nothing we can chip away.
if len(lines) == lineno and len(lines[-1]) == offset:
raise_parse_failure(
'If this is a lambda function, the error may be avoided by creating'
' the lambda in a standalone statement.')
# Drop all lines following the error location
# TODO(mdan): What's with the pylint errors?
lines = lines[:lineno] # pylint:disable=invalid-slice-index
# Drop all characters following the error location
lines[-1] = lines[-1][:offset - 1] # pylint:disable=invalid-slice-index
source = '\n'.join(lines)
try:
return parse_str(source, preamble_len=len(future_features)), source
except SyntaxError as e:
raise_parse_failure(
'If this is a lambda function, the error may be avoided by creating'
' the lambda in a standalone statement.') | [
214,
2419
] |
def METHOD_NAME(self):
"""Threshold value in TS corresponding to `n_sigma`.
This assumes that the TS follows a chi squared distribution
with a number of degree of freedom equal to `n_free_parameters`.
"""
return sigma_to_ts(self.n_sigma, self.n_free_parameters) | [
5096,
853
] |
def METHOD_NAME(self, config):
host1, host2 = self.matched.host1, self.matched.host2
desc = super().METHOD_NAME(config)
desc += [
"\n".join([
"Configured {}.{}.ips = {}".format(
dev.host.hostid, dev.name, dev.ips
)
for dev in config.test_wide_devices
]),
"\n".join([
"Configured {}.{}.ports = {}".format(
dev.host.hostid, dev.name, dev.ports
)
for dev in [host1.br0, host2.br0]
])
]
return desc | [
567,
9,
2806,
1067
] |
def METHOD_NAME(variant_scalar_rgb):
f = mi.load_dict({'type': 'mitchell'})
assert dr.allclose(f.eval(0), 0.8888, atol=1e-3)
assert dr.allclose(f.eval_discretized(0), 0.8888, atol=1e-3)
assert f.eval(2.1) == 0 and f.eval_discretized(2.1) == 0 | [
2580,
-1
] |
def METHOD_NAME(self, m):
# server mode
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.P - 1):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, self.P)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
)
hm.add_string(key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(
H, self.transport.host_key_type
)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound() | [
214,
-1,
176
] |
def METHOD_NAME(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x | [
668,
5142,
387,
852
] |
def METHOD_NAME(clf):
Y_pred = clf.predict(X_test)
Y_pred_bin = binarize_predictions(Y_test, Y_pred, fpr=target_fpr)
recall = evaluate_recall(Y_test, Y_pred_bin)
fpr_ratio = evaluate_fairness(Y_test, Y_pred_bin, S_test, metric_col=target_fairness_metric)
return recall, fpr_ratio | [
1195,
5082,
5083
] |
def METHOD_NAME(identifier):
'''Run a harvester synchronously'''
log.info('Harvesting source "%s"', identifier)
actions.METHOD_NAME(identifier) | [
22
] |
def METHOD_NAME(self, index):
"""
Returns the radial index corresponding to the given global matrix index.
"""
return self.at(index).METHOD_NAME(index) | [
-1
] |
def METHOD_NAME(self):
self.assertFalse(util.is_dictionary(int()))
self.assertFalse(util.is_dictionary(None))
self.assertFalse(util.is_dictionary(str())) | [
9,
137,
2445,
1478
] |
def METHOD_NAME():
"""Called by pyang plugin framework at to initialize the plugin."""
# Register the plugin
plugin.register_plugin(SMIPlugin())
# Add our special argument syntax checkers
syntax.add_arg_type('smi-oid', _chk_smi_oid)
syntax.add_arg_type('smi-max-access', _chk_smi_max_access)
# Register that we handle extensions from the YANG module 'ietf-yang-smiv2'
grammar.register_extension_module(smi_module_name)
# Register the special grammar
for stmt, occurence, (arg, rules), add_to_stmts in smi_stmts:
grammar.add_stmt((smi_module_name, stmt), (arg, rules))
grammar.add_to_stmts_rules(add_to_stmts,
[((smi_module_name, stmt), occurence)])
# Add validation step
statements.add_validation_phase('smi_set_oid', after='inherit_properties')
statements.add_validation_fun('smi_set_oid',
[(smi_module_name, 'oid')],
v_set_oid)
statements.add_validation_fun('smi_set_oid',
[(smi_module_name, 'subid')],
v_set_subid)
# Register special error codes
error.add_error_code('SMIv2_BAD_SUBID', 1,
"subid needs an oid or subid statement in an ancestor")
error.add_error_code('SMIv2_SUBID_AND_OID', 1,
"subid and oid cannot be given at the same time") | [
14180,
2793,
176
] |
def METHOD_NAME(self, epoch: int, shuffle: bool = None) -> DataLoader:
if shuffle is None:
shuffle = self.shuffle
# rebuild sampler
if epoch > 1:
self.sampler_args["epoch"] = epoch
batch_sampler = CategoryBalancedSampler(**self.sampler_args)
batches = list(batch_sampler)
if self.sampler_args["num_batches"] is not None:
batches = batches[: self.sampler_args.num_batches]
bs_list = [len(batch) for batch in batches]
if self.sampler_args["distributed"]:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
for batch in batches:
if len(batch) < world_size:
raise RuntimeError(
f"The batch-size must be equal or more than world_size: "
f"{len(batch)} < {world_size}"
)
batches = [batch[rank::world_size] for batch in batches]
self.sampler = RawSampler(batches)
if self.num_iters_per_epoch is not None:
N = len(self.sampler)
# If corpus size is larger than the num_per_epoch
if self.num_iters_per_epoch < N:
N = len(self.sampler)
real_epoch, offset = divmod(self.num_iters_per_epoch * epoch, N)
if offset >= self.num_iters_per_epoch:
current_batches = self.sampler.generate(real_epoch + self.seed)
if shuffle:
np.random.RandomState(real_epoch + self.seed).shuffle(
current_batches
)
batches = current_batches[
offset - self.num_iters_per_epoch : offset
]
else:
prev_batches = self.sampler.generate(real_epoch - 1 + self.seed)
current_batches = self.sampler.generate(real_epoch + self.seed)
if shuffle:
np.random.RandomState(real_epoch - 1 + self.seed).shuffle(
prev_batches
)
np.random.RandomState(real_epoch + self.seed).shuffle(
current_batches
)
batches = (
prev_batches[offset - self.num_iters_per_epoch :]
+ current_batches[:offset]
)
# If corpus size is less than the num_per_epoch
else:
_epoch, _cursor = divmod(self.num_iters_per_epoch * (epoch - 1), N)
_remain = self.num_iters_per_epoch
batches = []
current_batches = self.sampler.generate(_epoch + self.seed)
if shuffle:
np.random.RandomState(_epoch + self.seed).shuffle(current_batches)
while _remain > 0:
_batches = current_batches[_cursor : _cursor + _remain]
batches += _batches
if _cursor + _remain >= N:
_epoch += 1
_cursor = 0
current_batches = self.sampler.generate(_epoch + self.seed)
if shuffle:
np.random.RandomState(_epoch + self.seed).shuffle(
current_batches
)
else:
_cursor = _cursor + _remain
_remain -= len(_batches)
assert len(batches) == self.num_iters_per_epoch
else:
batches = self.sampler.generate(epoch + self.seed)
if shuffle:
np.random.RandomState(epoch + self.seed).shuffle(batches)
# For backward compatibility for pytorch DataLoader
if self.collate_fn is not None:
kwargs = dict(collate_fn=self.collate_fn)
else:
kwargs = {}
return DataLoader(
dataset=self.dataset,
batch_sampler=batches,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
worker_init_fn=partial(worker_init_fn, base_seed=epoch + self.seed),
**kwargs,
) | [
56,
84
] |
def METHOD_NAME(bo, modid):
"""Given a Blender Object, finds a given modifier and returns it or None"""
if bo is not None:
# if they give us the wrong modid, it is a bug and an AttributeError
return getattr(bo.plasma_modifiers, modid)
return None | [
416,
2872
] |
def METHOD_NAME(obj1, obj2):
"""Compare two HS3 dicts."""
if not isinstance(obj1, collections.abc.Mapping) or not isinstance(
obj2, collections.abc.Mapping
):
raise TypeError(
f"obj1 and obj2 both need to be of type 'Mapping', are {obj1} and {obj2}"
)
missing2 = set(obj1.keys()) - set(obj2.keys())
missing1 = set(obj2.keys()) - set(obj1.keys())
if missing1 and missing2:
raise ValueError(
f"Both objects are missing keys: {missing1} and {missing2}. "
f"obj1: {obj1}, obj2: {obj2}"
)
return cleanup_recursive(obj1, obj2) | [
950,
-1
] |
def METHOD_NAME(benchmark):
# debdeps: python3-pytest-benchmark
g = []
for x in range(1020):
v = {
"anomaly": None if (int(x / 100) % 2 == 0) else True,
"confirmed": None,
"input": "foo",
"measurement_start_time": datetime.utcfromtimestamp(x + 1234567890),
"probe_cc": "BR",
"scores": "",
"test_name": "web_connectivity",
"tid": "",
}
g.append(v)
(msmts, changes) = benchmark(bench_detect_blocking_changes_1s_g, g)
with Path("detector/views/chart_alone.tpl").open() as f:
tpl = webapp.bottle.SimpleTemplate(f.read())
cc = "BR"
inp = "foo"
test_name = "web_connectivity"
cd = webapp.generate_chart(msmts, changes, cc, test_name, inp)
chart = tpl.render(**cd)
assert chart
data.joinpath("output/chart_ww_BR_bench.html").write_text(chart)
last_mean = msmts[-1][-1]
assert pytest.approx(0.415287511652) == last_mean | [
9,
7608,
2991,
5999,
1103
] |
def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(context):
_act_on_config(context.op_config)
if context.op_config["return_wrong_type"]:
return "wow"
return 13 | [
2648,
181
] |
def METHOD_NAME(n):
assert n < 7919
return FIRST_1000_PRIMES[bisect.bisect_right(FIRST_1000_PRIMES, n)] | [
243,
7322
] |
def METHOD_NAME(self):
self.OriginalExecuteFunction = _retry_utility.ExecuteFunction
_retry_utility.ExecuteFunction = self._MockExecuteFunctionInvalidSessionToken
try:
self.created_collection.create_item(body={'id': '1' + str(uuid.uuid4()), 'pk': 'mypk'})
self.fail()
except exceptions.CosmosHttpResponseError as e:
self.assertEqual(e.http_error_message, "Could not parse the received session token: 2")
self.assertEqual(e.status_code, StatusCodes.INTERNAL_SERVER_ERROR)
_retry_utility.ExecuteFunction = self.OriginalExecuteFunction | [
9,
2026,
163,
168,
3966,
43,
532
] |
def METHOD_NAME(graph_json_str, libmod, device):
"""Create a runtime executor module given a graph and module.
Parameters
----------
graph_json_str : str
The graph to be deployed in json format output by json graph.
The graph can contain operator(tvm_op) that points to the name
of PackedFunc in the libmod.
libmod : tvm.runtime.Module
The module of the corresponding function
device : Device
The device to deploy the module, only supports CUDA GPU
Returns
-------
graph_module : GraphModuleCudaGraph
CUDA graph executor module that can be used to execute the graph.
Note
----
See also :py:class:`tvm.contrib.cuda_graph.cuda_graph_executor.GraphModuleCudaGraph`
for examples to directly construct a GraphModuleCudaGraph from an exported
relay compiled library.
"""
assert isinstance(graph_json_str, string_types)
try:
dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device)
if num_rpc_dev == len(dev):
fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor_cuda_graph.create")
else:
fcreate = tvm._ffi.get_global_func("tvm.graph_executor_cuda_graph.create")
except ValueError:
raise ValueError(
"To enable CUDA graph support (experimental), please set "
"'(USE_GRAPH_EXECUTOR_CUGRAPH ON)' in config.cmake and rebuild TVM"
)
return GraphModuleCudaGraph(fcreate(graph_json_str, libmod, *device_type_id)) | [
129
] |
def METHOD_NAME(tree_file, dir=None):
if dir is None:
dir = tempfile.mkdtemp()
header = []
cur_file = None
f = open(tree_file)
try:
lines = f.readlines()
finally:
f.close()
del f
try:
for line in lines:
if line[:5] == '#####':
filename = line.strip().strip('#').strip().replace('/', os.path.sep)
path = os.path.join(dir, filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if cur_file is not None:
f, cur_file = cur_file, None
f.close()
cur_file = open(path, 'w')
elif cur_file is not None:
cur_file.write(line)
elif line.strip() and not line.lstrip().startswith('#'):
if line.strip() not in ('"""', "'''"):
header.append(line)
finally:
if cur_file is not None:
cur_file.close()
return dir, ''.join(header) | [
789,
1458,
151
] |
def METHOD_NAME(self) -> str:
"""url with user:password part removed unless it is formed with
environment variables as specified in PEP 610, or it is ``git``
in the case of a git URL.
"""
purl = urllib.parse.urlsplit(self.url)
netloc = self._remove_auth_from_netloc(purl.netloc)
surl = urllib.parse.urlunsplit(
(purl.scheme, netloc, purl.path, purl.query, purl.fragment)
)
return surl | [
6517,
274
] |
def METHOD_NAME(examples, label_list, tokenizer, max_mention_length):
label_map = {label: i for i, label in enumerate(label_list)}
conv_tables = (
("-LRB-", "("),
("-LCB-", "("),
("-LSB-", "("),
("-RRB-", ")"),
("-RCB-", ")"),
("-RSB-", ")"),
)
features = []
for example in tqdm(examples):
def preprocess_and_tokenize(text, start, end=None):
target_text = text[start:end].rstrip()
for a, b in conv_tables:
target_text = target_text.replace(a, b)
return tokenizer.tokenize(target_text, add_prefix_space=True)
tokens = [tokenizer.cls_token]
tokens += preprocess_and_tokenize(example.text, 0, example.span[0])
mention_start = len(tokens)
tokens.append(ENTITY_TOKEN)
tokens += preprocess_and_tokenize(example.text, example.span[0], example.span[1])
tokens.append(ENTITY_TOKEN)
mention_end = len(tokens)
tokens += preprocess_and_tokenize(example.text, example.span[1])
tokens.append(tokenizer.sep_token)
word_ids = tokenizer.convert_tokens_to_ids(tokens)
word_attention_mask = [1] * len(tokens)
word_segment_ids = [0] * len(tokens)
entity_ids = [2, 0]
entity_attention_mask = [1, 0]
entity_segment_ids = [0, 0]
entity_position_ids = list(range(mention_start, mention_end))[:max_mention_length]
entity_position_ids += [-1] * (max_mention_length - mention_end + mention_start)
entity_position_ids = [entity_position_ids, [-1] * max_mention_length]
labels = [0] * len(label_map)
for label in example.labels:
labels[label_map[label]] = 1
features.append(
InputFeatures(
word_ids=word_ids,
word_segment_ids=word_segment_ids,
word_attention_mask=word_attention_mask,
entity_ids=entity_ids,
entity_position_ids=entity_position_ids,
entity_segment_ids=entity_segment_ids,
entity_attention_mask=entity_attention_mask,
labels=labels,
)
)
return features | [
197,
2794,
24,
2247
] |