text (string, lengths 15 to 7.82k) | ids (sequence, lengths 1 to 7) |
---|---|
def METHOD_NAME(self):
input = torch.randn(100)
output = torch.ops.aten.relu(input)
self.assertEqual(output, input.relu()) | [
9,
16922,
5276,
3589,
837
] |
def METHOD_NAME(cls):
"""
Generates a GET response of (invalid) HTML lacking any caption tracks within it.
    This fake HTML is never rendered; it's only intended as a source for a regex
search
"""
html_with_no_caption_tracks = "<b>No caption URL info for regex to find here</b>"
return cls.MockResponse(html_with_no_caption_tracks) | [
41,
654,
5876,
6520
] |
def METHOD_NAME(cmd):
"""
Take the given command and use the OS to automatically open the appropriate
resource. For instance, if a URL is provided, this will have the OS automatically
open the URL in the default web browser.
"""
if platform == "linux" or platform == "linux2":
cmd = ['xdg-open', cmd]
elif platform == "darwin":
cmd = ['open', cmd]
elif platform == "win32":
cmd = ['start', cmd]
subprocess.check_call(cmd) | [
457,
2773,
1452
] |
def METHOD_NAME():
s = tree.String(None, 'bö', (0, 0))
assert repr(s) # Should not raise an Error! | [
9,
774,
144
] |
def METHOD_NAME():
''' Test that a reference can be copied. '''
array_symbol = DataSymbol("symbol", ArrayType(REAL_TYPE, [10]))
scalar_symbol = DataSymbol("other", REAL_TYPE)
ref = Reference(array_symbol)
ref1 = ref.copy()
assert isinstance(ref1, Reference)
assert ref1 is not ref
assert ref1.symbol is array_symbol
# Modifying the new reference does not affect the original
ref1._symbol = scalar_symbol
assert ref.symbol is array_symbol | [
9,
272,
1046,
673,
775
] |
def METHOD_NAME(self):
"""Unarchive a file."""
# handle some defaults for archive_path and destination_path
archive_path = self.env.get("archive_path", self.env.get("pathname"))
if not archive_path:
raise ProcessorError(
"Expected an 'archive_path' input variable but none is set!"
)
destination_path = self.env.get(
"destination_path",
os.path.join(self.env["RECIPE_CACHE_DIR"], self.env["NAME"]),
)
# Create the directory if needed.
if not os.path.exists(destination_path):
try:
os.makedirs(destination_path)
except OSError as err:
raise ProcessorError(
"Can't create %s: %s" % (destination_path, err.strerror)
)
elif self.env.get("purge_destination"):
for entry in os.listdir(destination_path):
path = os.path.join(destination_path, entry)
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.unlink(path)
except OSError as err:
raise ProcessorError("Can't remove %s: %s" % (path, err.strerror))
fmt = self.env.get("archive_format")
if fmt is None:
fmt = self.get_archive_format(archive_path)
if not fmt:
raise ProcessorError(
"Can't guess archive format for filename %s"
% os.path.basename(archive_path)
)
self.output(
"Guessed archive format '%s' from filename %s"
% (fmt, os.path.basename(archive_path))
)
elif fmt not in EXTNS:
raise ProcessorError(
"'%s' is not valid for the 'archive_format' variable. "
"Must be one of %s." % (fmt, ", ".join(EXTNS))
)
if fmt == "zip":
cmd = [
"/usr/bin/ditto",
"--noqtn",
"-x",
"-k",
archive_path,
destination_path,
]
elif fmt == "gzip":
cmd = ["/usr/bin/ditto", "--noqtn", "-x", archive_path, destination_path]
elif fmt == "7z":
cmd_7z = self.env.get("7z_unarchiver_path")
if not cmd_7z:
raise ProcessorError(
"Expected an '7z_unarchiver_path' input variable since file format is 7z!"
)
cmd = [cmd_7z, "x", archive_path, "-o" + destination_path]
elif fmt.startswith("tar"):
cmd = ["/usr/bin/tar", "-x", "-f", archive_path, "-C", destination_path]
if fmt.endswith("gzip"):
cmd.append("-z")
elif fmt.endswith("bzip2"):
cmd.append("-j")
# Call the shell command.
try:
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as proc:
_, stderr = proc.communicate()
except OSError as err:
raise ProcessorError(
"%s execution failed with error code %d: %s"
% (os.path.basename(cmd[0]), err.errno, err.strerror)
)
if proc.returncode != 0:
raise ProcessorError(
"Unarchiving %s with %s failed: %s"
% (archive_path, os.path.basename(cmd[0]), stderr)
)
self.output("Unarchived %s to %s" % (archive_path, destination_path)) | [
57
] |
def METHOD_NAME(self, bbox_tlwh):
"""
TODO:
Convert bbox from xtl_ytl_w_h to xc_yc_w_h
Thanks [email protected] for reporting this bug!
"""
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
x2 = min(int(x + w), self.width - 1)
y1 = max(int(y), 0)
y2 = min(int(y + h), self.height - 1)
return x1, y1, x2, y2 | [
16702,
24,
9316
] |
def METHOD_NAME(self, mngrs):
self.__attribManagers = list(mngrs) | [
0,
7574,
11849
] |
def METHOD_NAME(self, ctx):
"""No conversion for SnowSQL."""
return None | [
16814,
741,
24,
440
] |
def METHOD_NAME(self, _):
with self.argument_context(consts.EXTENSION_NAME) as c:
c.argument('location',
validator=get_default_location_from_resource_group)
c.argument('name',
options_list=['--name', '-n'],
help='Name of the extension instance')
c.argument('extension_type',
help='Name of the extension type.')
c.argument('cluster_name',
options_list=['--cluster-name', '-c'],
help='Name of the Kubernetes cluster')
c.argument('cluster_type',
arg_type=get_enum_type(['connectedClusters', 'managedClusters', 'appliances', 'provisionedClusters']),
options_list=['--cluster-type', '-t'],
help='Specify Arc clusters or AKS managed clusters or Arc appliances or provisionedClusters.')
c.argument('cluster_resource_provider',
options_list=['--cluster-resource-provider', '--cluster-rp'],
help='Cluster Resource Provider name for this clusterType (Required for provisionedClusters)')
c.argument('scope',
arg_type=get_enum_type(['cluster', 'namespace']),
help='Specify the extension scope.')
c.argument('auto_upgrade_minor_version',
arg_group="Version",
options_list=['--auto-upgrade-minor-version', '--auto-upgrade'],
arg_type=get_three_state_flag(),
help='Automatically upgrade minor version of the extension instance.')
c.argument('version',
arg_group="Version",
help='Specify the version to install for the extension instance if'
' --auto-upgrade-minor-version is not enabled.')
c.argument('release_train',
arg_group="Version",
help='Specify the release train for the extension type.')
c.argument('configuration_settings',
arg_group="Configuration",
options_list=['--configuration-settings', '--config', c.deprecate(target='--config-settings', redirect='--configuration-settings')],
action=AddConfigurationSettings,
nargs='+',
help='Configuration Settings as key=value pair. Repeat parameter for each setting')
c.argument('configuration_protected_settings',
arg_group="Configuration",
options_list=['--config-protected-settings', '--config-protected', c.deprecate(target='--configuration-protected-settings', redirect='--config-protected-settings')],
action=AddConfigurationProtectedSettings,
nargs='+',
help='Configuration Protected Settings as key=value pair. Repeat parameter for each setting')
c.argument('configuration_settings_file',
arg_group="Configuration",
options_list=['--config-settings-file', '--config-file', c.deprecate(target='--configuration-settings-file', redirect='--config-settings-file')],
help='JSON file path for configuration-settings')
c.argument('configuration_protected_settings_file',
arg_group="Configuration",
options_list=['--config-protected-settings-file', '--config-protected-file', c.deprecate(target='--configuration-protected-settings-file', redirect='--config-protected-file')],
help='JSON file path for configuration-protected-settings')
c.argument('release_namespace',
help='Specify the namespace to install the extension release.')
c.argument('target_namespace',
help='Specify the target namespace to install to for the extension instance. This'
' parameter is required if extension scope is set to \'namespace\'')
c.argument('plan_name',
arg_group="Marketplace",
options_list=['--plan-name'],
help='The plan name is referring to the Plan ID of the extension that is being taken from Marketplace portal under Usage Information + Support')
c.argument('plan_product',
arg_group="Marketplace",
options_list=['--plan-product'],
help='The plan product is referring to the Product ID of the extension that is being taken from Marketplace portal under Usage Information + Support. An example of this is the name of the ISV offering used.')
c.argument('plan_publisher',
arg_group="Marketplace",
options_list=['--plan-publisher'],
help='The plan publisher is referring to the Publisher ID of the extension that is being taken from Marketplace portal under Usage Information + Support')
with self.argument_context(f"{consts.EXTENSION_NAME} update") as c:
c.argument('yes',
options_list=['--yes', '-y'],
help='Ignore confirmation prompts')
with self.argument_context(f"{consts.EXTENSION_NAME} delete") as c:
c.argument('yes',
options_list=['--yes', '-y'],
help='Ignore confirmation prompts')
c.argument('force',
help='Specify whether to force delete the extension from the cluster.')
with self.argument_context(f"{consts.EXTENSION_NAME} extension-types list") as c:
c.argument('cluster_name',
options_list=['--cluster-name', '-c'],
help='Name of the Kubernetes cluster')
c.argument('cluster_type',
arg_type=get_enum_type(['connectedClusters', 'managedClusters', 'appliances']),
options_list=['--cluster-type', '-t'],
help='Specify Arc clusters or AKS managed clusters or Arc appliances.')
with self.argument_context(f"{consts.EXTENSION_NAME} extension-types list-by-location") as c:
c.argument('location',
validator=get_default_location_from_resource_group)
with self.argument_context(f"{consts.EXTENSION_NAME} extension-types show") as c:
c.argument('extension_type',
help='Name of the extension type.')
c.argument('cluster_name',
options_list=['--cluster-name', '-c'],
help='Name of the Kubernetes cluster')
c.argument('cluster_type',
arg_type=get_enum_type(['connectedClusters', 'managedClusters', 'appliances']),
options_list=['--cluster-type', '-t'],
help='Specify Arc clusters or AKS managed clusters or Arc appliances.')
with self.argument_context(f"{consts.EXTENSION_NAME} extension-types list-versions") as c:
c.argument('extension_type',
help='Name of the extension type.')
c.argument('location',
validator=get_default_location_from_resource_group) | [
557,
134
] |
def METHOD_NAME(s):
output = ""
for ch in s.replace("\r", "\\r").replace("\n", "\\n").replace("\t", "\\t"):
char = ord(ch)
if not is_printable(char):
ch = "\\x%0.2x" % char
output += ch
return output | [
527,
144,
43,
52
] |
def METHOD_NAME(*group_names):
"""
Requires user membership in at least one of the groups passed in.
"""
def in_groups(u):
if u.is_authenticated:
if u.is_superuser or bool(u.groups.filter(name__in=group_names)):
return True
return False
return user_passes_test(in_groups) | [
846,
984
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
self._execute_operations()
return None | [
1519
] |
def METHOD_NAME(self):
self.comp_spin.setEnabled(not self.autoset_ncomp) | [
803,
212
] |
def METHOD_NAME(alpha: dc.float64, imgIn: dc.float64[W, H]):
k = (1.0 - np.exp(-alpha)) * (1.0 - np.exp(-alpha)) / (1.0 + alpha * np.exp(-alpha) - np.exp(2.0 * alpha))
a1 = k
a5 = k
a2 = k * np.exp(-alpha) * (alpha - 1.0)
a6 = k * np.exp(-alpha) * (alpha - 1.0)
a3 = k * np.exp(-alpha) * (alpha + 1.0)
a7 = k * np.exp(-alpha) * (alpha + 1.0)
a4 = -k * np.exp(-2.0 * alpha)
a8 = -k * np.exp(-2.0 * alpha)
b1 = 2.0**(-alpha)
b2 = -np.exp(-2.0 * alpha)
c1 = 1
c2 = 1
y1 = np.empty_like(imgIn)
y1[:, 0] = a1 * imgIn[:, 0]
y1[:, 1] = a1 * imgIn[:, 1] + a2 * imgIn[:, 0] + b1 * y1[:, 0]
for j in range(2, H):
y1[:, j] = (a1 * imgIn[:, j] + a2 * imgIn[:, j - 1] + b1 * y1[:, j - 1] + b2 * y1[:, j - 2])
y2 = np.empty_like(imgIn)
y2[:, -1] = 0.0
y2[:, -2] = a3 * imgIn[:, -1]
for j in range(H - 3, -1, -1):
y2[:, j] = (a3 * imgIn[:, j + 1] + a4 * imgIn[:, j + 2] + b1 * y2[:, j + 1] + b2 * y2[:, j + 2])
imgOut = c1 * (y1 + y2)
y1[0, :] = a5 * imgOut[0, :]
y1[1, :] = a5 * imgOut[1, :] + a6 * imgOut[0, :] + b1 * y1[0, :]
for i in range(2, W):
y1[i, :] = (a5 * imgOut[i, :] + a6 * imgOut[i - 1, :] + b1 * y1[i - 1, :] + b2 * y1[i - 2, :])
y2[-1, :] = 0.0
y2[-2, :] = a7 * imgOut[-1, :]
for i in range(W - 3, -1, -1):
y2[i, :] = (a7 * imgOut[i + 1, :] + a8 * imgOut[i + 2, :] + b1 * y2[i + 1, :] + b2 * y2[i + 2, :])
imgOut[:] = c2 * (y1 + y2)
return imgOut | [
14394,
1885
] |
async def METHOD_NAME(self, search_id):
# Sends a DELETE request to
# https://<server_ip>/api/ariel/searches/<search_id>
# deletes search created earlier.
endpoint = self.endpoint_start + "searches" + '/' + search_id
return await self.client.call_api(endpoint, 'DELETE', timeout=self.timeout) | [
34,
1070
] |
def METHOD_NAME(monkeypatch):
"""
Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
of py.test (test accepts a ``monkeypatch`` argument)
"""
monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
monkeypatch.setattr(logging.Logger, "_log", log_logger) | [
1284,
390,
2034
] |
def METHOD_NAME(self, filename):
"""Evaluate the given file and returns some evaluation metrics.
Args:
filename (str): A file name that will be evaluated.
Returns:
dict: A dictionary containing evaluation metrics.
"""
load_sess = self.sess
group_preds = []
group_labels = []
for (
batch_data_input,
newsid_list,
data_size,
) in self.iterator.load_data_from_file(filename):
if batch_data_input:
step_pred, step_labels = self.eval(load_sess, batch_data_input)
group_preds.extend(step_pred)
group_labels.extend(step_labels)
res = cal_metric(group_labels, group_preds, self.hparams.pairwise_metrics)
return res | [
22,
1171
] |
def METHOD_NAME(colors):
return {
Token: colors['base0'],
Comment: 'italic ' + colors['base01'],
Comment.Hashbang: colors['base01'],
Comment.Multiline: colors['base01'],
Comment.Preproc: 'noitalic ' + colors['magenta'],
Comment.PreprocFile: 'noitalic ' + colors['base01'],
Keyword: colors['green'],
Keyword.Constant: colors['cyan'],
Keyword.Declaration: colors['cyan'],
Keyword.Namespace: colors['orange'],
Keyword.Type: colors['yellow'],
Operator: colors['base01'],
Operator.Word: colors['green'],
Name.Builtin: colors['blue'],
Name.Builtin.Pseudo: colors['blue'],
Name.Class: colors['blue'],
Name.Constant: colors['blue'],
Name.Decorator: colors['blue'],
Name.Entity: colors['blue'],
Name.Exception: colors['blue'],
Name.Function: colors['blue'],
Name.Function.Magic: colors['blue'],
Name.Label: colors['blue'],
Name.Namespace: colors['blue'],
Name.Tag: colors['blue'],
Name.Variable: colors['blue'],
Name.Variable.Global:colors['blue'],
Name.Variable.Magic: colors['blue'],
String: colors['cyan'],
String.Doc: colors['base01'],
String.Regex: colors['orange'],
Number: colors['cyan'],
Generic: colors['base0'],
Generic.Deleted: colors['red'],
Generic.Emph: 'italic',
Generic.Error: colors['red'],
Generic.Heading: 'bold',
Generic.Subheading: 'underline',
Generic.Inserted: colors['green'],
Generic.Output: colors['base0'],
Generic.Prompt: 'bold ' + colors['blue'],
Generic.Strong: 'bold',
Generic.EmphStrong: 'bold italic',
Generic.Traceback: colors['blue'],
Error: 'bg:' + colors['red'],
} | [
93,
641
] |
def METHOD_NAME(self, cs, name):
container = SearchIndexerDataContainer(name="searchcontainer")
data_source_connection = SearchIndexerDataSourceConnection(
name=name, type="azureblob", connection_string=cs, container=container
)
return data_source_connection | [
129,
365,
1458,
550
] |
def METHOD_NAME(name, arch):
return """int main(int argc, char **argv) | [
2629,
57
] |
def METHOD_NAME(resources=None, languages=None):
"""
Create .po files for new language(s)
"""
if not languages:
print("ERROR: Specify at least one language")
exit(1)
locale_dirs = _get_locale_dirs(None)
errors = []
for name, dir_ in locale_dirs:
        # check if langs already exist
catalog_langs = sorted([d for d in os.listdir(dir_) if not d.startswith("_") and os.path.isdir(os.path.join(dir_, d))])
if existing_langs := list(set(catalog_langs) & set(languages)):
print(f"ERROR: Lang(s) {existing_langs} just exist.")
exit(1)
_makemessages(languages) | [
238,
1767
] |
def METHOD_NAME():
_test_infinity_value(float("inf"), "float64")
_test_infinity_value(float("-inf"), "float64")
_test_infinity_value(float("inf"), "float32")
_test_infinity_value(float("-inf"), "float32") | [
9,
7037,
99
] |
def METHOD_NAME(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self.dict_file = osp.join(self.tmp_dir.name, 'fake_chars.txt')
create_dummy_dict_file(self.dict_file)
self.dict_cfg = dict(
type='Dictionary',
dict_file=self.dict_file,
with_start=True,
with_end=True,
same_start_end=False,
with_padding=False,
with_unknown=False)
# both max_seq_len has been set
with self.assertWarns(Warning):
ABIFuser(
self.dict_cfg,
max_seq_len=10,
vision_decoder=dict(
type='ABIVisionDecoder',
in_channels=2,
num_channels=2,
max_seq_len=5),
language_decoder=dict(
type='ABILanguageDecoder',
d_model=2,
n_head=2,
d_inner=16,
n_layers=1,
max_seq_len=5))
# both dictionaries have been set
with self.assertWarns(Warning):
ABIFuser(
self.dict_cfg,
max_seq_len=10,
vision_decoder=dict(
type='ABIVisionDecoder',
in_channels=2,
num_channels=2,
dictionary=self.dict_cfg),
language_decoder=dict(
type='ABILanguageDecoder',
d_model=2,
n_head=2,
d_inner=16,
n_layers=1,
dictionary=self.dict_cfg)) | [
0,
1
] |
def METHOD_NAME(self):
return self.url | [
125,
274
] |
def METHOD_NAME(self):
"""
Return languages tabs that need to be displayed.
"""
languages = [lang for lang in self.languages
if lang in self.data
or self.instance.has_translation(lang)]
return languages | [
-1,
2539
] |
def METHOD_NAME(gui_config: GuiConfig, name: str) -> list[str]:
for custom_node in gui_config.nodes:
if custom_node.name == name:
return custom_node.services
return [] | [
19,
343,
3186
] |
def METHOD_NAME(self, data: SingleSensorData, *, sampling_rate_hz: float, **_) -> Self:
"""Estimate the position of the sensor based on the provided global frame data.
Parameters
----------
data
        Continuous sensor data that includes at least an Acc with all values in the global world frame
sampling_rate_hz
The sampling rate of the data in Hz
Returns
-------
self
The class instance with all result attributes populated
"""
self.data = data
self.sampling_rate_hz = sampling_rate_hz
if not 0.0 <= self.turning_point <= 1.0:
        raise ValueError("`turning_point` must be in the range of 0.0 to 1.0")
is_single_sensor_data(self.data, check_gyr=False, frame="sensor", raise_exception=True)
acc_data = data[SF_ACC].to_numpy()
if self.gravity is not None:
acc_data -= self.gravity
# Add an implicit 0 to the beginning of the acc data
padded_acc = np.pad(acc_data, pad_width=((1, 0), (0, 0)), constant_values=0)
velocity = self._forward_backward_integration(padded_acc)
position_xy = cumtrapz(velocity[:, :2], axis=0, initial=0) / self.sampling_rate_hz
if self.level_assumption is True:
position_z = self._forward_backward_integration(velocity[:, [2]])
else:
position_z = cumtrapz(velocity[:, [2]], axis=0, initial=0) / self.sampling_rate_hz
position = np.hstack((position_xy, position_z))
self.velocity_ = pd.DataFrame(velocity, columns=GF_VEL)
self.velocity_.index.name = "sample"
self.position_ = pd.DataFrame(position, columns=GF_POS)
self.position_.index.name = "sample"
return self | [
918
] |
def METHOD_NAME(self, epoch):
self.virtual_epoch_size = (
self.virtual_epoch_size
if self.virtual_epoch_size is not None
else self.virtual_size
)
if self.virtual_epoch_size > self.virtual_size:
logger.warning(
f"virtual epoch size {self.virtual_epoch_size} "
f"is greater than virtual dataset size {self.virtual_size}"
)
self.virtual_epoch_size = self.virtual_size
self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size)
self._current_epoch_start_index = self._get_epoch_start_index(epoch)
logger.info(
f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}"
) | [
102
] |
def METHOD_NAME(self, bash):
"""a b | with WORDBREAKS -= :"""
output = self._test(bash, "(a b '')", 2, "a b ", 4, arg="-n :")
assert output == ",b" | [
9,
1629
] |
def METHOD_NAME(args):
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
reader = PipelineDataFormat.from_str(
format=format,
output_path=args.output,
input_path=args.input,
column=args.column if args.column else nlp.default_input_names,
overwrite=args.overwrite,
)
return RunCommand(nlp, reader) | [
22,
462,
1155
] |
def METHOD_NAME(self): | [
9,
6837,
41,
3829,
2247
] |
def METHOD_NAME(self):
"""
Raise ClaimLockError if claiming lock failed
"""
if not self._create_lock_dir():
raise ClaimLockError("Failed to claim lock (the lock directory exists at {})".format(self.lock_dir)) | [
625,
894,
180
] |
def METHOD_NAME(
self, api_key=None, stripe_version=None, stripe_account=None, **params
) -> Self:
if not self.has_more:
return self.empty_list(
api_key=api_key,
stripe_version=stripe_version,
stripe_account=stripe_account,
)
last_id = getattr(self.data[-1], "id")
if not last_id:
raise ValueError(
"Unexpected: element in .data of list object had no id"
)
params_with_filters = self._retrieve_params.copy()
params_with_filters.update({"starting_after": last_id})
params_with_filters.update(params)
result = self.list(
api_key=api_key,
stripe_version=stripe_version,
stripe_account=stripe_account,
**params_with_filters
)
assert isinstance(result, ListObject)
return result | [
243,
1174
] |
def METHOD_NAME(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [amp * math.cos(2. * math.pi * freq * x) +
1j * amp * math.sin(2. * math.pi * freq * x) for x in t]
return y | [
5136,
1458,
2629
] |
def METHOD_NAME(self):
"""Append the implementation string to the user-agent string.
    This adds the information that you're using CPython 2.7.13 to the
User-Agent.
"""
self._pieces.append(_implementation_tuple())
return self | [
1872,
3212
] |
def METHOD_NAME(name, error_type, error):
el = create_testcase(name)
# encapsulate in CDATA section
error_text = root.createCDATASection("\n%s\n" % error)
failure_el = root.createElement("failure")
failure_el.setAttribute("type", error_type)
el.appendChild(failure_el)
el.lastChild.appendChild(error_text)
return el | [
129,
374
] |
def METHOD_NAME(s3_seekable_obj, s3_client_stub):
s3_client_stub.add_response("head_object", {"ContentLength": 1000})
assert s3_seekable_obj.size == 1000 | [
9,
1318
] |
def METHOD_NAME(self):
return self.emailAddress | [
-1,
85
] |
def METHOD_NAME():
fileorig = options["input"]
filevect = options["output"]
if not filevect:
filevect = basename(fileorig, "txt")
# are we in LatLong location?
s = grass.read_command("g.proj", flags="j")
kv = parse_key_val(s)
if kv["+proj"] != "longlat":
grass.fatal(_("This module only operates in LatLong/WGS84 locations"))
#### setup temporary file
tmpfile = grass.tempfile()
coldescs = [
("RC", "rc integer"),
("UFI", "uf1 integer"),
("UNI", "uni integer"),
("LAT", "lat double precision"),
("LONG", "lon double precision"),
("DMS_LAT", "dms_lat integer"),
("DMS_LONG", "dms_long integer"),
("MGRS", "mgrs varchar(15)"),
("JOG", "jog varchar(7)"),
("FC", "fc varchar(1)"),
("DSG", "dsg varchar(6)"),
("PC", "pc integer"),
("CC1", "cci varchar(255)"),
("ADM1", "adm1 varchar(2)"),
("POP", "pop integer"),
("ELEV", "elev double precision"),
("CC2", "cc2 varchar(255)"),
("NT", "nt varchar(2)"),
("LC", "lc varchar(3)"),
("SHORT_FORM", "shortform varchar(128)"),
("GENERIC", "generic varchar(128)"),
("SORT_NAME_RO", "sortnamero varchar(255)"),
("FULL_NAME_RO", "fullnamero varchar(255)"),
("FULL_NAME_ND_RO", "funamesdro varchar(255)"),
("SORT_NAME_RG", "sortnamerg varchar(255)"),
("FULL_NAME_RG", "fullnamerg varchar(255)"),
("FULL_NAME_ND_RG", "funamesdrg varchar(255)"),
("NOTE", "note varchar(4000)"),
("MODIFY_DATE", "mod_date date"),
("DISPLAY", "display varchar(255)"),
("NAME_RANK", "namerank integer"),
("NAME_LINK", "namelink integer"),
("TRANSL_CD", "translcd varchar(32)"),
("NM_MODIFY_DATE", "nmmodifydate varchar(10)"),
]
colnames = [desc[0] for desc in coldescs]
coltypes = dict([(desc[0], "integer" in desc[1]) for desc in coldescs])
header = None
num_places = 0
inf = open(fileorig)
outf = open(tmpfile, "wb")
for line in inf:
fields = line.rstrip("\r\n").split("\t")
if not header:
header = fields
continue
vars = dict(zip(header, fields))
fields2 = []
for col in colnames:
if col in vars:
if coltypes[col] and vars[col] == "":
fields2.append("0")
else:
fields2.append(vars[col])
else:
if coltypes[col]:
fields2.append("0")
else:
fields2.append("")
line2 = ";".join(fields2) + "\n"
outf.write(line2)
num_places += 1
outf.close()
inf.close()
grass.message(_("Converted %d place names.") % num_places)
# TODO: fix dms_lat,dms_long DDMMSS -> DD:MM:SS
# Solution:
# IN=DDMMSS
# DEG=`echo $IN | cut -b1,2`
# MIN=`echo $IN | cut -b3,4`
# SEC=`echo $IN | cut -b5,6`
# DEG_STR="$DEG:$MIN:$SEC"
# modifications (to match DBF 10 char column name limit):
# short_form -> shortform
# sort_name -> sortname
# full_name -> fullname
# full_name_sd -> funamesd
# pump data into GRASS:
columns = [desc[1] for desc in coldescs]
grass.run_command(
"v.in.ascii",
cat=0,
x=5,
y=4,
sep=";",
input=tmpfile,
output=filevect,
columns=columns,
)
try_remove(tmpfile)
# write cmd history:
vgrass.vector_history(filevect) | [
57
] |
def METHOD_NAME(m2ee):
if not appdynamics_used():
return
if not _is_javaagent_installed():
logging.warning(
"AppDynamics Java Agent isn't installed yet. "
"Please redeploy your application to complete "
"AppDynamics Java Agent installation."
)
return
logging.info("AppDynamics Java Agent env. variables are configured. Starting...")
util.upsert_javaopts(
m2ee,
[
f"-javaagent:{APPDYNAMICS_JAVAAGENT_PATH}",
f"-Dappagent.install.dir={APPDYNAMICS_INSTALL_PATH}",
],
)
_set_default_env(m2ee) | [
86,
200
] |
def METHOD_NAME(self):
response = yield self.web.get(
'graphviz',
{b'tx': self.tx2.hash_hex.encode('utf-8'), b'graph_type': b'funds', b'max_level': b'2'}
)
data = response.written[0]
self.assertIsNotNone(data) | [
9,
11810,
303,
53
] |
def METHOD_NAME(typingctx):
sig = signature(types.int32)
def codegen(context, builder, sig, args):
return nvvmutils.call_sreg(builder, 'warpsize')
return sig, codegen | [
-1
] |
def METHOD_NAME():
"""
"""
# FIXME
src = ['register_test_data/src_label.nii']
dest = ['register_test_data/dest_label_RPI.nii']
step = Paramreg(
step='0',
type='label',
algo='syn',
metric='MeanSquares',
iter='10',
shrink='1',
smooth='0',
laplacian='0',
gradStep='0.5',
deformation='1x1x0',
slicewise='0',
init='',
poly='5',
filter_size=5,
dof='Tx_Ty_Tz_Rx_Ry_Rz_Sz',
smoothWarpXY='2',
pca_eigenratio_th='1.6',
rot_method='pca',
)
cli_params = Param()
cli_params.debug = 2
return src, dest, step, cli_params | [
-1,
365
] |
def METHOD_NAME(kernel_dst_path, configs=None):
"""
Remove entry referring to the upgrade kernel.
We have to ensure there are no duplicit boot entries. Main reason is crash
of zipl when duplicit entries exist.
"""
cmd = [
'/usr/sbin/grubby',
'--remove-kernel', '{0}'.format(kernel_dst_path)
]
try:
if configs:
for config in configs:
run(cmd + ['-c', config])
else:
run(cmd)
except CalledProcessError:
# TODO(pstodulk): instead of this, check whether the entry exists or not
# so no warning of problem is reported (info log could be present if the
        # entry is missing).
api.current_logger().warning(
'Could not remove {} entry. May be ignored if the entry did not exist.'.format(kernel_dst_path)
) | [
188,
2228,
738,
1642,
475
] |
def METHOD_NAME(self):
return self._port_to_eeprom_mapping | [
237,
24,
2359,
445
] |
def METHOD_NAME(unprocessed_firm_dict: dict):
event_ids = unprocessed_firm_dict.get('event_ids')
last_event_id = unprocessed_firm_dict.get('last_event_id')
last_processed_event_id = unprocessed_firm_dict.get('last_processed_event_id')
if not last_processed_event_id or pd.isna(last_processed_event_id):
return []
if last_processed_event_id != last_event_id:
last_processed_event_id_index = event_ids.index(last_processed_event_id) + 1
processed_events_ids = event_ids[:last_processed_event_id_index]
return processed_events_ids
# shouldn't get here
return [] | [
19,
5288,
417,
308
] |
def METHOD_NAME(apps, schema_editor):
""" Move objects from NewAnsibleCollectionDeprecated to AnsibleCollectionDeprecated."""
AnsibleCollectionDeprecated = apps.get_model('ansible', 'AnsibleCollectionDeprecated')
NewAnsibleCollectionDeprecated = apps.get_model('ansible', 'NewAnsibleCollectionDeprecated')
RepositoryVersionContentDetails = apps.get_model('core', 'RepositoryVersionContentDetails')
for deprecation in NewAnsibleCollectionDeprecated.objects.all():
AnsibleCollectionDeprecated(
collection_id=deprecation.collection_id,
repository_version_id=deprecation.version_added_id,
).save()
deprecation.version_memberships.all().delete()
RepositoryVersionContentDetails.objects.filter(content_type="ansible.collection_deprecation").delete() | [
2744,
365,
280,
2228,
578,
24,
80
] |
def METHOD_NAME(self): | [
9,
176
] |
def METHOD_NAME(self, icon_score: 'IconScoreBase', func_name: str):
is_func_readonly = getattr(icon_score, '_IconScoreBase__is_func_readonly')
if func_name is not None and is_func_readonly(func_name):
self.func_type = IconScoreFuncType.READONLY
else:
self.func_type = IconScoreFuncType.WRITABLE | [
0,
717,
44,
604,
875,
747
] |
def METHOD_NAME(s):
"""Convert a snippet of HTML to Textile, a simple markup language. See
http://www.textism.com/tools/textile/ for Textile's rules.
>>> html2textile("<h1>Hello world!</h1>")
'h1. Hello world!'
>>> html2textile("<h1>Hello <strong>world</strong>!</h1>")
'h1. Hello *world*!'
>>> html2textile('<h1>Hello <a href="http://www.google.com/">world</a>!</h1>')
'h1. Hello "world":http://www.google.com/!'
>>> html2textile('<img src="http://www.google.com/intl/en/images/logo.gif" \
... width="276" height="110" alt="Google logo">')
'!http://www.google.com/intl/en/images/logo.gif!'
>>> html2textile('<h1>Hello world!</h1><p>Welcome to my home page.</p>')
'h1. Hello world!\\n\\np. Welcome to my home page.'
"""
parser = HtmlToTextileConvertingParser()
parser.feed(s)
parser.close()
return parser.result | [
-1
] |
def METHOD_NAME(filename, instructions):
log_message(f"writing to {filename}...")
with open(filename, "w") as script:
script.write("import {AssemblyInstructionInfo} from '../base.js';\n")
script.write("\n")
script.write("export function getAsmOpcode(opcode: string | undefined): AssemblyInstructionInfo | undefined {\n")
script.write(" if (!opcode) return;\n")
script.write(" switch (opcode.toUpperCase()) {\n")
for inst in instructions.values():
script.write(f" case \"{inst.mnemonic}\":\n")
if inst.mnemonic_2:
script.write(f" case \"{inst.mnemonic_2}\":\n")
script.write(" return {\n")
html = f"{16 * ' '}\"html\": \"<p>"
html += inst.description.replace("\n\n", "</p><p>")
html += "</p>\",\n"
script.write(html)
script.write(f"{16 * ' '}\"tooltip\": \"{inst.name}\",\n")
script.write(f"{16 * ' '}\"url\": \"{FILE}#page={inst.page}\",\n")
script.write(12 * " " + "};\n\n")
script.write(" }\n}") | [
77,
782
] |
def METHOD_NAME(options):
if not options.output:
sys.stderr.write('--output not specified\n')
return -1
if not options.name:
sys.stderr.write('--name not specified\n')
return -1
if not options.tar_input:
sys.stderr.write('--tar_input not specified\n')
return -1
# Read it back in.
with open(options.tar_input, 'rb') as tar_file:
tar_archive = tar_file.read()
# Write CC file.
WriteCCFile(options.output, options.outer_namespace,
options.inner_namespace, options.name, tar_archive)
return 0 | [
93,
1298,
171
] |
def METHOD_NAME(
self,
container_name: str,
prefix: str,
) -> bool:
"""
Checks if content exists at a certain path in an Azure Blob Storage container.
"""
blob_client = self.service_client.get_blob_client(
container_name,
prefix,
)
return blob_client.METHOD_NAME() | [
954
] |
def METHOD_NAME(self):
return self.ids ^ self.ready | [
6600,
620
] |
def METHOD_NAME():
with Image.open("Tests/images/hopper.gif") as im:
original = im.copy()
im.palette.dirty = 1
assert_image_equal(im.convert("RGB"), original.convert("RGB")) | [
9,
1372
] |
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("UsageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem) | [
297,
365
] |
def METHOD_NAME(tmp_path):
exttree = {
"cool_stuff": {"a": np.array([0, 1, 2], float), "b": np.array([3, 4, 5], float)},
"list_of_stuff": ["foobar", 42, np.array([7, 8, 9], float)],
}
external_path = os.path.join(str(tmp_path), "external.asdf")
ext = asdf.AsdfFile(exttree)
# Since we're testing with small arrays, force all arrays to be stored
# in internal blocks rather than letting some of them be automatically put
# inline.
ext.write_to(external_path, all_array_storage="internal")
external_path = os.path.join(str(tmp_path), "external2.asdf")
ff = asdf.AsdfFile(exttree)
ff.write_to(external_path, all_array_storage="internal")
tree = {
# The special name "data" here must be an array. This is
# included so that such validation can be ignored when we just
# have a "$ref".
"data": {"$ref": "external.asdf#/cool_stuff/a"},
"science_data": {"$ref": "external.asdf#/cool_stuff/a"},
"science_data2": {"$ref": "external2.asdf#/cool_stuff/a"},
"foobar": {
"$ref": "external.asdf#/list_of_stuff/0",
},
"answer": {"$ref": "external.asdf#/list_of_stuff/1"},
"array": {
"$ref": "external.asdf#/list_of_stuff/2",
},
"whole_thing": {"$ref": "external.asdf#"},
"myself": {
"$ref": "#",
},
"internal": {"$ref": "#science_data"},
}
def do_asserts(ff):
assert "unloaded" in repr(ff.tree["science_data"])
assert "unloaded" in str(ff.tree["science_data"])
assert len(ff._external_asdf_by_uri) == 0
assert_array_equal(ff.tree["science_data"], exttree["cool_stuff"]["a"])
assert len(ff._external_asdf_by_uri) == 1
with pytest.raises((ValueError, RuntimeError), match=r"assignment destination is read-only"):
# Assignment destination is readonly
ff.tree["science_data"][0] = 42
assert_array_equal(ff.tree["science_data2"], exttree["cool_stuff"]["a"])
assert len(ff._external_asdf_by_uri) == 2
assert ff.tree["foobar"]() == "foobar"
assert ff.tree["answer"]() == 42
assert_array_equal(ff.tree["array"], exttree["list_of_stuff"][2])
assert_tree_match(ff.tree["whole_thing"](), exttree)
assert_array_equal(ff.tree["whole_thing"]["cool_stuff"]["a"], exttree["cool_stuff"]["a"])
assert_array_equal(ff.tree["myself"]["science_data"], exttree["cool_stuff"]["a"])
# Make sure that referencing oneself doesn't make another call
# to disk.
assert len(ff._external_asdf_by_uri) == 2
assert_array_equal(ff.tree["internal"], exttree["cool_stuff"]["a"])
with asdf.AsdfFile(tree, uri=util.filepath_to_url(os.path.join(str(tmp_path), "main.asdf"))) as ff:
do_asserts(ff)
internal_path = os.path.join(str(tmp_path), "main.asdf")
ff.write_to(internal_path)
with asdf.open(internal_path) as ff:
do_asserts(ff)
with asdf.open(internal_path) as ff:
assert len(ff._external_asdf_by_uri) == 0
ff.resolve_references()
assert len(ff._external_asdf_by_uri) == 2
assert isinstance(ff.tree["data"], ndarray.NDArrayType)
assert isinstance(ff.tree["science_data"], ndarray.NDArrayType)
assert_array_equal(ff.tree["science_data"], exttree["cool_stuff"]["a"])
assert_array_equal(ff.tree["science_data2"], exttree["cool_stuff"]["a"])
assert ff.tree["foobar"] == "foobar"
assert ff.tree["answer"] == 42
assert_array_equal(ff.tree["array"], exttree["list_of_stuff"][2])
assert_tree_match(ff.tree["whole_thing"], exttree)
assert_array_equal(ff.tree["whole_thing"]["cool_stuff"]["a"], exttree["cool_stuff"]["a"])
assert_array_equal(ff.tree["myself"]["science_data"], exttree["cool_stuff"]["a"])
assert_array_equal(ff.tree["internal"], exttree["cool_stuff"]["a"]) | [
9,
751,
272
] |
def METHOD_NAME(output, output_len, targets, targets_len):
output_trans = output.permute(1, 0, 2) # needed by the CTCLoss
METHOD_NAME = F.ctc_loss(output_trans, targets, output_len, targets_len, reduction='none', zero_infinity=True)
METHOD_NAME /= output_len
METHOD_NAME = METHOD_NAME.mean()
return METHOD_NAME | [
1572
] |
def METHOD_NAME(self, request, *args, **kwargs):
api_token = self.get_api_token()
q_param_serializer = SlackChannelListQueryParamSerializer(data=request.GET)
q_param_serializer.is_valid(raise_exception=True)
slack_wrapper = SlackWrapper(api_token=api_token)
channel_data_response = slack_wrapper.get_channels_data(
**q_param_serializer.validated_data
)
serializer = self.get_serializer(channel_data_response)
return Response(serializer.data) | [
245
] |
def METHOD_NAME(self):
with patch.object(self.c.plugin, 'create_charm') as create_charm, \
patch.object(self.c, '_cleanup') as _cleanup:
create_charm.side_effect = Exception
with self.assertRaises(Exception):
self.c.create_charm()
self.assertTrue(_cleanup.called) | [
9,
129,
13715,
168
] |
def METHOD_NAME(batch_size):
train_path = "../../../data/shakespeare/train"
test_path = "../../../data/shakespeare/test"
users, groups, train_data, test_data = read_data(train_path, test_path)
if len(groups) == 0:
groups = [None for _ in users]
train_data_num = 0
test_data_num = 0
train_data_local_dict = dict()
test_data_local_dict = dict()
train_data_local_num_dict = dict()
train_data_global = list()
test_data_global = list()
client_idx = 0
for u, g in zip(users, groups):
user_train_data_num = len(train_data[u]["x"])
user_test_data_num = len(test_data[u]["x"])
train_data_num += user_train_data_num
test_data_num += user_test_data_num
train_data_local_num_dict[client_idx] = user_train_data_num
# transform to batches
train_batch = batch_data(train_data[u], batch_size)
test_batch = batch_data(test_data[u], batch_size)
# index using client index
train_data_local_dict[client_idx] = train_batch
test_data_local_dict[client_idx] = test_batch
train_data_global += train_batch
test_data_global += test_batch
client_idx += 1
client_num = client_idx
output_dim = VOCAB_SIZE
return (
client_num,
train_data_num,
test_data_num,
train_data_global,
test_data_global,
train_data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
output_dim,
) | [
557,
2312,
365,
-1
] |
def METHOD_NAME(server):
server.log.info("Forked child, re-executing.") | [
709,
1005
] |
def METHOD_NAME(url, path, auth=None, chunk_size=10240, progress=False):
"""
Download a file. Will write to ``path + '.part'`` while downloading
and rename after successful download to the final name.
Parameters
----------
url: str or url
The URL to download
path: pathlib.Path or str
Where to store the downloaded data.
auth: None or tuple of (username, password) or a request.AuthBase instance.
chunk_size: int
Chunk size for writing the data file, 10 kB by default.
"""
log.info(f"Downloading {url} to {path}")
name = urlparse(url).path.split("/")[-1]
path = Path(path)
part_file = None
with requests.get(url, stream=True, auth=auth, timeout=5) as r:
# make sure the request is successful
r.raise_for_status()
total = float(r.headers.get("Content-Length", float("inf")))
pbar = tqdm(
total=total,
disable=not progress,
unit="B",
unit_scale=True,
desc=f"Downloading {name}",
)
try:
# open a .part file to avoid creating
# a broken file at the intended location
part_file = path.with_suffix(path.suffix + ".part")
part_file.parent.mkdir(parents=True, exist_ok=True)
with part_file.open("wb") as f:
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(len(chunk))
except BaseException: # we really want to catch everything here
# cleanup part file if something goes wrong
if part_file is not None and part_file.is_file():
part_file.unlink()
raise
# when successful, move to intended location
part_file.rename(path) | [
136,
171
] |
def METHOD_NAME(path,
compression=None,
reader_path_prefix=None,
writer_path_prefix=None,
shard_size_bytes=None,
pending_snapshot_expiry_seconds=None,
num_reader_threads=None,
reader_buffer_size=None,
num_writer_threads=None,
writer_buffer_size=None):
"""Writes to/reads from a snapshot of a dataset.
This function attempts to determine whether a valid snapshot exists at the
`path`, and reads from the snapshot if so. If not, it will run the
preprocessing pipeline as usual, and write out a snapshot of the data
processed for future use.
Args:
path: A directory where we want to save our snapshots and/or read from a
previously saved snapshot.
compression: The type of compression to apply to the Dataset. Currently
supports "GZIP" or None. Defaults to None (no compression).
reader_path_prefix: A prefix to add to the path when reading from snapshots.
Defaults to None.
writer_path_prefix: A prefix to add to the path when writing to snapshots.
Defaults to None.
shard_size_bytes: The size of each shard to be written by the snapshot
dataset op. Defaults to 10 GiB.
pending_snapshot_expiry_seconds: How long to wait (in seconds) before
the snapshot op considers a previously unfinished snapshot to be stale.
num_reader_threads: Number of threads to parallelize reading from snapshot.
Especially useful if compression is turned on since the decompression
operation tends to be intensive. Defaults to 1. If > 1, then this might
introduce non-determinism i.e. the order in which the elements are
read from the snapshot are different from the order they're written.
reader_buffer_size: Maximum number of elements we can prefetch reading from
the snapshot. Defaults to 1. Increasing this might improve performance
but will increase memory consumption.
num_writer_threads: Number of threads to parallelize writing from snapshot.
We'll open up `num_writer_threads` files and write to them in parallel.
Especially useful if compression is turned on since the compression
operation tends to be intensive. Defaults to 1. If > 1, then this might
introduce non-determinism i.e. the order in which the elements are
read from the upstream iterator are different from the order they're
written.
writer_buffer_size: Maximum number of pipeline elements to fill up the
buffer before writing them out using `num_writer_threads`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _SnapshotDataset(dataset, path, compression, reader_path_prefix,
writer_path_prefix, shard_size_bytes,
pending_snapshot_expiry_seconds, num_reader_threads,
reader_buffer_size, num_writer_threads,
writer_buffer_size)
return _apply_fn | [
394
] |
def METHOD_NAME():
global test_only
    parser = argparse.ArgumentParser(description="Manage configlets for CUD (Update & Delete)")
parser.add_argument("-j", "--json", help="json file that contains configlet", action='append')
parser.add_argument("-t", "--test", help="Test only", action='store_true', default=False)
parser.add_argument("-p", "--parse", help="Parse JSON only", action='store_true', default=False)
parser.add_argument("-u", "--update", help="Apply the JSON as update", action='store_true', default=False)
parser.add_argument("-d", "--delete", help="Apply the JSON as delete", action='store_true', default=False)
args = parser.parse_args()
test_only = args.test
parse_only = args.parse
do_update = args.update
do_delete = args.delete
do_act = test_only | parse_only | do_update | do_delete
if not do_act:
print("Expect an action update/delete or for debug parse/test\n")
parser.print_help()
sys.exit(-1)
for json_file in args.json:
with open(json_file, 'r') as stream:
data = json.load(stream)
if parse_only == False:
for i in data:
process_entry (do_update, i)
else:
print("Parsed:")
print(data) | [
57
] |
def METHOD_NAME(key, value=None, stack_offset=1, deferred=False, prefix=""):
return _mlperf_print(key=key, value=value, benchmark=TRANSFORMER,
stack_offset=stack_offset, tag_set=TRANSFORMER_TAG_SET,
deferred=deferred, root_dir=ROOT_DIR_TRANSFORMER,
prefix=prefix) | [
1052,
38
] |
def METHOD_NAME(panel_data) -> None:
with pytest.raises(ValueError):
ClusteredCovariance(
panel_data.y,
panel_data.x,
panel_data.params,
panel_data.entity_ids,
panel_data.time_ids,
extra_df=0,
clusters=panel_data.cluster5,
)
with pytest.raises(ValueError):
ClusteredCovariance(
panel_data.y,
panel_data.x,
panel_data.params,
panel_data.entity_ids,
panel_data.time_ids,
extra_df=0,
clusters=panel_data.cluster4[::2],
) | [
9,
9266,
8649,
168
] |
async def METHOD_NAME(mock_iam_client):
policy = await get_managed_policy(mock_iam_client, EXAMPLE_POLICY_ARN)
assert policy["PolicyName"] == EXAMPLE_MANAGED_POLICY_NAME | [
9,
19,
3627,
54
] |
def METHOD_NAME(self, key, element, ranges, **kwargs):
layout = super().METHOD_NAME(key, element, ranges)
gridded = element.gridded
xdim, ydim = gridded.dimensions()[:2]
if self.invert_axes:
xaxis, yaxis = ('yaxis', 'xaxis')
else:
xaxis, yaxis = ('xaxis', 'yaxis')
shape = gridded.interface.shape(gridded, gridded=True)
xtype = gridded.interface.dtype(gridded, xdim)
if xtype.kind in 'SUO':
layout[xaxis]['tickvals'] = np.arange(shape[1])
layout[xaxis]['ticktext'] = gridded.dimension_values(0, expanded=False)
ytype = gridded.interface.dtype(gridded, ydim)
if ytype.kind in 'SUO':
layout[yaxis]['tickvals'] = np.arange(shape[0])
layout[yaxis]['ticktext'] = gridded.dimension_values(1, expanded=False)
return layout | [
176,
571
] |
def METHOD_NAME(self):
# Configure the XQueue stub response to any submission to the test queue
response_content = {'test_response': 'test_content'}
self.server.config['This is only a test.'] = response_content
# Post a submission to the XQueue stub
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({'submission': 'This is only a test.'})
)
# Check that we receive the response we configured
self._check_grade_response(callback_url, expected_header, json.dumps(response_content)) | [
9,
111,
3303,
17
] |
def METHOD_NAME(scr_env, nodelist):
# if SCR_MIN_NODES is set, use that
num_needed = os.environ.get('SCR_MIN_NODES')
if num_needed is None or int(num_needed) <= 0:
# otherwise, use value in nodes file if one exists
num_needed = scr_env.get_runnode_count()
if num_needed <= 0:
# otherwise, assume we need all nodes in the allocation
num_needed = scr_glob_hosts(count=True,
hosts=nodelist,
resmgr=scr_env.resmgr)
if num_needed is None:
# failed all methods to estimate the minimum number of nodes
return 0
return int(num_needed) | [
480,
2002
] |
async def METHOD_NAME():
class Service(LoopService):
state = []
loop_seconds = 0
async def _on_start(self):
self.state.append("_on_start")
await super()._on_start()
async def run_once(self):
pass
async def _on_stop(self):
self.state.append("_on_stop")
await super()._on_stop()
service = Service()
await service.start(loops=3)
assert service.state == ["_on_start", "_on_stop"] | [
9,
1751,
549,
1929,
69,
447,
69
] |
def METHOD_NAME(cls):
config = cls.wrap(commcare_build_config)
config._id = config._ID
config.save()
return config | [
904
] |
def METHOD_NAME(orientation, node_layout, link_orientation):
grid = HexModelGrid((5, 5), orientation=orientation, node_layout=node_layout)
links = grid.orientation_of_link == link_orientation
assert np.all(
np.isclose(
grid.angle_of_link[links],
ORIENTATION_ANGLE[link_orientation] * np.pi / 180.0,
)
) | [
9,
548,
5354
] |
def METHOD_NAME():
"""Datacite client calls assertion helper."""
assert dc_mock().metadata_post.call_count == 0
assert dc_mock().doi_post.call_count == 0 | [
638,
13306,
1929,
41,
1038,
365
] |
def METHOD_NAME(self) -> Dict[str, object]:
"""
A `dict` representing this QMP message.
Generated on-demand, if required. This property is private
because it returns an object that could be used to invalidate
the internal state of the `Message` object.
"""
if self._obj is None:
self._obj = self._deserialize(self._data or b'{}')
return self._obj | [
279
] |
def METHOD_NAME(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps) | [
80,
186
] |
def METHOD_NAME(output_folder):
pipeline_client = PipelinesClient(base_url='https://dev.azure.com/azure-sdk',
creds=BasicAuthentication(os.getenv('PIPELINE_TOKEN'), ''))
pipelines = pipeline_client.list_pipelines(project='internal')
for pipeline in pipelines:
if re.findall('^python - \w*$', pipeline.name):
key = pipeline.name.replace('python - ', '')
if key == output_folder:
pipeline_url = 'https://dev.azure.com/azure-sdk/internal/_build?definitionId={}'.format(pipeline.id)
return pipeline_url
else:
_LOG.info('Cannot find definitionId, Do not display pipeline_url')
return '' | [
19,
440,
586,
1148
] |
def METHOD_NAME(self, soup: BeautifulSoup) -> str:
tag = soup.select_one(".fic_image img")
assert tag
if tag.has_attr("data-src"):
return self.absolute_url(tag["data-src"])
elif tag.has_attr("src"):
return self.absolute_url(tag["src"]) | [
214,
3866
] |
def METHOD_NAME(target, worker, activation, profile="default", tgt_type="glob"):
"""
Check if the worker is in `activation` state in the targeted load balancers
The function will return the following dictionary:
result - False if no server returned from the published command
errors - list of servers that couldn't find the worker
wrong_state - list of servers that the worker was in the wrong state
(not activation)
"""
ret = {
"result": True,
"errors": [],
"wrong_state": [],
}
args = [worker, profile]
status = __salt__["publish.publish"](target, "modjk.worker_status", args, tgt_type)
    # Did we get any response from anyone?
if not status:
ret["result"] = False
return ret
# Search for errors & status
for balancer in status:
if not status[balancer]:
ret["errors"].append(balancer)
elif status[balancer]["activation"] != activation:
ret["wrong_state"].append(balancer)
return ret | [
1794,
452
] |
def METHOD_NAME(footprint):
"""
ALMA footprints have the form:
'Polygon ICRS 266.519781 -28.724666 266.524678 -28.731930 266.536683
-28.737784 266.543860 -28.737586 266.549277 -28.733370 266.558133
-28.729545 266.560136 -28.724666 266.558845 -28.719605 266.560133
-28.694332 266.555234 -28.687069 266.543232 -28.681216 266.536058
-28.681414 266.530644 -28.685630 266.521788 -28.689453 266.519784
-28.694332 266.521332 -28.699778'
Some of them have *additional* polygons
"""
if footprint[:7] != 'Polygon' and footprint[:6] != 'Circle':
raise ValueError("Unrecognized footprint type")
try:
import regions
except ImportError:
print('Could not import `regions`, which is required for the '
'functionality of this function.')
raise
reglist = []
meta = {'source': 1, 'include': 1, 'fixed': 0, 'text': ''}
visual = {'color': 'green', 'dash': '0', 'dashlist': '8 3',
'font': '"helvetica 10 normal roman"', 'width': '1'}
entries = footprint.split()
if entries[0] == 'Circle':
center = SkyCoord(float(entries[2]), float(entries[3]), frame='icrs', unit=(u.deg, u.deg))
reg = regions.CircleSkyRegion(center, radius=float(entries[4])*u.deg,
meta=meta, visual=visual)
reglist.append(reg)
else:
polygons = [index for index, entry in enumerate(entries) if entry == 'Polygon']
for start, stop in zip(polygons, polygons[1:]+[len(entries)]):
start += 1
ra = [float(x) for x in entries[start+1:stop:2]]*u.deg
dec = [float(x) for x in entries[start+2:stop:2]]*u.deg
vertices = SkyCoord(ra, dec, frame='icrs')
reg = regions.PolygonSkyRegion(vertices=vertices, meta=meta, visual=visual)
reglist.append(reg)
return reglist | [
9241,
24,
739
] |
def METHOD_NAME(self, language, country):
conf = address_config.get_property('components', language, country=country)
for component, value in six.iteritems(conf):
if component == 'combinations':
continue
total_prob = 0.0
for k, v in six.iteritems(value):
if k.endswith('probability'):
total_prob += v
self.assertTrue(isclose(total_prob, 1.0), six.u('language: {}, country: {}, component: {}'.format(language, country, component))) | [
250,
811
] |
def METHOD_NAME(test_name, cloud_provider, rke_client, kubectl):
"""
Providing address, hostname override(non-resolvable) and internal address
"""
rke_template = 'cluster_install_config_7.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to non-resolvable name for hostname_override
index = 0
for node in nodes:
node.node_name = "{0}-{1}".format(test_name, index)
index += 1
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True) | [
9,
428,
200,
3141
] |
def METHOD_NAME(self) -> None:
self._name: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._visibility: Attribute[str] = NotSet
self._selected_repositories: Attribute[PaginatedList[Repository]] = NotSet
self._selected_repositories_url: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet | [
176,
177
] |
def METHOD_NAME():
refcase_file = "CEST_PAS_UNE_REFCASE"
refcase_file_content = """ | [
9,
4700,
200,
216,
69,
256,
-1
] |
def METHOD_NAME():
t = _get_time()
dt = t - t + 0.1234556 * u.s
dty = load(dump(dt))
assert type(dt) is type(dty)
for attr in ("shape", "jd1", "jd2", "format", "scale"):
assert np.all(getattr(dt, attr) == getattr(dty, attr)) | [
9,
8227
] |
def METHOD_NAME(self) -> None:
"""Initialize layers of the head."""
self._init_reg_convs()
self._init_predictor() | [
176,
2315
] |
def METHOD_NAME(shard_size=2000, featurizer=None, split=None, reload=True):
"""Loads kaggle datasets. Generates if not stored already.
The Kaggle dataset is an in-house dataset from Merck that was first introduced in the following paper:
Ma, Junshui, et al. "Deep neural nets as a method for quantitative structure–activity relationships." Journal of chemical information and modeling 55.2 (2015): 263-274.
It contains 100,000 unique Merck in-house compounds that were
measured on 15 enzyme inhibition and ADME/TOX datasets.
Unlike most of the other datasets featured in MoleculeNet,
the Kaggle collection does not have structures for the
compounds tested since they were proprietary Merck compounds.
However, the collection does feature pre-computed descriptors
for these compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
Parameters
----------
shard_size: int, optional
Size of the DiskDataset shards to write on disk
featurizer: optional
Ignored since featurization pre-computed
split: optional
Ignored since split pre-computed
reload: bool, optional
Whether to automatically re-load from disk
"""
KAGGLE_tasks = [
'3A4', 'CB1', 'DPP4', 'HIVINT', 'HIV_PROT', 'LOGD', 'METAB', 'NK1',
'OX1', 'OX2', 'PGP', 'PPB', 'RAT_F', 'TDI', 'THROMBIN'
]
data_dir = deepchem.utils.data_utils.get_data_dir()
data_dir = os.path.join(data_dir, "kaggle")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
train_dir = os.path.join(data_dir, "train_dir")
valid_dir = os.path.join(data_dir, "valid_dir")
test_dir = os.path.join(data_dir, "test_dir")
if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
os.path.exists(test_dir)):
logger.info("Reloading existing datasets")
train_dataset = deepchem.data.DiskDataset(train_dir)
valid_dataset = deepchem.data.DiskDataset(valid_dir)
test_dataset = deepchem.data.DiskDataset(test_dir)
else:
logger.info("Featurizing datasets")
train_dataset, valid_dataset, test_dataset = \
gen_kaggle(KAGGLE_tasks, train_dir, valid_dir, test_dir, data_dir,
shard_size=shard_size)
transformers = get_transformers(train_dataset)
return KAGGLE_tasks, (train_dataset, valid_dataset,
test_dataset), transformers | [
557,
11503
] |
def METHOD_NAME(self):
self.assertTrue(jpUtils.isKanaCharacter("ア")) | [
9,
137,
-1,
5859
] |
def METHOD_NAME(self):
"""
Return a clone ("deep copy") of this node.
:return: new AST node instance
:rtype: ASTInputBlock
"""
input_definitions_dup = [input_definition.METHOD_NAME() for input_definition in self.input_definitions]
dup = ASTInputBlock(input_definitions=input_definitions_dup,
# ASTNode common attributes:
source_position=self.source_position,
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
in_comment=self.in_comment,
implicit_conversion_factor=self.implicit_conversion_factor)
return dup | [
670
] |
def METHOD_NAME(argv):
# argument parsing exception handling
q_help = "(Optional) Scalar quantities to calculate from vector fields"
q_help += " (N: magnitude,"
q_help += " n: normal component,"
q_help += " t: tangent component,"
q_help += " a: angle between vector and surface normal)"
q_help += " Default: calculate all quantities"
def allowed_fields(s):
for ch in s:
if ch not in "Nnta":
raise argparse.ArgumentTypeError(q_help)
return s
parser = argparse.ArgumentParser(prog="msh2cortex",
description="Interpolates fields from the gray"
" matter volume to a cortical surface located between the gray"
" and white matter surfaces."
" Outputs freesurfer overlay files or gifti files")
parser.add_argument("-i", '--in', dest='fn_in', required=True,
help="Input mesh with simulation results")
parser.add_argument('-m', "--m2mpath", dest='m2mpath', required=True,
help="path to m2m_{subjectID} directory, created in the "
"segmentation")
parser.add_argument('-o', '--out_folder', dest='out_folder', required=True,
help="Folder where output files will be saved")
parser.add_argument('-d', '--depth', dest='depth', required=False, type=float,
help="(Optional) Depth where the field is to be interpolated."
" 0 means at the gray matter surface, 1 at the white matter"
" surface. Default: 0.5. This argument is only used if the"
" head mesh was generated with mri2mesh", default=0.5)
parser.add_argument('-f', '--fsaverage_folder', dest='fsaverage_folder',
required=False,
help="(Optional) Folder where output files in fsaverage space will"
" be saved. If not set, the fields will not be transformed to "
" FsAverage",
default=None)
parser.add_argument('--quantities', type=allowed_fields, required=False,
help=q_help, default='Nnta')
parser.add_argument('--open-in-gmsh', action='store_true',
help="(Optional) If set, opens a gmsh window with the overlays after"
" performing the transformations")
parser.add_argument('--version', action='version', version=__version__)
return parser.parse_args(argv) | [
214,
134
] |
def METHOD_NAME(
mock_get_instance_config,
mock_send_sensu_event,
mock_get_services_for_cluster,
mock_latest_oom_events,
scribereader_output,
):
mock_get_services_for_cluster.return_value = [
("fake_service1", "fake_instance1"),
("fake_service2", "fake_instance2"),
("fake_service3", "fake_instance3"),
]
main(["", "-s", "some_superregion", "-d", "soa_dir", "--check-interval", "3"])
assert mock_send_sensu_event.call_count == 3
mock_latest_oom_events.assert_called_once_with(
cluster="fake_cluster",
superregion="some_superregion",
interval=180,
) | [
9,
57
] |
def METHOD_NAME(__data: ReadableBuffer, __errors: str | None = None, __mapping: _CharMap | None = None) -> tuple[str, int]: ... | [
5304,
1268
] |
def METHOD_NAME(self, user=None):
return self.get_link(kind="unfollow", user=user) | [
19,
7885,
548
] |
def METHOD_NAME(self): | [
0,
1228
] |
def METHOD_NAME(value: int) -> int: ... | [
0,
4775
] |
def METHOD_NAME(self, context):
self.inputs.new('SvVerticesSocket', "ControlPoints")
self.inputs.new('SvStringsSocket', "Weights")
self.inputs.new('SvStringsSocket', "Knots")
self.inputs.new('SvStringsSocket', "Degree").prop_name = 'degree'
self.outputs.new('SvCurveSocket', "Curve")
self.outputs.new('SvStringsSocket', "Knots")
self.update_sockets(context) | [
2153,
176
] |
def METHOD_NAME(self, key, default=None):
"""Pop item from info by key."""
with self:
popped = self._info.METHOD_NAME(key, default)
return popped | [
760
] |
def METHOD_NAME(self): | [
9,
265
] |