text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7)
---|---|
def METHOD_NAME(directory):
"""
Function to find DICOM files in a given folder.
:param directory: File path of folder to search.
:return: List of file paths of DICOM files in given folder.
"""
dicom_files = []
# Walk through directory
for root, dirs, files in os.walk(directory, topdown=True):
for name in files:
# Attempt to open file as a DICOM file
try:
dcmread(os.path.join(root, name))
except (InvalidDicomError, FileNotFoundError):
pass
else:
dicom_files.append(os.path.join(root, name))
return dicom_files | [
19,
7087,
1537
] |
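The finder above relies on `os.walk` together with pydicom's `dcmread` and `InvalidDicomError`. A minimal usage sketch, assuming those imports and that the masked function is bound to a hypothetical name `find_dicom_files`:

```python
import os
from pydicom import dcmread                    # imports the function above depends on
from pydicom.errors import InvalidDicomError

# Hypothetical name for the masked METHOD_NAME above.
for path in find_dicom_files("/data/scans"):
    print(path)  # prints every file under /data/scans that parses as DICOM
```
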
def METHOD_NAME(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise | [
137,
919,
9522
] |
def METHOD_NAME(self):
for option in self.user_options:
setattr(self, option[0].strip('=').replace('-', '_'), None)
option_dict = self.distribution.get_option_dict(self.package_type)
# This is a hack, we probably aren't supposed to loop through
# the option_dict so early because distutils does exactly the
# same thing later to check that we support the
# options. However, it works...
for (option, (source, value)) in option_dict.items():
setattr(self, option, str(value)) | [
15,
1881
] |
def METHOD_NAME(self):
"""
Instantiate the memory controller and bind it to
the current interface.
"""
METHOD_NAME = MemCtrl()
METHOD_NAME.dram = self
return METHOD_NAME | [
2951
] |
def METHOD_NAME(group_id):
group_id = str(group_id)
key = normalize_cache_key(group_id, GROUP_ID_CACHE_PREFIX)
cached_group_name = cache.get(key)
if cached_group_name:
return cached_group_name
group = ccnet_api.get_group(int(group_id))
if not group:
return ''
group_name = group.group_name
cache.set(key, group_name, GROUP_ID_CACHE_TIMEOUT)
return group_name | [
846,
147,
24,
156
] |
def METHOD_NAME(params, mount_point, client):
csr = params.get('csr')
extra_params = params.get('extra_params')
role = params.get('role').strip('/')
# check if role exists
try:
current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data')
except Exception:
current_state = {}
if not current_state:
return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_verbatim(csr=csr, name=role, extra_params=extra_params,
mount_point=mount_point).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result | [
13006
] |
def METHOD_NAME(accounts, sovryn, loanToken, SUSD, WRBTC, priceFeeds, chain, SOV, FeesEvents):
margin_trading_sending_loan_tokens(accounts, sovryn, loanToken, 1e19, SUSD, WRBTC, priceFeeds, chain, False)
margin_trading_sov_reward_payment(accounts, loanToken, 1e19, SUSD, WRBTC, chain, SOV, FeesEvents) | [
9,
9193,
8957,
9128,
16026,
1735
] |
def METHOD_NAME(self) -> None:
"""Toggle the value of the widget when called as an action.
This would normally be used for a keyboard binding.
"""
self.toggle() | [
1006,
766
] |
def METHOD_NAME(self):
"""Search for possible RMA refunds and link them to the delivery note. We
don't want to link their delivery note lines as that would unbalance the
        qtys to invoice, which isn't correct for this case"""
super().METHOD_NAME()
for delivery_note in self:
refunds = delivery_note.sudo().rma_ids.mapped("refund_id")
if not refunds:
continue
delivery_note.write(
{
"invoice_ids": delivery_note.invoice_ids + refunds,
"invoice_count": len(delivery_note.invoice_ids),
}
) | [
19,
-1
] |
def METHOD_NAME():
tc = MagneticOperation.from_linear_translation_time_reversal(
translation=np.array([0, 0, 1 / 2]), time_reversal=True
)
tc2 = tc * tc
assert tc2 == MagneticOperation.from_linear_translation_time_reversal(
translation=np.array([0, 0, 1])
)
# Translation by (0, 0, -1/2) is equivalent to that by (0, 0, 1/2)
assert remainder1_symmetry_operation(tc.inverse()) == tc | [
9,
6919,
710
] |
def METHOD_NAME(self, extracted, create, **kwargs):
if not create:
return
if extracted:
for indicator in extracted:
self.METHOD_NAME.add(indicator) | [
2118
] |
async def METHOD_NAME(self, index, body=None, params=None, headers=None):
"""
Adds a policy to an index. This operation does not change the policy if the index already has one.
:arg index: The name of the index to add policy on
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"POST",
_make_path("_plugins", "_ism", "add", index),
params=params,
headers=headers,
body=body,
) | [
238,
54
] |
def METHOD_NAME(local_param: ColoParameter) -> int:
"""_tensor_numel
Get the number of elements of a tensor.
Args:
local_param (ColoParameter): The local parameter.
Returns:
int: the number of elements.
"""
# TODO(ver217): support dtensor here
return local_param.numel() | [
768,
12513
] |
def METHOD_NAME():
sun_json = get_data()
sunrise = sun_json['results']['sunrise']
sunrise_obj.parse_api(sunrise)
sunset = sun_json['results']['sunset']
sunset_obj.parse_api(sunset) | [
19,
4447
] |
def METHOD_NAME(self) -> str:
"""
The provisioning state of the packet core data plane resource.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME(self, analysis_config):
with pytest.raises(CuckooOperationalError):
analysis_config.get("foo") | [
9,
19,
1335,
130,
622
] |
def METHOD_NAME(self, load_avg_mock, cpu_count_mock):
cpu_count_mock.return_value = 8
load_avg_mock.return_value = (5.65, 0.85, 0.9)
module = build_module()
module.update()
assert module.state(widget(module)) == 'warning' | [
9,
3437,
551,
41,
1629,
938
] |
def METHOD_NAME(style_name):
if style_name.startswith('Italic'):
pass
elif 'Italic' in style_name:
style_name = style_name.replace('Italic', ' Italic')
return style_name | [
989,
-1,
156
] |
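For illustration, the behaviour of the style-name fixer above, assuming it is bound to a hypothetical name `space_out_italic`:

```python
# Hypothetical name for the masked METHOD_NAME above.
assert space_out_italic("BoldItalic") == "Bold Italic"  # inserts the missing space
assert space_out_italic("Italic") == "Italic"           # leading "Italic" is left untouched
assert space_out_italic("Regular") == "Regular"         # no "Italic" substring, unchanged
```
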
def METHOD_NAME(self, input):
# the output shape is calculated using the formula stated
# at https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose1d.html
l_in = input.shape[-1]
c_out = self.out_channels
l_out = math.floor((l_in - 1) * self.stride[0] - 2 * self.padding[0] + self.dilation[0] *
(self.kernel_size[0] - 1) + self.output_padding[0] + 1)
result_shape = input.shape[:-2] + (
c_out,
l_out,
)
return torch.empty(result_shape, device='meta') | [
3296,
7883,
-1
] |
def METHOD_NAME(self):
data = Table.from_numpy(None, [[1, 2, 3, 1, 1, 1],
[1, 2, 3, 1, np.nan, 1],
[1, 2, 3, 1, 1, np.nan]])
i = Integrate(methods=Integrate.Baseline, limits=[[0, 5]])(data)
self.assertEqual(i[0][0], 3)
self.assertEqual(i[1][0], 3)
self.assertEqual(i[2][0], 3)
np.testing.assert_equal(i.domain[0].compute_value.baseline(data)[1], 1) | [
9,
980
] |
def METHOD_NAME(self):
try:
if self.has_property(self.__IMAGE_URL_PROPERTY__):
img_url = self.get_property(self.__IMAGE_URL_PROPERTY__)
if len(img_url) == 0:
return None
f = urlopen(img_url)
img = f.read()
return img
except Exception:
print((sys.exc_info()[1])) | [
1047,
660
] |
def METHOD_NAME():
"""Test running all the algorithms."""
bound_ids = msaf.io.get_all_boundary_algorithms()
label_ids = msaf.io.get_all_label_algorithms()
# Add ground truth to boundary id
bound_ids += ["gt"]
# Add None to labels
label_ids += [None]
# Config params
feature = "pcp"
annot_beats = False
framesync = False
file_struct = msaf.io.FileStruct(audio_file)
file_struct.features_file = msaf.config.features_tmp_file
# Running all algorithms on a file that is too short
for bound_id in bound_ids:
for label_id in label_ids:
print("bound_id: %s,\tlabel_id: %s" % (bound_id, label_id))
config = msaf.io.get_configuration(feature, annot_beats, framesync,
bound_id, label_id)
config["hier"] = False
config["features"] = Features.select_features(
feature, file_struct, annot_beats, framesync)
est_times, est_labels = msaf.run.run_algorithms(
file_struct, bound_id, label_id, config)
assert len(est_times) == 2
assert len(est_labels) == 1
npt.assert_almost_equal(est_times[0], 0.0, decimal=2)
npt.assert_almost_equal(est_times[-1], config["features"].dur,
decimal=2)
    # Compute and save features for long audio file
file_struct = msaf.io.FileStruct(long_audio_file)
file_struct.features_file = msaf.config.features_tmp_file
def _test_run_msaf(bound_id, label_id, hier=False):
print("bound_id: %s,\tlabel_id: %s" % (bound_id, label_id))
config = msaf.io.get_configuration(feature, annot_beats, framesync,
bound_id, label_id)
config["hier"] = hier
config["features"] = Features.select_features(
feature, file_struct, annot_beats, framesync)
est_times, est_labels = msaf.run.run_algorithms(
file_struct, bound_id, label_id, config)
# Take the first level if hierarchy algorithm
if hier:
est_times = est_times[0]
est_labels = est_labels[0]
npt.assert_almost_equal(est_times[0], 0.0, decimal=2)
assert len(est_times) - 1 == len(est_labels)
npt.assert_almost_equal(est_times[-1], config["features"].dur,
decimal=2)
# Running all boundary algorithms on a relatively long file
# Combining boundaries with labels
for bound_id in bound_ids:
if bound_id == "gt":
continue
for label_id in label_ids:
_test_run_msaf(bound_id, label_id, False)
# Test the hierarchical algorithms
hier_ids = ["olda", "scluster"]
for hier_bounds_id in hier_ids:
for hier_labels_id in hier_ids:
if hier_labels_id == "olda":
hier_labels_id = "fmc2d"
_test_run_msaf(hier_bounds_id, hier_labels_id, True) | [
9,
22,
12345
] |
def METHOD_NAME(
self, image, root_dir, config, *args, generator_dir=None, generator_args=None
):
"""Executes artman command in the artman container.
Args:
image:
The Docker image for artman.
root_dir:
The input directory that will be mounted to artman docker
container as local googleapis directory.
config:
Path to artman configuration YAML file.
*args:
Arguments to artman that follow ``generate``. Defines which
artifacts to generate.
generator_dir (Optional[str]):
Path to local gapic-generator directory to use for generation.
By default, the latest version of gapic-generator will be used.
generator_args (Optional[List[str]]):
Additional arguments to pass to the gapic generator, such as
``--dev_samples``.
Returns:
The output directory with artman-generated files.
"""
container_name = "artman-docker"
output_dir = root_dir / "artman-genfiles"
additional_flags = []
if generator_args:
additional_flags.append(
"--generator-args='{}'".format(" ".join(generator_args))
)
docker_cmd = ["docker", "run", "--name", container_name, "--rm", "-i"]
# Environment variables
docker_cmd.extend(
[
"-e",
f"HOST_USER_ID={os.getuid()}",
"-e",
f"HOST_GROUP_ID={os.getgid()}",
"-e",
"RUNNING_IN_ARTMAN_DOCKER=True",
]
)
# Local directories to mount as volumes (and set working directory -w)
docker_cmd.extend(
[
"-v",
f"{root_dir}:{root_dir}",
"-v",
f"{output_dir}:{output_dir}",
"-w",
root_dir,
]
)
# Use local copy of GAPIC generator to generate, if path provided
if generator_dir:
docker_cmd.extend(["-v", f"{generator_dir}:/toolkit"])
# Run /bin/bash in the image and then provide the shell command to run
docker_cmd.extend([image, "/bin/bash", "-c"])
artman_command = " ".join(
map(
str,
["artman", "--local", "--config", config]
+ additional_flags
+ ["generate"]
+ list(args),
)
)
cmd = docker_cmd + [artman_command]
shell.METHOD_NAME(cmd, cwd=root_dir)
return output_dir | [
22
] |
def METHOD_NAME(session, url, compiler, options, source, filters):
r = session.post(
url + "api/compiler/" + compiler + "/compile",
json={
"source": source,
"options": options,
"filters": {key: True for key in filters},
},
headers={"Accept": "application/json"},
timeout=_TIMEOUT,
)
r.raise_for_status()
def fixup(obj):
try:
if "text" in obj:
obj["text"] = re.sub(r"/tmp/compiler-explorer-[^/]+", "/tmp", obj["text"])
return obj
except:
print("Issues with obj '{}'".format(obj))
raise
result = r.json()
if "asm" not in result:
result["asm"] = []
result["asm"] = [fixup(obj) for obj in result["asm"]]
return result | [
19
] |
def METHOD_NAME():
return [MathesarColumn(col_name, **DEFAULT_COLUMNS[col_name]) for col_name in DEFAULT_COLUMNS] | [
19,
235,
11243,
105,
245
] |
def METHOD_NAME(self):
return self.minimumSize() | [
1318,
3711
] |
def METHOD_NAME(res: reslib.Resource, indent: str) -> None: | [
4716
] |
def METHOD_NAME(self, group, name):
"""Returns the storage account name and key in a tuple"""
return name, self.get_account_key(group, name) | [
19,
598,
100
] |
def METHOD_NAME(name, urlString, tmpDir, quiet=False, force_clean=True):
if not quiet:
print("Downloading %s" % urlString)
startTime = time.time()
fileName = '%s/%s' % (tmpDir, name)
if not force_clean and os.path.exists(fileName):
if not quiet and fileName.find('.asc') == -1:
print(' already done: %.1f MB' % (os.path.getsize(fileName)/1024./1024.))
return
try:
attemptDownload(urlString, fileName)
except Exception as e:
print('Retrying download of url %s after exception: %s' % (urlString, e))
try:
attemptDownload(urlString, fileName)
except Exception as e:
raise RuntimeError('failed to download url "%s"' % urlString) from e
if not quiet and fileName.find('.asc') == -1:
t = time.time()-startTime
sizeMB = os.path.getsize(fileName)/1024./1024.
print(' %.1f MB in %.2f sec (%.1f MB/sec)' % (sizeMB, t, sizeMB/t)) | [
136
] |
def METHOD_NAME(_):
run_training() | [
57
] |
def METHOD_NAME(
data: dataBlob,
strategy_name: str,
optimal_positions: optimalPositions,
actual_positions: dict,
) -> listOfOrders:
upper_positions = optimal_positions.upper_positions
list_of_instruments = upper_positions.keys()
trade_list = [
trade_given_optimal_and_actual_positions(
data, strategy_name, instrument_code, optimal_positions, actual_positions
)
for instrument_code in list_of_instruments
]
trade_list = listOfOrders(trade_list)
return trade_list | [
245,
47,
9225,
1393,
7726,
61,
3928
] |
def METHOD_NAME(self, **kwargs): ... | [
245,
861
] |
def METHOD_NAME(self):
if self.container_rid not in self.svc.resources_by_id:
raise ex.Error("rid %s not found" % self.container_rid)
return self.svc.resources_by_id[self.container_rid] | [
224
] |
def METHOD_NAME(png_stack_dir: str, monkeypatch):
with h5py.File(os.path.join(tempfile.mkdtemp(), "myproj.ilp"), "w") as project_file:
info = FilesystemDatasetInfo(
filePath=os.path.join(png_stack_dir, "*.png"), project_file=project_file, sequence_axis="z"
)
assert not info.is_under_project_file() | [
9,
654,
1821,
3336,
1646,
155,
171
] |
async def METHOD_NAME():
"""get_token should raise CredentialUnavailableError for incomplete configuration."""
with mock.patch.dict(ENVIRON, {}, clear=True):
with pytest.raises(CredentialUnavailableError) as ex:
await EnvironmentCredential().get_token("scope")
for a, b in itertools.combinations(ALL_VARIABLES, 2): # all credentials require at least 3 variables set
with mock.patch.dict(ENVIRON, {a: "a", b: "b"}, clear=True):
with pytest.raises(CredentialUnavailableError) as ex:
await EnvironmentCredential().get_token("scope") | [
9,
6600,
830
] |
def METHOD_NAME(queue, arg1=False, arg2=False):
"""Use to test _thread.start_new_thread() passes args properly."""
queue.put((arg1, arg2)) | [
718,
4769
] |
def METHOD_NAME(self, srv):
self.bin_type = srv.target_name
if self.last_image is not None:
response = self.find_single_bin(np.copy(self.last_image), srv.target_name)
if response is False or response is None:
rospy.loginfo("did not find")
resp = VisionRequest2DResponse(
header=mil_ros_tools.make_header(frame="/down"),
found=False,
)
else:
# Fill in
center, radius = response
resp = VisionRequest2DResponse(
header=Header(stamp=self.last_image_time, frame_id="/down"),
pose=Pose2D(x=center[0], y=center[1], theta=radius),
max_x=self.last_image.shape[0],
max_y=self.last_image.shape[1],
camera_info=self.image_sub.camera_info,
found=True,
)
return resp | [
377,
762
] |
def METHOD_NAME(self):
self.cpp_info.libs = ["tins"]
self.cpp_info.set_property("pkg_config_name", "libtins")
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
if self.settings.os == "Windows" and not self.options.shared:
self.cpp_info.defines.append("TINS_STATIC")
self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi"]) | [
360,
100
] |
def METHOD_NAME(self,vp,mode):
return | [
4132,
2004
] |
def METHOD_NAME(event):
if event == 0: # CTRL_C_EVENT
action()
# Typical C implementations would return 1 to indicate that
# the event was processed and other control handlers in the
# stack should not be executed. However, that would
# prevent the Python interpreter's handler from translating
# CTRL-C to a `KeyboardInterrupt` exception, so we pretend
# that we didn't handle it.
return 0 | [
276
] |
def METHOD_NAME(
rundb_mock, packager: str, expected_result: bool, is_mandatory: bool
):
"""
Test the custom packagers collection from a project during the `look_for_context` method.
:param rundb_mock: A runDB mock fixture.
:param packager: The custom packager to collect.
:param expected_result: Whether the packager collection should succeed.
:param is_mandatory: If the packager is mandatory for the run or not. Mandatory packagers will always raise
exception if they couldn't be collected.
"""
project = mlrun.get_or_create_project(name="default")
project.add_custom_packager(
packager=packager,
is_mandatory=is_mandatory,
)
project.save_to_db()
mlrun_function = project.set_function(
func=__file__, name="test_custom_packagers", image="mlrun/mlrun"
)
if expected_result or not is_mandatory:
mlrun_function.run(handler="collect_custom_packagers", local=True)
return
try:
mlrun_function.run(handler="collect_custom_packagers", local=True)
assert False
except RunError:
pass | [
9,
343,
2764
] |
def METHOD_NAME(self, summary, variants, **kwargs):
"""
Return human readable representation
The summary/variants accepts verbosity where 0 means silent and
maximum is up to the plugin.
:param summary: How verbose summary to output (int)
:param variants: How verbose list of variants to output (int)
:param kwargs: Other free-form arguments
:rtype: str
"""
if self.variants == _NO_VARIANTS:
return ""
out = []
verbose = variants > 1
if summary:
# TODO: tree representation
out.append("No tree representation for JSON serialized variants")
if variants:
out.append(f"JSON Serialized Variants ({len(self)}):")
for variant in self:
paths = ", ".join([x.path for x in variant["variant"]])
out.append(
"%sVariant %s: %s"
% ("\n" if verbose else "", variant["variant_id"], paths)
)
if not verbose:
continue
env = set()
for node in variant["variant"]:
for key, value in node.environment.items():
origin = node.environment.origin[key].path
env.add((f"{origin}:{key}", str(value)))
if not env:
return out
fmt = " %%-%ds => %%s" % max( # pylint: disable=C0209
len(_[0]) for _ in env
)
for record in sorted(env):
out.append(fmt % record)
return "\n".join(out) | [
24,
3
] |
def METHOD_NAME(self):
# Clean up the working directory
if self.sim.store_sensitivities == "disk":
shutil.rmtree(self.sim.sensitivity_path) | [
531,
481
] |
def METHOD_NAME(
self,
) -> io.RawIOBase:
(_, response) = self.api.retrieve_preview(self.id)
return io.BytesIO(response.data) | [
19,
3182
] |
def METHOD_NAME(self):
self.levels = raid.RAIDLevels(["raid0", "raid1", "raid4", "raid5", "raid6", "raid10", "striped"])
self.levels_none = raid.RAIDLevels([])
self.levels_some = raid.RAIDLevels(["mirror", 6]) | [
0,
1
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self, request, *args, **kwargs):
"""
Process POST /auth/saml/v0/provider_config/ {postData}
"""
customer_uuid = self.requested_enterprise_uuid
try:
enterprise_customer = EnterpriseCustomer.objects.get(pk=customer_uuid)
except EnterpriseCustomer.DoesNotExist:
raise ValidationError(f'Enterprise customer not found at uuid: {customer_uuid}') # lint-amnesty, pylint: disable=raise-missing-from
# Create the samlproviderconfig model first
try:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
except IntegrityError as exc:
return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)
# Associate the enterprise customer with the provider
association_obj = EnterpriseCustomerIdentityProvider(
enterprise_customer=enterprise_customer,
provider_id=convert_saml_slug_provider_id(serializer.data['slug'])
)
association_obj.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) | [
129
] |
def METHOD_NAME(keras_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
filename = os.path.join(dname, "results.pdf")
surrogate_scatter3D(
keras_surrogate, data_validation, filename=filename, show=False
)
assert os.path.exists(filename) # PDF results file | [
9,
8000,
227,
4098
] |
def METHOD_NAME(self):
if self.ask_vault_pass:
return {
'ANSIBLE_VAULT_PASSWORD': self._get_ansible_vault_password_and_record(),
}
else:
return {} | [
19,
1967,
4090,
485,
1659
] |
def METHOD_NAME(self): | [
59,
624
] |
def METHOD_NAME(self) -> str:
"""
NSX-T Manager username
"""
return pulumi.get(self, "nsxt_username") | [
12622,
2072
] |
async def METHOD_NAME(
conn: triopg._triopg.TrioConnectionProxy,
organization_id: OrganizationID,
author: DeviceID,
realm_id: RealmID,
checkpoint: int,
) -> Tuple[int, Dict[VlobID, int]]:
await _check_realm_and_read_access(conn, organization_id, author, realm_id, None)
ret = await conn.fetch(
*_q_poll_changes(
organization_id=organization_id.str, realm_id=realm_id, checkpoint=checkpoint
)
)
changes_since_checkpoint: Dict[VlobID, int] = {
VlobID.from_hex(src_id): src_version for _, src_id, src_version in ret
}
new_checkpoint: int = ret[-1][0] if ret else checkpoint
return (new_checkpoint, changes_since_checkpoint) | [
539,
1237,
1103
] |
def METHOD_NAME(name, *funcs):
"""
Check if an argument is a possible input for a specific function.
Parameters
----------
name : :class:`str`
Argument name.
    *funcs : :class:`callable`
Function(s) to check.
Returns
--------
:class:`bool`
"""
args = []
for f in funcs:
args += inspect.getfullargspec(f).args + inspect.getfullargspec(f).kwonlyargs
return name in set(args) | [
-1
] |
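A short usage sketch of the signature-introspection helper above, assuming it is bound to a hypothetical name `accepts_argument`:

```python
import inspect  # required by the helper above

def scale(values, *, factor=1.0):
    return [v * factor for v in values]

# Hypothetical name for the masked METHOD_NAME above.
assert accepts_argument("factor", scale)      # keyword-only arguments are included
assert not accepts_argument("offset", scale)  # names absent from the signature are rejected
```
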
def METHOD_NAME(self, epoch):
self.epoch = epoch | [
0,
1165
] |
def METHOD_NAME(stream: BytesIO):
with tarfile.open(fileobj=stream, mode="r") as tar:
info = tar.getmember("backup.json")
with tar.extractfile(info) as f:
backup_data = json.load(f)
backup_data['size'] = float(
round(len(stream.getbuffer()) / 1024.0 / 1024.0, 2))
backup_data['version'] = 'dev'
return backup_data | [
214,
1001,
100
] |
def METHOD_NAME(str_version):
"""Extract revision (4th component) from version number (if any)."""
# e.g. '1.0.0.0-prerelease'
pattern = re.compile(r'^(\d+)(\.\d+)(\.\d+)(\.\d+)(.*)')
match = pattern.match(str_version)
if not match:
return str_version, 0
return (''.join(
(match.group(1), match.group(2), match.group(3), match.group(5))),
int(match.group(4)[1:])) | [
297,
71
] |
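A sketch of the expected behaviour of the revision splitter above, assuming it is bound to a hypothetical name `split_revision`:

```python
# Hypothetical name for the masked METHOD_NAME above.
assert split_revision("1.0.0.7-prerelease") == ("1.0.0-prerelease", 7)
assert split_revision("2.5.1") == ("2.5.1", 0)  # no fourth component, returned unchanged
```
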
def METHOD_NAME(doctype, old_name, new_name):
# NOTE: fieldname is not considered, since the document is renamed
frappe.qb.update(Auth).set(Auth.name, new_name).where(
(Auth.doctype == doctype) & (Auth.name == old_name)
).run() | [
2010,
2897
] |
def METHOD_NAME(prompt_num_for_policy):
policy_tokens = []
for i in range(prompt_num_for_policy):
policy_tokens.append(f'<policy_{i}>')
return policy_tokens | [
19,
54,
1735
] |
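Usage sketch for the policy-token builder above, assuming the hypothetical name `build_policy_tokens`:

```python
# Hypothetical name for the masked METHOD_NAME above.
assert build_policy_tokens(3) == ["<policy_0>", "<policy_1>", "<policy_2>"]
```
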
def METHOD_NAME(backend):
# test data structure
# noise_conf = NoiseConf()
# noise_conf.add_noise("h1", "t0")
# noise_conf.add_noise("h1", ["t1", "t2"], [[0], [1]])
# noise_conf.add_noise("h1", ["t3"], [[0]])
# noise_conf.add_noise("h1", "t4")
# noise_conf.add_noise("h1", ["t5"], [[3]])
# noise_conf.add_noise("h2", ["v1", "v2"], [[0], [1]])
# noise_conf.add_noise("h2", ["v3"], [[0]])
# noise_conf.add_noise("h2", "v4")
c = tc.Circuit(2)
c.cnot(0, 1)
c.rx(0, theta=0.4)
c.rx(1, theta=0.8)
c.h(0)
c.h(1)
dmc = tc.DMCircuit(2)
dmc.cnot(0, 1)
dmc.rx(0, theta=0.4)
dmc.rx(1, theta=0.8)
dmc.h(0)
dmc.h(1)
error1 = tc.channels.generaldepolarizingchannel(0.1, 1)
error2 = tc.channels.generaldepolarizingchannel(0.01, 2)
error3 = tc.channels.thermalrelaxationchannel(300, 400, 100, "ByChoi", 0)
readout_error = []
readout_error.append([0.9, 0.75]) # readout error of qubit 0
readout_error.append([0.4, 0.7]) # readout error of qubit 1
noise_conf = NoiseConf()
noise_conf.add_noise("rx", error1)
noise_conf.add_noise("rx", [error3], [[0]])
noise_conf.add_noise("h", [error3, error1], [[0], [1]])
noise_conf.add_noise("x", [error3], [[0]])
noise_conf.add_noise("cnot", [error2], [[0, 1]])
noise_conf.add_noise("readout", readout_error)
cnoise = circuit_with_noise(c, noise_conf, [0.1] * 7)
value = cnoise.expectation_ps(x=[0, 1])
# value = expectation_ps_noisfy(c, x=[0, 1], noise_conf=noise_conf, nmc=10000)
# np.testing.assert_allclose(value, 0.09, atol=1e-1)
# value = expectation_ps_noisfy(dmc, x=[0, 1], noise_conf=noise_conf)
# np.testing.assert_allclose(value, 0.09, atol=1e-1)
# with readout_error
value = sample_expectation_ps_noisfy(dmc, x=[0, 1], noise_conf=noise_conf)
np.testing.assert_allclose(value, -0.12, atol=1e-2)
value = sample_expectation_ps_noisfy(c, x=[0, 1], noise_conf=noise_conf, nmc=100000)
np.testing.assert_allclose(value, -0.12, atol=1e-2)
# test composed channel and general condition
newerror = composedkraus(error1, error3)
noise_conf1 = NoiseConf()
noise_conf1.add_noise("rx", [newerror, error1], [[0], [1]])
noise_conf1.add_noise("h", [error3, error1], [[0], [1]])
noise_conf1.add_noise("x", [error3], [[0]])
def condition(d):
return d["name"] == "cnot" and d["index"] == (0, 1)
noise_conf1.add_noise_by_condition(condition, error2)
noise_conf1.add_noise("readout", readout_error)
value = sample_expectation_ps_noisfy(dmc, x=[0, 1], noise_conf=noise_conf1)
np.testing.assert_allclose(value, -0.12, atol=1e-2)
# test standardized gate
newerror = composedkraus(error1, error3)
noise_conf2 = NoiseConf()
noise_conf2.add_noise("Rx", [newerror, error1], [[0], [1]])
noise_conf2.add_noise("H", [error3, error1], [[0], [1]])
noise_conf2.add_noise("x", [error3], [[0]])
noise_conf2.add_noise("cx", [error2], [[0, 1]])
noise_conf2.add_noise("readout", readout_error)
value = sample_expectation_ps_noisfy(
c, x=[0, 1], noise_conf=noise_conf2, nmc=100000
)
np.testing.assert_allclose(value, -0.12, atol=1e-2) | [
9,
-1
] |
def METHOD_NAME(zone):
zone._increment_serial = types.MethodType(_increment_serial, zone) | [
1575,
2456
] |
def METHOD_NAME(module):
functions = set()
module_upper = module.upper()
for id_name in _op_dir():
id_split = id_name.split("_OT_", 1)
if len(id_split) == 2 and module_upper == id_split[0]:
functions.add(id_split[1])
return list(functions) | [
14148,
829,
1599,
1190
] |
async def METHOD_NAME(client):
await client.number.put_big_double_negative_decimal()
assert (await client.number.get_big_double_negative_decimal()) == -99999999.99 | [
9,
4289,
2152,
2927,
3397
] |
def METHOD_NAME(
plot_params: dict,
video_name="video.mp4",
fps=15,
xlim=(0, 4),
ylim=(-1, 1), | [
1288,
1781
] |
def METHOD_NAME(dev):
my_cmd = struct.Struct("<LHHQ")
sub_cmd_in = 0xAA55
indata = my_cmd.pack(sub_cmd_in, 0x1234, 0x5678, int(time.time()))
outdata = dev.cmd(MRPC_ECHO, indata, len(indata))
sub_cmd_out, param1, param2, time_val = my_cmd.unpack(outdata)
if sub_cmd_in != ~sub_cmd_out & 0xFFFFFFFF:
raise CommandError("Echo data did not match: {:x} != ~{:x}".
format(sub_cmd_in, sub_cmd_out)) | [
1605,
1660
] |
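The echo test above frames its payload with `struct.Struct("<LHHQ")`. A self-contained sketch of just that framing step (device access is omitted):

```python
import struct
import time

my_cmd = struct.Struct("<LHHQ")  # little-endian: uint32, uint16, uint16, uint64
blob = my_cmd.pack(0xAA55, 0x1234, 0x5678, int(time.time()))
assert len(blob) == my_cmd.size == 16

sub_cmd, param1, param2, timestamp = my_cmd.unpack(blob)
assert (sub_cmd, param1, param2) == (0xAA55, 0x1234, 0x5678)
```
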
def METHOD_NAME(df):
"""
Test case_when for scenarios where `default` is an index.
"""
default = range(len(df))
result = df.case_when(
"numbers > 1",
lambda df: df.numbers + 10,
default=pd.Index(default),
column_name="bleh",
)
expected = np.where(df.numbers > 1, df.numbers + 10, default)
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected) | [
9,
331,
1646,
235,
724
] |
def METHOD_NAME(self):
self.set_which_results({
'xeyes': ['/usr/bin/xeyes', '/usr/X11/bin/xeyes'],
})
self.assertEqual(utils.getCommand('xeyes'), '/usr/bin/xeyes') | [
9,
19,
462,
457
] |
def METHOD_NAME(notify_api, mocker):
utils_mock = mocker.patch("app.aws.s3.utils_s3upload")
service_id = uuid.uuid4()
csv_data = "foo"
upload_id = upload_job_to_s3(service_id, csv_data)
utils_mock.assert_called_once_with(
filedata=csv_data,
region=notify_api.config["AWS_REGION"],
bucket_name=current_app.config["CSV_UPLOAD_BUCKET_NAME"],
file_location=f"service-{service_id}-notify/{upload_id}.csv",
) | [
9,
172,
202,
24,
607
] |
def METHOD_NAME(s, m, pk):
"""
Not safe to use when any argument is secret.
See module docstring. This function should be used only for
verifying public signatures of public messages.
"""
if len(s) != b // 4:
raise ValueError("signature length is wrong")
if len(pk) != b // 8:
raise ValueError("public-key length is wrong")
R = decodepoint(s[:b // 8])
A = decodepoint(pk)
S = decodeint(s[b // 8:b // 4])
h = Hint(encodepoint(R) + pk + m)
(x1, y1, z1, t1) = P = scalarmult_B(S)
(x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))
if (not isoncurve(P) or not isoncurve(Q) or
(x1*z2 - x2*z1) % q != 0 or (y1*z2 - y2*z1) % q != 0):
raise SignatureMismatch("signature does not pass verification") | [
-1
] |
def METHOD_NAME(*args, **kwargs):
if env[0] is None:
env[0] = constructor(*args, **kwargs)
return env[0] | [
503
] |
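The entry above references a free variable `env` that is not defined in the snippet. A plausible enclosing factory, purely an assumption about the surrounding code, would look like:

```python
def memoize_constructor(constructor):
    env = [None]  # single shared slot captured by the inner function

    def get_instance(*args, **kwargs):
        if env[0] is None:
            env[0] = constructor(*args, **kwargs)
        return env[0]

    return get_instance

# The first call constructs; later calls return the cached instance.
make_list = memoize_constructor(list)
assert make_list([1, 2]) is make_list([3, 4])
```
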
def METHOD_NAME(self) -> HtmlBodyMailParts:
elements = self.get_elements()
elements = self._process_elements(elements)
return elements | [
214,
3562
] |
def METHOD_NAME(self):
if not self.page.has_previous():
return None
url = self.request and self.request.get_full_path() or ''
url = url.encode('utf-8')
page_number = self.page.previous_page_number()
return replace_query_param(self.cap_page_size(url), self.page_query_param, page_number) | [
19,
1511,
548
] |
def METHOD_NAME(self, board, depth, alpha, beta):
baseIndex = (board.hash % self.buckets) * 4
key = (board.hash // self.buckets) & 0xFFFFFFFF
for i in range(baseIndex, baseIndex + 4):
tkey, search_id, hashf, tdepth, score, move = entryType.unpack_from(
self.data, i * entryType.size
)
if tkey == key:
# Mate score bounds are guaranteed to be accurate at any depth.
if tdepth < depth and abs(score) < MATE_VALUE - MAXPLY:
return move, score, hashfBAD
if hashf == hashfEXACT:
return move, score, hashf
if hashf == hashfALPHA and score <= alpha:
return move, alpha, hashf
if hashf == hashfBETA and score >= beta:
return move, beta, hashf | [
2570
] |
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(self, section, option):
"""
Like get, but interpret the value as a LDAP DN, and sanitize it by
removing the extra spaces.
If the value is not a valid DN, a ldap.LDAPError exception will be
raised.
"""
return ",".join(ldap.explode_dn(self.get(section, option))) | [
-1
] |
def METHOD_NAME():
eq(
commcare_translations.get_translation_file_paths('en', version=2, commcare_version='2.23.3beta2'),
[
'../historical-translations-by-version/2.23.3-messages_en-2.txt',
'../historical-translations-by-version/2.23.2-messages_en-2.txt',
'../historical-translations-by-version/2.23.1-messages_en-2.txt',
'../historical-translations-by-version/2.23-messages_en-2.txt',
'../messages_en-2.txt',
'../messages_en-1.txt',
]
) | [
9,
281,
3336,
41,
5492,
281,
988
] |
def METHOD_NAME():
master_key = Key("nop", "nop", "nop", "nop", "nop", "nop")
master_key_map = {master_key.id: master_key}
with pytest.raises(InvalidCiphertextException):
decrypt(master_keys=master_key_map, ciphertext_blob=b"", encryption_context={}) | [
9,
443,
532,
14054,
275
] |
def METHOD_NAME(self):
# These imports are incompatible with CUDASIM
from numba.cuda.descriptor import cuda_target
from numba.cuda.cudadrv.nvvm import llvm_to_ptx
targetctx = cuda_target.target_context
mod = targetctx.create_module("")
textstring = 'A Little Brown Fox'
gv0 = targetctx.insert_const_string(mod, textstring)
# Insert the same const string a second time - the first should be
# reused.
targetctx.insert_const_string(mod, textstring)
res = re.findall(r"@\"__conststring__.*internal.*constant.*\["
r"19\s+x\s+i8\]", str(mod))
# Ensure that the const string was only inserted once
self.assertEqual(len(res), 1)
fnty = ir.FunctionType(ir.IntType(8).as_pointer(), [])
# Using insert_const_string
fn = ir.Function(mod, fnty, "test_insert_const_string")
builder = ir.IRBuilder(fn.append_basic_block())
res = builder.addrspacecast(gv0, ir.PointerType(ir.IntType(8)),
'generic')
builder.ret(res)
matches = re.findall(r"@\"__conststring__.*internal.*constant.*\["
r"19\s+x\s+i8\]", str(mod))
self.assertEqual(len(matches), 1)
# Using insert_string_const_addrspace
fn = ir.Function(mod, fnty, "test_insert_string_const_addrspace")
builder = ir.IRBuilder(fn.append_basic_block())
res = targetctx.insert_string_const_addrspace(builder, textstring)
builder.ret(res)
matches = re.findall(r"@\"__conststring__.*internal.*constant.*\["
r"19\s+x\s+i8\]", str(mod))
self.assertEqual(len(matches), 1)
ptx = llvm_to_ptx(str(mod)).decode('ascii')
matches = list(re.findall(r"\.const.*__conststring__", ptx))
self.assertEqual(len(matches), 1) | [
9,
3402,
144
] |
def METHOD_NAME(cls, instance):
"""Return the tokens needed to authenticate the access to any API the
service might provide. Bitbucket uses a pair of OAuthToken consisting
on a oauth_token and oauth_token_secret.
instance must be a UserSocialAuth instance.
"""
token = super().METHOD_NAME(instance)
if token and "access_token" in token:
token = dict(tok.split("=") for tok in token["access_token"].split("&"))
return token | [
1735
] |
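The token parsing in the entry above turns a querystring-style `access_token` into a dict. A standalone sketch of just that step, with a made-up token value:

```python
raw = {"access_token": "oauth_token=abc&oauth_token_secret=def"}  # hypothetical value
token = dict(tok.split("=") for tok in raw["access_token"].split("&"))
assert token == {"oauth_token": "abc", "oauth_token_secret": "def"}
```
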
def METHOD_NAME(self, read_buffer: ReadBuffer, args):
return self.static_parse_context(read_buffer) | [
628,
214
] |
def METHOD_NAME(
path, is_aggregate_tokenizer: bool = False, verbose: int = 0,
):
lines_read = 0
text_dataset, lang_dataset = [], []
if path[-8:] == '.json.gz': # for Common Crawl dataset
fin = gzip.open(path, 'r')
else:
fin = open(path, 'r', encoding='utf-8')
if verbose > 0:
reader = tqdm(iter(lambda: fin.readline(), ''), desc="Read 0 lines", unit=' lines')
else:
reader = fin
for line in reader:
lang = None
if line:
if path[-8:] == '.json.gz': # for Common Crawl dataset
line = json.loads(line.decode('utf-8'))['text']
elif path.endswith('.json'):
jline = json.loads(line)
line = jline['text']
if is_aggregate_tokenizer:
lang = jline['lang']
line_list = line.split("\n")
line = " ".join(line_list)
if line:
text_dataset.append(line)
if lang:
lang_dataset.append(lang)
lines_read += 1
if verbose > 0 and lines_read % 100000 == 0:
reader.set_description(f"Read {lines_read} lines")
else:
break
fin.close()
if is_aggregate_tokenizer:
assert len(text_dataset) == len(
lang_dataset
), f"text_dataset length {len(text_dataset)} and lang_dataset length {len(lang_dataset)} must be the same!"
return list(zip(text_dataset, lang_dataset))
else:
return [[text] for text in text_dataset] | [
203,
849,
171
] |
def METHOD_NAME(self, val):
"""
Sets the speed of all the FANs to a value denoted by the duty-cycle percentage val
:param val: An integer, <0-100> denoting FAN duty cycle percentage
:return: Boolean, True if operation is successful, False if not
"""
status = True
pwm = int(round(self.PWM_MAX*val/100.0))
try:
with open(os.path.join(self.fan_path, self.fan_set_speed.format(1)), 'w') as fan_pwm:
fan_pwm.write(str(pwm))
except (ValueError, IOError):
log_err("Read file {} failed".format(self.fan_path + self.fan_set_speed.format(1)))
status = False
return status | [
0,
1942
] |
async def METHOD_NAME(self):
# Setting up configuration for model
config = self.model.__class__.config(
await parse_unknown(
"--model-predict",
"TARGET:float:1",
"--model-features",
"feature_1:float:1",
"--model-features",
"feature_2:float:1",
"-model-location",
self.model_dir.name,
)
)
self.assertEqual(config.location, pathlib.Path(self.model_dir.name))
self.assertEqual(config.steps, 3000)
self.assertEqual(config.epochs, 30)
self.assertEqual(config.hidden, [12, 40, 15])
self.assertEqual(config.predict.name, "TARGET") | [
9,
200
] |
def METHOD_NAME(self):
identifier = "123"
assert split_key(
f"mail:p:{self.project.id}:{ActionTargetType.ISSUE_OWNERS.value}:{identifier}"
) == (self.project, ActionTargetType.ISSUE_OWNERS, identifier, None) | [
9,
80,
641,
59,
769
] |
def METHOD_NAME(ctx, x):
return ctx.exp(ctx.j*ctx.pi*x) | [
4500
] |
def METHOD_NAME(self): # [missing-raises-doc]
"""This is a Numpy docstring.
Raises
------
NameError
Sometimes
"""
try:
fake_func()
except (RuntimeError, ValueError):
raise
raise NameError("hi") | [
9,
416,
-1,
2028,
107,
45
] |
def METHOD_NAME(self, host_id: str, disk_paths: List[dict]) -> None:
self.api_client.select_installation_disk(infra_env_id=self.id, host_id=host_id, disk_paths=disk_paths) | [
1472,
1806,
3732,
113
] |
def METHOD_NAME(tmpdir: Path) -> None:
repo_path = os.fspath(_create_test_package(tmpdir, name="mypkg"))
commit = Git.get_revision(repo_path)
# a link referencing a sha should be cached
url = "git+https://g.c/o/r@" + commit + "#egg=mypkg"
req = ReqMock(link=Link(url), source_dir=repo_path)
assert wheel_builder._should_cache(cast(InstallRequirement, req))
# a link not referencing a sha should not be cached
url = "git+https://g.c/o/r@master#egg=mypkg"
req = ReqMock(link=Link(url), source_dir=repo_path)
assert not wheel_builder._should_cache(cast(InstallRequirement, req)) | [
9,
427,
596,
1493,
3818
] |
def METHOD_NAME(crystal):
C = make_path_crystal(crystal)
element = int(request.args.get("element"))
i = int(request.args.get("i"))
l = int(request.args.get("l"))
x = C[element]
if l >= 0:
y = x.f_string([i] * l)
else:
y = x.e_string([i] * -l)
ret = str(C.rank(y))
return 1 if ret == "NaN" else ret | [
15714,
13568
] |
def METHOD_NAME(old_dir, new_dir):
for dirname in os.listdir(old_dir):
old_run_dir = os.path.join(old_dir, dirname)
new_run_dir = os.path.join(new_dir, dirname)
if not os.path.isdir(old_run_dir):
continue
old_summary = get_summary(old_run_dir)
new_summary = get_summary(new_run_dir)
comp = RunComparison(old_summary, new_summary)
header = [Style.BRIGHT + dirname + Style.RESET_ALL,
Style.BRIGHT + 'old' + Style.RESET_ALL,
# Numeric suffix (MiB, GiB, sec).
'',
'std_dev',
Style.BRIGHT + 'new' + Style.RESET_ALL,
# Numeric suffix (MiB, GiB, sec).
'',
'std_dev',
Style.BRIGHT + 'delta' + Style.RESET_ALL]
rows = []
for field in comp.iter_field_names():
row = [field, comp.old(field), comp.old_suffix(field),
comp.old_stddev(field), comp.new(field),
comp.new_suffix(field), comp.new_stddev(field)]
diff_percent = comp.diff_percent(field)
diff_percent_str = '%.2f%%' % diff_percent
if diff_percent < 0:
diff_percent_str = (
Fore.GREEN + diff_percent_str + Style.RESET_ALL)
else:
diff_percent_str = (
Fore.RED + diff_percent_str + Style.RESET_ALL)
row.append(diff_percent_str)
rows.append(row)
print(tabulate(rows, headers=header, tablefmt='plain'))
print('') | [
979,
420
] |
def METHOD_NAME(app):
this_dir = os.path.dirname(__file__)
theme_dir = os.path.abspath(os.path.join(this_dir, 'theme'))
app.add_css_file('flames.css')
app.add_html_theme('flames', theme_dir)
app.connect("html-page-context", on_html_page_context) | [
102
] |
def METHOD_NAME(quantizer_setup: SingleConfigQuantizerSetup) -> List[List[Union[int, str]]]:
scope_vs_bitwidth = {}
for qp in quantizer_setup.quantization_points.values():
scope_vs_bitwidth[str(qp.insertion_point)] = qp.qconfig.num_bits
sorted_scope_vs_bitwidth = OrderedDict(sorted(scope_vs_bitwidth.items(), key=lambda x: x[0]))
full_bitwidth_per_scope = []
for scope, bitwidth in sorted_scope_vs_bitwidth.items():
full_bitwidth_per_scope.append([bitwidth, scope])
return full_bitwidth_per_scope | [
19,
3317,
2735,
913
] |
def METHOD_NAME() :
# Get the Gaffer and GafferUI modules, but only if the app actually
# imported them. We don't want to force their importation because it's
# just a waste of time if they weren't used.
Gaffer = sys.modules.get( "Gaffer" )
GafferUI = sys.modules.get( "GafferUI" )
if Gaffer is None and GafferUI is None :
return
# Clean up any garbage left behind by Cortex's wrapper mechanism - because
# the Gaffer.Application itself is derived from IECore.Parameterised, which
# as far as I can tell is wrapped unnecessarily, we must call this to allow
# the application to be deleted at all. Note that we're deliberately not also
# calling gc.collect() - our intention here isn't to clean up on shutdown, but
# to highlight problems caused by things not cleaning up after themselves during
# execution. We aim to eliminate all circular references from our code, to avoid
# garbage collection overhead and to avoid problems caused by referencing Qt widgets
# which were long since destroyed in C++.
## \todo Reevaluate the need for this call after Cortex 9 development.
IECore.RefCounted.collectGarbage()
# Importing here rather than at the top of the file prevents false
# positives being reported in gc.get_objects() below. I have no idea why,
# but if not imported here, get_objects() will report objects which have
# nothing referring to them and which should be dead, even with an
# explicit call to gc.collect() beforehand.
import gc
# Check for things that shouldn't exist at shutdown, and
# warn of anything we find.
scriptNodes = []
widgets = []
for o in gc.get_objects() :
if Gaffer is not None and isinstance( o, Gaffer.ScriptNode ) :
scriptNodes.append( o )
elif GafferUI is not None and isinstance( o, GafferUI.Widget ) :
widgets.append( o )
if scriptNodes :
IECore.msg(
IECore.Msg.Level.Debug,
"Gaffer shutdown", "%d remaining ScriptNode%s detected. Debugging with objgraph is recommended." % (
len( scriptNodes ),
"s" if len( scriptNodes ) > 1 else "",
)
)
if widgets :
count = {}
for widget in widgets :
widgetType = widget.__class__.__name__
count[widgetType] = count.get( widgetType, 0 ) + 1
summaries = [ "%s (%d)" % ( k, count[k] ) for k in sorted( count.keys() ) ]
IECore.msg(
IECore.Msg.Level.Debug,
"Gaffer shutdown", "%d remaining Widget%s detected : \n\n%s\n\nDebugging with objgraph is recommended." % (
len( widgets ),
"s" if len( widgets ) > 1 else "",
"\t" + "\n\t".join( summaries )
)
) | [
250,
1356,
538
] |
def METHOD_NAME(self):
""" reset """
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0 | [
656
] |
def METHOD_NAME(
self,
newcase,
keepexe=False,
mach_dir=None,
project=None,
cime_output_root=None,
exeroot=None,
rundir=None,
):
# Need to disable unused-argument checking: keepexe is needed to match
# the interface of Case, but is not used in this fake implementation
#
# pylint: disable=unused-argument
"""
Create a clone of the current case. Also creates the CASEROOT directory
for the clone case (given by newcase).
Args:
newcase (str): full path to the new case. This directory should not
already exist; it will be created
keepexe (bool, optional): Ignored
mach_dir (str, optional): Ignored
project (str, optional): Ignored
cime_output_root (str, optional): New CIME_OUTPUT_ROOT for the clone
exeroot (str, optional): New EXEROOT for the clone
rundir (str, optional): New RUNDIR for the clone
Returns the clone case object
"""
newcaseroot = os.path.abspath(newcase)
newcasename = os.path.basename(newcase)
os.makedirs(newcaseroot)
clone = self.copy(newcasename=newcasename, newcaseroot=newcaseroot)
if cime_output_root is not None:
clone.set_value("CIME_OUTPUT_ROOT", cime_output_root)
if exeroot is not None:
clone.set_value("EXEROOT", exeroot)
if rundir is not None:
clone.set_value("RUNDIR", rundir)
return clone | [
129,
670
] |
def METHOD_NAME(host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
return host, port, loop, limit, kwds | [
11729
] |
def METHOD_NAME(self, card, comment=None):
i = self.i
self.element_id[i] = integer(card, 1, 'eid')
self.K[i] = double(card, 2, 'k')
self.node_ids[i, :] = [integer(card, 3, 'G1'),
integer(card, 5, 'G2')]
self.components[i, :] = [integer_or_blank(card, 4, 'C1', 0),
integer_or_blank(card, 6, 'C2', 0)]
self.ge[i] = double_or_blank(card, 7, 'ge', 0.)
self.s[i] = double_or_blank(card, 8, 's', 0.)
assert len(card) <= 9, 'len(CELAS4 card) = %i\ncard=%s' % (len(card), card) + str(card)
self.i += 1 | [
238,
5427
] |
def METHOD_NAME(data):
"""
In some cases where there are an insane number of processes being created
on a system a PID can get recycled or assigned to a non-Salt process.
On Linux this fn checks to make sure the PID we are checking on is actually
a Salt process.
For non-Linux systems we punt and just return True
"""
if not salt.utils.platform.is_linux():
return True
pid = data.get("pid")
if not pid:
return False
if not os.path.isdir("/proc"):
return True
path = os.path.join("/proc/{}/cmdline".format(pid))
if not os.path.isfile(path):
return False
try:
with salt.utils.files.fopen(path, "rb") as fp_:
if b"salt" in fp_.read():
return True
except OSError:
return False | [
250,
4518
] |
def METHOD_NAME(self, context):
"""Perform picking in this item at given widget position.
:param PickContext context: Current picking context
:return: Object holding the results or None
:rtype: Union[None,PickingResult]
"""
info = self.__pickPreProcessing(context)
if info is not None:
picked, points, _ = info
if picked:
return PickingResult(self, positions=[points[0]])
return None | [
2981,
324
] |
def METHOD_NAME(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("keyLength", AAZIntType, ".key_length")
return self.serialize_content(_content_value) | [
459
] |
def METHOD_NAME():
for ext in ["", ".sort"]:
grass.try_remove(tmp + ext)
grass.run_command(
"g.remove", flags="f", type="vector", pattern=prefix + "_*", quiet=True
) | [
950
] |