text (string · lengths 15–7.82k) | ids (sequence · lengths 1–7) |
---|---|
def METHOD_NAME(course):
return ExternalCollection.objects.filter(
course=course, uploader=True).first() | [
19,
7332
] |
def METHOD_NAME(self, A):
C = sym(A)
A, dims = A, (0, 1)
D = 0.5*as_matrix([[A[i, j] + A[j, i] for j in dims] for i in dims])
self.assertEqualValues(C, D) | [
9,
6794
] |
def METHOD_NAME(self, tensor):
self.decomposition_ = symmetric_parafac_power_iteration(
tensor,
self.rank,
n_repeat=self.n_repeat,
n_iteration=self.n_iteration,
verbose=self.verbose,
)
return self.decomposition_ | [
90,
1053
] |
def METHOD_NAME(
imagepath,
dirname="",
place_holder=False,
recursive=False,
ncase_cmp=True,
convert_callback=None,
verbose=False,
relpath=None,
check_existing=False,
force_reload=False,
):
"""
Return an image from the file path with options to search multiple paths
and return a placeholder if its not found.
:arg filepath: The image filename
If a path precedes it, this will be searched as well.
:type filepath: string
:arg dirname: is the directory where the image may be located - any file at
the end will be ignored.
:type dirname: string
:arg place_holder: if True a new place holder image will be created.
this is useful so later you can relink the image to its original data.
:type place_holder: bool
:arg recursive: If True, directories will be recursively searched.
Be careful with this if you have files in your root directory because
it may take a long time.
:type recursive: bool
:arg ncase_cmp: on non windows systems, find the correct case for the file.
:type ncase_cmp: bool
:arg convert_callback: a function that takes an existing path and returns
a new one. Use this when loading image formats blender may not support,
the CONVERT_CALLBACK can take the path for a GIF (for example),
convert it to a PNG and return the PNG's path.
For formats blender can read, simply return the path that is given.
:type convert_callback: function
:arg relpath: If not None, make the file relative to this path.
:type relpath: None or string
:arg check_existing: If true,
returns already loaded image datablock if possible
(based on file path).
:type check_existing: bool
:arg force_reload: If true,
force reloading of image (only useful when `check_existing`
is also enabled).
:type force_reload: bool
:return: an image or None
:rtype: :class:`bpy.types.Image`
"""
import os
import bpy
# -------------------------------------------------------------------------
# Utility Functions
def _image_load_placeholder(path):
name = path
if type(path) is str:
name = name.encode("utf-8", "replace")
name = name.decode("utf-8", "replace")
name = os.path.basename(name)
image = bpy.data.images.new(name, 128, 128)
# allow the path to be resolved later
image.filepath = path
image.source = 'FILE'
return image
def _image_load(path):
import bpy
if convert_callback:
path = convert_callback(path)
# Ensure we're not relying on the 'CWD' to resolve the path.
if not os.path.isabs(path):
path = os.path.abspath(path)
try:
image = bpy.data.images.load(path, check_existing=check_existing)
except RuntimeError:
image = None
if verbose:
if image:
print(" image loaded '%s'" % path)
else:
print(" image load failed '%s'" % path)
# The image path exists but could not be read for some reason,
# so be sure to return a placeholder.
if place_holder and image is None:
image = _image_load_placeholder(path)
if image:
if force_reload:
image.reload()
if relpath is not None:
# make relative
from bpy.path import relpath as relpath_fn
# can't always find the relative path
# (between drive letters on windows)
try:
filepath_rel = relpath_fn(path, start=relpath)
except ValueError:
filepath_rel = None
if filepath_rel is not None:
image.filepath_raw = filepath_rel
return image
def _recursive_search(paths, filename_check):
for path in paths:
for dirpath, _dirnames, filenames in os.walk(path):
# skip '.svn'
if dirpath[0] in {".", b'.'}:
continue
for filename in filenames:
if filename_check(filename):
yield os.path.join(dirpath, filename)
# -------------------------------------------------------------------------
imagepath = bpy.path.native_pathsep(imagepath)
if verbose:
print("load_image('%s', '%s', ...)" % (imagepath, dirname))
if os.path.exists(imagepath):
return _image_load(imagepath)
variants = [imagepath]
if dirname:
variants += [
os.path.join(dirname, imagepath),
os.path.join(dirname, bpy.path.basename(imagepath)),
]
for filepath_test in variants:
if ncase_cmp:
ncase_variants = (
filepath_test,
bpy.path.resolve_ncase(filepath_test),
)
else:
ncase_variants = (filepath_test, )
for nfilepath in ncase_variants:
if os.path.exists(nfilepath):
return _image_load(nfilepath)
if recursive:
search_paths = []
for dirpath_test in (os.path.dirname(imagepath), dirname):
if os.path.exists(dirpath_test):
search_paths.append(dirpath_test)
search_paths[:] = bpy.path.reduce_dirs(search_paths)
imagepath_base = bpy.path.basename(imagepath)
if ncase_cmp:
imagepath_base = imagepath_base.lower()
def image_filter(fn):
return (imagepath_base == fn.lower())
else:
def image_filter(fn):
return (imagepath_base == fn)
nfilepath = next(_recursive_search(search_paths, image_filter), None)
if nfilepath is not None:
return _image_load(nfilepath)
# None of the paths exist so return placeholder
if place_holder:
return _image_load_placeholder(imagepath)
# TODO comprehensiveImageLoad also searched in bpy.config.textureDir
return None | [
557,
660
] |
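A minimal usage sketch for the loader above (evidently Blender's load_image, per its own verbose print), assuming it runs inside Blender where bpy is importable; gif_to_png is a hypothetical convert_callback and its helper is assumed:

def gif_to_png(path):
    # Hypothetical converter for a format Blender cannot read directly.
    if path.lower().endswith(".gif"):
        return convert_gif_to_png_somehow(path)  # assumed external helper
    return path

image = load_image(
    "texture.gif",
    dirname="//textures",
    place_holder=True,            # never get None back for missing files
    recursive=True,
    convert_callback=gif_to_png,
    check_existing=True,          # reuse an already-loaded datablock
)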
def METHOD_NAME():
""" Returns unique stations from raw list of stations """
stations = [
build_mock_station_group_member("1", "1"),
build_mock_station_group_member("1", "1")
]
result = unique_weather_stations_mapper(stations)
assert len(result) == 1
assert result[0].station_code == 1 | [
9,
2768,
2642,
3782,
1119
] |
def METHOD_NAME(schema, u, s):
c = URL(schema)
parts = u.path.split("/")
if len(parts) < 1 or len(parts) > 4:
raise JujuError("charm or bundle URL has invalid form {}".format(s))
# ~<username>
if parts[0].startswith("~"):
if schema == Schema.LOCAL:
raise JujuError("local charm or bundle URL with username {}".format(s))
c.user = parts[0][1:]
parts = parts[1:]
if len(parts) > 2:
raise JujuError("charm or bundle URL has invalid form {}".format(s))
# <series>
if len(parts) == 2:
c.series = parts[0]
parts = parts[1:]
# TODO (stickupkid) - validate the series.
if len(parts) < 1:
raise JujuError("URL without charm or bundle name {}".format(s))
(c.name, c.revision) = extract_revision(parts[0])
# TODO (stickupkid) - validate the name.
return c | [
214,
3392,
274
] |
def METHOD_NAME(user):
# Pull all the records that need to be rolled back
logger.info("Finding activities for %s" % user["_id"])
conns = User.GetConnectionRecordsByUser(user)
my_services = [conn.Service.ID for conn in conns]
my_ext_ids = [conn.ExternalID for conn in conns]
logger.info("Scanning uploads table for %s accounts with %s extids" % (my_services, my_ext_ids))
uploads = db.uploaded_activities.find({"Service": {"$in": my_services}, "UserExternalID": {"$in": my_ext_ids}})
pending_deletions = {}
for upload in uploads:
svc = upload["Service"]
upload_id = upload["ExternalID"]
svc_ext_id = upload["UserExternalID"]
# Filter back down to the pairing we actually need
if my_services.index(svc) != my_ext_ids.index(svc_ext_id):
continue
if svc not in pending_deletions:
pending_deletions[svc] = []
pending_deletions[svc].append(upload_id)
# Another case of "I should have an ORM"
return RollbackTask({"PendingDeletions": pending_deletions}) | [
129
] |
def METHOD_NAME(cls) -> List[argparse.ArgumentParser]:
"""@brief Add this subcommand to the subparsers object."""
list_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)
list_output = list_parser.add_argument_group("list output")
list_output.add_argument('-p', '--probes', action='store_true',
help="List available probes.")
list_output.add_argument('-t', '--targets', action='store_true',
help="List all known targets.")
list_output.add_argument('-b', '--boards', action='store_true',
help="List all known boards.")
list_output.add_argument('--plugins', action='store_true',
help="List available plugins.")
list_options = list_parser.add_argument_group('list options')
list_options.add_argument('-n', '--name',
help="Restrict listing to items matching the given name substring. Applies to targets and boards.")
list_options.add_argument('-r', '--vendor',
help="Restrict listing to items whose vendor matches the given name substring. Applies only to targets.")
list_options.add_argument('-s', '--source', choices=('builtin', 'pack'),
help="Restrict listing to targets from the specified source. Applies to targets.")
list_options.add_argument('-H', '--no-header', action='store_true',
help="Don't print a table header.")
return [cls.CommonOptions.COMMON, list_parser] | [
19,
335
] |
def METHOD_NAME(self, act_shape, wgt_shape, inp_stride, inp_offset, hexagon_session):
"""Test conv2d intrinsic implementation"""
assert act_shape[3] == wgt_shape[2]
# Currently, input offset does not affect the output shape
def get_out_shape(ash, wsh, inp_stride):
assert ash[3] == wsh[2]
osh = (
ash[0],
(ash[1] - wsh[0]) // inp_stride[0] + 1,
(ash[2] - wsh[1]) // inp_stride[1] + 1,
wsh[3],
)
assert tvm.tir.all([x > 0 for x in osh])
return osh
act = np.random.rand(*act_shape).astype("float16")
wgt = np.random.rand(*wgt_shape).astype("float16")
module = build_conv2d(get_hexagon_target("v68"))
mod = hexagon_session.load_module(module)
output = tvm.nd.array(
np.zeros(get_out_shape(act_shape, wgt_shape, inp_stride), dtype="float16"),
device=hexagon_session.device,
)
mod(
tvm.nd.array(act, device=hexagon_session.device),
tvm.nd.array(wgt, device=hexagon_session.device),
inp_offset[0], # off_t
inp_offset[1], # off_l
inp_stride[0], # stride_height
inp_stride[1], # stride_width
output,
)
out = output.numpy()
# Generate reference output and compare:
ref_out = conv2d_nhwc_python(
act.astype("float32"), wgt.astype("float32"), stride=inp_stride, padding="VALID"
).astype("float16")
tvm.testing.assert_allclose(out, ref_out, rtol=5e-2, atol=5e-2) | [
9,
3385
] |
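A quick check of the nested get_out_shape helper's VALID-padding arithmetic in the row above, under assumed shapes (batch, H, W, Cin) for activations and (Kh, Kw, Cin, Cout) for weights:

# H_out = (14 - 3) // 1 + 1 = 12, and likewise for W_out:
assert get_out_shape((1, 14, 14, 3), (3, 3, 3, 64), (1, 1)) == (1, 12, 12, 64)
# With stride 2: (14 - 3) // 2 + 1 = 6 in each spatial dimension.
assert get_out_shape((1, 14, 14, 3), (3, 3, 3, 64), (2, 2)) == (1, 6, 6, 64)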
def METHOD_NAME(self):
self.assertEqual(self.stamp_cost.quick_read('S', 'has_voted', ['a']), None)
self.stamp_cost.run_private_function(
f='tally_vote',
signer='stu',
vk='a',
obj=120
)
self.assertEqual(self.stamp_cost.quick_read('S', 'has_voted', ['a']), True) | [
9,
5969,
9811,
2757,
24,
220,
17560
] |
def METHOD_NAME(self, issuance_line):
"""
Credential Subject `id` property.
"""
expected_id = issuance_line.subject_id
composed_obv3 = OpenBadgesDataModel(issuance_line).data
assert composed_obv3["credentialSubject"]["id"] == expected_id | [
9,
2540,
1861,
147
] |
def METHOD_NAME(count_exact_amount):
"""Tests that the build coordinate protocol behaves correctly for substances
with exact amounts."""
import mdtraj
substance = Substance()
substance.add_component(Component("O"), MoleFraction(1.0))
substance.add_component(Component("C"), ExactAmount(1))
max_molecule = 11 if count_exact_amount else 10
build_coordinates = BuildCoordinatesPackmol("build_coordinates")
build_coordinates.max_molecules = max_molecule
build_coordinates.count_exact_amount = count_exact_amount
build_coordinates.substance = substance
with tempfile.TemporaryDirectory() as directory:
build_coordinates.execute(directory)
built_system = mdtraj.load_pdb(build_coordinates.coordinate_file_path)
assert built_system.n_residues == 11 | [
9,
56,
4645,
9699,
2017
] |
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.CsmOperationDescription"]:
"""Implements Csm operations Api to exposes the list of available Csm Apis under the resource
provider.
Implements Csm operations Api to exposes the list of available Csm Apis under the resource
provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmOperationDescription or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2018_02_01.models.CsmOperationDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2018-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2018-02-01"))
cls: ClsType[_models.CsmOperationCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_operations_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CsmOperationCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245,
710
] |
def METHOD_NAME(package_obj, template):
'''Given a package object and its SPDX template mapping, return a SPDX
document block for the corresponding source package.
The mapping should have keys:
SourcePackageName
SourcePackageVersion
PackageLicenseDeclared
PackageCopyrightText
PackageDownloadLocation'''
block = ''
mapping = package_obj.to_dict(template)
# Source Package Name
block += 'PackageName: {}\n'.format(mapping['SourcePackageName'])
# Source SPDXID
_, spdx_ref_src = spdx_common.get_package_spdxref(package_obj)
block += 'SPDXID: {}\n'.format(spdx_ref_src)
# Source Package Version
if mapping['SourcePackageVersion']:
block += 'PackageVersion: {}\n'.format(
mapping['SourcePackageVersion'])
# Package Download Location (Same as binary)
if mapping['PackageDownloadLocation']:
block += 'PackageDownloadLocation: {}\n'.format(
mapping['PackageDownloadLocation'])
else:
block += 'PackageDownloadLocation: NOASSERTION\n'
# Files Analyzed (always false for packages)
block += 'FilesAnalyzed: false\n'
# Package License Concluded (always NOASSERTION)
block += 'PackageLicenseConcluded: NOASSERTION\n'
# Package License Declared (use the license ref for this)
block += 'PackageLicenseDeclared: ' + spdx_common.get_package_license_declared(
mapping['PackageLicenseDeclared']) + '\n'
# Package Copyright Text
if mapping['PackageCopyrightText']:
block += 'PackageCopyrightText:' + spdx_formats.block_text.format(
message=mapping['PackageCopyrightText']) + '\n'
else:
block += 'PackageCopyrightText: NONE\n'
# Package Comments
block += spdx_formats.source_comment
return block | [
19,
1458,
360,
573
] |
def METHOD_NAME(
start_datetime_utc: datetime,
end_datetime_utc: datetime,
period_duration: timedelta,
overlap: Union[None, timedelta] = None,
) -> List[Period]:
"""
Returns a list of `Period` of duration `period_duration` covering the time range
from `start_datetime_utc` to `end_datetime_utc`.
If `overlap` is specified, the `Period` returned will overlap by the amount
specified, otherwise the end of one period will coincide with the start of the
next one.
If `period_duration` is at least as long as the time between `start_datetime_utc`
and `end_datetime_utc`, returns a list with a single `Period` starting on
`start_datetime_utc` and ending on `end_datetime_utc`.
This is useful to break a long time range into smaller periods for processing time
series data that would take up too much memory to handle in one piece.
Args:
start_datetime_utc (datetime): start of the period to cover
end_datetime_utc (datetime): end of the period to cover
period_duration (timedelta): duration of the individual
periods returned
overlap (Union[None, timedelta]): overlap between successive
periods, if specified. Defaults to `None`.
"""
if not overlap:
overlap = timedelta(0)
try:
assert period_duration > overlap
except AssertionError:
raise ValueError("'period_duration' cannot be shorter than 'overlap'.")
try:
assert end_datetime_utc >= start_datetime_utc
except AssertionError:
raise ValueError(
"'end_datetime_utc' cannot be before than 'start_datetime_utc'."
)
if end_datetime_utc - start_datetime_utc <= period_duration:
return [Period(start=start_datetime_utc, end=end_datetime_utc)]
else:
periods = METHOD_NAME(
start_datetime_utc=start_datetime_utc + period_duration - overlap,
end_datetime_utc=end_datetime_utc,
period_duration=period_duration,
overlap=overlap,
)
periods.insert(
0,
Period(
start=start_datetime_utc,
end=start_datetime_utc + period_duration,
),
)
return periods | [
93,
7780
] |
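A usage sketch of the recursive splitter above, assuming Period is a simple start/end container as the type hints suggest:

from datetime import datetime, timedelta

# One day in 6-hour chunks overlapping by 1 hour:
# [00:00-06:00], [05:00-11:00], [10:00-16:00], [15:00-21:00], [20:00-24:00]
periods = METHOD_NAME(
    start_datetime_utc=datetime(2023, 1, 1),
    end_datetime_utc=datetime(2023, 1, 2),
    period_duration=timedelta(hours=6),
    overlap=timedelta(hours=1),
)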
def METHOD_NAME(database_connection_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStaticSiteDatabaseConnectionResult:
"""
Static Site Database Connection resource.
Azure REST API version: 2022-09-01.
:param str database_connection_name: Name of the database connection.
:param str name: Name of the static site
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['databaseConnectionName'] = database_connection_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web:getStaticSiteDatabaseConnection', __args__, opts=opts, typ=GetStaticSiteDatabaseConnectionResult).value
return AwaitableGetStaticSiteDatabaseConnectionResult(
configuration_files=pulumi.get(__ret__, 'configuration_files'),
connection_identity=pulumi.get(__ret__, 'connection_identity'),
connection_string=pulumi.get(__ret__, 'connection_string'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
region=pulumi.get(__ret__, 'region'),
resource_id=pulumi.get(__ret__, 'resource_id'),
type=pulumi.get(__ret__, 'type')) | [
19,
628,
1055,
463,
550
] |
def METHOD_NAME(salt_master, salt_cli, tmp_path, proxy_id):
test_file = tmp_path / "testfile"
top_sls = """
base:
'*':
- core
"""
core_state = """
{}:
file:
- managed
- source: salt://testfile
- makedirs: true
""".format(
test_file
)
with salt_master.state_tree.base.temp_file(
"top.sls", top_sls
), salt_master.state_tree.base.temp_file("core.sls", core_state):
ret = salt_cli.run("state.highstate", minion_tgt=proxy_id)
for value in ret.data.values():
assert value["result"] is True | [
9,
551,
3583
] |
def METHOD_NAME(self, varname):
parts = varname.split('.')
num_dots = varname.count('.')
# Logic to deal with predefined sections like [preview], [plugin], etc.
if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
full_config = self._session.full_config
section, config_name = varname.split('.')
value = full_config.get(section, {}).get(config_name)
if value is None:
# Try to retrieve it from the profile config.
value = full_config['profiles'].get(
section, {}).get(config_name)
return value
if parts[0] == 'profile':
profile_name = parts[1]
config_name = parts[2]
remaining = parts[3:]
# Check if varname starts with 'default' profile (e.g.
# default.emr-dev.emr.instance_profile) If not, go further to check
# if varname starts with a known profile name
elif parts[0] == 'default' or (
parts[0] in self._session.full_config['profiles']):
profile_name = parts[0]
config_name = parts[1]
remaining = parts[2:]
else:
profile_name = self._session.get_config_variable('profile')
if profile_name is None:
profile_name = 'default'
config_name = parts[0]
remaining = parts[1:]
value = self._session.full_config['profiles'].get(
profile_name, {}).get(config_name)
if len(remaining) == 1:
try:
value = value.get(remaining[-1])
except AttributeError:
value = None
return value | [
19,
5428,
200,
99
] |
def METHOD_NAME(self) -> str:
"""
Unique identifier for this resource.
"""
return pulumi.get(self, "resource_guid") | [
191,
3983
] |
def METHOD_NAME():
client = boto3.client("quicksight", region_name="us-east-2")
resp = client.list_groups(AwsAccountId=ACCOUNT_ID, Namespace="default")
assert resp["GroupList"] == []
assert resp["Status"] == 200 | [
9,
245,
861,
2471
] |
def METHOD_NAME(self, other):
"""
The equals operation.
:param other: the object to compare with.
:type other: object
:return: True if equal, otherwise False.
:rtype: bool
"""
pass | [
816
] |
def METHOD_NAME(self, *args, **kwargs):
self.trait_set(**traits)
return mock.DEFAULT | [
142,
4214
] |
def METHOD_NAME(dd_agent_check):
config = create_e2e_core_test_config('netgear-readynas')
aggregator = common.dd_agent_check_wrapper(dd_agent_check, config, rate=True)
ip_address = get_device_ip_from_config(config)
common_tags = [
'snmp_profile:netgear-readynas',
'snmp_host:netgear-readynas.device.name',
'device_namespace:default',
'snmp_device:' + ip_address,
] + []
# --- TEST EXTENDED METRICS ---
assert_extend_generic_ucd(aggregator, common_tags)
# --- TEST METRICS ---
assert_common_metrics(aggregator, common_tags)
tag_rows = [
[
'netgear_readynasos_disk_id:acted forward forward Jaded',
'netgear_readynasos_disk_interface:kept zombies acted quaintly',
'netgear_readynasos_disk_model:acted but driving',
'netgear_readynasos_disk_serial:their quaintly zombies acted zombies',
'netgear_readynasos_disk_slot_name:their oxen forward Jaded but',
'netgear_readynasos_disk_state:online',
],
[
'netgear_readynasos_disk_id:but kept',
'netgear_readynasos_disk_interface:Jaded kept forward oxen driving kept acted zombies',
'netgear_readynasos_disk_model:kept their',
'netgear_readynasos_disk_serial:but driving their driving acted driving zombies their Jaded',
'netgear_readynasos_disk_slot_name:acted',
'netgear_readynasos_disk_state:offline',
],
]
for tag_row in tag_rows:
aggregator.assert_metric(
'snmp.netgear.readynasos.ataError', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.diskCapacity', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.diskTemperature', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
tag_rows = [
['netgear_readynasos_fan_type:but oxen oxen acted forward Jaded kept Jaded Jaded'],
['netgear_readynasos_fan_type:forward zombies Jaded but zombies forward zombies zombies forward'],
]
for tag_row in tag_rows:
aggregator.assert_metric(
'snmp.netgear.readynasos.fanRPM', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.fanStatus', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
tag_rows = [
['netgear_readynasos_temperature_type:driving driving quaintly'],
['netgear_readynasos_temperature_type:their but zombies'],
]
for tag_row in tag_rows:
aggregator.assert_metric(
'snmp.netgear.readynasos.temperatureMax', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.temperatureMin', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.temperatureValue', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
tag_rows = [
[
'netgear_readynasos_volume_name:quaintly',
'netgear_readynasos_volume_status:redundant',
],
[
'netgear_readynasos_volume_name:zombies kept Jaded Jaded kept Jaded acted their',
'netgear_readynasos_volume_status:degraded',
],
]
for tag_row in tag_rows:
aggregator.assert_metric(
'snmp.netgear.readynasos.volumeRAIDLevel', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.volumeFreeSpace', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
aggregator.assert_metric(
'snmp.netgear.readynasos.volumeSize', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
tag_rows = [
[
'netgear_readynasos_psu_desc:Jaded forward but kept quaintly their but',
'netgear_readynasos_psu_status:quaintly zombies but zombies forward Jaded forward',
],
[
'netgear_readynasos_psu_desc:their acted Jaded oxen driving Jaded forward zombies',
'netgear_readynasos_psu_status:quaintly their acted kept zombies driving',
],
]
for tag_row in tag_rows:
aggregator.assert_metric(
'snmp.netgear.readynasos.psu', metric_type=aggregator.GAUGE, tags=common_tags + tag_row
)
# --- TEST METADATA ---
device = {
'description': 'netgear-readynas Device Description',
'id': 'default:' + ip_address,
'id_tags': ['device_namespace:default', 'snmp_device:' + ip_address],
'ip_address': '' + ip_address,
'name': 'netgear-readynas.device.name',
'profile': 'netgear-readynas',
'status': 1,
'sys_object_id': '1.3.6.1.4.1.4526.100.16.1',
'vendor': 'netgear',
}
device['tags'] = common_tags
assert_device_metadata(aggregator, device)
# --- CHECK COVERAGE ---
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics()) | [
9,
7255,
337,
-1,
-1
] |
def METHOD_NAME(self):
oRule = function.rule_506()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_lower, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, []) | [
9,
1112,
446,
11158,
826
] |
def METHOD_NAME(self):
if self.self_nomination is not None and self.self_nomination.other_affiliations:
return self.self_nomination.other_affiliations
return self.nominations.first().other_affiliations | [
52,
2395,
15389
] |
def METHOD_NAME(picologging_exists: bool) -> None:
with patch("litestar.logging.config.find_spec") as find_spec_mock:
find_spec_mock.return_value = picologging_exists
log_config = LoggingConfig()
if picologging_exists:
assert log_config.handlers == default_picologging_handlers
else:
assert log_config.handlers == default_handlers | [
9,
668,
235,
376,
0
] |
def METHOD_NAME(self) -> bool:
"""Shuffle dataset samples before batching"""
return self.config.get("shuffle", False) | [
1124
] |
def METHOD_NAME(self): | [
19,
1188,
235,
281
] |
def METHOD_NAME(self, action): | [
166,
527,
1006
] |
def METHOD_NAME():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(x1) | [
391,
1170
] |
def METHOD_NAME(self, ctx):
operator = op_conversion[ctx.getText().strip()]
self.parameters['operator'] = operator | [
538,
837
] |
def METHOD_NAME(self) -> None:
self._client.METHOD_NAME() | [
1462
] |
def METHOD_NAME():
"""Returns a control rod guide tube universe."""
gt_inner_cell = openmc.Cell(fill=hot_water, region=-fuel_or)
gt_clad_cell = openmc.Cell(fill=clad, region=+fuel_or & -clad_or)
gt_outer_cell = openmc.Cell(fill=hot_water, region=+clad_or)
univ = openmc.Universe(name='Guide Tube')
univ.add_cells([gt_inner_cell, gt_clad_cell, gt_outer_cell])
return univ | [
1700,
16237,
2818
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(fname):
hash_md5 = hashlib.METHOD_NAME()
with open(fname, "rb") as f:
buf = f.read()
hash_md5.update(buf)
_DATA[hash_md5.hexdigest()] = buf
return hash_md5.hexdigest() | [
4179
] |
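A usage sketch for the row above; the masked name is evidently md5, since the body calls hashlib.METHOD_NAME(), and _DATA is assumed to be a module-level digest-to-bytes cache as the function body implies:

import hashlib

_DATA = {}  # digest -> raw file bytes

digest = METHOD_NAME("example.bin")
assert _DATA[digest] == open("example.bin", "rb").read()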
def METHOD_NAME(self, pid, name):
"""Wait for the given process to terminate.
@return tuple of exit code and resource usage
"""
try:
logging.debug("Waiting for process %s with pid %s", name, pid)
unused_pid, exitcode, ru_child = os.wait4(pid, 0)
return exitcode, ru_child
except OSError as e:
if self.PROCESS_KILLED and e.errno == errno.EINTR:
# Interrupted system call seems always to happen
# if we killed the process ourselves after Ctrl+C was pressed
# We can try again to get exitcode and resource usage.
logging.debug(
"OSError %s while waiting for termination of %s (%s): %s.",
e.errno,
name,
pid,
e.strerror,
)
try:
unused_pid, exitcode, ru_child = os.wait4(pid, 0)
return exitcode, ru_child
except OSError:
pass # original error will be handled and this ignored
logging.critical(
"OSError %s while waiting for termination of %s (%s): %s.",
e.errno,
name,
pid,
e.strerror,
)
return 0, None | [
618,
43,
356
] |
def METHOD_NAME(self):
# (line, (raw, effective))
tests = (('no spaces', (0, 0)),
# Internal space isn't counted.
(' space test', (8, 8)),
('\ttab test', (1, 8)),
('\t\tdouble tabs test', (2, 16)),
# Different results when mixing tabs and spaces.
(' \tmixed test', (9, 16)),
(' \t mixed test', (9, 10)),
('\t mixed test', (9, 16)),
# Spaces not divisible by tabwidth.
(' \tmixed test', (3, 8)),
(' \t mixed test', (3, 9)),
('\t mixed test', (3, 10)),
# Only checks spaces and tabs.
('\nnewline test', (0, 0)))
for line, expected in tests:
with self.subTest(line=line):
self.assertEqual(
editor.get_line_indent(line, tabwidth=8),
expected,
) | [
9,
-1,
1629
] |
async def METHOD_NAME(hass: HomeAssistant, entity_reg: EntityRegistry) -> None:
await create_input_boolean(hass)
domain_config = {
CONF_ENABLE_AUTODISCOVERY: False,
CONF_CREATE_DOMAIN_GROUPS: [
input_boolean.DOMAIN,
light.DOMAIN, # No light entities were created, so this group should not be created
],
}
await run_powercalc_setup(
hass,
get_simple_fixed_config("input_boolean.test", 100),
domain_config,
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
group_state = hass.states.get("sensor.all_input_boolean_power")
assert group_state
assert group_state.attributes.get(ATTR_ENTITIES) == {"sensor.test_power"}
assert not hass.states.get("sensor.all_light_power")
entity_entry = entity_reg.async_get("sensor.all_input_boolean_power")
assert entity_entry
assert entity_entry.platform == "powercalc" | [
9,
1674,
861
] |
def METHOD_NAME(self):
model_id = 'damo/speech_dfsmn_ans_psm_48k_causal'
ans = pipeline(Tasks.acoustic_noise_suppression, model=model_id)
output_path = os.path.abspath('output.wav')
with open(os.path.join(os.getcwd(), NOISE_SPEECH_FILE_48K), 'rb') as f:
data = f.read()
ans(data, output_path=output_path)
print(f'Processed audio saved to {output_path}') | [
9,
14070,
8425,
321
] |
def METHOD_NAME(self, arg_dict):
res = []
command = None
for key, value in arg_dict.items():
if key == 'command':
command = value
else:
res.append(self.mk_cli_from_key_value(key, value))
return command, res | [
3010,
615,
280,
553
] |
def METHOD_NAME(
self, METHOD_NAME: Callable[[], None], rom: NintendoDSRom, config: Pmd2Data
) -> None:
raise NotImplementedError() | [
8798
] |
def METHOD_NAME(self) -> None:
self.flight.flight_plan.tot_offset = -self.flight.flight_plan.tot_offset
self.update_departure_time() | [
766,
2927,
1540
] |
def METHOD_NAME(values, *s, bound=500): | [
7213,
912
] |
def METHOD_NAME(self, action_args=None):
"""Execute ``ansible-playbook`` and returns a string.
:return: str
"""
if self._ansible_command is None:
self.bake()
if not self._playbook:
LOG.warning("Skipping, %s action has no playbook.", self._config.action)
return None
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings("default", category=MoleculeRuntimeWarning)
self._config.driver.sanity_checks()
cwd = self._config.scenario_path
result = util.run_command(
cmd=self._ansible_command,
env=self._env,
debug=self._config.debug,
cwd=cwd,
)
if result.returncode != 0:
from rich.markup import escape
util.sysexit_with_message(
f"Ansible return code was {result.returncode}, command was: [dim]{escape(shlex.join(result.args))}[/dim]",
result.returncode,
warns=warns,
)
return result.stdout | [
750
] |
def METHOD_NAME(test_data):
"""
Test for checking if nodes are authorised.
"""
exp_cmd = [
"pcs",
"host",
"auth",
"-u",
test_data.username,
"-p",
test_data.password,
]
exp_cmd.extend(test_data.nodes)
mock_cmd = MagicMock()
patch_salt = patch.dict(
pcs.__salt__,
{"cmd.run_all": mock_cmd, "pkg.version_cmp": MagicMock(return_value=1)},
)
with patch_salt:
pcs.is_auth(
test_data.nodes, pcsuser=test_data.username, pcspasswd=test_data.password
)
assert mock_cmd.call_args_list[0][0][0] == exp_cmd | [
9,
137,
2433
] |
def METHOD_NAME(kernel_debug, leaf_debug):
@cuda.jit(device=True, debug=leaf_debug, opt=False)
def f3(x):
return x * x
@cuda.jit(device=True)
def f2(x):
return f3(x) + 1
@cuda.jit(device=True)
def f1(x, y):
return x - f2(y)
@cuda.jit(debug=kernel_debug, opt=False)
def kernel(x, y):
f1(x, y)
kernel[1, 1](1, 2) | [
2756,
398,
5685
] |
def METHOD_NAME(self, wdg, ctx: cairo.Context, *args):
if self.surface:
wdg.set_size_request(self.surface.get_width(), self.surface.get_height())
ctx.fill()
ctx.set_source_surface(self.surface, 0, 0)
ctx.get_source().set_filter(cairo.Filter.NEAREST)
ctx.paint()
return True | [
1100
] |
def METHOD_NAME(s, colordb):
# function called on every color
def scan_color(s, colordb=colordb):
try:
r, g, b = colordb.find_byname(s)
except ColorDB.BadColor:
try:
r, g, b = ColorDB.rrggbb_to_triplet(s)
except ColorDB.BadColor:
return None, None, None
return r, g, b
#
# First try the passed in color
r, g, b = scan_color(s)
if r is None:
# try the same color with '#' prepended, since some shells require
# this to be escaped, which is a pain
r, g, b = scan_color('#' + s)
if r is None:
print 'Bad initial color, using gray50:', s
r, g, b = scan_color('gray50')
if r is None:
usage(1, 'Cannot find an initial color to use')
# does not return
return r, g, b | [
2471,
36
] |
def METHOD_NAME(self):
return self.num_heads | [
181,
3998,
9694
] |
def METHOD_NAME(self, node):
if node is None or node.left is None:
return node
return self.METHOD_NAME(node.left) | [
19,
1835,
99,
1716
] |
def METHOD_NAME(self):
"""
Make sure when the OAuth2Config for this backend is updated, the new config is properly grabbed
"""
original_provider_config = self.configure_identityServer3_provider(enabled=True, slug="original")
updated_provider_config = self.configure_identityServer3_provider(
slug="updated",
backend_name="identityServer3"
)
assert self.id3_instance.get_config() == updated_provider_config
assert self.id3_instance.get_config() != original_provider_config | [
9,
200,
1887,
4592
] |
def METHOD_NAME(self, text): | [
1276,
58,
2483
] |
def METHOD_NAME(encoded_seqs):
return encoded_seqs[..., ::-1, ::-1] | [
1354,
9343
] |
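A small illustration of the one-liner above, assuming sequences are one-hot encoded as (..., length, 4) with alphabet order ACGT, so reversing the channel axis swaps A with T and C with G:

import numpy as np

seq = np.array([[1, 0, 0, 0],   # A
                [0, 1, 0, 0],   # C
                [0, 0, 1, 0]])  # G  ("ACG", columns ordered A, C, G, T)

rc = seq[..., ::-1, ::-1]
# Rows now read C, G, T: the reverse complement "CGT".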
def METHOD_NAME(self) -> None:
self.port = find_free_port()
config = self.get_config()
config.update({
'port': self.port,
'address': 'localhost'
})
self.add_config(config)
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
er = self.get_extension_registry()
self.add_extension(er)
self.server = Server(framework=TornadoFramework(), config=config,
extension_registry=er)
tornado = threading.Thread(target=self.server.start)
tornado.daemon = True
tornado.start()
self.http = urllib3.PoolManager() | [
0,
1
] |
def METHOD_NAME(self, compare_lps):
self.TEST_REGISTER.add("constraints.resource_max")
model = build_test_model(
{},
"simple_supply_plus,resample_two_days,investment_costs",
)
custom_math = {
"constraints": {"my_constraint": model.math.constraints.resource_max}
}
compare_lps(model, custom_math, "resource_max") | [
9,
191,
232
] |
def METHOD_NAME(df_groupby, item):
try:
hash(item)
hashable = True
except TypeError:
hashable = False
if hashable and item in df_groupby.dtypes:
output_types = [OutputType.series_groupby]
elif isinstance(item, Iterable) and all(it in df_groupby.dtypes for it in item):
output_types = [OutputType.dataframe_groupby]
else:
raise NameError(f"Cannot slice groupby with {item!r}")
if df_groupby.selection:
raise IndexError(f"Column(s) {df_groupby.selection!r} already selected")
op = GroupByIndex(selection=item, output_types=output_types)
return op(df_groupby) | [
2057,
2834,
5181
] |
def METHOD_NAME(data, preds):
"""Merge predictions with BIO format during prediction."""
results = []
for sent, pred in zip(data, preds):
results.append(
[
{"entity_group": value[-1], "start": key, "end": value[0]}
for key, value in merge_spans(sent, pred).items()
]
)
return results | [
411,
3111,
275
] |
def METHOD_NAME(self):
result = yield self.resolver.resolve("google.com", 80, socket.AF_INET)
self.assertIn((socket.AF_INET, ("1.2.3.4", 80)), result)
result = yield self.resolver.resolve("google.com", 80, socket.AF_INET6)
self.assertIn(
(socket.AF_INET6, ("2a02:6b8:7c:40c:c51e:495f:e23a:3", 80, 0, 0)), result
) | [
9,
1014,
11519
] |
def METHOD_NAME(self, policy, key, value):
subpolicy = {key: value}
with pytest.raises(e.ParamError):
aerospike.client({"hosts": [("localhost", 3000)], "policies": {policy: subpolicy}}) | [
9,
532,
-1,
414,
119
] |
def METHOD_NAME():
tokens = tokenize_target("foo -opt1=v,a,l,u,e,1 --flag")
expected_tokens = ["foo", "-opt1=v,a,l,u,e,1", "--flag"]
assert len(tokens) == len(expected_tokens)
assert tokens == expected_tokens | [
9,
4022,
1030,
41,
6512
] |
async def METHOD_NAME(
block_document: schemas.actions.BlockDocumentCreate,
db: PrefectDBInterface = Depends(provide_database_interface), | [
129,
573,
352
] |
def METHOD_NAME(upstream_and_remote):
upstream_repo_path = upstream_and_remote[0]
_test_srpm_symlinking(upstream_repo_path, ".") | [
9,
13611,
-1,
1821,
157
] |
def METHOD_NAME(
self, environment: Environment, log: Logger, node: Node
) -> None:
node_context = get_node_context(node)
if node_context.os_disk_base_file_fmt == DiskImageFormat.QCOW2:
self.host_node.tools[QemuImg].convert(
"qcow2",
node_context.os_disk_base_file_path,
"raw",
node_context.os_disk_file_path,
)
else:
self.host_node.execute(
f"cp {node_context.os_disk_base_file_path}"
f" {node_context.os_disk_file_path}",
expected_exit_code=0,
expected_exit_code_failure_message="Failed to copy os disk image",
)
if node_context.os_disk_img_resize_gib:
self.host_node.tools[QemuImg].resize(
src_file=node_context.os_disk_file_path,
size_gib=node_context.os_disk_img_resize_gib,
) | [
129,
1716,
350,
113
] |
def METHOD_NAME(self):
return '\n'.join('{}: {}'.METHOD_NAME(*i) for i in self) | [
275
] |
def METHOD_NAME(self, x: int, y: int, width: int, height: int):
wb.wb_display_draw_rectangle(self._tag, int(x), int(y), int(width), int(height)) | [
1100,
5928
] |
def METHOD_NAME (self):
'''
Starts (Subscribes) the client.
'''
self.sub = self.node.create_subscription(Odometry, self.topic, self.__callback,10) | [
447
] |
def METHOD_NAME(self) -> str:
"""
The name of the certificate.
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(*args):
sys.stdout = sys.stderr
for msg in args: print msg
print __doc__
sys.exit(0) | [
558
] |
def METHOD_NAME(setupcfg: ConfigUpdater, opts: ScaffoldOpts):
setupcfg["options"].set("packages", "find_namespace:")
return setupcfg, opts | [
369,
416,
41,
416,
1194
] |
def METHOD_NAME(self, f):
assert callable(f)
return Processor(self, f, *self.args, **self.kw) | [
231
] |
def METHOD_NAME(features):
"""
Registering the `mark` feature, which uses the `MARK` Draft.js inline style type,
and is stored as HTML with a `<mark>` tag.
"""
feature_name = 'strikethrough'
type_ = 'STRIKETHROUGH'
tag = 'strikethrough'
# 2. Configure how Draftail handles the feature in its toolbar.
control = {
'type': type_,
'label': 's',
'description': 'Strikethrough',
# This isn’t even required – Draftail has predefined styles for STRIKETHROUGH.
# 'style': {'textDecoration': 'line-through'},
}
# 3. Call register_editor_plugin to register the configuration for Draftail.
features.register_editor_plugin(
'draftail', feature_name, draftail_features.InlineStyleFeature(control)
)
# 4. Configure the content transform from the DB to the editor and back.
db_conversion = {
'from_database_format': {tag: InlineStyleElementHandler(type_)},
'to_database_format': {'style_map': {type_: tag}},
}
# 5. Call register_converter_rule to register the content transformation conversion.
features.register_converter_rule('contentstate', feature_name, db_conversion)
# 6. (optional) Add the feature to the default features list to make it available
# on rich text fields that do not specify an explicit 'features' list
features.default_features.append('strikethrough') | [
372,
11629,
964
] |
def METHOD_NAME(msg):
print(msg, file=sys.stderr)
sys.exit(1) | [
3172
] |
def METHOD_NAME(self):
"""Returns info needed to reconstruct the expression besides the args.
"""
return [self.A] | [
19,
365
] |
def METHOD_NAME(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5]) | [
9,
5455,
2378
] |
def METHOD_NAME(self) -> str:
"""
Name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(self):
return False | [
8315
] |
def METHOD_NAME(self, img, metadata):
# The absolute radiometric correction follows this equation
# L = GAIN * DN * abscalfactor / effective bandwidth + OFFSET
# absCalFactor and effective Bandwidth are in the image metafile (IMD)
GAIN = [0.905, 0.940, 0.938, 0.962, 0.964, 1.0, 0.961, 0.978,
1.20, 1.227, 1.199, 1.196, 1.262, 1.314, 1.346, 1.376]
OFFSET = [-8.604, -5.809, -4.996, -3.646, -3.021, -4.521, -5.522,
-2.992, -5.546, -2.6, -2.309, -1.676, -0.705, -0.669,
-0.512, -0.372]
absCalFactor = metadata['absCalFactor']
effectiveBandwidth = metadata['effectiveBandwidth']
corrected_img = img.copy()
for i in range(img.shape[2]):
corrected_img[:, :, i] = GAIN[i] * img[:, :, i] * \
(float(absCalFactor[i]) /
float(effectiveBandwidth[i])) + OFFSET[i]
return corrected_img | [
4653,
-1,
2451
] |
def METHOD_NAME(self):
c = Client()
url = reverse("oidc_onboarding_schemas_jwt_client_assertion")
res = c.get(url)
self.assertTrue(res.status_code == 200)
self.assertIn("Schemas jwt", res.content.decode()) | [
9,
135,
340,
8499
] |
def METHOD_NAME():
ax = make_3d_axis(1.0)
try:
plot_ellipsoid(ax, wireframe=False)
assert len(ax.collections) == 1
finally:
ax.remove() | [
9,
1288,
5963
] |
def METHOD_NAME(self):
return "-".join([HyperShift.NODEPOOL_NAMESPACE, self.name]) | [
1194
] |
def METHOD_NAME(self):
service_connection = MockConnection()
self.contents = '0123456789'
bucket = MockBucket(service_connection, 'mybucket')
key = bucket.new_key('mykey')
key.set_contents_from_string(self.contents)
self.keyfile = KeyFile(key) | [
0,
1
] |
def METHOD_NAME(self):
return self._cam.image_height_pixels | [
19,
8885,
1877
] |
def METHOD_NAME(self, world: nimblephysics_libs._nimblephysics.simulation.World, forces: numpy.ndarray[numpy.float64, _Shape[m, n]], perfLog: nimblephysics_libs._nimblephysics.performance.PerformanceLog = None) -> numpy.ndarray[numpy.int32, _Shape[m, 1]]: ... | [
86,
41,
4057
] |
def METHOD_NAME(filepath: str, index: int):
context.inputs.set_values(img_path_node_id, [filepath, directory, index]) | [
1553
] |
def METHOD_NAME(self) -> None:
"""Is the rotation is identity for 1 atom?"""
a1 = self.a[:1]
b1 = self.b[:1]
if hasattr(self.r, "as_matrix"):
numpy.testing.assert_allclose(numpy.eye(3), utils.get_rotation(a1, b1).as_matrix(), atol=self.delta)
else:
numpy.testing.assert_allclose(numpy.eye(3), utils.get_rotation(a1, b1).as_dcm(), atol=self.delta) | [
9,
206,
4637
] |
def METHOD_NAME():
pm = ErtPluginManager(plugins=[dummy_plugins])
expected = {
"job1": {
"config_file": "/dummy/path/job1",
"source_package": "dummy",
"source_function_name": "installable_jobs",
"description": "job description",
"examples": "example 1 and example 2",
"category": "test.category.for.job",
},
"job2": {
"config_file": "/dummy/path/job2",
"source_package": "dummy",
"source_function_name": "installable_jobs",
},
}
assert pm.get_documentation_for_jobs() == expected | [
9,
202,
1200
] |
def METHOD_NAME(self):
return self.microarchitecture.METHOD_NAME | [
156
] |
def METHOD_NAME():
RebootActive.reboot_shutdown = False | [
4589,
537
] |
def METHOD_NAME(self, course, user):
default_choices = {}
lang = user.profile.language
for field in self.filters:
self.filters[field].field.label = u'<strong>{0}</strong>'.format(self.filters[field].field.label)
teacher_choices = []
for teacher in course.get_teachers():
teacher_choices.append((teacher.id, teacher.get_full_name()))
if teacher.id == user.id:
default_choices['responsible'] = [str(teacher.id)]
self.filters['responsible'].field.choices = tuple(teacher_choices)
self.filters['followers'].field.choices = tuple(teacher_choices)
students_choices = [(teacher.id, teacher.get_full_name()) for teacher in course.get_students()]
self.filters['students'].field.choices = tuple(students_choices)
tasks_all = Task.objects\
.filter(course=course)\
.exclude(type=Task.TYPE_MATERIAL)\
.distinct()
seminars = tasks_all.filter(type=Task.TYPE_SEMINAR)
task_choices = [(task.id, task.get_title(lang)) for task in seminars]
self.filters['seminars'].field.choices = tuple(task_choices)
tasks = tasks_all.exclude(type=Task.TYPE_SEMINAR)
task_choices = [(task.id, task.get_title(lang)) for task in tasks]
self.filters['task'].field.choices = tuple(task_choices)
status_choices = []
for status in course.issue_status_system.statuses.exclude(tag=IssueStatus.STATUS_SEMINAR):
status_choices.append((status.id, status.get_name(lang)))
if status.tag == Issue.STATUS_VERIFICATION and default_choices:
default_choices['status_field'] = [str(status.id)]
for status_id in sorted(IssueStatus.HIDDEN_STATUSES.values(), reverse=True):
status_field = IssueStatus.objects.get(pk=status_id)
status_choices.insert(0, (status_field.id, status_field.get_name(lang)))
self.filters['status_field'].field.choices = tuple(status_choices)
return default_choices | [
0,
1122
] |
def METHOD_NAME(weekday, name):
return "%s%s-%s" % (WEEKDAY_PREFIX, weekday, name) | [
426,
6235
] |
def METHOD_NAME():
asset = load_asset('PrimalEarth/CoreBlueprints/DinoColorSet_Baryonyx')
props = asset.default_export.properties
colors = parse_colors(props)
assert colors[0] and colors[0]['name'] == 'Body'
assert colors[0]['values'][0] == 'Dino Dark Orange' and colors[0]['values'][-1] == 'Black'
assert colors[1] and colors[1]['name'] == 'Top Fins'
assert colors[1]['values'][0] == 'Dino Light Yellow' and colors[1]['values'][-1] == 'Black'
assert colors[2] == EMPTY_COLORS
assert colors[3] == EMPTY_COLORS
assert colors[4] and colors[4]['name'] == 'Top Stripes'
assert colors[4]['values'][0] == 'Dino Dark Grey' and colors[4]['values'][-1] == 'Dark Grey'
assert colors[5] and colors[5]['name'] == 'Underbelly'
assert colors[5]['values'][0] == 'BigFoot0' and colors[5]['values'][-1] == 'Light Grey' | [
9,
14156,
36,
0,
-1
] |
def METHOD_NAME(cls, basetyp, baseptr, indices, strides, srcinfo):
assert strides[-1] == "1"
idxs = indices[:-1] or ""
if idxs:
idxs = "[" + "][".join(idxs) + "]"
return f"{baseptr}{idxs}" | [
1092
] |
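The emitter above asserts a unit innermost stride and drops that index, so a hypothetical call (Cls standing in for whatever class defines it; basetyp and srcinfo are unused here, so None fills them) might look like:

assert Cls.METHOD_NAME(None, "buf", ["i", "j", "k"], ["N*M", "M", "1"], None) \
    == "buf[i][j]"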
def METHOD_NAME(self):
if self._sc is not None:
spatial_factory = SpatialRDDFactory(self._sc)
else:
raise TypeError("Please initialize spark Session first")
return spatial_factory.create_linestring_rdd() | [
7936,
6580,
9894
] |
def METHOD_NAME(self):
class HasHTMLOnly(object):
def __html__(self):
return Markup('<foo>')
class HasHTMLAndFormat(object):
def __html__(self):
return Markup('<foo>')
def __html_format__(self, spec):
return Markup('<FORMAT>')
assert Markup('{0}').format(HasHTMLOnly()) == Markup('<foo>')
assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('<FORMAT>') | [
9,
343,
1901
] |
def METHOD_NAME(self):
return self.fcoe | [
365,
245
] |
def METHOD_NAME(self):
if self.model_type == "LeNet":
return LeNet()
elif self.model_type == "resnet56":
return resnet56(10, pretrained=False, path=None)
elif self.model_type == "resnet20":
return resnet20(10)
else:
raise Exception(f"do not support this model: {self.model_type}") | [
19,
578
] |
def METHOD_NAME(self):
pass | [
69,
238
] |
def METHOD_NAME(self) -> Optional[float]:
"""
NSX revision number.
"""
return pulumi.get(self, "revision") | [
71
] |
def METHOD_NAME():
pass | [
559
] |
def METHOD_NAME(self, path, content_type, retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, if_generation_match=None):
assert path == self.PATH
assert content_type == "application/zip"
assert isinstance(retry, (Retry, ConditionalRetryPolicy))
assert isinstance(if_generation_match, (int, type(None))) | [
172,
280,
1147
] |