text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(
self, path: AbstractGeometricPath, gridpoints: np.ndarray, *args, **kwargs
):
if path.dof != self.dof:
raise ValueError(
"Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format(
self.dof, path.dof
)
)
ps_vec = (path(gridpoints, order=1)).reshape((-1, path.dof))
pss_vec = (path(gridpoints, order=2)).reshape((-1, path.dof))
dof = path.dof
F_single = np.zeros((dof * 2, dof))
g_single = np.zeros(dof * 2)
g_single[0:dof] = self.alim[:, 1]
g_single[dof:] = -self.alim[:, 0]
F_single[0:dof, :] = np.eye(dof)
F_single[dof:, :] = -np.eye(dof)
if self.discretization_type == DiscretizationType.Collocation:
return (
ps_vec,
pss_vec,
np.zeros_like(ps_vec),
F_single,
g_single,
None,
None,
)
elif self.discretization_type == DiscretizationType.Interpolation:
return canlinear_colloc_to_interpolate(
ps_vec,
pss_vec,
np.zeros_like(ps_vec),
F_single,
g_single,
None,
None,
gridpoints,
identical=True,
)
else:
raise NotImplementedError("Other form of discretization not supported!") | [
226,
1126,
434
] |
def METHOD_NAME(self):
src1_data = [1, 2, 3, 0x50005004, 0x11001150]
expected_result = [~1, ~2, ~3, ~0x50005004, ~0x11001150]
op = blocks.not_ii()
self.help_ii(((src1_data),),
expected_result, op) | [
9,
130,
2490
] |
def METHOD_NAME(value):
return packages.PackageRestriction(
attr,
values.ContainmentMatch(
values_kls(token_kls(piece.strip()) for piece in value.split(","))
),
) | [
214
] |
async def METHOD_NAME(
project: str,
secrets: List[str] = fastapi.Query(None, alias="secret"),
provider: mlrun.common.schemas.SecretProviderName = mlrun.common.schemas.SecretProviderName.kubernetes,
token: str = fastapi.Header(
None, alias=mlrun.common.schemas.HeaderNames.secret_store_token
),
auth_info: mlrun.common.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session), | [
245,
155,
2161
] |
def METHOD_NAME(self):
"""
Test that source-built clang release_80 version string is parsed
correctly.
"""
with open('clang_8_src_version_output',
encoding="utf-8", errors="ignore") as version_output:
version_string = version_output.read()
parser = version.ClangVersionInfoParser()
version_info = parser.parse(version_string)
self.assertIsNot(version_info, None)
self.assertEqual(version_info.major_version, 8)
self.assertEqual(version_info.minor_version, 0)
self.assertEqual(version_info.patch_version, 1)
self.assertEqual(version_info.installed_dir, '/path/to/clang/bin') | [
9,
4737,
280,
1458,
4040,
1629
] |
def METHOD_NAME(self, id: str):
self.pool.discard(id) | [
14202
] |
async def METHOD_NAME(
page: Optional[int] = None,
page_size: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Fetch many clans from the database."""
query = f"""\
SELECT {READ_PARAMS}
FROM clans
"""
params = {}
if page is not None and page_size is not None:
query += """\
LIMIT :limit
OFFSET :offset
"""
params["limit"] = page_size
params["offset"] = (page - 1) * page_size
recs = await app.state.services.database.fetch_all(query, params)
return [dict(rec) for rec in recs] | [
1047,
1401
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(servicebus_management_client):
queues = list(servicebus_management_client.list_queues())
for queue in queues:
try:
servicebus_management_client.delete_queue(queue.name)
except:
pass | [
537,
6138
] |
def METHOD_NAME(self):
cols = [
byte_col(name="Byte", data=()),
char_col(name="Char", data=''),
short_col(name="Short", data=[]),
int_col(name="Int", data=[]),
long_col(name="Long", data=[]),
long_col(name="NPLong", data=np.array([], dtype=np.int8)),
float_col(name="Float", data=[]),
double_col(name="Double", data=[]),
string_col(name="String", data=[]),
datetime_col(name="Datetime", data=[]),
]
dh_table = new_table(cols=cols)
pa_table = dharrow.to_arrow(dh_table)
dh_table_rt = dharrow.to_table(pa_table)
self.assertEqual(dh_table_rt.size, 0)
self.assert_table_equals(dh_table, dh_table_rt) | [
9,
3834,
3835,
35
] |
def METHOD_NAME(self, n1SpoolUp, n1SpoolDown, n2SpoolUp, n2SpoolDown):
while self.fdm.run():
n1 = self.fdm['propulsion/engine/n1']
n2 = self.fdm['propulsion/engine/n2']
N2norm = (n2-self.idleN2)/self.N2_factor;
if n2 >= 100.:
# Trigger the engine spool down
self.fdm['fcs/throttle-cmd-norm'] = 0.0
if N2norm > 0.0:
self.assertAlmostEqual(n1, newN1)
self.assertAlmostEqual(n2, newN2)
if n2 > 15.0:
sigma = self.fdm['atmosphere/sigma']
n = min(1.0, N2norm + 0.1)
spoolup = self.delay / (1 + 3 * (1-n)*(1-n)*(1-n) + (1 - sigma))
throttlePos = self.fdm['fcs/throttle-cmd-norm']
targetN1 = self.idleN1+throttlePos*self.N1_factor
targetN2 = self.idleN2+throttlePos*self.N2_factor
newN1 = seek(n1, targetN1, n1SpoolUp(N2norm),
n1SpoolDown(N2norm))
newN2 = seek(n2, targetN2, n2SpoolUp(N2norm),
n2SpoolDown(N2norm))
if N2norm == 0.0 and self.fdm['fcs/throttle-cmd-norm'] == 0.0:
break | [
22,
782
] |
def METHOD_NAME(self):
pass | [
709,
710
] |
def METHOD_NAME(first, second, *rest):
if not rest:
return urljoin(first, second)
return urljoin(urljoin(first, second), *rest) | [
2831
] |
def METHOD_NAME(self, v):
self._pack_into(ctypes.sizeof(ctypes.c_size_t) , "@N", v) | [
77,
2629,
1318,
791
] |
def METHOD_NAME(rank):
mp_size = gpc.get_world_size(ParallelMode.MODEL)
rank_within_mp_group = rank % mp_size
mp_local_rank = gpc.get_local_rank(ParallelMode.MODEL)
assert rank_within_mp_group == mp_local_rank | [
250,
578,
1498,
1499
] |
def METHOD_NAME():
image_root_url = ADDON.getSettingString('originalUrl')
preview_root_url = ADDON.getSettingString('previewUrl')
last_updated = ADDON.getSettingString('lastUpdated')
if not image_root_url or not preview_root_url or not last_updated or \
float(last_updated) < _get_date_numeric(datetime.now() - timedelta(days=30)):
conf = _get_configuration()
if conf:
image_root_url = conf['images']['secure_base_url'] + 'original'
preview_root_url = conf['images']['secure_base_url'] + 'w780'
ADDON.setSetting('originalUrl', image_root_url)
ADDON.setSetting('previewUrl', preview_root_url)
ADDON.setSetting('lastUpdated', str(
_get_date_numeric(datetime.now())))
return image_root_url, preview_root_url | [
557,
414,
2248
] |
def METHOD_NAME(
size_mb,
interface = "virtio-blk",
physical_block_size = 512,
logical_block_size = 512,
contains_kernel = None,
serial = None):
return disk_t(
package = "//antlir:empty",
additional_scratch_mb = size_mb,
interface = interface,
subvol = None,
physical_block_size = physical_block_size,
logical_block_size = logical_block_size,
contains_kernel = contains_kernel,
serial = serial,
) | [
80,
944,
7884,
113
] |
def METHOD_NAME(self):
"""
Context manager to create a tmp user, and automatically delete it after use
with self.get_tmp_user() as user:
...
"""
user = self.create_user()
try:
yield user
finally:
self.remove_user(user.user_name) | [
19,
4136,
21
] |
def METHOD_NAME(key, value):
if '.' in key and key.split(".")[0] == api_field:
prefix, qualifier = key.split(".", maxsplit=1)
try:
return date_filter(qualifier, value)
except ValueError as e:
raise InvalidFilterError(str(e))
return {} | [
153,
3553
] |
def METHOD_NAME(self, key, cert, domain, password: Incomplete | None = None) -> None: ... | [
238,
1548
] |
def METHOD_NAME(self):
"""LC_ALL,LC_COLLATE,LC_CTYPE,LC_MONETARY,LC_NUMERIC,LC_TIME,LC_CHAR_MAX"""
self.assertEqual(0,_locale.LC_ALL)
self.assertEqual(1,_locale.LC_COLLATE)
self.assertEqual(2,_locale.LC_CTYPE)
self.assertEqual(3,_locale.LC_MONETARY)
self.assertEqual(4,_locale.LC_NUMERIC)
self.assertEqual(5,_locale.LC_TIME)
self.assertEqual(127,_locale.CHAR_MAX) | [
9,
779,
253
] |
def METHOD_NAME(psy):
''' Transform a specific Schedule by making all loops
over vertical levels OpenMP parallel.
:param psy: the object holding all information on the PSy layer \
to be modified.
:type psy: :py:class:`psyclone.psyGen.PSy`
:returns: the transformed PSy object
:rtype: :py:class:`psyclone.psyGen.PSy`
'''
# Get the Schedule of the target routine
sched = psy.invokes.get('tra_adv').schedule
loops = [loop for loop in sched.walk(Loop) if loop.loop_type == "levels"]
idx = 0
# Loop over each of these loops over levels to see which neighbour each
# other in the Schedule and thus can be put in a single parallel region.
while idx < len(loops):
child = loops[idx]
posn = child.parent.children.index(child)
loop_list = [child]
current = idx + 1
# Look at the children of the parent of the current node, starting
# from the immediate sibling of the current node
for sibling in child.parent.children[posn+1:]:
# Is this immediate sibling also in our list of loops?
if current < len(loops) and sibling is loops[current]:
# It is so add it to the list and move on to the next sibling
loop_list.append(sibling)
current += 1
else:
# It's not so that's the end of the list of nodes that we
# can enclose in a single parallel region
break
idx = current
try:
OMP_PARALLEL_TRANS.apply(loop_list)
for loop in loop_list:
OMP_LOOP_TRANS.apply(loop)
except TransformationError:
pass
directives = sched.walk(Directive)
print(f"Added {len(directives)} Directives")
# Display the transformed PSyIR
print(sched.view())
# Return the modified psy object
return psy | [
4185
] |
def METHOD_NAME(api_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
schema_id: Optional[str] = None,
service_name: Optional[str] = None,
workspace_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceApiSchemaResult:
"""
Get the schema configuration at the API level.
Azure REST API version: 2022-09-01-preview.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str schema_id: Schema id identifier. Must be unique in the current API Management service instance.
:param str service_name: The name of the API Management service.
:param str workspace_id: Workspace identifier. Must be unique in the current API Management service instance.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['resourceGroupName'] = resource_group_name
__args__['schemaId'] = schema_id
__args__['serviceName'] = service_name
__args__['workspaceId'] = workspace_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement:getWorkspaceApiSchema', __args__, opts=opts, typ=GetWorkspaceApiSchemaResult).value
return AwaitableGetWorkspaceApiSchemaResult(
components=pulumi.get(__ret__, 'components'),
content_type=pulumi.get(__ret__, 'content_type'),
definitions=pulumi.get(__ret__, 'definitions'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
type=pulumi.get(__ret__, 'type'),
value=pulumi.get(__ret__, 'value')) | [
19,
1976,
58,
135
] |
def METHOD_NAME(self):
"""
Handles logic for converting pydantic classes into valid argument strings.
This should set arg standards for all integrations and should generally not
be overridden. If the need to override comes up, consider updating this method
instead.
:return: string of the full CLI command
"""
args_dict = self.run_args.dict()
args_string_list = []
for key, value in args_dict.items():
key = "--" + key
key = key.replace("_", "-") if self.dashed_keywords else key
# Handles bool type args (e.g. --do-train)
if isinstance(value, bool):
if value:
args_string_list.append(key)
elif isinstance(value, List):
if len(value) < 2:
raise ValueError(
"List arguments must have more one entry. "
f"Received {key}:{value}"
)
# Handles args that are both bool and value based (see evolve in yolov5)
if isinstance(value[0], bool):
if value[0]:
args_string_list.extend([key, str(value[1])])
# Handles args that have multiple values after the keyword.
# e.g. --freeze-layers 0 10 15
else:
args_string_list.append(key)
args_string_list.extend(map(str, value))
# Handles the most straightforward case of keyword followed by value
# e.g. --epochs 30
else:
if value is None:
continue
args_string_list.extend([key, str(value)])
pre_args = self.pre_args.split(" ") if self.pre_args else []
return pre_args + [self.command_stub] + args_string_list | [
129,
462,
782
] |
def METHOD_NAME(tv): return \ | [
14263,
741,
1482
] |
def METHOD_NAME(exception_handler=None, # pylint: disable=too-many-arguments
formatters=None, filters=None, handlers=None, loggers=None, config=None, variables=None):
urllib3.disable_warnings()
warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
if exception_handler:
sys.excepthook = exception_handler
if formatters is None:
formatters = {
'default': {
'()': MultilineMessagesFormatter,
'format': '< t:%(asctime)s f:%(filename)-15s l:%(lineno)-4s c:%(name)-20s p:%(levelname)-5s > %(message)s'
},
}
if filters is None:
filters = {
'filter_remote': {
'()': FilterRemote
}
}
if handlers is None:
handlers = {
'console': {
'level': 'INFO',
'formatter': 'default',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout', # Default is stderr
'filters': ['filter_remote']
},
'outfile': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '{log_dir}/sct.log',
'mode': 'a',
'formatter': 'default',
},
'argus': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '{log_dir}/argus.log',
'mode': 'a',
'formatter': 'default',
}
}
if loggers is None:
loggers = {
'': { # root logger
'handlers': ['console', 'outfile'],
'level': 'DEBUG',
'propagate': True
},
'botocore': {
'level': 'CRITICAL'
},
'boto3': {
'level': 'CRITICAL'
},
's3transfer': {
'level': 'CRITICAL'
},
'multiprocessing': {
'level': 'DEBUG',
'propagate': True,
},
'paramiko.transport': {
'level': 'CRITICAL'
},
'cassandra.connection': {
'level': 'INFO'
},
'invoke': {
'level': 'CRITICAL'
},
'anyconfig': {
'level': 'ERROR'
},
'urllib3.connectionpool': {
'level': 'INFO'
},
'selenium.webdriver.remote.remote_connection': {
'level': 'INFO'
},
'argus': {
'handlers': ['argus'],
'level': 'DEBUG',
'propagate': False
},
'sdcm.argus_test_run': {
'handlers': ['argus'],
'level': 'DEBUG',
'propagate': False
},
}
if config is None:
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': formatters,
'filters': filters,
'handlers': handlers,
'loggers': loggers
}
logging.config.dictConfig(replace_vars(config, variables)) | [
111,
663
] |
def METHOD_NAME(ee_enabled, tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
"""Confirm a invalid ansible.cfg raises errors using configurator.
:param ee_enabled: Indicate if EE support is enabled
:param tmp_path: The path to a test temporary directory
:param monkeypatch: The monkeypatch fixture
"""
cfg_path = tmp_path / "ansible.cfg"
with cfg_path.open(mode="w") as fh:
fh.write(ANSIBLE_CFG_INVALID)
monkeypatch.chdir(tmp_path)
application_configuration = deepcopy(NavigatorConfiguration)
application_configuration.internals.initializing = True
configurator = Configurator(
params=["--ee", str(ee_enabled)],
application_configuration=application_configuration,
)
_messages, exit_messages = configurator.configure()
assert application_configuration.internals.ansible_configuration.contents is Constants.NONE
assert application_configuration.internals.ansible_configuration.path is Constants.NONE
assert application_configuration.internals.ansible_configuration.text is Constants.NONE
assert "12345" in exit_messages[3].message | [
9,
532,
4481
] |
def METHOD_NAME(self):
"""Returns index of the coordinate set."""
acsi = self._acsi
if acsi >= self._ag._n_csets:
raise ValueError('{0} has fewer coordsets than assumed by {1}'
.format(str(self._ag), str(self)))
return acsi | [
19,
7697,
724
] |
def METHOD_NAME(self, column_data: Iterable[str]) -> torch.Tensor:
"""
Encodes pre-processed data into OHE. Unknown/unrecognized classes vector of all 0s.
:param column_data: Pre-processed data to encode
:returns: Encoded data of form :math:`N_{rows} x N_{categories}`
""" # noqa
if not self.is_prepared:
raise Exception(
'You need to call "prepare" before calling "encode" or "decode".'
)
ret = torch.zeros(size=(len(column_data), self.output_size))
for idx, word in enumerate(column_data):
index = self.map.get(word, None)
if index is not None:
ret[idx, index] = 1
if self.use_unknown and index is None:
ret[idx, 0] = 1
return torch.Tensor(ret) | [
421
] |
def METHOD_NAME(tag):
return '{http://www.gtk.org/introspection/core/1.0}%s' % tag | [
1542,
3619
] |
def METHOD_NAME(self):
self.cpp_info.libs = ["metis"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
self.cpp_info.defines.append("LINUX")
elif self.settings.os == "Windows":
self.cpp_info.defines.append("WIN32")
self.cpp_info.defines.append("MSC")
self.cpp_info.defines.append("_CRT_SECURE_NO_DEPRECATE")
elif self.settings.os == "Macos":
self.cpp_info.defines.append("MACOS")
elif self.settings.os == "SunOS":
self.cpp_info.defines.append("SUNOS")
if is_msvc(self):
self.cpp_info.defines.append("__thread=__declspec(thread)")
bits = 64 if self.options.with_64bit_types else 32
self.cpp_info.defines.append(f"IDXTYPEWIDTH={bits}")
self.cpp_info.defines.append(f"REALTYPEWIDTH={bits}")
# Defines for GKLib headers
if self.settings.os == "Windows" or self.options.enable_gkregex:
self.cpp_info.defines.append("USE_GKREGEX")
if self.options.enable_gkrand:
self.cpp_info.defines.append("USE_GKRAND")
if self.options.with_pcre:
self.cpp_info.defines.append("__WITHPCRE__")
if self.options.with_openmp:
self.cpp_info.defines.append("__OPENMP__") | [
360,
100
] |
def METHOD_NAME(foldername):
# force and velocity values go from 0 to num_windmills - 1
num_windmills = 2
values_name = 'values_'
vels_name = 'targetvelocity_'
rew_name = 'rewards_'
# contains link to all the sample folders in the test results
num_trials = 64
sample = 'sample000000'
folders = [path_to_test + foldername + "/" + sample + "{:02d}".format(trial) + '/' for trial in range(num_trials)]
# shape num_trials x num_windmills
# file names
value_files = [ [folder_ + values_name + str(num) + '.dat' for num in range(num_windmills)] for folder_ in folders]
vel_files = [ [folder_ + vels_name + str(num) + '.dat' for num in range(num_windmills)] for folder_ in folders]
rew_files = [ [folder_ + rew_name + str(num) + '.dat' for num in range(num_windmills)] for folder_ in folders]
dat = np.genfromtxt(value_files[0][0], delimiter=' ')
num_steps = dat.shape[0]
print(num_steps)
num_el = dat.shape[1]
# arrays
values = np.zeros((num_trials, num_windmills, num_steps, num_el))
vels = np.zeros((num_trials, num_steps, 2))
rewards = np.zeros((num_trials, num_windmills, num_steps, 3))
# factor to non dimensionalize the torque
u = 0.15
a = 0.0405
for trial in range(num_trials):
for mill in range(num_windmills):
data = np.genfromtxt(value_files[trial][mill], delimiter=' ')
# nondimensionalize the time
data[:, 0] *= u / a
# nondimensionalize the torque
data[:, 1] /= (u**2 * a**2)
# nondimensionalize the angular velocity
data[:, 3] *= a / u
print(data.shape)
values[trial, mill, :, : ] = data
reward = np.genfromtxt(rew_files[trial][mill], delimiter=' ')
# nondimensionalize the time
reward[:, 0] *= u / a
rewards[trial, mill, :, :] = reward
velo = np.genfromtxt(vel_files[trial][0], delimiter=' ')
# nondimensionalize the time
velo[:, 0] *= u / a
vels[trial, :, :] = velo
# compute the interesting values
# time, action, state (std and mean) for both windmills
# fan 1
tau_mean_0 = np.mean(values[:,0,:, 1], axis=0)
tau_std_0 = np.std(values[:,0,:, 1], axis=0)
ang_mean_0 = np.mean(values[:,0, :, 2], axis=0)
ang_std_0 = np.std(values[:,0, :, 2], axis=0)
ang_vel_mean_0 = np.mean(values[:,0, :, 3], axis=0)
ang_vel_std_0 = np.std(values[:,0, :, 3], axis=0)
# fan 2
tau_mean_1 = np.mean(values[:,1,:, 1], axis=0)
tau_std_1 = np.std(values[:,1,:, 1], axis=0)
ang_mean_1 = np.mean(values[:,1, :, 2], axis=0)
ang_std_1 = np.std(values[:,1, :, 2], axis=0)
ang_vel_mean_1 = np.mean(values[:,1, :, 3], axis=0)
ang_vel_std_1 = np.std(values[:,1, :, 3], axis=0)
# first element is time
out = np.stack( (values[0, 0, :, 0], tau_mean_0, tau_std_0, tau_mean_1, tau_std_1,
ang_mean_0, ang_std_0, ang_mean_1, ang_std_1,
ang_vel_mean_0, ang_vel_std_0, ang_vel_mean_1, ang_vel_std_1), axis=1)
np.save(output + foldername + "_values.npy", out)
# velocity at target point vs time (mean and std)
vels_mean = np.mean(vels, axis=0)
vels_std = np.std(vels, axis=0)
out2 = np.stack( (vels_mean[:, 0], vels_mean[:, 1], vels_std[:, 1]), axis=1)
np.save(output + foldername + "_vels.npy", out2)
# rewards vs time for the two fans
en_mean_0 = np.mean(rewards[:,0,:, 1], axis=0)
en_std_0 = np.std(rewards[:,0,:, 1], axis=0)
flow_mean_0 = np.mean(rewards[:,0, :, 2], axis=0)
flow_std_0 = np.std(rewards[:,0, :, 2], axis=0)
en_mean_1 = np.mean(rewards[:,1,:, 1], axis=0)
en_std_1 = np.std(rewards[:,1,:, 1], axis=0)
flow_mean_1 = np.mean(rewards[:,1, :, 2], axis=0)
flow_std_1 = np.std(rewards[:,1, :, 2], axis=0)
out3 = np.stack( (rewards[0, 0, :, 0], en_mean_0, en_std_0, en_mean_1, en_std_1,
flow_mean_0, flow_std_0, flow_mean_1, flow_std_1), axis=1)
np.save(output + foldername + "_rews.npy", out3) | [
370,
51
] |
def METHOD_NAME():
attempts = ATTEMPTS
interval = 60
while attempts:
attempts -= 1
(status, elapsed_dev) = init_ipmi_dev_intf()
if status is not True:
sleep(interval - elapsed_dev)
continue
(status, elapsed_oem) = init_ipmi_oem_cmd()
if status is not True:
sleep(interval - elapsed_dev - elapsed_oem)
continue
print('IPMI dev interface is ready.')
return True
print('Failed to initialize IPMI dev interface')
return False | [
176,
14497
] |
def METHOD_NAME(field, size): | [
12126,
101,
19,
365,
4347
] |
def METHOD_NAME(self, X, y=None):
"""Fits imputer to data. 'None' values are converted to np.nan before imputation and are treated as the same.
Args:
X (pd.DataFrame or np.ndarray): the input training data of shape [n_samples, n_features]
y (pd.Series, optional): the target training data of length [n_samples]
Returns:
self
Raises:
ValueError: if the KNNImputer receives a dataframe with both Boolean and Categorical data.
"""
X = infer_feature_types(X)
nan_ratio = X.isna().sum() / X.shape[0]
# Keep track of the different types of data in X
self._all_null_cols = nan_ratio[nan_ratio == 1].index.tolist()
self._natural_language_cols = list(
X.ww.select("NaturalLanguage", return_schema=True).columns.keys(),
)
# Only impute data that is not natural language columns or fully null
self._cols_to_impute = [
col
for col in X.columns
if col not in self._natural_language_cols and col not in self._all_null_cols
]
# If the Dataframe only had natural language columns, do nothing.
if not self._cols_to_impute:
return self
self._component_obj.METHOD_NAME(X[self._cols_to_impute], y)
return self | [
90
] |
def METHOD_NAME(translation_a):
# Create new instance
translation = Translation.objects.get(pk=translation_a.pk)
translation.pk = None
resource = translation.entity.resource
resource.path = "test.po"
resource.format = Resource.Format.PO
resource.save()
translation.string = ""
translation.save()
yield translation | [
2518,
-1,
168
] |
def METHOD_NAME(
alert_public_api_setup,
make_user_for_organization,
make_public_api_token,
make_alert_group,
make_alert,
):
organization, alert_receive_channel, default_channel_filter = alert_public_api_setup
alert_group = make_alert_group(alert_receive_channel)
make_alert(alert_group, alert_raw_request_data)
admin = make_user_for_organization(organization)
_, token = make_public_api_token(admin, organization)
client = APIClient()
url = reverse("api-public:alerts-list")
response = client.get(url + "?search=impossible payload", format="json", HTTP_AUTHORIZATION=f"{token}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["count"] == 0 | [
9,
5468,
1070,
41,
654,
51
] |
def METHOD_NAME():
"""
Test if it checks the md5sum between the given timezone, and
the one set in /etc/localtime. Returns True if they match,
and False if not. Mostly useful for running state checks.
"""
mock_read = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "India Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read}):
assert win_timezone.zone_compare("Asia/Calcutta") | [
9,
2456,
979
] |
def METHOD_NAME(self):
import platform
if platform.system() == 'Darwin':
from MAVProxy.modules.lib.MacOS import backend_wxagg
FigCanvas = backend_wxagg.FigureCanvasWxAgg
else:
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.close_button = wx.Button(self.panel, -1, "Close")
self.Bind(wx.EVT_BUTTON, self.on_close_button, self.close_button)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.close_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(1)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self) | [
129,
57,
519
] |
def METHOD_NAME(f, internal_name: str, func, display_name: str, default_option: str, outer=False):
color_options = func()
if outer:
color_options.append("Match Inner")
format_color = lambda color: color.replace(' ', '_').lower()
color_to_id = {format_color(color): index for index, color in enumerate(color_options)}
docstring = 'Choose a color. "random_choice" selects a random option. "completely_random" generates a random hex code.'
if outer:
docstring += ' "match_inner" copies the inner color for this option.'
f.write(f"class {internal_name}(Choice):\n")
f.write(f" \"\"\"{docstring}\"\"\"\n")
f.write(f" display_name = \"{display_name}\"\n")
for color, id in color_to_id.items():
f.write(f" option_{color} = {id}\n")
f.write(f" default = {color_options.index(default_option)}")
f.write(f"\n\n\n") | [
1893,
36,
1335
] |
def METHOD_NAME(request, manager_nospawn):
"""
Fixture provides a manager instance with StatusNotifier in the bar.
Widget can be customised via parameterize.
"""
class SNIConfig(libqtile.confreader.Config):
"""Config for the test."""
auto_fullscreen = True
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
]
layouts = [libqtile.layout.Max()]
floating_layout = libqtile.resources.default_config.floating_layout
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[libqtile.widget.StatusNotifier(**getattr(request, "param", dict()))],
50,
),
)
]
yield SNIConfig | [
11978,
200
] |
def METHOD_NAME(self):
with patch("cm.models.os.kill") as kill_mock:
response: Response = self.client.post(
path=reverse(viewname="v2:joblog-terminate", kwargs={"pk": self.job_2.pk}), data={}
)
kill_mock.assert_called()
self.assertEqual(response.status_code, HTTP_200_OK) | [
9,
202,
1602,
1434
] |
def METHOD_NAME(self, path):
"""Extract the generated event tracking context for a given request for the given path."""
request = self.request_factory.get(path)
return self.get_context_for_request(request) | [
19,
198,
43,
157
] |
def METHOD_NAME(self):
res = users.check_passwd("cancer", self.test_sha)
self.assertEqual(res, True) | [
9,
250,
8052
] |
def METHOD_NAME(A, x, b, optimize):
"""``Ax = b``."""
op = Operator(Inc(b, A*x), opt=optimize)
op.apply()
info('Executed `Ax = b`') | [
2323,
3203
] |
def METHOD_NAME():
with dask.config.set({"distributed.diagnostics.nvml": False}):
nvml.init_once()
assert not nvml.is_initialized()
assert nvml.NVML_STATE == nvml.NVMLState.DISABLED_CONFIG
# Idempotent (once we've decided not to turn things on with
# configuration, it's set in stone)
nvml.init_once()
assert not nvml.is_initialized()
assert nvml.NVML_STATE == nvml.NVMLState.DISABLED_CONFIG | [
9,
1317,
193,
6298
] |
def METHOD_NAME(self):
self.rdlc.uninstall() | [
2112,
-1
] |
def METHOD_NAME():
failed_count, tests = doctest.testmod(
lscpu,
globs={'output': lscpu.LsCPU(context_wrap(LSCPU_1))}
)
assert failed_count == 0 | [
9,
1200
] |
def METHOD_NAME(self): | [
9,
15208,
1085
] |
def METHOD_NAME(self, tensor_mode=True):
"""
Patching several inference functions inside ROIHeads and its subclasses
Args:
tensor_mode (bool): whether the inputs/outputs are caffe2's tensor
format or not. Default to True.
"""
# NOTE: this requires the `keypoint_rcnn_inference` and `mask_rcnn_inference`
# are called inside the same file as BaseXxxHead due to using mock.patch.
kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__
mask_head_mod = mask_head.BaseMaskRCNNHead.__module__
mock_ctx_managers = [
mock_fastrcnn_outputs_inference(
tensor_mode=tensor_mode,
check=True,
box_predictor_type=type(self.heads.box_predictor),
)
]
if getattr(self.heads, "keypoint_on", False):
mock_ctx_managers += [
mock_keypoint_rcnn_inference(
tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint
)
]
if getattr(self.heads, "mask_on", False):
mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)]
with contextlib.ExitStack() as stack: # python 3.3+
for mgr in mock_ctx_managers:
stack.enter_context(mgr)
yield | [
248,
65,
9694
] |
def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsNodalToGlobal
"""
return super().METHOD_NAME | [
1461
] |
def METHOD_NAME(self, src_event, role):
if "Name" in src_event:
self.Name = src_event["Name"]
if role == "subject" and "SubjectUserName" in src_event:
self.Name = src_event["SubjectUserName"]
self.NTDomain = (
src_event["SubjectUserDomain"]
if "SubjectUserDomain" in src_event
else None
)
self.Sid = (
src_event["SubjectUserSid"] if "SubjectUserSid" in src_event else None
)
self.LogonId = (
src_event["SubjectLogonId"] if "SubjectLogonId" in src_event else None
)
if role == "target" and "TargetUserName" in src_event:
self.Name = src_event["TargetUserName"]
self.NTDomain = (
src_event["TargetUserDomain"]
if "TargetUserDomain" in src_event
else None
)
self.Sid = (
src_event["TargetUserSid"] if "TargetUserSid" in src_event else None
)
self.LogonId = (
src_event["TargetLogonId"] if "TargetLogonId" in src_event else None
)
self.AadTenantId = (
src_event["AadTenantId"] if "AadTenantId" in src_event else None
)
self.Sid = src_event["Sid"] if "Sid" in src_event else None
self.NTDomain = src_event["NtDomain"] if "NtDomain" in src_event else None
self.AadUserId = src_event["AadUserId"] if "AadUserId" in src_event else None
self.PUID = src_event["PUID"] if "PUID" in src_event else None
if "DisplayName" in src_event:
self.DisplayName = src_event["DisplayName"]
elif "AccountName" in src_event:
self.DisplayName = src_event["AccountName"]
else:
self.DisplayName = None
if "UPNSuffix" in src_event:
self.UPNSuffix = src_event["UPNSuffix"]
elif "UpnSuffix" in src_event:
self.UPNSuffix = src_event["UpnSuffix"]
else:
self.UPNSuffix = None | [
129,
280,
417
] |
def METHOD_NAME(T : RigidTransform) -> RigidTransform:
"""Returns the inverse of the transformation."""
(R,t) = T
Rinv = so3.METHOD_NAME(R)
tinv = [-Rinv[0]*t[0]-Rinv[3]*t[1]-Rinv[6]*t[2],
-Rinv[1]*t[0]-Rinv[4]*t[1]-Rinv[7]*t[2],
-Rinv[2]*t[0]-Rinv[5]*t[1]-Rinv[8]*t[2]]
return (Rinv,tinv) | [
5862
] |
def METHOD_NAME(self, response):
self.assertRedirect(response, "govdelivery:user_error") | [
638,
1736,
21,
168
] |
def METHOD_NAME(md):
"""
Convert given markdown to html.
:param md: string
:return: string - converted html
"""
return markdown(md) | [
108,
24,
382
] |
def METHOD_NAME():
"""py.test for gettables"""
thedata = (
(
[
("Site and Source Energy", [["a", "2"], ["3", "4"]]),
("Site to Source Energy Conversion Factors", [["b", "6"], ["7", "8"]]),
("Custom Monthly Report", [["c", "16"], ["17", "18"]]),
("Custom Monthly Report", [["d", "26"], ["27", "28"]]),
],
False,
), # titlerows, tofloat
(
[
("Site and Source Energy", [["a", 2], [3, 4]]),
("Site to Source Energy Conversion Factors", [["b", 6], [7, 8]]),
("Custom Monthly Report", [["c", 16], [17, 18]]),
("Custom Monthly Report", [["d", 26], [27, 28]]),
],
True,
), # titlerows, tofloat
)
for titlerows, tofloat in thedata:
result = readhtml.titletable(SAMPLE_HTML, tofloat=tofloat)
for (title1, rows1), (title2, rows2) in zip(result, titlerows):
assert title1 == title2
assert rows1 == rows2
assert result == titlerows | [
9,
-1
] |
def METHOD_NAME(outs):
"""Schedule for multibox_transform_loc
Parameters
----------
outs: Array of Tensor
The computation graph description of
multibox_transform_loc in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False) | [
507,
6266,
1053,
209
] |
def METHOD_NAME(df):
df = df[df.x>4]
assert df.x.tolist() == list(range(5,10))
df.add_variable("production", True)
df = df.filter("production", mode="or")
df = df[df.x>=0] # restore old filter (df_filtered)
df = df[df.x<10] # restore old filter (df_filtered)
assert df.x.tolist() == list(range(10)) | [
9,
527,
201,
1997,
1210
] |
def METHOD_NAME(context: Context) -> None: # noqa: C901
"""Create shortcuts to send requests to the server API."""
timeout = 20 # Make timeout long enough to not time out when generating the PDF.
def cookies() -> dict[str, str]:
"""Return the cookies."""
return {"session_id": context.session_id} if context.session_id else {}
def api_url(api: str) -> str:
"""Return the API URL."""
return f"{context.base_api_url}/{api}"
def get(api: str, headers: dict[str, str] | None = None) -> requests.Response | dict | list:
"""Get the resource."""
url = api_url(api)
for attribute in ("report_date", "min_report_date"):
if value := getattr(context, attribute):
sep = "&" if "?" in url else "?"
url += f"{sep}{attribute}={value}"
context.response = response = requests.get(url, headers=headers, cookies=cookies(), timeout=timeout)
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
def post(api: str, json: dict | list | None = None) -> requests.Response | dict | list:
"""Post the resource."""
url = api_url(api)
response = requests.post(url, json=json, cookies=cookies(), timeout=timeout)
context.post_response = context.response = response
if not response.ok:
return response
if "session_id" in response.cookies:
context.session_id = response.cookies["session_id"]
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
def put(api: str, json: dict | list | None = None) -> requests.Response | dict | list:
"""Post the resource."""
url = api_url(api)
response = requests.put(url, json=json, cookies=cookies(), timeout=timeout)
context.put_response = context.response = response
# Ignore non-ok responses for now since we don't have testcases where they apply
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
def delete(api: str) -> requests.Response | dict | list:
"""Delete the resource."""
context.response = response = requests.delete(api_url(api), cookies=cookies(), timeout=timeout)
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
context.base_api_url = "http://localhost:5001/api/v3"
context.database = pymongo.MongoClient("mongodb://root:root@localhost:27017")["quality_time_db"]
context.session_id = None
context.report_date = None
context.min_report_date = None
context.response = None # Most recent response
context.post_response = None # Most recent post response
# Create a typed local variable to prevent mypy error: Type cannot be declared in assignment to non-self attribute
uuid: dict[str, str] = {}
context.uuid = uuid # Keep track of the most recent uuid per item type
context.get = get
context.post = post
context.put = put
context.delete = delete
context.public_key = """-----BEGIN PUBLIC KEY----- | [
1553,
75
] |
def METHOD_NAME(filename, input_format="channels_last", standalone=False):
"""Demo function."""
from sleap.io.video import HDF5Video
video = HDF5Video(filename, "/box", input_format=input_format)
conf_data = HDF5Video(
filename, "/confmaps", input_format=input_format, convert_range=False
)
confmaps_ = [np.clip(conf_data.get_frame(i), 0, 1) for i in range(conf_data.frames)]
confmaps = np.stack(confmaps_)
return demo_confmaps(confmaps=confmaps, video=video, standalone=standalone) | [
697,
7912,
280,
7430
] |
def METHOD_NAME(handle):
""" Get max number of lanes in port according to chip type
Args:
handle (sx_api_handle_t): SDK handle
Returns:
int: max lanes in port
"""
# Get chip type
chip_type = sx_get_chip_type(handle)
limits = rm_resources_t()
modes = rm_modes_t()
rc = rm_chip_limits_get(chip_type, limits)
sx_check_rc(rc)
max_width = limits.port_map_width_max
# SPC2 ports have 8 lanes but SONiC is using 4
if chip_type == SX_CHIP_TYPE_SPECTRUM2:
max_width = 4
return max_width | [
19,
237,
232,
2327
] |
def METHOD_NAME(self, *args, **kwargs):
"""Plot the results from the experiment.data pandas dataframe. Store the
plots in a plots list attribute."""
if self.wait_for_data():
kwargs['title'] = self.title
ax = self.data.METHOD_NAME(*args, **kwargs)
self.plots.append({'type': 'plot', 'args': args, 'kwargs': kwargs, 'ax': ax})
if ax.get_figure() not in self.figs:
self.figs.append(ax.get_figure())
self._user_interrupt = False | [
1288
] |
def METHOD_NAME(self):
# this is necessary for compatibility with IndicatorSqlAdapter
# todo: will probably need to make this configurable at some point
return SQLSettings() | [
1621,
817
] |
def METHOD_NAME(self, args, dictionary, embed_tokens):
return ModelParallelTransformerEncoder(args, dictionary, embed_tokens) | [
56,
305
] |
def METHOD_NAME(self):
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, bytes):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1) | [
2982,
2300
] |
def METHOD_NAME(self):
'''
Test that we can correctly handle rules split over multiple lines.
'''
filename = self.mkstemp()
# Create a split rule.
with open(filename, 'wt') as f:
f.write('target: a b \\\n'
'c \\\n'
'd\n')
# Confirm we get the correct dependencies.
with open(filename) as f:
deps = set(parse_makefile_rule(f))
self.assertSetEqual(deps, set(['a', 'b', 'c', 'd'])) | [
9,
265,
7728,
446
] |
def METHOD_NAME(self):
raise NotImplementedError | [
73
] |
def METHOD_NAME():
return HistoricalData.data["state"].unique() | [
19,
4085
] |
def METHOD_NAME(r, data_ptr_fn=None):
if isinstance(r, torch.Tensor):
if data_ptr_fn is not None:
r.data_ptr = data_ptr_fn
elif not r.data_ptr():
data_ptr = uuid.uuid1()
r.data_ptr = lambda: data_ptr | [
372,
948
] |
def METHOD_NAME(self, chunk_size=_DEFAULT_CHUNK_SIZE, keepends=False):
"""Return an iterator to yield lines from the raw stream.
This is achieved by reading chunk of bytes (of size chunk_size) at a
time from the raw stream, and then yielding lines from there.
"""
pending = b''
for chunk in self.iter_chunks(chunk_size):
lines = (pending + chunk).splitlines(True)
for line in lines[:-1]:
yield line.splitlines(keepends)[0]
pending = lines[-1]
if pending:
yield pending.splitlines(keepends)[0] | [
84,
513
] |
async def METHOD_NAME(profile: Profile, kid: str) -> str:
"""Resolve public key material from a kid."""
resolver = profile.inject(DIDResolver)
vmethod: Resource = await resolver.dereference(
profile,
kid,
)
if not isinstance(vmethod, VerificationMethod):
raise InvalidVerificationMethod(
"Dereferenced resource is not a verificaiton method"
)
if not isinstance(vmethod, SUPPORTED_VERIFICATION_METHOD_TYPES):
raise InvalidVerificationMethod(
f"Dereferenced method {type(vmethod).__name__} is not supported"
)
return vmethod.material | [
1014,
1609,
59,
604,
14507,
43,
1162
] |
def METHOD_NAME():
return cert, key | [
1076
] |
def METHOD_NAME(body):
response = requests.Response()
response._content = json.dumps(body).encode("utf-8")
return response | [
129,
17
] |
def METHOD_NAME(
self,
*,
operations: Set[PrimitiveOperation],
key_types: Set[KeyType],
client_parameters: ClientParameters,
) -> Dict[str, Dict[Parameter, int]]:
"""
Count the amount of specified operations in the program and group by tags and parameters.
Args:
operations (Set[PrimitiveOperation]):
set of operations used to filter the statistics
key_types (Set[KeyType]):
set of key types used to filter the statistics
client_parameters (ClientParameters):
client parameters required for grouping by parameters
Returns:
Dict[str, Dict[Parameter, int]]:
number of specified operations per tag per parameter in the program
"""
result: Dict[str, Dict[int, int]] = {}
for statistic in self.statistics:
if statistic.operation not in operations:
continue
tag, _, _ = re.match(REGEX_LOCATION, statistic.location).groups()
# remove the @
tag = tag[1:] if tag else ""
tag_components = tag.split(".")
for i in range(1, len(tag_components) + 1):
current_tag = ".".join(tag_components[0:i])
if current_tag == "":
continue
if current_tag not in result:
result[current_tag] = {}
for key_type, key_index in statistic.keys:
if key_type not in key_types:
continue
parameter = Parameter(client_parameters, key_type, key_index)
if parameter not in result[current_tag]:
result[current_tag][parameter] = 0
result[current_tag][parameter] += statistic.count
return result | [
29,
2735,
82,
2735,
511
] |
def METHOD_NAME(self):
return self._context | [
198
] |
def METHOD_NAME(*_):
return _mock_dev_tool_portal(enable_live_view=True) | [
248,
1111,
19,
828,
3081,
2211
] |
def METHOD_NAME(inp):
filt = SanitizerConfig().get_filter('name', 'PASS_ALL')
assert filt(inp) | [
9,
129,
156,
527,
654,
49,
235
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters | [
539,
386
] |
def METHOD_NAME():
""" Reads current git branch from stdout.
:return: name of current git branch
:rtype: str
"""
command = [ 'git', 'rev-parse', '--abbrev-ref', 'HEAD']
(stdoutdata, _stderrdata, ret) = run_command(command)
if ret == 0:
return stdoutdata.strip()
else:
raise RuntimeError("failed to get current branch name") | [
1056,
3653,
156
] |
def METHOD_NAME(cls, **cfg):
super().METHOD_NAME(**cfg) | [
22
] |
def METHOD_NAME(self, **kwargs):
"""
Returns active memberships
"""
kwargs['status'] = kwargs.get('status', True)
kwargs['status_detail'] = kwargs.get('status_detail', 'active')
return self.filter(
Q(expire_dt__gt=datetime.now()) | Q(expire_dt__isnull=True), **kwargs) | [
923
] |
def METHOD_NAME(evaluation, user):
return evaluation.METHOD_NAME(user) | [
137,
21,
2977,
894,
4666
] |
def METHOD_NAME():
grid = RasterModelGrid((4, 5))
nodes_to_close = ((6, 12),)
grid.status_at_node[nodes_to_close] = grid.BC_NODE_IS_CLOSED
np.all(
_active_links_at_node(grid)
== np.array(
[
[
-1,
-1,
-1,
-1,
-1,
-1,
-1,
6,
7,
-1,
-1,
-1,
-1,
16,
-1,
-1,
23,
-1,
25,
-1,
],
[
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
11,
12,
-1,
18,
-1,
-1,
21,
-1,
-1,
-1,
-1,
-1,
],
[
-1,
-1,
6,
7,
-1,
-1,
-1,
-1,
16,
-1,
-1,
23,
-1,
25,
-1,
-1,
-1,
-1,
-1,
-1,
],
[
-1,
-1,
-1,
-1,
-1,
-1,
-1,
11,
12,
-1,
18,
-1,
-1,
21,
-1,
-1,
-1,
-1,
-1,
-1,
],
]
)
) | [
9,
3439,
-1
] |
def METHOD_NAME(self):
for name, country in data:
Person.objects.create(first_name=name, last_name=country)
queryset = Person.objects.all()
class MyTable(tables.Table):
class Meta:
model = Person
fields = ("first_name", "last_name")
self.assertEqual(
list(MyTable(queryset).columns.columns.keys()), ["first_name", "last_name"]
)
table = MyTable(
queryset,
extra_columns=[("first_name", tables.Column(attrs={"td": {"style": "color: red;"}}))],
)
# we still should have two columns
self.assertEqual(list(table.columns.columns.keys()), ["first_name", "last_name"])
# the attrs should be applied to the `first_name` column
self.assertEqual(
table.columns["first_name"].attrs["td"], {"style": "color: red;", "class": None}
) | [
9,
7509,
345,
803,
4207,
1951
] |
def METHOD_NAME(rank):
for _ in range(10):
num_indices = random.randint(20, 50)
indices = torch.LongTensor(num_indices).random_(0, NUM_EMBEDDINGS)
# Generate offsets.
offsets = []
start = 0
batch_size = 0
while start < num_indices:
offsets.append(start)
start += random.randint(1, 10)
batch_size += 1
offsets_tensor = torch.LongTensor(offsets)
target = torch.LongTensor(batch_size).random_(8).cuda(rank)
yield indices, offsets_tensor, target | [
19,
243,
2277
] |
def METHOD_NAME(response):
if getattr(response.connection, 'proxy_manager', False):
proxy_info = {}
request_url = response.request.url
if request_url.startswith('https://'):
proxy_info['method'] = 'CONNECT'
proxy_info['request_path'] = request_url
return proxy_info
return None | [
19,
127,
1691
] |
def METHOD_NAME(query_dict: dict) -> Iterable[str]:
return ARAX_query.ARAXQuery().query_return_stream(query_dict, mode='RTXKG2') | [
22,
539,
61,
1413,
763,
1443,
919
] |
def METHOD_NAME():
data = np.arange(9 * 5 * 3).reshape((5, 9, 3))
array = ak.from_numpy(data)
result = ak.from_buffers(*ak.to_buffers(array), highlevel=False)
assert np.shares_memory(result.data, data)
assert ak.almost_equal(array, result) | [
9,
3834,
3835
] |
def METHOD_NAME():
pass | [
176
] |
def METHOD_NAME(assertion):
start = datetime.now()
while (datetime.now() - start).seconds < 120:
sleep(0.1) # Limit CPU usage
if assertion():
return
assert assertion() | [
638,
2019,
1887,
-1,
3146,
232
] |
def METHOD_NAME(feature, container_id, image_name, image_tag):
client = docker.from_env()
try:
container = client.containers.get(container_id)
# Tag this image for given name & tag
container.image.tag(image_name, image_tag, force=True)
syslog.syslog(syslog.LOG_INFO,
"Tagged image for {} with container-id={} to {}:{}".
format(feature, container_id, image_name, image_tag))
ret = _remove_container(client, feature)
return ret
except Exception as err:
syslog.syslog(syslog.LOG_ERR, "Image tag: container:{} {}:{} failed with {}".
format(container_id, image_name, image_tag, str(err)))
return -1 | [
82,
224,
660
] |
def METHOD_NAME(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import METHOD_NAME
return METHOD_NAME(value) | [
3746
] |
def METHOD_NAME(args, suppress_output=False):
"""
Runs a shell command.
Returns True if the command was successful, and False otherwise.
"""
sup = subprocess.DEVNULL if suppress_output else None
try:
subprocess.check_call(args, stderr=sup, stdout=sup)
return True
except (subprocess.CalledProcessError, OSError) as err:
print(err)
return False | [
22,
2770,
462
] |
def METHOD_NAME(
hostname: str, port=1433, database="MOSAIQ"
):
"""Get username and password from keyring storage.
Parameters
----------
hostname : str
The MSSQL server hostname
port : int, optional
The MSSQL server port, by default 1433
database : str, optional
The MSSQL database name, by default "MOSAIQ"
Returns
-------
username, password
"""
username = get_username(hostname=hostname, port=port, database=database)
password = get_password(hostname=hostname, port=port, database=database)
return username, password | [
19,
2072,
61,
2897,
529,
2995,
1008
] |
def METHOD_NAME():
raise Exception("unknownFormat") | [
3451
] |
def METHOD_NAME(self, start: int | None = ..., end: int | None = ...) -> tuple[str, str]: ... | [
1092,
896,
1346,
447,
1798
] |
def METHOD_NAME(self):
self.mc.setSession()
self.mc.enableKeepAlive(60)
self.mc.assertResources() | [
9,
206,
3550,
8077
] |
def METHOD_NAME(self):
config = super().METHOD_NAME()
config.update({"data_format": self.data_format})
return config | [
19,
200
] |
def METHOD_NAME(self): | [
137,
616
] |