text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7) |
---|---|
def METHOD_NAME(self, channel, disable):
"""
Sets the tx_disable for specified SFP channels
Args:
channel : A hex of 4 bits (bit 0 to bit 3) which represent channel 0 to 3,
e.g. 0x5 for channel 0 and channel 2.
disable : A boolean, True to disable TX channels specified in channel,
False to enable
Returns:
A boolean, True if successful, False if not
"""
return False | [
2543,
193,
307
] |
def METHOD_NAME(diagnostic_settings_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGuestDiagnosticsSettingResult]:
"""
Gets guest diagnostics settings.
Azure REST API version: 2018-06-01-preview.
:param str diagnostic_settings_name: The name of the diagnostic setting.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... | [
19,
6483,
7643,
1333,
146
] |
def METHOD_NAME(self, direction):
"""
Tests:
>>> l = listen()
>>> r = remote('localhost', l.lport)
>>> r.connected()
True
>>> l.close()
>>> time.sleep(0.1) # Avoid race condition
>>> r.connected()
False
"""
# If there's no socket, it's definitely closed
if not self.sock:
return False
# If we have noticed a connection close in a given direction before,
# return fast.
if self.closed.get(direction, False):
return False
# If a connection is closed in all manners, return fast
if all(self.closed.values()):
return False
# Use poll() to determine the connection state
want = {
'recv': select.POLLIN,
'send': select.POLLOUT,
'any': select.POLLIN | select.POLLOUT,
}[direction]
poll = select.poll()
poll.register(self, want | select.POLLHUP | select.POLLERR)
for fd, event in poll.poll(0):
if event & select.POLLHUP:
self.close()
return False
if event & select.POLLIN:
return True
if event & select.POLLOUT:
return True
return True | [
2261,
772
] |
def METHOD_NAME(self, node: ast.FunctionDef) -> None:
if node.name == 'main_demo':
docstring = ast.get_docstring(node)
if docstring is None:
api = getattr(ui, self.topic) if hasattr(ui, self.topic) else getattr(app, self.topic)
docstring = api.__doc__ or api.__init__.__doc__
for name, method in api.__dict__.items():
if not name.startswith('_') and inspect.isfunction(method):
# add method name to docstring
docstring += name + ' '
docstring += method.__doc__ or ''
lines = cleanup(docstring).splitlines()
self.add_to_search_index(lines[0], lines[1:], main=True)
for decorator in node.decorator_list:
if isinstance(decorator, ast.Call):
function = decorator.func
if isinstance(function, ast.Name) and function.id == 'text_demo':
title = decorator.args[0].s
content = cleanup(decorator.args[1].s).splitlines()
self.add_to_search_index(title, content)
if isinstance(function, ast.Name) and function.id == 'element_demo':
attr_name = decorator.args[0].attr
obj_name = decorator.args[0].value.id
if obj_name == 'app':
docstring: str = getattr(app, attr_name).__doc__
docstring = ' '.join(l.strip() for l in docstring.splitlines()).strip()
self.current_content.append(cleanup(docstring))
else:
print(f'Unknown object: {obj_name} for element_demo', flush=True)
self.generic_visit(node) | [
716,
559,
2483
] |
def METHOD_NAME():
# type: () -> None
with named_temporary_file() as fp:
name = fp.name
fp.write(b"hi")
fp.flush()
assert os.path.exists(name)
with open(name) as new_fp:
assert new_fp.read() == "hi"
assert not os.path.exists(name) | [
9,
1640,
5944,
171
] |
def METHOD_NAME() -> int:
raise NotImplementedError() | [
391,
1221,
1079,
2374
] |
def METHOD_NAME(
pipeline_funcs,
function_name,
output_path,
type_check,
mode: Optional[dsl.PipelineExecutionMode] = None,
pipeline_conf: Optional[dsl.PipelineConf] = None):
if len(pipeline_funcs) == 0:
raise ValueError(
'A function with @dsl.pipeline decorator is required in the py file.'
)
if len(pipeline_funcs) > 1 and not function_name:
func_names = [x.__name__ for x in pipeline_funcs]
raise ValueError(
'There are multiple pipelines: %s. Please specify --function.' %
func_names)
if function_name:
pipeline_func = next(
(x for x in pipeline_funcs if x.__name__ == function_name), None)
if not pipeline_func:
raise ValueError('The function "%s" does not exist. '
'Did you forget @dsl.pipeline decoration?' %
function_name)
else:
pipeline_func = pipeline_funcs[0]
kfp.deprecated.compiler.Compiler(mode=mode).compile(pipeline_func,
output_path, type_check,
pipeline_conf) | [
296,
1148,
559
] |
def METHOD_NAME(self, scraper_config):
response = self.access_api(scraper_config, '/v3/maintenance/status')
leader = response.get('leader')
member = response.get('header', {}).get('member_id')
return leader and member and leader == member | [
137,
2919
] |
def METHOD_NAME(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=websecurityscannerCallTransformer(), | [
1112,
1537
] |
def METHOD_NAME(self):
"""Test the task check_for_updates."""
# Check that setting should be empty
self.assertEqual(InvenTreeSetting.get_setting('_INVENTREE_LATEST_VERSION'), '')
# Get new version
InvenTree.tasks.offload_task(InvenTree.tasks.check_for_updates)
# Check that setting is not empty
response = InvenTreeSetting.get_setting('_INVENTREE_LATEST_VERSION')
self.assertNotEqual(response, '')
self.assertTrue(bool(response)) | [
9,
758,
250,
43,
682
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(request):
"""
Return request's 'Authorization:' header, as a bytestring.
Hide some test client ickyness where the header can be unicode.
"""
auth = request.META.get('HTTP_AUTHORIZATION', b'')
if isinstance(auth, str):
# Work around django test client oddness
auth = auth.encode(HTTP_HEADER_ENCODING)
return auth | [
19,
1355,
572
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_file_systems_request(
url=self._config.url,
prefix=prefix,
continuation=continuation,
max_results=max_results,
request_id_parameter=request_id_parameter,
timeout=timeout,
resource=resource,
version=self._config.version,
template_url=self.list_file_systems.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(cmd, client,
resource_group_name,
name,
input_path):
properties_lenses, properties_metadata = parse_properties_json(input_path)
return client.update(resource_group_name=resource_group_name, dashboard_name=name, lenses=properties_lenses, metadata=properties_metadata) | [
2211,
3029,
86
] |
def METHOD_NAME():
files = []
symlinkFilenames = []
symlinkObjects = []
read_ls_tree_line("""100644 blob a786c385bd1812410d01177affb6ce834d85facd 459 .gitattributes""",
files, symlinkFilenames, symlinkObjects)
assert files == []
assert symlinkFilenames == []
assert symlinkObjects == [] | [
9,
19,
1598,
151,
534,
5696
] |
def METHOD_NAME(index, character):
if not raise_on_unexpected: # not raising, so we dump the buffer into output and append this character
output.extend(multibyte_buffer)
multibyte_buffer.clear()
output.append(character)
nonlocal is_in_multibyte
is_in_multibyte = False
nonlocal multibytes_expected
multibytes_expected = 0
else:
raise ValueError(f"Expected multibyte continuation at index: {index}") | [
276,
1068,
365
] |
def METHOD_NAME(self, tag):
if tag.startswith(self.github_info["prefix_prod"]):
return tag.replace(self.github_info["prefix_prod"], "")
elif tag.startswith(self.github_info["prefix_beta"]):
return tag.replace(self.github_info["prefix_beta"], "")
raise ValueError("Could not determine version number from tag {}".format(tag)) | [
19,
281,
280,
82
] |
def METHOD_NAME(self):
torch.cuda.synchronize()
for k in self.keys:
starts = self.start_events[k]
ends = self.end_events[k]
if len(starts) == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
if len(ends) != len(starts):
raise ValueError("Call stop before checking value!")
time = 0
for start, end in zip(starts, ends):
time += start.elapsed_time(end)
self.running_times[k] += time * 1e-3
self.n[k] += len(starts)
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list) | [
1157
] |
def METHOD_NAME(tmp_path):
return os.path.join(tmp_path, "artifacts-to-log") | [
1831,
1190
] |
def METHOD_NAME(self, level):
self.assert_highlighted_chr(
"sword = 12346 / 78564",
"TTTTT K NNNNN K NNNNN",
level=level, lang="en") | [
9,
4862,
137,
106,
926,
5367
] |
def METHOD_NAME(temp_dir):
random_str = "".join(random.choices(string.ascii_uppercase + string.digits, k=20))
dir_path = os.path.join(temp_dir.name, "cache_" + random_str)
os.mkdir(dir_path)
return {"exists": True, "path": dir_path} | [
129,
596,
1190
] |
def METHOD_NAME(self) -> str:
"""
Gets or sets secondaryKey of the created AuthorizationRule
"""
return pulumi.get(self, "secondary_key") | [
3288,
59
] |
def METHOD_NAME(client: Client):
inp1 = messages.TxInputType(
address_n=parse_path("m/0"),
amount=1_000_000,
prev_hash=b"\x42" * 32,
prev_index=0,
script_type=messages.InputScriptType.SPENDWITNESS,
sequence=4_294_967_293,
)
out1 = messages.TxOutputType(
address_n=parse_path("m/84h/0h/0h/1/0"),
amount=1_000_000,
script_type=messages.OutputScriptType.PAYTOWITNESS,
)
with client:
client.set_expected_responses(
[
request_input(0),
messages.Failure(code=messages.FailureType.DataError),
]
)
try:
btc.sign_tx(client, "Testnet", [inp1], [out1])
except TrezorFailure:
pass | [
9,
532,
157,
180,
16803
] |
def METHOD_NAME():
assert utils.find_last_reset(weekheader) == (31, 35, '\x1b[0m')
assert utils.find_last_reset(today_line) == (13, 17, '\x1b[0m')
assert utils.find_last_reset(calendarline) == (99, 103, '\x1b[0m')
assert utils.find_last_reset('Hello World') == (-2, -1, '') | [
9,
679,
656
] |
def METHOD_NAME():
return "bar" | [
681,
9981
] |
def METHOD_NAME(name):
if name == "":
return
shared.prompt_styles.styles.pop(name, None)
shared.prompt_styles.save_styles(shared.styles_filename)
return '', '', '' | [
34,
641
] |
def METHOD_NAME(idea: Idea, language_code: str) -> str:
if (
language_code != idea.language.language_code
and idea.translate_idea.filter(language__language_code=language_code).exists()
):
return (
idea.translate_idea.filter(language__language_code=language_code)
.first()
.name_translation
)
return idea.name | [
19,
6536,
156
] |
def METHOD_NAME(self) -> Optional['outputs.RestorePointCollectionSourcePropertiesResponse']:
"""
The properties of the source resource that this restore point collection is created from.
"""
return pulumi.get(self, "source") | [
1458
] |
def METHOD_NAME(self, configurationAttributes, requestParameters, step):
if (step == 1):
print "Basic (multi login). Prepare for Step 1"
return True
else:
return False | [
123,
43,
367
] |
def METHOD_NAME():
merged_reads = list(prepare_reads("AAACCCTTTGGGAAACCC",
"ATACCCTTTGGGAAACCC",
None, # unmerged pair
"AAACCCTTTGGGAAACCC"))
expected_consensus = "AAACCCTTTGGGAAACCC"
builder = ConsensusBuilder()
returned_reads = list(builder.build(merged_reads))
assert merged_reads == returned_reads
assert expected_consensus == builder.get_consensus() | [
9,
9604,
637
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(description="ostree image tests")
parser.add_argument(
"--store",
metavar="DIRECTORY",
type=os.path.abspath,
default=None,
help="directory where intermediary os trees are stored")
parser.add_argument(
"--output-directory",
metavar="DIRECTORY",
type=os.path.abspath,
default=None,
help="directory where result objects are stored")
args = parser.parse_args()
print(f"Running in {os.path.realpath(os.curdir)}")
tmpdir = "/var/osbuild/tmp"
os.makedirs(tmpdir, exist_ok=True)
with tempfile.TemporaryDirectory(dir=tmpdir) as tmp:
results = run_tests(args, tmp)
n = len(results)
failed = len(list(filter(lambda x: x.get("error"), results)))
ok = n - failed
print("tests/ok/failed", end=": ")
print(f"{n}/{GREEN}{ok}{RESET}/{RED}{failed}{RESET}")
if failed:
sys.exit(1) | [
57
] |
def METHOD_NAME(func):
if Specs.iris_messages_log in filters._CACHE:
del filters._CACHE[Specs.iris_messages_log]
if Specs.iris_messages_log in filters.FILTERS:
del filters.FILTERS[Specs.iris_messages_log]
if func is test_iris_working_messages_log:
filters.add_filter(Specs.iris_messages_log, ["Generic.Event"])
if func is test_iris_working_messages_log_no_match_filter:
filters.add_filter(Specs.iris_messages_log, ["test_no_match_filter"])
if func is test_iris_working_messages_log_no_filter:
filters.add_filter(Specs.iris_messages_log, []) | [
102,
559
] |
def METHOD_NAME():
"""
Returns a list of names of classes that should
not be treated like the normal fluids.
"""
ignList = []
ignList += getBaseClassNames()
ignList += getExampleNames()
return ignList | [
19,
684,
83
] |
def METHOD_NAME(self, audio_signal, length=None):
max_audio_length: int = audio_signal.size(-1)
if length is None:
length = audio_signal.new_full(
audio_signal.size(0), max_audio_length, dtype=torch.int32, device=self.seq_range.device
)
audio_signal = torch.transpose(audio_signal, 1, 2)
if isinstance(self.pre_encode, nn.Linear):
audio_signal = self.pre_encode(audio_signal)
else:
audio_signal, length = self.pre_encode(audio_signal, length)
for lth, layer in enumerate(self.layers):
audio_signal = layer(audio_signal)
if isinstance(audio_signal, tuple):
audio_signal, _ = audio_signal
audio_signal = torch.transpose(audio_signal, 1, 2)
return audio_signal, length | [
76
] |
def METHOD_NAME(cls):
return relationship(
"Group",
secondary="group_" + cls.backref_name(),
cascade="save-update, merge, refresh-expire, expunge",
passive_deletes=True,
doc="Groups that can see the SurveyEfficiencyAnalysis.",
) | [
861
] |
def METHOD_NAME():
#repo_id
assert issue_backlog(10, 25430).iloc[0]['issue_backlog'] > 0
#repo_group_id
assert issue_backlog(10).iloc[0]['issue_backlog'] > 0 | [
9,
946,
3041
] |
def METHOD_NAME(name_filter=None, summarizer=summarize_activation):
"""Summarize activations, using `summarize_activation` to summarize."""
return summarize_collection(ops.GraphKeys.ACTIVATIONS, name_filter,
summarizer) | [
823,
10123
] |
def METHOD_NAME(self):
return self._pfdr | [
5661
] |
def METHOD_NAME(self):
"""This method builds the content of the test script which will return a list
of shell commands that will be written to file.
A typical test will contain: shebang line, job directives, environment variables and variable declaration,
and content of ``run`` property. For ``shell: python`` we write a python script and
return immediately. The variables, environment section are not applicable
for python scripts
Returns:
List of shell commands that will be written to file
"""
# start of each test should have the shebang
lines = [self.shebang]
# if shell is python the generated testscript will be run via bash, we invoke
# python script in bash script.
if self.shell.name == "python":
lines = ["#!/bin/bash"]
sched_lines = self.get_job_directives()
if sched_lines:
lines += sched_lines
if self.burstbuffer:
burst_buffer_lines = self._get_burst_buffer(self.burstbuffer)
if burst_buffer_lines:
lines += burst_buffer_lines
if self.datawarp:
data_warp_lines = self._get_data_warp(self.datawarp)
if data_warp_lines:
lines += data_warp_lines
# for python scripts we generate python script and return lines
if self.shell.name == "python":
self.logger.debug(f"[{self.name}]: Detected python shell")
self.write_python_script()
py_script = "%s.py" % format(os.path.join(self.stage_dir, self.name))
python_wrapper = self.buildexecutor.executors[self.executor]._settings[
"shell"
]
python_wrapper_buildspec = shlex.split(self.recipe.get("shell"))[0]
# if 'shell' property in buildspec specifies 'shell: python' or 'shell: python3' then we use this instead
if python_wrapper_buildspec.endswith(
"python"
) or python_wrapper_buildspec.endswith("python3"):
python_wrapper = python_wrapper_buildspec
lines.append(f"{python_wrapper} {py_script}")
return lines
# section below is for shell-scripts (bash, sh, csh, zsh, tcsh, zsh)
if self.compiler:
compiler_variables = {
"BUILDTEST_CC": self.cc,
"BUILDTEST_CXX": self.cxx,
"BUILDTEST_FC": self.fc,
"BUILDTEST_CFLAGS": self.cflags,
"BUILDTEST_CXXFLAGS": self.cxxflags,
"BUILDTEST_FFLAGS": self.fflags,
"BUILDTEST_CPPFLAGS": self.cppflags,
"BUILDTEST_LDFLAGS": self.ldflags,
}
lines += self._get_variables(compiler_variables)
if self.compiler_settings["env"]:
lines += self._get_environment(self.compiler_settings["env"])
if self.compiler_settings["vars"]:
lines += self._get_variables(self.compiler_settings["vars"])
# Add environment variables
env_section = deep_get(
self.recipe, "executors", self.executor, "env"
) or self.recipe.get("env")
var_section = deep_get(
self.recipe, "executors", self.executor, "vars"
) or self.recipe.get("vars")
env_lines = self._get_environment(env_section)
if env_lines:
lines += env_lines
var_lines = self._get_variables(var_section)
if var_lines:
lines += var_lines
if self.compiler_settings["modules"]:
lines += self.compiler_settings["modules"]
lines.append("# Content of run section")
# Add run section
lines += [self.recipe["run"]]
return lines | [
567,
782
] |
def METHOD_NAME(self): | [
9,
756
] |
def METHOD_NAME():
import ctypes
import sys
try:
d = dpctl.SyclDevice()
except dpctl.SyclDeviceCreationError:
pytest.skip("Could not create default-constructed device")
mod = sys.modules[d.__class__.__module__]
# get capsule storign SyclContext_Make function ptr
make_d_fn_cap = mod.__pyx_capi__["SyclDevice_Make"]
# construct Python callable to invoke "SyclDevice_Make"
cap_ptr_fn = ctypes.pythonapi.PyCapsule_GetPointer
cap_ptr_fn.restype = ctypes.c_void_p
cap_ptr_fn.argtypes = [ctypes.py_object, ctypes.c_char_p]
make_d_fn_ptr = cap_ptr_fn(
make_d_fn_cap, b"struct PySyclDeviceObject *(DPCTLSyclDeviceRef)"
)
callable_maker = ctypes.PYFUNCTYPE(ctypes.py_object, ctypes.c_void_p)
make_d_fn = callable_maker(make_d_fn_ptr)
d2 = make_d_fn(d.addressof_ref())
assert d == d2 | [
9,
13563,
58,
17247,
398,
93
] |
def METHOD_NAME(self, text, state):
"Generic readline completion entry point."
buffer = readline.get_line_buffer()
line = buffer.split()
return (self.complete_filename(line) + [None])[state] | [
676
] |
def METHOD_NAME(self, info):
if isinstance(info, qt.QFileIconProvider.IconType):
# It's another C++ method signature:
# QIcon QFileIconProvider::icon(QFileIconProvider::IconType type)
return super(SafeFileIconProvider, self).METHOD_NAME(info)
style = qt.QApplication.instance().style()
path = info.filePath()
if path in ["", "/"]:
# That's the computer root on Windows or Linux
result = style.standardIcon(qt.QStyle.SP_ComputerIcon)
elif sys.platform == "win32" and path[-2] == ":":
# That's a drive on Windows
result = self.__windowsDriveIcon(info)
elif self.__filterDirAndFiles:
if info.isDir():
result = style.standardIcon(qt.QStyle.SP_DirIcon)
else:
result = style.standardIcon(qt.QStyle.SP_FileIcon)
else:
result = qt.QFileIconProvider.METHOD_NAME(self, info)
return result | [
875
] |
def METHOD_NAME(self, loss):
pass | [
90
] |
def METHOD_NAME(self) -> Optional[bool]:
"""
A flag indicating whether email notifications are supported for detections for this rule
"""
return pulumi.get(self, "supports_email_notifications") | [
1466,
487,
609
] |
def METHOD_NAME():
model = get_model(io.StringIO(par))
with pytest.raises(ValueError):
indices = model.add_DMX_ranges([54500, 55500], [55000, 56000], dmxs=[2, 3, 4]) | [
9,
107,
2810,
909,
2810
] |
def METHOD_NAME(self) -> List[str]:
return ["ActorAndLevelLoader"] | [
5253,
69
] |
def METHOD_NAME(user, interface, cluster=None):
if interface == 'jobs':
from jobbrowser.apis.job_api import JobApi
return JobApi(user)
elif interface == 'queries-impala':
from jobbrowser.apis.query_api import QueryApi
return QueryApi(user, cluster=cluster)
elif interface == 'queries-hive':
from jobbrowser.apis.hive_query_api import HiveQueryApi
return HiveQueryApi(user, cluster=cluster)
elif interface == 'workflows':
from jobbrowser.apis.workflow_api import WorkflowApi
return WorkflowApi(user)
elif interface == 'schedules':
from jobbrowser.apis.schedule_api import ScheduleApi
return ScheduleApi(user)
elif interface == 'bundles':
from jobbrowser.apis.bundle_api import BundleApi
return BundleApi(user)
elif interface == 'celery-beat':
from jobbrowser.apis.beat_api import BeatApi
return BeatApi(user)
elif interface == 'schedule-hive':
from jobbrowser.apis.schedule_hive import HiveScheduleApi
return HiveScheduleApi(user)
elif interface == 'history':
from jobbrowser.apis.history import HistoryApi
return HistoryApi(user)
elif interface == 'engines':
from jobbrowser.apis.clusters import ClusterApi
return ClusterApi(user)
elif interface == 'dataeng-clusters':
from jobbrowser.apis.data_eng_api import DataEngClusterApi
return DataEngClusterApi(user)
elif interface == 'dataware-clusters':
from jobbrowser.apis.data_warehouse import DataWarehouseClusterApi
return DataWarehouseClusterApi(user)
elif interface == 'dataware2-clusters':
from jobbrowser.apis.data_warehouse import DataWarehouseClusterApi
return DataWarehouseClusterApi(user, version=2)
elif interface == 'dataeng-jobs':
from jobbrowser.apis.data_eng_api import DataEngJobApi
return DataEngJobApi(user)
elif interface == 'livy-sessions':
from jobbrowser.apis.livy_api import LivySessionsApi
return LivySessionsApi(user)
elif interface == 'livy-job':
from jobbrowser.apis.livy_api import LivyJobApi
return LivyJobApi(user)
elif interface == 'slas':
return Api(user)
else:
raise PopupException(_('Interface %s is unknown') % interface) | [
19,
58
] |
def METHOD_NAME(self):
"""
Test info with incorrect policy.
"""
policy = {"timeout": 0.5}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.info_random_node("bins", policy)
assert err_info.value.code == -2
assert err_info.value.msg == "timeout is invalid" | [
9,
100,
236,
1716,
2302,
41,
3534
] |
def METHOD_NAME(group, resp_create_badge):
badge = group.badges.create(new_badge)
assert isinstance(badge, GroupBadge)
assert badge.image_url == image_url | [
9,
129,
846,
5303
] |
def METHOD_NAME(
shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
out_channels,
weight_format,
enable_bias,
relu_type,
):
"""Return a model and any parameters it may have"""
h_index = weight_format.index("H")
w_index = weight_format.index("W")
kernel_h = kernel_shape[h_index]
kernel_w = kernel_shape[w_index]
a = relay.var("input", shape=shape, dtype=dtype)
p = (0, 0, 0, 0)
if padding == "SAME":
p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
a = relay.nn.pad(
a,
pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
pad_value=input_zero_point,
pad_mode="constant",
)
shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
rng = np.random.default_rng(12321)
kmin, kmax = get_dtype_range(kernel_dtype)
weight = tvm.nd.array(
rng.integers(
kmin,
high=kmax,
size=weight_shape,
dtype=kernel_dtype,
)
)
weight_const = relay.const(weight, kernel_dtype)
conv = relay.qnn.op.conv2d(
a,
weight_const,
input_zero_point=relay.const(input_zero_point, "int32"),
kernel_zero_point=relay.const(kernel_zero_point, "int32"),
input_scale=relay.const(input_scale, "float32"),
kernel_scale=relay.const(kernel_scale, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout=weight_format,
dilation=dilation,
strides=strides,
groups=groups,
channels=out_channels,
padding=p,
out_dtype="int32",
)
bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype="int32"))
bias_const = relay.const(bias, "int32")
last_op = relay.nn.bias_add(conv, bias_const, axis=3) if enable_bias else conv
requant_input_sc = [sc * input_scale for sc in kernel_scale]
last_op = relay.qnn.op.requantize(
last_op,
relay.const(requant_input_sc, "float32"),
relay.const(0, "int32"),
relay.const(output_scale, "float32"),
relay.const(output_zero_point, "int32"),
out_dtype=dtype,
)
last_op = make_qnn_relu(last_op, relu_type, output_scale, output_zero_point, dtype)
params = {"w": weight, "b": bias}
return last_op, params | [
93,
578
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_resource_skus_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(tmpdir):
from jans.pycloudlib.utils import cert_to_truststore
tmp = tmpdir.mkdir("jans")
keystore_file = tmp.join("jans.jks")
cert_file = tmp.join("jans.crt")
# dummy cert
cert_file.write("""-----BEGIN CERTIFICATE----- | [
9,
1941,
24,
-1
] |
def METHOD_NAME(self): | [
9,
19,
1997,
2718,
199,
791,
5376
] |
def METHOD_NAME(self):
# get mongo DB host and port from line like:
# seeds: host1:27017,host2:27017
# take just the very first URI and ignore possible failover
# if no such config is present, default to localhost:27017
# further, take optional user credentials - here we assume the
# credentials dont contain a whitespace character (that would
# make the parsing more difficult)
#
# further, collect location of CA file for contacting qpid in section
# [messaging]
# certfile: /etc/pki/katello/qpid_client_striped.crt
self.dbhost = "localhost"
self.dbport = "27017"
self.dbuser = ""
self.dbpassword = ""
self.messaging_cert_file = ""
in_messaging_section = False
try:
for line in open("/etc/pulp/server.conf").read().splitlines():
if match(r"^\s*seeds:\s+\S+:\S+", line):
uri = line.split()[1].split(',')[0].split(':')
self.dbhost = uri[0]
self.dbport = uri[1]
if match(r"\s*username:\s+\S+", line):
self.dbuser = "-u %s" % line.split()[1]
if match(r"\s*password:\s+\S+", line):
self.dbpassword = "-p %s" % line.split()[1]
if line.startswith("[messaging]"):
in_messaging_section = True
if in_messaging_section and line.startswith("certfile:"):
self.messaging_cert_file = line.split()[1]
in_messaging_section = False
except IOError:
# fallback when the cfg file is not accessible
pass
self.add_file_tags({
'/etc/default/pulp_workers': 'pulp_worker_defaults'
})
self.add_copy_spec([
"/etc/pulp/*.conf",
"/etc/pulp/settings.py",
"/etc/pulp/server/plugins.conf.d/",
"/etc/default/pulp*",
"/var/log/httpd/pulp-http.log*",
"/var/log/httpd/pulp-https.log*",
"/var/log/httpd/pulp-http_access_ssl.log*",
"/var/log/httpd/pulp-https_access_ssl.log*",
"/var/log/httpd/pulp-http_error_ssl.log*",
"/var/log/httpd/pulp-https_error_ssl.log*"
])
num_tasks = self.get_option('tasks')
mtasks = self.build_mongo_cmd(
'\"DBQuery.shellBatchSize=%s;; '
'db.task_status.find().sort({finish_time: -1})'
'.pretty().shellPrint()\"' % num_tasks
)
mres = self.build_mongo_cmd(
'\"DBQuery.shellBatchSize=%s;; '
'db.reserved_resources.find().pretty().shellPrint()\"' % num_tasks
)
prun = self.build_mongo_cmd(
r'"DBQuery.shellBatchSize=%s;; '
r'db.task_status.find({state:{\$ne: \"finished\"}}).pretty()'
r'.shellPrint()"' % num_tasks
)
# prints mongo collection sizes sorted from biggest and in human
# readable output
csizes = self.build_mongo_cmd(
'\"function humanReadable(bytes) {'
' var i = -1;'
' var byteUnits = [\'kB\', \'MB\', \'GB\', \'TB\', \'PB\', '
' \'EB\', \'ZB\', \'YB\'];'
' do {'
' bytes = bytes / 1024;'
' i++;'
' } while (bytes > 1024);'
' return Math.max(bytes, 0.1).toFixed(1) + \' \' + byteUnits[i];'
'};'
'var collectionNames = db.getCollectionNames(), stats = [];'
'collectionNames.forEach(function (n) {'
' stats.push(db[n].stats());'
' });'
'stats = stats.sort(function(a, b) {'
' return b[\'size\'] - a[\'size\']; });'
'for (var c in stats) {'
' print(stats[c][\'ns\'] + \': \' +'
' humanReadable(stats[c][\'size\']) + \' (\' +'
' humanReadable(stats[c][\'storageSize\']) + \')\'); }\"'
)
dbstats = self.build_mongo_cmd('\"db.stats()\"')
self.add_cmd_output(mtasks, suggest_filename="mongo-task_status")
self.add_cmd_output(mres, suggest_filename="mongo-reserved_resources")
self.add_cmd_output(prun, suggest_filename="pulp-running_tasks")
self.add_cmd_output(csizes, suggest_filename="mongo-collection_sizes")
self.add_cmd_output(dbstats, suggest_filename="mongo-db_stats")
for opt in "quc":
self.add_cmd_output(
f"qpid-stat -{opt} --ssl-certificate="
f"{self.messaging_cert_file} -b amqps://localhost:5671",
tags=f"qpid_stat_{opt}")
self.add_cmd_output(
"sudo -u pulp PULP_SETTINGS='/etc/pulp/settings.py' "
"DJANGO_SETTINGS_MODULE='pulpcore.app.settings' dynaconf list",
suggest_filename="dynaconf_list"
) | [
102
] |
def METHOD_NAME(source_files: SourceFiles, bandit: Bandit) -> Tuple[str, ...]:
args = []
if bandit.config is not None:
args.append(f"--config={bandit.config}")
args.extend(bandit.args)
args.extend(source_files.files)
return tuple(args) | [
567,
5987
] |
def METHOD_NAME(self, message: str, **kwargs):
"""converts the provided xml string to a dictionary
Args:
message (str): xml string to be converted to a dictionary
"""
self.response.set_value("dict", xmltodict.parse(message)) | [
197,
399,
24,
553
] |
def METHOD_NAME():
exchange = Iconomi('iconomi1', 'a', b'a', object(), object())
assert exchange.location == Location.ICONOMI
assert exchange.name == 'iconomi1' | [
9,
156
] |
def METHOD_NAME(x):
return isinstance(x, list) | [
137,
245
] |
def METHOD_NAME(*, db_session, project_id: int) -> CasePriority:
"""Returns the default case priority or raises a ValidationError if one doesn't exist."""
case_priority = get_default(db_session=db_session, project_id=project_id)
if not case_priority:
raise ValidationError(
[
ErrorWrapper(
NotFoundError(msg="No default case priority defined."),
loc="case_priority",
)
],
model=CasePriorityRead,
)
return case_priority | [
19,
235,
894,
241
] |
def METHOD_NAME():
# This will work since it's a callable
scorer = (lambda true, pred: np.nan)
assert _check_scoring(scorer) is scorer
# fails for bad metric
with pytest.raises(ValueError):
_check_scoring('bad metric')
# fails for anything else
with pytest.raises(TypeError):
_check_scoring(123) | [
9,
250,
5451
] |
def METHOD_NAME(self, outfile):
"raises UnsupportedCodec if the codec doesn't support copy"
if not self._codec["copy"] or not self._codec["copy_format"]:
raise UnsupportedCodec(f"{self.codec_name} doesn't support copy")
return [
"-map",
f"0:{self.index}",
"-c:s",
"copy",
"-f",
self._codec["copy_format"],
outfile,
] | [
215,
335
] |
def METHOD_NAME(self, text: str) -> bool:
return self.root.METHOD_NAME(text.lower()) | [
1070
] |
def METHOD_NAME(line: Union[bytes, str]) -> Union[bytes, str]:
if isinstance(line, bytes):
return bytes("", json.detect_encoding(line))
elif isinstance(line, str):
return "" | [
3974,
3464
] |
def METHOD_NAME(self):
""" Left Mouse Button """
own = self.object.worldPosition
end = self.object.worldPosition + self.object.worldOrientation @ Vector([10,0,0])
ray = self.object.rayCast(end,own,1000)
if ray[0]:
hit = ray[1]
bge.render.drawLine(own, hit, (1,0,0))
else:
bge.render.drawLine(own, end, (0,1,0))
bge.logic.getCurrentScene().resetTaaSamples = True | [
17654,
17654,
9095
] |
def METHOD_NAME(condition: str) -> List[str]:
"""
This is literally just a condition. At time of writing, it follows one of the following form
- Must have completed \d UOC
- Must complete all level \d [CATEGORY] courses
That is, follow under the classification of UOC and CORE conditions
Assumption (True as of 2023): There are no composite dependencies
"""
if re.search("UOC", condition):
return tokenise_uoc_dependency(condition)
if re.search("(level|prescribed|core|cores)", condition):
return tokenise_core_dependency(condition)
# Ideally shouldn't get to this point since caller should verify
# only parseable strings passed in; TODO: raise Error
return [condition] | [
6672,
2913
] |
def METHOD_NAME(cmd, client, resource_group_name, workspace_name, integration_runtime_name, integration_runtime_type,
description=None, if_match=None, location='AutoResolve', compute_type='General',
core_count=8, time_to_live=0, no_wait=False):
property_files = {}
property_files['type'] = integration_runtime_type
property_files['description'] = description
if integration_runtime_type == 'Managed':
property_files['typeProperties'] = {}
property_files['typeProperties']['computeProperties'] = {}
property_files['typeProperties']['computeProperties']['location'] = location
property_files['typeProperties']['computeProperties']['dataFlowProperties'] = {}
property_files['typeProperties']['computeProperties']['dataFlowProperties']['computeType'] = compute_type
property_files['typeProperties']['computeProperties']['dataFlowProperties']['coreCount'] = core_count
property_files['typeProperties']['computeProperties']['dataFlowProperties']['timeToLive'] = time_to_live
properties = IntegrationRuntimeResource(type=integration_runtime_type, properties=property_files)
return sdk_no_wait(no_wait, client.begin_create, resource_group_name, workspace_name,
integration_runtime_name, properties, if_match) | [
129
] |
def METHOD_NAME():
"""Download and partitions the MNIST dataset."""
(x_train, y_train), testset = tf.keras.datasets.mnist.load_data()
partitions = []
# We keep all partitions equal-sized in this example
partition_size = math.floor(len(x_train) / NUM_CLIENTS)
for cid in range(NUM_CLIENTS):
# Split dataset into non-overlapping NUM_CLIENT partitions
idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size
partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))
return partitions, testset | [
2312,
1697
] |
async def METHOD_NAME(self, **kwargs: Any) -> _models.AvailableRuntimeVersions:
"""Lists all of the available runtime versions supported by Microsoft.AppPlatform provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailableRuntimeVersions or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_03_01_preview.models.AvailableRuntimeVersions
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop(
"api_version", _params.pop("api-version", self._api_version or "2022-03-01-preview")
)
cls: ClsType[_models.AvailableRuntimeVersions] = kwargs.pop("cls", None)
request = build_list_runtime_versions_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AvailableRuntimeVersions", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | [
245,
1888,
295
] |
def METHOD_NAME(self):
# Add the full report
out = collections.defaultdict(list)
out[("all", "all")] = [artifact.path for artifact in self.artifacts]
# Group by suite first
suites = itertools.groupby(
sorted(self.artifacts, key=lambda a: a.suite), lambda a: a.suite
)
for suite, artifacts in suites:
artifacts = list(artifacts)
# List all available platforms
platforms = {a.platform for a in artifacts}
platforms.add("all")
# And list all possible permutations with suite + platform
out[("all", suite)] += [artifact.path for artifact in artifacts]
for platform in platforms:
if platform != "all":
out[(platform, "all")] += [
artifact.path
for artifact in artifacts
if artifact.platform == platform
]
out[(platform, suite)] = [
artifact.path
for artifact in artifacts
if platform == "all" or artifact.platform == platform
]
return out | [
19,
7599
] |
def METHOD_NAME(self):
return "https://api.unsplash.com/photos/random?count=30&client_id={}{}".format(
Util.unxor(UnsplashDownloader.HASH, UnsplashDownloader.API_KEY),
"&orientation=landscape" if self.get_variety().options.use_landscape_enabled else "",
) | [
19,
17876,
58,
274
] |
def METHOD_NAME(
batch_size,
epochs,
learning_rate,
num_gpus,
training_channel,
testing_channel,
hosts,
current_host,
model_dir,
):
(train_labels, train_images) = load_data(training_channel)
(test_labels, test_images) = load_data(testing_channel)
# Data parallel training - shard the data so each host
# only trains on a subset of the total data.
shard_size = len(train_images) // len(hosts)
for i, host in enumerate(hosts):
if host == current_host:
start = shard_size * i
end = start + shard_size
break
train_iter = mx.io.NDArrayIter(
train_images[start:end], train_labels[start:end], batch_size, shuffle=True
)
val_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)
logging.getLogger().setLevel(logging.DEBUG)
kvstore = "local" if len(hosts) == 1 else "dist_sync"
mlp_model = mx.mod.Module(symbol=build_graph(), context=get_training_context(num_gpus))
mlp_model.fit(
train_iter,
eval_data=val_iter,
kvstore=kvstore,
optimizer="sgd",
optimizer_params={"learning_rate": learning_rate},
eval_metric="acc",
batch_end_callback=mx.callback.Speedometer(batch_size, 100),
num_epoch=epochs,
)
if current_host == scheduler_host(hosts):
save(model_dir, mlp_model)
assert_can_track_sagemaker_experiments() | [
849
] |
def METHOD_NAME():
test_support.run_unittest(CopyRegTestCase) | [
9,
57
] |
def METHOD_NAME(cls, name):
"""An internal classmethod for the 'Get' expression dict for a `Parameter`.
Args:
name (str): The name of the parameter.
"""
return {"Get": f"Parameters.{name}"} | [
2078
] |
async def METHOD_NAME(self):
assert task_run.id is None | [
9,
147,
137,
98,
1646,
130,
0
] |
def METHOD_NAME(self):
"""
Executes a configuration test to determine the endianness
"""
tmp = []
def check_msg(self):
return tmp[0]
self.check(fragment=ENDIAN_FRAGMENT, features='c grep_for_endianness',
msg='Checking for endianness', define='ENDIANNESS', tmp=tmp,
okmsg=check_msg, confcache=None)
return tmp[0] | [
250,
3434
] |
def METHOD_NAME(self):
'''
:returns: the member of the structure that is being accessed.
:rtype: (sub-class of) :py:class:`psyclone.psyir.nodes.Member`
:raises InternalError: if the first child of this node is not an \
instance of Member.
'''
if not isinstance(self.children[0], Member):
raise InternalError(
f"{type(self).__name__} malformed or incomplete. The first "
f"child must be an instance of Member, but found "
f"'{type(self.children[0]).__name__}'")
return self.children[0] | [
1823
] |
def METHOD_NAME(self):
"""Returns ``True`` if a higher score is better."""
return not self.lower_is_better() | [
5417,
137,
5912
] |
def METHOD_NAME(r, i, v): # function needed for use in lambda
r[i] = v
return True | [
5719
] |
def METHOD_NAME(self):
if self.options.shared:
self.options.rm_safe("fPIC")
if self.options.use_gflags != "deprecated":
self.output.warning("use_gflags option is deprecated") | [
111
] |
def METHOD_NAME(sentry_init, capture_events, integrations):
sentry_init(default_integrations=False, integrations=integrations)
events = capture_events()
def crash():
1 / 0
t = Thread(target=crash)
t.start()
t.join()
if integrations:
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "threading"
assert not exception["mechanism"]["handled"]
else:
assert not events | [
9,
378,
504
] |
def METHOD_NAME(
model_buffer: bytearray) -> List[_schema_fb.TensorType]:
"""Gets a list of the input tensor types."""
subgraph = _get_subgraph(model_buffer)
tensor_types = []
for i in range(subgraph.InputsLength()):
index = subgraph.Inputs(i)
tensor_types.append(subgraph.Tensors(index).Type())
return tensor_types | [
19,
362,
768,
119
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag") | [
431
] |
def METHOD_NAME(self):
self.surface.fill(self.background_color.backend_color)
self._elements = OrderedDict() | [
537
] |
def METHOD_NAME(url: URL) -> URL:
"""Strip hashes from the url so that it can be used as part of a issue key."""
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(str(url))
path = re.sub(HASH_SUB[0], HASH_SUB[1], path)
query = re.sub(HASH_SUB[0], HASH_SUB[1], query)
fragment = re.sub(HASH_SUB[0], HASH_SUB[1], fragment)
return URL(urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))) | [
-1
] |
def METHOD_NAME(encoding_name: str) -> Generator[None, None, None]:
old_encoding = 'ascii' # Default fallback value
try:
old_encoding = urwid.util._target_encoding
urwid.set_encoding(encoding_name)
yield
finally:
urwid.set_encoding(old_encoding) | [
2300
] |
def METHOD_NAME(n_messages, messages, p_response, app_data):
"""
Simple conversation function that responds to any
prompt where the echo is off with the supplied password
"""
# Create an array of n_messages response objects
addr = CALLOC(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = STRDUP(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0 | [
1192,
1306
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters | [
274,
386
] |
def METHOD_NAME(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss | [
226,
1572
] |
def METHOD_NAME(response, verbose=True):
if verbose:
print(json.dumps(response, indent=4, ensure_ascii=False))
print()
return response | [
2191
] |
def METHOD_NAME(obj):
return obj.get(CommonYamlFields.TYPE, None) in [ControlFlowType.DO_WHILE, ControlFlowType.PARALLEL_FOR] | [
137,
1751,
1716,
553
] |
def METHOD_NAME(sendgrid_erasure_identity_email: str, sendgrid_secrets):
"""
Confirm whether contact exists by calling contact search by email api and comparing resulting firstname str.
Returns contact ID if it exists, returns None if it does not.
"""
base_url = f"https://{sendgrid_secrets['domain']}"
body = {"emails": [sendgrid_erasure_identity_email]}
headers = {
"Authorization": f"Bearer {sendgrid_secrets['api_key']}",
}
contact_response = requests.post(
url=f"{base_url}/v3/marketing/contacts/search/emails",
headers=headers,
json=body,
)
# we expect 404 if contact doesn't exist
if 404 == contact_response.status_code:
return None
return contact_response.json()["result"][sendgrid_erasure_identity_email]["contact"] | [
1492,
954
] |
def METHOD_NAME(self, view):
SEEN.discard(view.id())
util.view.handle_closed_view(view) | [
69,
1462
] |
def METHOD_NAME(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output | [
19,
663,
146
] |
def METHOD_NAME() -> None:
config = AwsConfig(access_key_id="foo", secret_access_key="bar")
config.sessions().session_class_factory = BotoFileBasedSession
client = AwsClient(config, "test")
instances = client.list("ec2", "describe-instances", "Reservations")
assert len(instances) == 3 | [
9,
128
] |
def METHOD_NAME():
assert ThreeDQubit.parallelep(1, 2, 2, x0=5, y0=6, z0=7) == [
ThreeDQubit(5, 6, 7),
ThreeDQubit(5, 7, 7),
ThreeDQubit(5, 6, 8),
ThreeDQubit(5, 7, 8),
]
assert ThreeDQubit.parallelep(2, 2, 2) == [
ThreeDQubit(0, 0, 0),
ThreeDQubit(1, 0, 0),
ThreeDQubit(0, 1, 0),
ThreeDQubit(1, 1, 0),
ThreeDQubit(0, 0, 1),
ThreeDQubit(1, 0, 1),
ThreeDQubit(0, 1, 1),
ThreeDQubit(1, 1, 1),
] | [
9,
-1,
1529
] |
def METHOD_NAME(self, b_material, n_mat_prop, b_dtype, n_dtype):
"""Export the material alpha or color controller data."""
# get fcurves
fcurves = [fcu for fcu in b_material.animation_data.action.fcurves if b_dtype in fcu.data_path]
if not fcurves:
return
# just set the names of the nif data types, main difference between alpha and color
if b_dtype == "alpha":
keydata = "NiFloatData"
interpolator = "NiFloatInterpolator"
controller = "NiAlphaController"
else:
keydata = "NiPosData"
interpolator = "NiPoint3Interpolator"
controller = "NiMaterialColorController"
# create the key data
n_key_data = block_store.create_block(keydata, fcurves)
n_key_data.data.num_keys = len(fcurves[0].keyframe_points)
n_key_data.data.interpolation = NifClasses.KeyType.LINEAR_KEY
n_key_data.data.reset_field("keys")
# assumption: all curves have same amount of keys and are sampled at the same time
for i, n_key in enumerate(n_key_data.data.keys):
frame = fcurves[0].keyframe_points[i].co[0]
# add each point of the curves
n_key.arg = n_key_data.data.interpolation
n_key.time = frame / self.fps
if b_dtype == "alpha":
n_key.value = fcurves[0].keyframe_points[i].co[1]
else:
n_key.value.x, n_key.value.y, n_key.value.z = [fcu.keyframe_points[i].co[1] for fcu in fcurves]
# if key data is present
# then add the controller so it is exported
if fcurves[0].keyframe_points:
n_mat_ctrl = block_store.create_block(controller, fcurves)
n_mat_ipol = block_store.create_block(interpolator, fcurves)
n_mat_ctrl.interpolator = n_mat_ipol
self.set_flags_and_timing(n_mat_ctrl, fcurves)
# set target color only for color controller
if n_dtype:
n_mat_ctrl.set_target_color(n_dtype)
n_mat_ctrl.data = n_key_data
n_mat_ipol.data = n_key_data
# attach block to material property
n_mat_prop.add_controller(n_mat_ctrl) | [
294,
6139,
1139,
36,
2951
] |
def METHOD_NAME(self) -> _TABLE_DATA:
report = []
for action, d in self.recorded_durations.items():
d_tensor = torch.tensor(d)
sum_d = torch.sum(d_tensor).item()
report.append((action, sum_d / len(d), sum_d))
report.sort(key=lambda x: x[1], reverse=True)
return report | [
93,
339
] |
def METHOD_NAME():
return (
*collect_rules(),
*kotlin_parser.METHOD_NAME(),
*symbol_mapper.METHOD_NAME(),
*jvm_symbol_mapper.METHOD_NAME(),
*artifact_mapper.METHOD_NAME(),
UnionRule(InferDependenciesRequest, InferKotlinSourceDependencies),
UnionRule(InferDependenciesRequest, InferKotlinRuntimeDependencyRequest),
) | [
1634
] |
def METHOD_NAME():
champ = Champ()
champ.filename="COH2_GS.trexio"
champ.motype="RHF"
champ.back_end='HDF5'
champ.gamessfile=None
champ.save_geometry=True
champ.save_lcao = True
champ.save_basis = True
champ.save_eigenvalues = False
champ.save_ecp = True
champ.save_symmetry = False
champ.save_determinants = True
champ.save_csfs = False
# Optional argument for controlling the names of the output files
champ.basis_prefix = "TEST2"
champ.run()
assert champ is not None
assert champ.nucleus_num == 4
assert champ.ao_num == 66
assert champ.mo_num == 66
assert champ.shell_num == 26
assert champ.prim_num == 62
assert champ.ecp_num == 14
assert champ.num_dets == 1862
assert champ.num_states == 1 | [
9,
-1,
3636,
551
] |