text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
def METHOD_NAME(self):
if self.options.shared:
self.options.rm_safe("fPIC") | [
111
] |
def METHOD_NAME(self, **kwargs): | [
1452
] |
def METHOD_NAME(inputPath: nimblephysics_libs._nimblephysics.common.Uri, skel: nimblephysics_libs._nimblephysics.dynamics.Skeleton, outputPath: str) -> None:
pass | [
369,
10562,
10563
] |
def METHOD_NAME(server_process, metrics, interval, socket):
""" Monitor the metrics of server_process and its child processes
"""
while True:
message = []
collected_metrics = get_metrics(server_process, get_child_processes(server_process), logger)
metrics_msg = []
for metric in metrics:
message.append(str(collected_metrics.get(metric, 0)))
if collected_metrics.get(metric) is not None:
metrics_msg.append("{0} : {1}".format(metric, collected_metrics.get(metric, 0)))
message = "\t".join(message) + "\t\n"
logger.info("%s", " -- ".join(metrics_msg))
if socket:
try:
socket.send(message.encode("latin-1"))
except BrokenPipeError:
logger.info("Stopping monitoring as socket connection is closed.")
break
# TODO - log metrics to a file METRICS_LOG_FILE if METRICS_LOG_FILE is provided
gevent.sleep(interval) | [
1863,
4491
] |
def METHOD_NAME(self) -> 'outputs.CollectorPropertiesResponse':
return pulumi.get(self, "properties") | [
748
] |
def METHOD_NAME(
inputs,
outputs,
backend=CoreMLComputeUnit.CPU,
allow_low_precision=True,
quantization_mode=CoreMLQuantizationMode.NONE,
mlmodel_export_path=None,
):
return (
inputs,
outputs,
backend,
allow_low_precision,
quantization_mode,
mlmodel_export_path,
) | [
296,
1457
] |
def METHOD_NAME(self):
dims = self.dataset.shape.nav.dims
if dims not in (1, 2, 3):
raise ValueError(
"can only handle 1D/2D/3D nav currently, received %s dimensions" % dims
)
zyx = (
self.parameters.get('z'),
self.parameters.get('y'),
self.parameters.get('x'),
)
messages = {
1: "Need x, not y and not z to index 1D dataset, received z=%s, y=%s, x=%s",
2: "Need x, y and not z to index 2D dataset, received z=%s, y=%s, x=%s",
3: "Need x, y z to index 3D dataset, received z=%s, y=%s, x=%s",
}
keep = zyx[-dims:]
drop = zyx[:-dims]
if (None in keep) or not all(d is None for d in drop):
raise ValueError(messages[dims] % zyx)
return keep | [
19,
1788
] |
def METHOD_NAME():
# Create a list of dictionaries where each key is "labels(n)"
# and each value is a list containing a node label
labels = "MATCH (n) RETURN distinct labels(n)"
query = run_query(labels)
data = query.data()
label_list = []
# Iterate through the list and dictionaries to create a list
# of node labels
for dictionary in data:
for key in dictionary:
value = dictionary[key]
value_string = value[0]
label_list.append(value_string)
return label_list | [
1716,
415
] |
def METHOD_NAME(result: Result) -> Iterator[Row]:
if "totals" in result:
return itertools.chain(result["data"], [result["totals"]])
else:
return iter(result["data"]) | [
3972,
1346
] |
def METHOD_NAME(func, lis):
"""Python2/3 compatibility.
replace map(int, list) with lmap(int, list) that always returns a list
instead of an iterator. Otherwise conflicts with np.array in python3.
"""
return list(map(func, lis)) | [
-1
] |
def METHOD_NAME(config_filename):
"""Parse a INI config with profiles.
This will parse an INI config file and map top level profiles
into a top level "profile" key.
If you want to parse an INI file and map all section names to
top level keys, use ``raw_config_parse`` instead.
"""
... | [
557,
200
] |
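A minimal, hypothetical sketch of the profile-mapping behaviour described in the docstring above (the real implementation is elided in this row); the exact key names and section format are assumptions, not the library's actual API:
```python
import configparser

def sketch_parse_profiles(config_filename):
    # Illustrative only: read an INI file and group "[profile <name>]" sections
    # under a top-level "profile" key, leaving other sections at the top level.
    parser = configparser.RawConfigParser()
    parser.read(config_filename)
    parsed = {"profile": {}}
    for section in parser.sections():
        values = dict(parser.items(section))
        if section.startswith("profile "):
            parsed["profile"][section.split(" ", 1)[1]] = values
        else:
            parsed[section] = values
    return parsed
```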
def METHOD_NAME():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
bitstrings = [0, 1, 2]
f1 = cirq.xeb_fidelity(circuit, bitstrings, (q0, q1))
f2 = cirq.xeb_fidelity(circuit, tuple(bitstrings), (q0, q1))
assert f1 == f2 | [
9,
12699,
9102,
1815,
362
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME():
package = Package.describe("data/tables/chunk*.csv")
assert package.to_descriptor() == {
"resources": [
{
"path": "data/tables/chunk1.csv",
"name": "chunk1",
"type": "table",
"scheme": "file",
"format": "csv",
"encoding": "utf-8",
"mediatype": "text/csv",
"schema": {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
},
},
{
"name": "chunk2",
"path": "data/tables/chunk2.csv",
"type": "table",
"scheme": "file",
"format": "csv",
"encoding": "utf-8",
"mediatype": "text/csv",
"schema": {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
},
},
],
} | [
9,
2517,
360
] |
def METHOD_NAME(stub, model_name, model_input, metadata):
with open(model_input, "rb") as f:
data = f.read()
input_data = {"data": data}
response = stub.Predictions(
inference_pb2.PredictionsRequest(model_name=model_name, input=input_data),
metadata=metadata,
)
try:
prediction = response.prediction.decode("utf-8")
print(prediction)
except grpc.RpcError as e:
exit(1) | [
1852
] |
async def METHOD_NAME() -> None:
"""Test that scheduling an animation works."""
app = AnimApp()
delay = 0.1
async with app.run_test() as pilot:
styles = app.query_one(Static).styles
styles.background = "black"
styles.animate("background", "white", delay=delay, duration=0)
await pilot.pause(0.9 * delay)
assert styles.background.rgb == (0, 0, 0) # Still black
await pilot.wait_for_scheduled_animations()
assert styles.background.rgb == (255, 255, 255) | [
9,
4817,
4537
] |
def METHOD_NAME(args):
"""Parses the command line to determine the Command and its arguments.
:param args: the arguments provided at the command line
:return: Command and command-specific arguments
"""
platforms = get_platforms()
width = max(min(shutil.get_terminal_size().columns, 80) - 2, 20)
briefcase_description = textwrap.fill(
"Briefcase is a tool for converting a Python project "
"into a standalone native application for distribution.",
width=width,
)
description_max_pad_len = max(len(cmd.command) for cmd in COMMANDS) + 2
command_description_list = "\n".join(
f" {cmd.command}{' ' * (description_max_pad_len - len(cmd.command))}{cmd.description}"
for cmd in COMMANDS
)
platform_list = ", ".join(sorted(platforms, key=str.lower))
additional_instruction = textwrap.fill(
"Each command, platform, and format has additional options. "
"Use the -h option on a specific command for more details.",
width=width,
)
parser = argparse.ArgumentParser(
prog="briefcase",
description=(
f"{briefcase_description}\n"
"\n"
"Commands:\n"
f"{command_description_list}\n"
"\n"
"Platforms:\n"
f" {platform_list}\n"
"\n"
f"{additional_instruction}"
),
usage="briefcase [-h] <command> [<platform>] [<format>] ...",
add_help=False,
formatter_class=lambda prog: RawDescriptionHelpFormatter(prog, width=width),
)
parser.add_argument("-V", "--version", action="version", version=__version__)
# <command> isn't actually optional; but if it's marked as required,
# there's no way to get help for subcommands. So; treat <command>
# as optional, handle the case where <command> isn't provided
# as the case where top-level help is displayed, and provide an explicit
# usage string so that the instructions displayed are correct
parser.add_argument(
"command",
choices=list(cmd.command for cmd in COMMANDS),
metavar="command",
nargs="?",
help=argparse.SUPPRESS,
)
# To make the UX a little forgiving, we normalize *any* case to the case
# actually used to register the platform. This function maps the lower-case
# version of the registered name to the actual registered name.
def normalize(name):
return {n.lower(): n for n in platforms.keys()}.get(name.lower(), name)
# argparse handles `--` specially, so make the passthrough args bypass the parser.
def parse_known_args(args):
args, passthough = split_passthrough(args)
options, extra = parser.parse_known_args(args)
if passthough:
extra += ["--"] + passthough
return options, extra
# Use parse_known_args to ensure any extra arguments can be ignored,
# and parsed as part of subcommand handling. This will capture the
# command, platform (filling a default if unspecified) and format
# (with no value if unspecified).
options, extra = parse_known_args(args)
# If no command has been provided, display top-level help.
if options.command is None:
raise NoCommandError(parser.format_help())
# Commands agnostic to the platform and format
if options.command == "new":
Command = NewCommand
elif options.command == "dev":
Command = DevCommand
elif options.command == "upgrade":
Command = UpgradeCommand
# Commands dependent on the platform and format
else:
parser.add_argument(
"platform",
choices=list(platforms.keys()),
default={
"darwin": "macOS",
"linux": "linux",
"win32": "windows",
}[sys.platform],
metavar="platform",
nargs="?",
type=normalize,
help="The platform to target (one of %(choices)s; default: %(default)s",
)
# <format> is also optional, with the default being platform dependent.
# There's no way to encode option-dependent choices, so allow *any*
# input, and we'll manually validate.
parser.add_argument(
"output_format",
metavar="format",
nargs="?",
help="The output format to use (the available output formats are platform dependent)",
)
# Re-parse the arguments, now that we know it is a command that makes use
# of platform/output_format.
options, extra = parse_known_args(args)
# Import the platform module
platform_module = platforms[options.platform]
# If the output format wasn't explicitly specified, fall back to the
# default output_format for the platform.
if options.output_format is None:
output_format = platform_module.DEFAULT_OUTPUT_FORMAT
else:
output_format = options.output_format
output_formats = get_output_formats(options.platform)
# Normalise casing of output_format to be more forgiving.
output_format = {n.lower(): n for n in output_formats}.get(
output_format.lower(), output_format
)
# We now know the command, platform, and format.
# Get the command class that corresponds to that definition.
try:
format_module = output_formats[output_format]
Command = getattr(format_module, options.command)
except KeyError:
raise InvalidFormatError(
requested=output_format,
choices=list(output_formats.keys()),
)
except AttributeError:
raise UnsupportedCommandError(
platform=options.platform,
output_format=output_format,
command=options.command,
)
return Command, extra | [
214,
4518
] |
def METHOD_NAME(self):
return orm.SqliteLogCollection(self) | [
1099
] |
def METHOD_NAME(
http_proxy: Optional[str] = None,
https_proxy: Optional[str] = None,
retry_sleeps: Optional[List[float]] = None,
) -> None:
"""
Configure snap to use http and https proxies.
:param http_proxy: http proxy to be used by snap. If None, it will
not be configured
:param https_proxy: https proxy to be used by snap. If None, it will
not be configured
:param retry_sleeps: Optional list of sleep lengths to apply between
retries. Specifying a list of [0.5, 1] tells subp to retry twice
on failure; sleeping half a second before the first retry and 1 second
before the second retry.
"""
if not is_snapd_installed():
LOG.debug("Skipping configure snap proxy. snapd is not installed.")
return
if http_proxy or https_proxy:
event.info(messages.SETTING_SERVICE_PROXY.format(service="snap"))
if http_proxy:
system.subp(
["snap", "set", "system", "proxy.http={}".format(http_proxy)],
retry_sleeps=retry_sleeps,
)
if https_proxy:
system.subp(
["snap", "set", "system", "proxy.https={}".format(https_proxy)],
retry_sleeps=retry_sleeps,
) | [
111,
4792,
127
] |
def METHOD_NAME(url: str) -> List:
"""
Returns the list of known repositories from the given url.
The list of repos is known, hence no parsers are used. This avoids adding any
additional packages. In the future, we may want to change the approach to include
parsers if the requirement changes.
Args:
url (str): The link to check for the presence of known repositories.
Returns:
list - of repos
"""
repos = list()
try:
url.rstrip("/")
r = requests.get(f"{url}/compose")
if "MON" in r.text:
repos.append("MON")
if "OSD" in r.text:
repos.append("OSD")
if "Tools" in r.text:
repos.append("Tools")
except BaseException as be: # noqa
LOG.error(be)
return repos | [
19,
4822
] |
def METHOD_NAME(self):
"""
Tests creating and renaming an instance on EC2 (classic)
"""
# create the instance
ret_val = self.run_cloud(
"-p ec2-test {} --no-deploy".format(self.instance_name), timeout=TIMEOUT
)
# check if instance returned
self.assertInstanceExists(ret_val)
changed_name = self.instance_name + "-changed"
rename_result = self.run_cloud(
"-a rename {} newname={} --assume-yes".format(
self.instance_name, changed_name
),
timeout=TIMEOUT,
)
self.assertFalse(
self._instance_exists(),
"Instance wasn't renamed: |\n{}".format(rename_result),
)
self.assertInstanceExists(instance_name=changed_name)
self.assertDestroyInstance(changed_name) | [
9,
89,
2010
] |
def METHOD_NAME(self, expression):
eav = {":id": {"S": "asdasdasd"}}
desired_hash_key, comparison, range_values = parse_expression(
expression_attribute_values=eav,
key_condition_expression=expression,
schema=self.schema,
expression_attribute_names=dict(),
)
assert desired_hash_key == eav[":id"]
assert comparison is None
assert range_values == [] | [
9,
1161,
59,
246
] |
def METHOD_NAME(cls, _schema):
if cls._schema_sub_resource_read is not None:
_schema.id = cls._schema_sub_resource_read.id
return
cls._schema_sub_resource_read = _schema_sub_resource_read = AAZObjectType()
sub_resource_read = _schema_sub_resource_read
sub_resource_read.id = AAZStrType()
_schema.id = cls._schema_sub_resource_read.id | [
56,
135,
1066,
191,
203
] |
def METHOD_NAME():
detector = managers.plugin_manager.detector
face_plugins = managers.plugin_manager.filter_face_plugins(
_get_face_plugin_names()
)
face_plugins = face_detection_skip_check(face_plugins)
rawfile = base64.b64decode(request.get_json()["file"])
faces = detector(
img=read_img(rawfile),
det_prob_threshold=_get_det_prob_threshold(),
face_plugins=face_plugins
)
plugins_versions = {p.slug: str(p) for p in [detector] + face_plugins}
faces = _limit(faces, request.values.get(ARG.LIMIT))
FaceDetection.SKIPPING_FACE_DETECTION = False
return jsonify(plugins_versions=plugins_versions, result=faces) | [
416,
5626,
2426,
72
] |
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Sku"]:
"""Lists the available SKUs supported by Microsoft.Storage for given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Sku or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2017_06_01.models.Sku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2017-06-01"))
cls: ClsType[_models.StorageSkuListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("StorageSkuListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245
] |
def METHOD_NAME(self):
with self._open_file(self.filename) as data_file:
for array_key, ds_name in self.datasets.items():
if ds_name not in data_file:
raise RuntimeError("%s not in %s" % (ds_name, self.filename))
spec = self.__read_spec(array_key, data_file, ds_name)
self.provides(array_key, spec) | [
102
] |
async def METHOD_NAME(metadata_store, rest_api):
"""
Test whether an error is returned if we try to modify both the status and name of a torrent
"""
with db_session:
chan = metadata_store.ChannelMetadata.create_channel(title="bla")
patch_params = {'status': TODELETE, 'title': 'test'}
await do_request(
rest_api,
'metadata/%s/%i' % (hexlify(chan.public_key), chan.id_),
request_type='PATCH',
post_data=patch_params,
expected_code=400,
) | [
9,
86,
475,
452,
61,
156
] |
def METHOD_NAME(self):
"""Puts in a non dictionary header"""
self.header_dict = "nchans nifs nbits fch1 foff tstart" | [
9,
256,
553
] |
def METHOD_NAME(self, window: Any) -> Any:
raise NotImplementedError | [
129,
1519
] |
def METHOD_NAME(self):
"""Tests the DefAttributesUsingSerialize overload WITH docstrings.
"""
# Basic read / write.
dut = MyData2(some_double=1.0)
self.assertEqual(dut.some_double, 1.0)
dut.some_double = -1.0
self.assertEqual(dut.some_double, -1.0)
# We'll just spot-check a few of the docs; that should be enough.
self.assertEqual(inspect.getdoc(MyData2.some_double),
"Field docstring for a double.")
self.assertEqual(inspect.getdoc(MyData2.some_vector),
"Field docstring for a vector.")
# N.B. Fields are tested below. | [
9,
177,
626,
183,
41,
672
] |
def METHOD_NAME(
y_pred: npt.NDArray[np.float32], y_true: npt.NDArray[np.float32]
) -> npt.NDArray[np.float32]:
"""
Computes the mean reciprocal rank of the predictions.
Args:
y_pred: Array of shape [N, D] with the predictions (N: number of elements, D: number of
metrics).
y_true: Array of shape [N, D] with the true metrics.
Returns:
Array of shape [D] with the average MRR for each metric.
"""
minimum_indices = y_pred.argmin(0) # [D]
true_ranks = st.rankdata(y_true, method="min", axis=0) # [N, D]
ranks = np.take_along_axis(
true_ranks, minimum_indices[None, :], axis=0
) # [N, D]
result = 1 / ranks
return result.mean(0) | [
8996
] |
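A toy usage sketch of the mean-reciprocal-rank logic above; the values are illustrative only, and NumPy/SciPy are assumed available:
```python
import numpy as np
import scipy.stats as st

# Two metrics (D=2) over three candidates (N=3).
y_pred = np.array([[0.1, 0.2], [0.5, 0.9], [0.9, 0.1]], dtype=np.float32)
y_true = np.array([[1.0, 3.0], [2.0, 1.0], [3.0, 2.0]], dtype=np.float32)

minimum_indices = y_pred.argmin(0)                      # predicted best per metric
true_ranks = st.rankdata(y_true, method="min", axis=0)  # true rank of each candidate
ranks = np.take_along_axis(true_ranks, minimum_indices[None, :], axis=0)
print((1 / ranks).mean(0))  # -> [1.  0.5]: metric 0 picked the true best, metric 1 the runner-up
```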
def METHOD_NAME(example_subarray, tmp_path):
"""Test plain array display"""
from ctapipe.visualization.bokeh import ArrayDisplay
display = ArrayDisplay(example_subarray)
output_path = tmp_path / "test.html"
output_file(output_path)
save(display.figure, filename=output_path) | [
9,
877,
52,
654,
199
] |
def METHOD_NAME(self, thread_factory, executable_factory):
""" HEARTBEAT (CORE): Multiple instance with removal"""
pids = [self._pid() for _ in range(4)]
threads = [thread_factory() for _ in range(4)]
executable = executable_factory()
assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 1}
assert live(executable, 'host1', pids[1], threads[1]) == {'assign_thread': 1, 'nr_threads': 2}
assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 2}
assert live(executable, 'host2', pids[2], threads[2]) == {'assign_thread': 2, 'nr_threads': 3}
assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 3}
die(executable, 'host0', pids[0], threads[0])
assert live(executable, 'host3', pids[3], threads[3]) == {'assign_thread': 2, 'nr_threads': 3}
assert live(executable, 'host1', pids[1], threads[1]) == {'assign_thread': 0, 'nr_threads': 3}
assert live(executable, 'host2', pids[2], threads[2]) == {'assign_thread': 1, 'nr_threads': 3}
die(executable, 'host2', pids[2], threads[2])
assert live(executable, 'host3', pids[3], threads[3]) == {'assign_thread': 1, 'nr_threads': 2} | [
9,
1797,
988
] |
def METHOD_NAME(self):
server, client = self._connected_pair()
ep = select.epoll(2)
ep2 = select.epoll.fromfd(ep.fileno())
ep2.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
ep2.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
events = ep.poll(1, 4)
events2 = ep2.poll(0.9, 4)
self.assertEqual(len(events), 2)
self.assertEqual(len(events2), 2)
ep.close()
try:
ep2.poll(1, 4)
except IOError as e:
self.assertEqual(e.args[0], errno.EBADF, e)
else:
self.fail("epoll on closed fd didn't raise EBADF") | [
9,
3465
] |
def METHOD_NAME(
query_body: MutableMapping[str, Any],
expected_entity: EntityKey,
) -> None:
dataset = get_dataset("discover")
# HACK until these are converted to proper SnQL queries
if not query_body.get("conditions"):
query_body["conditions"] = []
query_body["conditions"] += [
["timestamp", ">=", "2020-01-01T12:00:00"],
["timestamp", "<", "2020-01-02T12:00:00"],
["project_id", "=", 1],
]
if not query_body.get("selected_columns"):
query_body["selected_columns"] = ["project_id"]
request = json_to_snql(query_body, "discover")
request.validate()
query, _ = parse_snql_query(str(request.query), dataset)
entity = query.get_from_clause()
assert isinstance(entity, EntitySource)
assert entity.key == expected_entity | [
9,
365,
1458
] |
def METHOD_NAME():
mx.rnd.seed(seed_state=1234)
model = DeepTPPPredictionNetwork(
num_marks=5,
time_distr_output=WeibullOutput(),
interval_length=1.0,
prediction_interval_length=10.0,
)
model.initialize()
past_ia_times = nd.array([[0.1, 0.2, 0.1, 0.12], [0.3, 0.15, 0.1, 0.12]])
past_marks = nd.array([[1, 2, 0, 2], [0, 0, 1, 2]])
past_valid_length = nd.array([3, 4])
past_target = nd.stack(past_ia_times, past_marks, axis=-1)
pred_target, pred_valid_length = model(past_target, past_valid_length)
# pred_target must have shape
# (num_parallel_samples, batch_size, max_sequence_length, 2)
assert pred_target.ndim == 4
assert pred_target.shape[0] == model.num_parallel_samples
assert pred_target.shape[1] == past_ia_times.shape[0]
assert pred_target.shape[3] == 2 # TPP prediction contains ia_time & mark
# pred_valid_length must have shape (num_parallel_samples, batch_size)
assert pred_valid_length.ndim == 2
assert pred_valid_length.shape[0] == model.num_parallel_samples
assert pred_valid_length.shape[1] == past_ia_times.shape[0]
pred_ia_times = pred_target[..., 0].asnumpy()
pred_marks = pred_target[..., 1].asnumpy()
assert pred_marks.min() >= 0
assert pred_marks.max() < model.num_marks
assert (pred_ia_times >= 0).all()
# ia_times are set to zero above valid_length (see DeepTPPPredictionNetwork)
assert (pred_ia_times.sum(-1) < model.prediction_interval_length).all() | [
9,
2726,
1228,
146
] |
def METHOD_NAME(self, fn):
out.info(u'Skipping existing file "%s".' % fn) | [
1985,
3451
] |
def METHOD_NAME():
return {
docker_mod: {"__context__": {"docker.docker_version": ""}},
docker_state: {"__opts__": {"test": False}},
} | [
111,
467,
468
] |
def METHOD_NAME():
"""Create a graphic with the additive adjustment factors estimated after applying the adapt_freq method."""
n = 10000
x = series(synth_rainfall(2, 2, wet_freq=0.25, size=n), "pr") # sim
y = series(synth_rainfall(2, 2, wet_freq=0.5, size=n), "pr") # ref
xp = adapt_freq(x, y, thresh=0).sim_ad
fig, (ax1, ax2) = plt.subplots(2, 1)
sx = x.sortby(x)
sy = y.sortby(y)
sxp = xp.sortby(xp)
# Original and corrected series
ax1.plot(sx.values, color="blue", lw=1.5, label="x : sim")
ax1.plot(sxp.values, color="pink", label="xp : sim corrected")
ax1.plot(sy.values, color="k", label="y : ref")
ax1.legend()
# Compute qm factors
qm_add = QuantileDeltaMapping(kind="+", group="time").train(y, x).ds
qm_mul = QuantileDeltaMapping(kind="*", group="time").train(y, x).ds
qm_add_p = QuantileDeltaMapping(kind="+", group="time").train(y, xp).ds
qm_mul_p = QuantileDeltaMapping(kind="*", group="time").train(y, xp).ds
qm_add.cf.plot(ax=ax2, color="cyan", ls="--", label="+: y-x")
qm_add_p.cf.plot(ax=ax2, color="cyan", label="+: y-xp")
qm_mul.cf.plot(ax=ax2, color="brown", ls="--", label="*: y/x")
qm_mul_p.cf.plot(ax=ax2, color="brown", label="*: y/xp")
ax2.legend(loc="upper left", frameon=False)
return fig | [
3655,
3702,
303
] |
def METHOD_NAME(
video=False, port=None, address=None, remote=False, desktop=None
):
"""Runs the FiftyOne quickstart.
This method loads an interesting dataset from the Dataset Zoo, launches the
App, and prints some suggestions for exploring the dataset.
Args:
video (False): whether to launch a video dataset
port (None): the port number to serve the App. If None,
``fiftyone.config.default_app_port`` is used
address (None): the address to serve the App. If None,
``fiftyone.config.default_app_address`` is used
remote (False): whether this is a remote session, and opening the App
should not be attempted
desktop (None): whether to launch the App in the browser (False) or as
a desktop App (True). If None, ``fiftyone.config.desktop_app`` is
used. Not applicable to notebook contexts
Returns:
a tuple containing
- dataset: the :class:`fiftyone.core.dataset.Dataset` that was loaded
- session: the :class:`fiftyone.core.session.Session` instance for
the App that was launched
"""
if video:
return _video_quickstart(port, address, remote, desktop)
return _quickstart(port, address, remote, desktop) | [
9240
] |
def METHOD_NAME(self) -> int:
return int(math.ceil(float(self.get_length_in_bits() / 8.0))) | [
799,
623,
321
] |
def METHOD_NAME(
graphql_client, site_factory, generic_page_factory, locale
):
root_site_1 = generic_page_factory()
root_site_2 = generic_page_factory()
page_1 = generic_page_factory(
slug="bubble-tea",
locale=locale("en"),
parent=root_site_1,
body__0__text_section__title__value="I've Got a Lovely Bunch of Coconuts",
)
page_2 = generic_page_factory(
slug="chocolate",
locale=locale("en"),
parent=root_site_2,
body__0__text_section__title__value="There they are, all standing in a row",
)
site_factory(hostname="site1", root_page=root_site_1)
site_factory(hostname="site2", root_page=root_site_2)
page_1.copy_for_translation(locale=locale("it"))
page_2.copy_for_translation(locale=locale("it"))
query = """
query Page ($hostname: String!, $language: String!, $slug: String!) {
cmsPage(hostname: $hostname, language: $language, slug: $slug){
...on GenericPage {
body {
...on TextSection {
title
}
}
}
}
}
"""
response = graphql_client.query(
query, variables={"hostname": "site2", "slug": "chocolate", "language": "en"}
)
assert response.data == {
"cmsPage": {"body": [{"title": "There they are, all standing in a row"}]}
} | [
9,
1174,
527,
604,
1055,
61,
2938
] |
def METHOD_NAME(self): | [
656
] |
def METHOD_NAME(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result | [
772,
877
] |
def METHOD_NAME(cls, project_paths):
projects = None
for project_path in project_paths:
try:
with open(project_path, 'r') as projects_file:
projects = cls()
projects.load(projects_file)
break
except:
projects = None
if not projects:
raise RuntimeError('No project definitions found in {!r}'.format(
project_paths))
return projects | [
1070
] |
def METHOD_NAME(self):
"""
Security API: Cannot create a guest token without authorization
"""
self.login(username="gamma")
response = self.client.post(self.uri)
self.assert403(response) | [
9,
72,
6483,
466,
3166
] |
def METHOD_NAME(delay):
run_coroutine(asyncio.sleep, delay) | [
958,
5893
] |
def METHOD_NAME():
input_map = dict(
args=dict(
argstr="%s",
),
count=dict(
argstr="-count %s",
sep=",",
),
environ=dict(
nohash=True,
usedefault=True,
),
flip_any_direction=dict(
argstr="-any_direction",
xor=(
"flip_positive_direction",
"flip_negative_direction",
"flip_any_direction",
),
),
flip_negative_direction=dict(
argstr="-negative_direction",
xor=(
"flip_positive_direction",
"flip_negative_direction",
"flip_any_direction",
),
),
flip_positive_direction=dict(
argstr="-positive_direction",
xor=(
"flip_positive_direction",
"flip_negative_direction",
"flip_any_direction",
),
),
flip_x_any=dict(
argstr="-xanydirection",
xor=("flip_x_positive", "flip_x_negative", "flip_x_any"),
),
flip_x_negative=dict(
argstr="-xdirection",
xor=("flip_x_positive", "flip_x_negative", "flip_x_any"),
),
flip_x_positive=dict(
argstr="+xdirection",
xor=("flip_x_positive", "flip_x_negative", "flip_x_any"),
),
flip_y_any=dict(
argstr="-yanydirection",
xor=("flip_y_positive", "flip_y_negative", "flip_y_any"),
),
flip_y_negative=dict(
argstr="-ydirection",
xor=("flip_y_positive", "flip_y_negative", "flip_y_any"),
),
flip_y_positive=dict(
argstr="+ydirection",
xor=("flip_y_positive", "flip_y_negative", "flip_y_any"),
),
flip_z_any=dict(
argstr="-zanydirection",
xor=("flip_z_positive", "flip_z_negative", "flip_z_any"),
),
flip_z_negative=dict(
argstr="-zdirection",
xor=("flip_z_positive", "flip_z_negative", "flip_z_any"),
),
flip_z_positive=dict(
argstr="+zdirection",
xor=("flip_z_positive", "flip_z_negative", "flip_z_any"),
),
image_maximum=dict(
argstr="-image_maximum %s",
),
image_minimum=dict(
argstr="-image_minimum %s",
),
image_range=dict(
argstr="-image_range %s %s",
),
input_file=dict(
argstr="%s",
extensions=None,
mandatory=True,
position=-2,
),
nonormalize=dict(
argstr="-nonormalize",
xor=("normalize", "nonormalize"),
),
normalize=dict(
argstr="-normalize",
xor=("normalize", "nonormalize"),
),
out_file=dict(
argstr="> %s",
extensions=None,
genfile=True,
position=-1,
),
output_file=dict(
extensions=None,
hash_files=False,
keep_extension=False,
name_source=["input_file"],
name_template="%s.raw",
position=-1,
),
start=dict(
argstr="-start %s",
sep=",",
),
write_ascii=dict(
argstr="-ascii",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_byte=dict(
argstr="-byte",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_double=dict(
argstr="-double",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_float=dict(
argstr="-float",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_int=dict(
argstr="-int",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_long=dict(
argstr="-long",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_range=dict(
argstr="-range %s %s",
),
write_short=dict(
argstr="-short",
xor=(
"write_ascii",
"write_ascii",
"write_byte",
"write_short",
"write_int",
"write_long",
"write_float",
"write_double",
"write_signed",
"write_unsigned",
),
),
write_signed=dict(
argstr="-signed",
xor=("write_signed", "write_unsigned"),
),
write_unsigned=dict(
argstr="-unsigned",
xor=("write_signed", "write_unsigned"),
),
)
inputs = Extract.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value | [
9,
297,
1461
] |
def METHOD_NAME(args):
"""remove a repository from Spack's configuration"""
repos = spack.config.get("repos", scope=args.scope)
namespace_or_path = args.namespace_or_path
# If the argument is a path, remove that repository from config.
canon_path = spack.util.path.canonicalize_path(namespace_or_path)
for repo_path in repos:
repo_canon_path = spack.util.path.canonicalize_path(repo_path)
if canon_path == repo_canon_path:
repos.remove(repo_path)
spack.config.set("repos", repos, args.scope)
tty.msg("Removed repository %s" % repo_path)
return
# If it is a namespace, remove corresponding repo
for path in repos:
try:
repo = spack.repo.Repo(path)
if repo.namespace == namespace_or_path:
repos.remove(path)
spack.config.set("repos", repos, args.scope)
tty.msg("Removed repository %s with namespace '%s'." % (repo.root, repo.namespace))
return
except spack.repo.RepoError:
continue
tty.die("No repository with path or namespace: %s" % namespace_or_path) | [
522,
188
] |
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags") | [
114
] |
def METHOD_NAME(fpath):
try:
with open(fpath, "rb") as f:
return f.read(4) == b"TZif"
except Exception: # pragma: nocover
return False | [
1205,
59
] |
def METHOD_NAME(disk):
storage_id = disk.get("storage_id")
if storage_id:
storage = get_dict_from_item_in_table("storage", storage_id)
disk.update(
{
"file": _get_filename(storage),
"parent": storage.get("parent"),
"path_selected": storage.get("directory_path"),
}
) | [
408,
948
] |
async def METHOD_NAME(app):
# placeholder
LOGGER.info(">>>>> on_cleanup() <<<<<") | [
69,
991,
950
] |
def METHOD_NAME(number): # type: (int) -> str
"""
Given a number, format result as a C array of words
(little-endian, same as ESP32 RSA peripheral or mbedTLS)
"""
result = []
while number != 0:
result.append('0x%08x' % (number & 0xFFFFFFFF))
number >>= 32
return '{ ' + ', '.join(result) + ' }' | [
106,
947,
17811,
1473
] |
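As an illustration of the little-endian word splitting performed above (a standalone sketch, not the original helper):
```python
number = 0x0123456789ABCDEF
words = []
while number != 0:
    words.append('0x%08x' % (number & 0xFFFFFFFF))
    number >>= 32
print('{ ' + ', '.join(words) + ' }')  # -> { 0x89abcdef, 0x01234567 } (least-significant word first)
```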
def METHOD_NAME(port, per_listener, global_en, user_en, pattern_en):
acl_file = os.path.basename(__file__).replace('.py', '.acl')
write_acl(acl_file, global_en=global_en, user_en=user_en, pattern_en=pattern_en)
if global_en:
single_test(port, per_listener, username=None, topic="topic/global", expect_deny=False)
single_test(port, per_listener, username="username", topic="topic/global", expect_deny=True)
single_test(port, per_listener, username=None, topic="topic/global/except", expect_deny=True)
if user_en:
single_test(port, per_listener, username=None, topic="topic/username", expect_deny=True)
single_test(port, per_listener, username="username", topic="topic/username", expect_deny=False)
single_test(port, per_listener, username="username", topic="topic/username/except", expect_deny=True)
if pattern_en:
single_test(port, per_listener, username=None, topic="pattern/username", expect_deny=True)
single_test(port, per_listener, username="username", topic="pattern/username", expect_deny=False)
single_test(port, per_listener, username="username", topic="pattern/username/except", expect_deny=True) | [
1918,
9
] |
def METHOD_NAME() -> None:
"""
Check that autograph=False for quadrature.ndiagquad does not throw an error.
Regression test for https://github.com/GPflow/GPflow/issues/1547.
"""
@tf.function(autograph=False)
def func_ndiagquad_autograph_false() -> tf.Tensor:
mu: AnyNDArray = np.array([1.0, 1.3])
var: AnyNDArray = np.array([3.0, 3.5])
num_gauss_hermite_points = 25
return quadrature.ndiagquad(
[lambda *X: tf.exp(X[0])], num_gauss_hermite_points, [mu], [var]
)
func_ndiagquad_autograph_false() | [
9,
17781,
870,
130,
1471,
168
] |
def METHOD_NAME(collection):
collection.load() | [
557,
1098
] |
def METHOD_NAME(self):
pass | [
86,
-1
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(self):
pass | [
709,
710
] |
def METHOD_NAME(logdir):
"""Create an empty event file if not already exists.
This event file indicates that we have a plugins/profile/ directory in the
current logdir.
Args:
logdir: log directory.
"""
for file_name in gfile.ListDirectory(logdir):
if file_name.endswith(_EVENT_FILE_SUFFIX):
return
# TODO(b/127330388): Use summary_ops_v2.create_file_writer instead.
event_writer = pywrap_tensorflow.EventsWriter(
compat.as_bytes(os.path.join(logdir, 'events')))
event_writer.InitWithSuffix(compat.as_bytes(_EVENT_FILE_SUFFIX)) | [
2946,
129,
417,
171
] |
def METHOD_NAME(self):
""" Test ActivationSampler for a Conv module """
AimetLogger.set_level_for_all_areas(logging.DEBUG)
model = TinyModel().eval()
sim = QuantizationSimModel(model, dummy_input=torch.randn(1, 3, 32, 32), quant_scheme='tf_enhanced',
default_param_bw=4)
for module in sim.model.modules():
if isinstance(module, QcQuantizeWrapper):
for quantizer in module.input_quantizers + module.output_quantizers:
quantizer.enabled = False
quantizer.enabled = False
for quantizer in sim.model.conv1.input_quantizers + sim.model.conv1.output_quantizers:
self.assertFalse(quantizer.encoding)
self.assertTrue(sim.model.conv1.param_quantizers['weight'])
dataset_size = 100
batch_size = 10
image_size = (3, 32, 32)
data_loader = create_fake_data_loader(dataset_size, batch_size, image_size)
possible_batches = dataset_size // batch_size
def forward_fn(model, inputs):
inputs, _ = inputs
model(inputs)
act_sampler = ActivationSampler(model.conv1, sim.model.conv1, model, sim.model, forward_fn)
quant_inp, orig_out = act_sampler.sample_and_place_all_acts_on_cpu(data_loader)
self.assertEqual(list(quant_inp.shape), [batch_size * possible_batches, 3, 32, 32])
self.assertEqual(list(orig_out.shape), [batch_size * possible_batches, 32, 18, 18]) | [
9,
648,
8755,
1306
] |
def METHOD_NAME(self):
"""Applies the controls. I.e. sets the variables from the controls."""
if not super().METHOD_NAME() or self.lock:
return False
return True | [
231,
2004,
1103
] |
async def METHOD_NAME(self, root: Root) -> None:
url_type = PlatformURIType()
fobj = Path(__file__).parent
ret = await url_type._find_matches(fobj.as_uri() + "/", root)
assert [i.value for i in ret] == [
f.name + "/" if f.is_dir() else f.name for f in fobj.iterdir()
]
assert {i.type for i in ret} == {"uri"}
assert {i.prefix for i in ret} == {fobj.as_uri() + "/"} | [
9,
416,
855,
1190
] |
def METHOD_NAME(
plan: dict[str, Any]) -> tuple[list[str], list[str], list[str]]:
'''
Given a Postgres Query Plan object (parsed from the output of an EXPLAIN
query), returns a tuple with three items:
* A list of tables involved
* A list of remaining queries to parse
* A list of function names involved
'''
table_names: list[str] = []
queries: list[str] = []
functions: list[str] = []
if plan.get('Relation Name'):
table_names.append(plan['Relation Name'])
if 'Function Name' in plan:
if plan['Function Name'].startswith(
'crosstab'):
try:
queries.append(_get_subquery_from_crosstab_call(
plan['Function Call']))
except ValueError:
table_names.append('_unknown_crosstab_sql')
else:
functions.append(plan['Function Name'])
if 'Plans' in plan:
for child_plan in plan['Plans']:
t, q, f = METHOD_NAME(child_plan)
table_names.extend(t)
queries.extend(q)
functions.extend(f)
return table_names, queries, functions | [
214,
539,
145
] |
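A hypothetical EXPLAIN (FORMAT JSON) fragment showing what the recursive walk above would collect; the plan shape and names are illustrative assumptions, not real query output:
```python
plan = {
    "Node Type": "Nested Loop",
    "Plans": [
        {"Node Type": "Seq Scan", "Relation Name": "users"},
        {"Node Type": "Function Scan", "Function Name": "generate_series"},
    ],
}
# Walking this plan with the function above would return:
#   (["users"], [], ["generate_series"])
```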
def METHOD_NAME(self, subject_id, entities=None, check_not_on_or_after=True):
"""Get all the identity information that has been received and
are still valid about the subject.
:param subject_id: The identifier of the subject
:param entities: The identifiers of the entities whose assertions are
interesting. If the list is empty all entities are interesting.
:return: A 2-tuple consisting of the identity information (a
dictionary of attributes and values) and the list of entities
whose information has timed out.
"""
res = {}
oldees = []
if not entities:
for item in self._cache.find({"subject_id": subject_id}):
try:
info = self._get_info(item, check_not_on_or_after)
except TooOld:
oldees.append(item["entity_id"])
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
else:
for entity_id in entities:
try:
info = self.get(subject_id, entity_id, check_not_on_or_after)
except TooOld:
oldees.append(entity_id)
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
return res, oldees | [
19,
2989
] |
def METHOD_NAME(self):
surface = surfaces.RevolutionSurface3D.load_from_file(
"surfaces/objects_revolution_tests/revolutionsurface_linesegment2d_to_3d.json")
linesegment1 = vme.LineSegment2D.load_from_file("surfaces/objects_revolution_tests/linesegment2d_arc3d.json")
arc = surface.linesegment2d_to_3d(linesegment1)[0]
self.assertAlmostEqual(arc.circle.radius, 0.02404221842799788)
linesegment2 = vme.LineSegment2D.load_from_file(
"surfaces/objects_revolution_tests/linesegment2d_rotated_primitive.json")
arc = surface.linesegment2d_to_3d(linesegment2)[0]
self.assertAlmostEqual(arc.circle.radius, 0.022500000035448893)
self.assertAlmostEqual(arc.angle, 0.7195087615152496, 5)
linesegment3 = vme.LineSegment2D.load_from_file(
"surfaces/objects_revolution_tests/linesegment2d_split_primitive.json")
arc = surface.linesegment2d_to_3d(linesegment3)[0]
self.assertAlmostEqual(arc.circle.radius, 0.022500000035448893)
self.assertAlmostEqual(arc.angle, 0.15581712793343738) | [
9,
-1,
24,
1529
] |
def METHOD_NAME(self, userid):
"""
Login as specified user, does not depend on auth backend (hopefully)
This is based on Client.login() with a small hack that does not
require the call to authenticate()
"""
if not 'django.contrib.sessions' in settings.INSTALLED_APPS:
raise AssertionError("Unable to login without django.contrib.sessions in INSTALLED_APPS")
try:
user = User.objects.get(username=userid)
except User.DoesNotExist:
user = User(username=userid, password='')
user.save()
user.backend = "%s.%s" % ("django.contrib.auth.backends",
"ModelBackend")
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
#if self.session:
# request.session = self.session
#else:
request.session = engine.SessionStore()
login(request, user)
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
# Save the session values.
request.session.save() | [
273,
21
] |
def METHOD_NAME() -> Position:
x, y, z = np.random.random(3) * 2 - 1
return Position(x, y, z) | [
236,
195
] |
def METHOD_NAME(self, plugin_name): | [
19,
2793,
1200
] |
def METHOD_NAME(app):
app.add_css_file('css/other.css')
app.is_parallel_allowed('write') | [
102
] |
def METHOD_NAME(self):
self.install()
# change_data_dir runs 'mysql -u root', which assumes there is no mysql password,
# and changing that is too ugly to be worth it:
#self.set_root_password()
self.change_data_dir() | [
57
] |
def METHOD_NAME(
subject,
runs,
path=None,
force_update=False,
update_path=None,
base_url=EEGMI_URL,
verbose=None,
): # noqa: D301
"""Get paths to local copies of EEGBCI dataset files.
This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is
also available at PhysioNet :footcite:`GoldbergerEtAl2000`.
Parameters
----------
subject : int
The subject to use. Can be in the range of 1-109 (inclusive).
runs : int | list of int
The runs to use (see Notes for details).
path : None | path-like
Location of where to look for the EEGBCI data. If ``None``, the environment
variable or config parameter ``MNE_DATASETS_EEGBCI_PATH`` is used. If neither
exists, the ``~/mne_data`` directory is used. If the EEGBCI dataset is not found
under the given path, the data will be automatically downloaded to the specified
folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If ``True``, set ``MNE_DATASETS_EEGBCI_PATH`` in the configuration to the given
path. If ``None``, the user is prompted.
base_url : str
The URL root for the data.
%(verbose)s
Returns
-------
paths : list
List of local data paths of the given type.
Notes
-----
The run numbers correspond to:
========= ===================================
run task
========= ===================================
1 Baseline, eyes open
2 Baseline, eyes closed
3, 7, 11 Motor execution: left vs right hand
4, 8, 12 Motor imagery: left vs right hand
5, 9, 13 Motor execution: hands vs feet
6, 10, 14 Motor imagery: hands vs feet
========= ===================================
For example, one could do::
>>> from mne.datasets import eegbci
>>> eegbci.load_data(1, [6, 10, 14], "~/datasets") # doctest:+SKIP
This would download runs 6, 10, and 14 (hand/foot motor imagery) runs from subject 1
in the EEGBCI dataset to "~/datasets" and prompt the user to store this path in the
config (if it does not already exist).
References
----------
.. footbibliography::
"""
import pooch
t0 = time.time()
if not hasattr(runs, "__iter__"):
runs = [runs]
# get local storage path
config_key = "MNE_DATASETS_EEGBCI_PATH"
folder = "MNE-eegbci-data"
name = "EEGBCI"
path = _get_path(path, config_key, name)
# extract path parts
pattern = r"(?:https?://.*)(files)/(eegmmidb)/(\d+\.\d+\.\d+)/?"
match = re.compile(pattern).match(base_url)
if match is None:
raise ValueError(
"base_url does not match the expected EEGMI folder "
"structure. Please notify MNE-Python developers."
)
base_path = op.join(path, folder, *match.groups())
# create the download manager
fetcher = pooch.create(
path=base_path,
base_url=base_url,
version=None, # data versioning is decoupled from MNE-Python version
registry=None, # registry is loaded from file (below)
retry_if_failed=2, # 2 retries = 3 total attempts
)
# load the checksum registry
registry = files("mne").joinpath("data", "eegbci_checksums.txt")
fetcher.load_registry(registry)
# fetch the file(s)
data_paths = []
sz = 0
for run in runs:
file_part = f"S{subject:03d}/S{subject:03d}R{run:02d}.edf"
destination = Path(base_path, file_part)
data_paths.append(destination)
if destination.exists():
if force_update:
destination.unlink()
else:
continue
if sz == 0: # log once
logger.info("Downloading EEGBCI data")
fetcher.fetch(file_part)
# update path in config if desired
sz += destination.stat().st_size
_do_path_update(path, update_path, config_key, name)
if sz > 0:
_log_time_size(t0, sz)
return data_paths | [
557,
365
] |
def METHOD_NAME(*messages: str) -> t.List[dict]:
"""Create a list of compacted sections blocks"""
return [
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": normalize_message(message),
}
for message in messages[i : i + 2]
],
}
for i in range(0, len(messages), 2)
] | [
-1,
1446,
37
] |
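A small usage sketch of the pairing logic above, assuming (hypothetically) that normalize_message returns its argument unchanged:
```python
messages = ["first", "second", "third"]
blocks = [
    {
        "type": "section",
        "fields": [{"type": "mrkdwn", "text": m} for m in messages[i:i + 2]],
    }
    for i in range(0, len(messages), 2)
]
print(len(blocks), [len(b["fields"]) for b in blocks])  # -> 2 [2, 1]
```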
def METHOD_NAME(self) -> Tuple[slice, str]:
"""Generate a tuple of this fix for deduping."""
return (self.source_slice, self.fixed_raw) | [
3696,
1815
] |
def METHOD_NAME(self):
changed_files = 0
for source, config in self.main_config.get_sources_with_configs():
self.config = config
disabler_finder = RegisterDisablers(self.config.formatting.start_line, self.config.formatting.end_line)
try:
stdin = False
if str(source) == "-":
stdin = True
if self.config.verbose:
click.echo("Loading file from stdin")
source = self.load_from_stdin()
elif self.config.verbose:
click.echo(f"Transforming {source} file")
model = self.get_model(source)
model_path = model.source
disabler_finder.visit(model)
if disabler_finder.file_disabled:
continue
diff, old_model, new_model, model = self.transform_until_stable(model, disabler_finder)
if diff:
changed_files += 1
self.output_diff(model_path, old_model, new_model)
if stdin:
self.print_to_stdout(new_model)
elif diff:
self.save_model(model_path, model)
except DataError:
click.echo(
f"Failed to decode {source}. Default supported encoding by Robot Framework is UTF-8. Skipping file"
)
pass
if not self.config.check or not changed_files:
return 0
return 1 | [
1053,
1537
] |
def METHOD_NAME(metadata_file):
"""Populate dictionary with hash-including filenames from the given file."""
metadata = load_json(tempdir / metadata_file)
for target, target_data in metadata['signed']['targets'].items():
for hash_ in target_data['hashes'].values():
target_path = Path(target)
target_with_hash = 'targets' / target_path.parent / f'{hash_}.{target_path.name}'
hashed_target_files.setdefault(target, []).append(str(target_with_hash)) | [
557,
1030,
3932
] |
def METHOD_NAME():
try:
self_test()
except:
print("sr_http: TEST FAILED")
raise | [
57
] |
def METHOD_NAME():
'''Test the _operator method.'''
meta_arg = OperatorArgMetadata("GH_REAL", "GH_WRITE", "W0", "W1")
metadata = LFRicKernelMetadata(
operates_on="cell_column", meta_args=[meta_arg])
cls = call_method("_operator", meta_arg, metadata)
assert len(cls._info) == 1
# pylint: disable=unsubscriptable-object
assert cls._info[1] == 0
assert cls._index == 2 | [
9,
837
] |
def METHOD_NAME(self, active_config):
"""Create a bot."""
active_config.extend({"webdriver_type": "firefox"})
from dallinger.bots import BotBase
bot = BotBase("http://dallinger.io")
assert isinstance(bot.driver, webdriver.Firefox) | [
9,
1227,
626,
4663
] |
def METHOD_NAME(self, user_id, project_id):
group = f"creds_{project_id}"
key = f"creds_{project_id}_{user_id}"
val = None
if self.rds.hexists(group, key):
val = self.rds.hget(group, key)
if val == "True":
val = True
else:
val = False
return val | [
19,
6904,
596
] |
def METHOD_NAME(self):
"""
Test width and height on <td> tag;
If it works, width: 2pt will be equal to 2.0 and height: 3pt will be equal to 3.0 in the ReportLab table
"""
html = """
<html>
<head>
<style>
td { width: 2pt; height: 3pt }
</style>
</head>
<body>
<table>
<tr>
<td>AAA</td>
<td>BBB</td>
</tr>
<tr>
<td>CCC</td>
<td>DDD</td>
</tr>
</table>
</body>
</html>
"""
context = pisaParser(BytesIO(html.encode('utf-8')), pisaContext(None))
table = context.story[0]
col_widths = table._colWidths
row_heights = table._rowHeights
for width in col_widths:
self.assertEqual(width, 2.0, '<td> width in CSS not equal with output!')
for height in row_heights:
self.assertEqual(height, 3.0, '<td> height in CSS not equal with output!') | [
9,
7653,
2327,
61,
1877
] |
def METHOD_NAME(self): | [
10203,
434
] |
def METHOD_NAME(self):
return "g{0}".format(self.version.up_to(1)) | [
4197,
156
] |
def METHOD_NAME(work: LightningWork, to_trim: typing.List[str]) -> typing.Iterator[None]:
"""Context manager to trim the work object to remove attributes that are not picklable."""
holder = {}
for arg in to_trim:
holder[arg] = getattr(work, arg)
setattr(work, arg, None)
yield
for arg in to_trim:
setattr(work, arg, holder[arg]) | [
2441,
3160
] |
def METHOD_NAME(self, xp, dtype, order):
# from cupy/cupy#4193
a = self._test_ndim_limit(xp, 32, dtype, order)
return a | [
9,
4333,
-1
] |
def METHOD_NAME():
strings_value = ['first', 'second', 'third']
integer_value = 2
model = init_from_tuple(UnitTestModel, ('strings', 'integer'), (strings_value, integer_value))
assert model.strings == ['first', 'second', 'third'] and model.integer == integer_value | [
9,
176,
280,
1815
] |
def METHOD_NAME():
# Create our simulation object.
sim = TestSimulation("globalids", "globalids.sim2")
sim.addargument("-echo")
# Test that we can start and connect to the simulation.
started, connected = TestSimStartAndConnect("globalids00", sim)
# Perform our tests.
if connected:
# Make sure the metadata is right.
TestSimMetaData("globalids01", sim.metadata())
test0(sim)
test1(sim)
# Close down the simulation.
if started:
sim.endsim() | [
57
] |
def METHOD_NAME(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable | [
1572,
667
] |
def METHOD_NAME(self, with_error):
"""Runs and tests server and client in parallel."""
server_flags = ["--file=" + file, "--done_file=" + done_file]
client_flags = ["--file=" + file]
if with_error:
server_flags += ["--with_error"]
client_flags += ["--stop_on_error"]
with scoped_file(file, is_fifo=True), scoped_file(done_file):
with open(done_file, 'w') as f:
f.write("0\n")
# Start client.
client = subprocess.Popen([client_bin] + client_flags)
# Start server.
server = subprocess.Popen([server_bin] + server_flags)
# Join with processes, check return codes.
server_valid_statuses = [0]
if with_error:
# If the C++ binary has not finished by the time the Python
# client exits due to failure, then the C++ binary will fail
# with SIGPIPE.
server_valid_statuses.append(SIGPIPE_STATUS)
self.assertIn(server.wait(), server_valid_statuses)
if not with_error:
# Execute once more.
server = subprocess.Popen([server_bin] + server_flags)
self.assertIn(server.wait(), server_valid_statuses)
# Wait until the client has indicated that the server process
# has run twice. We want to run twice to ensure that looping on
# the client end runs correctly.
wait_for_done_count(2)
client.send_signal(signal.SIGINT)
client_status = client.wait()
self.assertEqual(client_status, int(with_error)) | [
22,
163,
61,
340
] |
def METHOD_NAME():
"""Run command."""
from mne.commands.utils import get_optparser, _add_verbose_flag
parser = get_optparser(__file__)
parser.add_option(
"-s", "--subject", dest="subject", help="Subject name (required)", default=None
)
parser.add_option(
"--model",
dest="model",
help="Output file name. Use a name <dir>/<name>-bem.fif",
default=None,
type="string",
)
parser.add_option(
"--ico",
dest="ico",
help="The surface ico downsampling to use, e.g. "
" 5=20484, 4=5120, 3=1280. If None, no subsampling"
" is applied.",
default=None,
type="int",
)
parser.add_option(
"--brainc",
dest="brainc",
help="Defines the brain compartment conductivity. "
"The default value is 0.3 S/m.",
default=0.3,
type="float",
)
parser.add_option(
"--skullc",
dest="skullc",
help="Defines the skull compartment conductivity. "
"The default value is 0.006 S/m.",
default=None,
type="float",
)
parser.add_option(
"--scalpc",
dest="scalpc",
help="Defines the scalp compartment conductivity. "
"The default value is 0.3 S/m.",
default=None,
type="float",
)
parser.add_option(
"--homog",
dest="homog",
help="Use a single compartment model (brain only) "
"instead a three layer one (scalp, skull, and "
" brain). If this flag is specified, the options "
"--skullc and --scalpc are irrelevant.",
default=None,
action="store_true",
)
parser.add_option(
"-d",
"--subjects-dir",
dest="subjects_dir",
help="Subjects directory",
default=None,
)
_add_verbose_flag(parser)
options, args = parser.parse_args()
if options.subject is None:
parser.print_help()
sys.exit(1)
subject = options.subject
fname = options.model
subjects_dir = options.subjects_dir
ico = options.ico
brainc = options.brainc
skullc = options.skullc
scalpc = options.scalpc
homog = True if options.homog is not None else False
verbose = True if options.verbose is not None else False
# Parse conductivity option
if homog is True:
if skullc is not None:
warn(
"Trying to set the skull conductivity for a single layer "
"model. To use a 3 layer model, do not set the --homog flag."
)
if scalpc is not None:
warn(
"Trying to set the scalp conductivity for a single layer "
"model. To use a 3 layer model, do not set the --homog flag."
)
# Single layer
conductivity = [brainc]
else:
if skullc is None:
skullc = 0.006
if scalpc is None:
scalpc = 0.3
conductivity = [brainc, skullc, scalpc]
# Create source space
bem_model = mne.make_bem_model(
subject,
ico=ico,
conductivity=conductivity,
subjects_dir=subjects_dir,
verbose=verbose,
)
# Generate filename
if fname is None:
n_faces = list(str(len(surface["tris"])) for surface in bem_model)
fname = subject + "-" + "-".join(n_faces) + "-bem.fif"
else:
if not (fname.endswith("-bem.fif") or fname.endswith("_bem.fif")):
fname = fname + "-bem.fif"
# Save to subject's directory
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = subjects_dir / subject / "bem" / fname
# Save source space to file
mne.write_bem_surfaces(fname, bem_model)
# Compute the solution
sol_fname = os.path.splitext(str(fname))[0] + "-sol.fif"
bem_sol = mne.make_bem_solution(bem_model, verbose=verbose)
mne.write_bem_solution(sol_fname, bem_sol) | [
22
] |
def METHOD_NAME(self):
app = self.make_app({"multiauth.policies": "basicauth"})
self.headers["Authorization"] = "Carrier"
app.get(self.plural_url, headers=self.headers, status=401)
self.headers["Authorization"] = "Carrier pigeon"
app.get(self.plural_url, headers=self.headers, status=401) | [
9,
4632,
472,
4422,
217,
46,
2433
] |
def METHOD_NAME(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create dumb built distributions "
"on platform %s" % os.name)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build')) | [
977,
1881
] |
def METHOD_NAME(cobbler_api: CobblerAPI) -> replicate.Replicate:
"""
Creates a blank replicate object for a single test.
"""
return replicate.Replicate(cobbler_api) | [
3275,
122
] |
def METHOD_NAME(self, width):
height = self.doLayout(QRect(0, 0, width, 0), True)
return height | [
1877,
43,
2327
] |
def METHOD_NAME(self) -> None:
known_hosts_file = self._identity_dir + "/known_hosts"
with open(known_hosts_file, "w") as f:
f.write(self._known_hosts)
os.chmod(known_hosts_file, 0o600)
self.known_hosts_file = known_hosts_file | [
176,
3478,
3175,
171
] |
def METHOD_NAME(etherscan):
count = 0
def mock_requests_get(_url, timeout): # pylint: disable=unused-argument
nonlocal count
if count == 0:
response = (
'{"status":"0","message":"NOTOK",'
'"result":"Max rate limit reached, please use API Key for higher rate limit"}'
)
else:
response = '{"jsonrpc":"2.0","id":1,"result":"0x1337"}'
count += 1
return MockResponse(200, response)
return patch.object(etherscan.session, 'get', wraps=mock_requests_get) | [
1575,
-1
] |
def METHOD_NAME(v0, v1):
for i in range(0, 3):
assert v0[i].value() == v1[i]
638,
926
] |
def METHOD_NAME(self):
'''
*Histrogram generated by calling an optimized C++ function that
calculates all the profile at once.*
'''
bm.sparse_histogram(self.Beam.dt, self.n_macroparticles_array,
self.cut_left_array, self.cut_right_array,
self.bunch_indexes, self.n_slices_bucket) | [
6069,
2629
] |
def METHOD_NAME(staged_logs):
for logger, level, obj, args, kwargs in staged_logs:
with logger.as_raw():
logger.log(level, obj, *args, **kwargs) | [
1579
] |