text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME():
assert rxname.id(_hash("a")) == _hash("a")
assert rxname.id(_hash("total")) == _hash("total")
assert rxname.id(long(_hash("a"))) == _hash("a")
assert rxname.id(long(_hash("total"))) == _hash("total")
assert rxname.id(str(_hash("a"))) == _hash("a")
assert rxname.id(str(_hash("total"))) == _hash("total") | [
9,
147,
308
] |
def METHOD_NAME(self, output, outputChannelIndex):
outputMap = self.getOutputMap(output)
try:
outputChannel = outputMap[str(outputChannelIndex)]
inputID = outputChannel['input']
inputChannelIndex = outputChannel['channel_index']
except KeyError:
res = self.test.FAIL("Could not find 'input' or 'channel_index' field in /active "
"for Output {} channel {}".format(output.id, outputChannelIndex))
raise NMOSTestException(res)
return inputID, inputChannelIndex | [
19,
362,
147,
307,
724
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(self):
pl_item = self.create_price_list_item()
wiz_form = self.get_wiz_form()
price = pl_item.fixed_price
self.carrier_pricelist.invoice_policy = "pricelist"
wiz_form.carrier_id = self.carrier_pricelist
rec = wiz_form.save()
rec.button_confirm()
so = self.sale_normal_delivery_charges
so.action_confirm()
self.assertEqual(so.carrier_id, wiz_form.carrier_id)
link = wiz_form.carrier_id.get_tracking_link(so.picking_ids)
self.assertFalse(link)
result = wiz_form.carrier_id.send_shipping(so.picking_ids)
expecting = [{"exact_price": price, "tracking_number": False}]
self.assertEqual(result, expecting) | [
9,
8035,
353,
850
] |
def METHOD_NAME(self):
"""Test bad HTML tag."""
self.check_markdown(
R'''
/// html | 3tag
Some *content*
///
''',
R'''
<p>/// html | 3tag
Some <em>content</em>
///</p>
''',
True
) | [
9,
1068,
82
] |
def METHOD_NAME(sess_batch):
if sess_batch:
try:
print("inserting sessions...")
insert_batch(db, sess_batch, table=sessions_table_name, level='sessions')
print("inserted sessions succesfully")
except TypeError as e:
print("Type conversion error")
print(repr(e))
except ValueError as e:
print("Message value could not be processed or inserted correctly")
print(repr(e))
except Exception as e:
print(repr(e)) | [
3142,
240,
408
] |
def METHOD_NAME(*args, **kwargs):
deprecated_warning(type_='Function',
name=func.__name__,
reason=reason,
replacement=replacement,
since_version=since_version,
only_once=only_once,
skip_backtrace_count=skip_backtrace_count)
return func(*args, **kwargs) | [
291
] |
def METHOD_NAME(self, results: List[Dict], action, batched_records):
records_failed = 0
records_processed = 0
for i, result in enumerate(results):
if result.get("success"):
records_processed += 1
else:
records_failed += 1
self.logger.error(
f"Failed {action} to to {self.stream_name}. Error: {result.get('errors')}.\
Record {batched_records[i]}"
)
self.logger.info(
f"{action} {records_processed}/{len(results)} to {self.stream_name}."
)
if records_failed > 0 and not self.config.get("allow_failures"):
raise SalesforceApiError(
f"{records_failed} error(s) in {action} batch commit to {self.stream_name}."
) | [
187,
2277,
1571
] |
def METHOD_NAME(self,
name: str,
description: int
) -> List[Dict]:
'''
Create a new role
Args:
name (str):
The name of role.
description (str):
The description of role.
Returns:
list[dict]:
The created role object.
Examples:
>>> tad.roles.create(
... name='Admin',
... description="all privileges"
... )
'''
payload = [
self._schema.dump(self._schema.load({
'name': name,
'description': description
}))
]
return self._schema.load(self._post(json=payload), many=True) | [
129
] |
def METHOD_NAME(test):
"""Env cleanup_scenario1"""
pass | [
950,
7851
] |
def METHOD_NAME(self, startTimes, endTimes):
nSlices = len(startTimes)
if nSlices != len(endTimes):
print("Test cannot be computed")
exit(1)
input = list(range(max(endTimes)))
# expected values:
expected = []
orderedTimes = []
for i in range(nSlices):
time = (startTimes[i], endTimes[i])
orderedTimes.append(time)
orderedTimes = sorted(orderedTimes, key=lambda x: x[0])
for i in range(nSlices):
expected.append(input[orderedTimes[i][0]:orderedTimes[i][1]])
gen = VectorInput(input)
slicer = Slicer(startTimes=startTimes,
endTimes=endTimes,
timeUnits="samples")
pool = Pool()
gen.data >> slicer.audio
slicer.frame >> (pool, "data")
run(gen)
result = pool['data']
self.assertEqual(len(result), len(expected))
for i in range(nSlices):
self.assertEqualVector(result[i], expected[i]) | [
55
] |
def METHOD_NAME(self) -> Optional['outputs.CodelessUiConnectorConfigPropertiesResponse']:
"""
Config to describe the instructions blade
"""
return pulumi.get(self, "connector_ui_config") | [
4059,
882,
200
] |
def METHOD_NAME(self, subnode_xpath_expr):
"""
Finds a subnode in the host node and then retrieves a value from it.
@return An attribute value
"""
sub_node = self.node.find(subnode_xpath_expr)
if sub_node is not None:
return sub_node.text
return None | [
19,
526,
280,
12057
] |
def METHOD_NAME(self):
self.check_args('/NumberField/?discriminant=1988-2014', '401') # factor of one of the discriminants | [
9,
1070,
6540
] |
def METHOD_NAME(self) -> None:
attributions_batch_size = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad_sq",
n_samples_batch_size=3,
)
attributions = self._saliency_base_assert(
*_get_multiargs_basic_config_large(),
nt_type="smoothgrad_sq",
)
assertTensorTuplesAlmostEqual(self, attributions_batch_size, attributions) | [
9,
11431,
9,
756,
10117,
8767,
293
] |
def METHOD_NAME():
path_prefix = os.path.dirname(os.path.abspath(__file__))
behavior_spec, pair_infos, total_expected = load_demonstration(
path_prefix + "/test.demo"
)
assert np.sum(behavior_spec.observation_specs[0].shape) == 8
assert len(pair_infos) == total_expected
_, demo_buffer = demo_to_buffer(path_prefix + "/test.demo", 1, BEHAVIOR_SPEC)
assert (
len(demo_buffer[BufferKey.CONTINUOUS_ACTION]) == total_expected - 1
or len(demo_buffer[BufferKey.DISCRETE_ACTION]) == total_expected - 1
) | [
9,
557,
2660
] |
def METHOD_NAME(a):
"""second derivative of the log gamma function""" | [
8042,
3991,
5920
] |
def METHOD_NAME(
name: str,
new_class: type,
clsdict: Optional[Dict[str, Any]] = None,
warn_category: Type[Warning] = ScrapyDeprecationWarning,
warn_once: bool = True,
old_class_path: Optional[str] = None,
new_class_path: Optional[str] = None,
subclass_warn_message: str = "{cls} inherits from deprecated class {old}, please inherit from {new}.",
instance_warn_message: str = "{cls} is deprecated, instantiate {new} instead.",
) -> type:
"""
Return a "deprecated" class that causes its subclasses to issue a warning.
Subclasses of ``new_class`` are considered subclasses of this class.
It also warns when the deprecated class is instantiated, but does not when
its subclasses are instantiated.
It can be used to rename a base class in a library. For example, if we
have
class OldName(SomeClass):
# ...
and we want to rename it to NewName, we can do the following::
class NewName(SomeClass):
# ...
OldName = create_deprecated_class('OldName', NewName)
Then, if user class inherits from OldName, warning is issued. Also, if
some code uses ``issubclass(sub, OldName)`` or ``isinstance(sub(), OldName)``
checks they'll still return True if sub is a subclass of NewName instead of
OldName.
"""
# https://github.com/python/mypy/issues/4177
class DeprecatedClass(new_class.__class__): # type: ignore[misc, name-defined]
deprecated_class: Optional[type] = None
warned_on_subclass: bool = False
def __new__(
metacls, name: str, bases: Tuple[type, ...], clsdict_: Dict[str, Any]
) -> type:
cls = super().__new__(metacls, name, bases, clsdict_)
if metacls.deprecated_class is None:
metacls.deprecated_class = cls
return cls
def __init__(cls, name: str, bases: Tuple[type, ...], clsdict_: Dict[str, Any]):
meta = cls.__class__
old = meta.deprecated_class
if old in bases and not (warn_once and meta.warned_on_subclass):
meta.warned_on_subclass = True
msg = subclass_warn_message.format(
cls=_clspath(cls),
old=_clspath(old, old_class_path),
new=_clspath(new_class, new_class_path),
)
if warn_once:
msg += " (warning only on first subclass, there may be others)"
warnings.warn(msg, warn_category, stacklevel=2)
super().__init__(name, bases, clsdict_)
# see https://www.python.org/dev/peps/pep-3119/#overloading-isinstance-and-issubclass
# and https://docs.python.org/reference/datamodel.html#customizing-instance-and-subclass-checks
# for implementation details
def __instancecheck__(cls, inst: Any) -> bool:
return any(cls.__subclasscheck__(c) for c in (type(inst), inst.__class__))
def __subclasscheck__(cls, sub: type) -> bool:
if cls is not DeprecatedClass.deprecated_class:
# we should do the magic only if second `issubclass` argument
# is the deprecated class itself - subclasses of the
# deprecated class should not use custom `__subclasscheck__`
# method.
return super().__subclasscheck__(sub)
if not inspect.isclass(sub):
raise TypeError("issubclass() arg 1 must be a class")
mro = getattr(sub, "__mro__", ())
return any(c in {cls, new_class} for c in mro)
def __call__(cls, *args: Any, **kwargs: Any) -> Any:
old = DeprecatedClass.deprecated_class
if cls is old:
msg = instance_warn_message.format(
cls=_clspath(cls, old_class_path),
new=_clspath(new_class, new_class_path),
)
warnings.warn(msg, warn_category, stacklevel=2)
return super().__call__(*args, **kwargs)
deprecated_cls = DeprecatedClass(name, (new_class,), clsdict or {})
try:
frm = inspect.stack()[1]
parent_module = inspect.getmodule(frm[0])
if parent_module is not None:
deprecated_cls.__module__ = parent_module.__name__
except Exception as e:
# Sometimes inspect.stack() fails (e.g. when the first import of
# deprecated class is in jinja2 template). __module__ attribute is not
# important enough to raise an exception as users may be unable
# to fix inspect.stack() errors.
warnings.warn(f"Error detecting parent module: {e!r}")
return deprecated_cls | [
129,
2497,
2
] |
def METHOD_NAME():
"""Register ``napari.types`` objects with magicgui."""
import sys
from concurrent.futures import Future
from magicgui import register_type
from napari.utils import _magicgui as _mgui
for type_ in (LayerDataTuple, List[LayerDataTuple]):
register_type(
type_,
return_callback=_mgui.add_layer_data_tuples_to_viewer,
)
if sys.version_info >= (3, 9):
future_type = Future[type_] # type: ignore [valid-type]
register_type(future_type, return_callback=_mgui.add_future_data)
for data_type in get_args(_LayerData):
register_type(
data_type,
choices=_mgui.get_layers_data,
return_callback=_mgui.add_layer_data_to_viewer,
)
if sys.version_info >= (3, 9):
register_type(
Future[data_type], # type: ignore [valid-type]
choices=_mgui.get_layers_data,
return_callback=partial(
_mgui.add_future_data, _from_tuple=False
),
)
register_type(
Optional[data_type], # type: ignore [call-overload]
choices=_mgui.get_layers_data,
return_callback=_mgui.add_layer_data_to_viewer,
)
if sys.version_info >= (3, 9):
register_type(
Future[Optional[data_type]], # type: ignore [valid-type]
choices=_mgui.get_layers_data,
return_callback=partial(
_mgui.add_future_data, _from_tuple=False
),
) | [
372,
119,
41,
-1
] |
def METHOD_NAME(saas_config):
return (
pydash.get(saas_config, "salesforce.identity_email")
or secrets["identity_email"]
) | [
601,
2989,
487
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-?', '--help', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit'))
parser.add_argument("trx_generator", type=str, help="The path to the transaction generator binary to excecute")
parser.add_argument("chain_id", type=str, help="Chain ID")
parser.add_argument("last_irreversible_block_id", type=str, help="Last irreversible block ID")
parser.add_argument("contract_owner_account", type=str, help="Cluster contract owner account name")
parser.add_argument("accounts", type=str, help="Comma separated list of account names")
parser.add_argument("priv_keys", type=str, help="Comma separated list of private keys")
parser.add_argument("trx_gen_duration", type=str, help="How long to run transaction generators")
parser.add_argument("target_tps", type=int, help="Goal transactions per second")
parser.add_argument("tps_limit_per_generator", type=int, help="Maximum amount of transactions per second a single generator can have.", default=4000)
parser.add_argument("log_dir", type=str, help="Path to directory where trx logs should be written.")
parser.add_argument("abi_file", type=str, help="The path to the contract abi file to use for the supplied transaction action data")
parser.add_argument("actions_data", type=str, help="The json actions data file or json actions data description string to use")
parser.add_argument("actions_auths", type=str, help="The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.")
parser.add_argument("connection_pair_list", type=str, help="Comma separated list of endpoint:port combinations to send transactions to", default="localhost:9876")
parser.add_argument("endpoint_mode", type=str, help="Endpoint mode (\"p2p\", \"http\"). \
In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \
In \"http\" mode transactions will be directed to the http endpoint on an api node.",
choices=["p2p", "http"], default="p2p")
parser.add_argument("api_endpoint", type=str, help="The api endpoint to use to submit transactions. (Only used with http api nodes currently as p2p transactions are streamed)",
default="/v1/chain/send_transaction2")
args = parser.parse_args()
return args | [
214,
335
] |
def METHOD_NAME(self):
f = self.make_frame()[0]
btn = Checkbutton(f, anchor="w",
variable=self.recvar,
text="Recurse down subdirectories")
btn.pack(side="top", fill="both")
btn.select() | [
129,
2395,
1409
] |
def METHOD_NAME(lockfd, data):
if lockfd is None:
return
try:
_lockfd = os.dup(lockfd)
with os.fdopen(_lockfd, "w+") as ofile:
ofile.seek(0)
try:
lock_data = json.load(ofile)
except ValueError:
return
lock_data["progress"] = data
ofile.truncate(0)
ofile.seek(0)
json.dump(lock_data, ofile)
os.fsync(_lockfd)
os.close(_lockfd)
except Exception:
return | [
3064
] |
async def METHOD_NAME(
partition_sda1_service: PartitionService,
partition_sdb1_service: PartitionService,
dbus_session_bus: MessageBus,
):
"""Test partition table info."""
sda1 = UDisks2Partition("/org/freedesktop/UDisks2/block_devices/sda1")
sdb1 = UDisks2Partition(
"/org/freedesktop/UDisks2/block_devices/sdb1", sync_properties=False
)
assert sda1.name_ is None
assert sda1.size is None
assert sdb1.name_ is None
assert sdb1.size is None
await sda1.connect(dbus_session_bus)
await sdb1.connect(dbus_session_bus)
assert sda1.name_ == "hassos-data-external"
assert sda1.size == 250058113024
assert sdb1.name_ == ""
assert sdb1.size == 67108864
partition_sda1_service.emit_properties_changed({"Name": "test"})
await partition_sda1_service.ping()
assert sda1.name_ == "test"
partition_sda1_service.emit_properties_changed({}, ["Name"])
await partition_sda1_service.ping()
await partition_sda1_service.ping()
assert sda1.name_ == "hassos-data-external"
# Prop changes should not sync for this one
partition_sdb1_service.emit_properties_changed({"Name": "test"})
await partition_sdb1_service.ping()
assert sdb1.name_ == "" | [
9,
2312,
410,
100
] |
def METHOD_NAME(x, add_quantization_nodes):
def _Quantize(x, r):
if add_quantization_nodes:
x = gen_array_ops.fake_quant_with_min_max_vars(x, -r, r)
return x
x = _Quantize(x, 10.0)
x = x + 5
x = _Quantize(x, 15.0)
x = x - 5
x = _Quantize(x, 10.0)
x = x * 0.1
x = _Quantize(x, 1.0)
w = constant_op.constant(np.ones((8, 1)), dtype=dtypes.float32)
x = math_ops.matmul(x, w)
x = _Quantize(x, 10.0)
return array_ops.identity(x, name="output_0") | [
303,
667
] |
def METHOD_NAME(self, dtype, sample_rate, num_channels):
"""`sox_io_backend.info` is torchscript-able and returns the same result"""
audio_path = self.get_temp_path(f"{dtype}_{sample_rate}_{num_channels}.wav")
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=1 * sample_rate)
save_wav(audio_path, data, sample_rate)
ts_info_func = torch_script(py_info_func)
py_info = py_info_func(audio_path)
ts_info = ts_info_func(audio_path)
assert py_info.sample_rate == ts_info.sample_rate
assert py_info.num_frames == ts_info.num_frames
assert py_info.num_channels == ts_info.num_channels | [
9,
100,
4097
] |
def METHOD_NAME():
_DECIMAL1 + 1.0
_DECIMAL1 - 1.0
_DECIMAL1 * 1.0
_DECIMAL1 / 1.0 | [
3397,
1002,
41,
3751
] |
def METHOD_NAME(self):
self.drop_image_with_multipoint_swipe(
["Drag Image1", "Drop Box1"], 1, False)
self.drop_image_with_multipoint_swipe(
["Drag Image2", "Drop Box1", "Drop Box2"], 1, False)
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image1", "Drop Image")
assert image_source == image_source_drop_zone
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image2", "Drop")
assert image_source == image_source_drop_zone | [
9,
107,
9747,
41,
9749,
9747
] |
def METHOD_NAME(small_scenario, west_to_south_east_simulations: Iterable[int]):
ids = west_to_south_east_simulations[0:2]
schedules = requests.get(f"{API_URL}train_schedule/results/?timetable_id={small_scenario.timetable}").json()
old_len = len(schedules)
r = requests.delete(f"{API_URL}train_schedule/", json={"ids": ids})
if r.status_code // 100 != 2:
raise RuntimeError(f"Schedule error {r.status_code}: {r.content}, payload={json.dumps(ids)}")
schedules = requests.get(f"{API_URL}train_schedule/results/?timetable_id={small_scenario.timetable}").json()
assert len(schedules) == old_len - 2 | [
9,
58,
2278,
34
] |
def METHOD_NAME():
env = {
"dse_ldif_simple": DseLdifSimple(context_wrap(DSE_LDIF_DOCTEST)),
}
failed, total = doctest.testmod(dse_ldif_simple, globs=env)
assert failed == 0 | [
9,
366,
2794
] |
def METHOD_NAME(cls):
try:
cls.mock.stop()
except RuntimeError:
pass | [
1843,
2
] |
def METHOD_NAME(self, items): | [
238,
2277
] |
def METHOD_NAME(
cube: cli.inputcube,
mask: cli.inputcube,
weights: cli.inputcube = None,
*,
coord_for_masking,
neighbourhood_shape="square",
radii: cli.comma_separated_list,
lead_times: cli.comma_separated_list = None,
area_sum=False,
):
"""Runs neighbourhooding processing iterating over a coordinate by mask.
Apply the requested neighbourhood method via the
ApplyNeighbourhoodProcessingWithMask plugin to a file with one diagnostic
dataset in combination with a cube containing one or more masks.
The mask dataset may have an extra dimension compared to the input
diagnostic. In this case, the user specifies the name of the extra
coordinate and this coordinate is iterated over so each mask is applied
to separate slices over the input cube. These intermediate masked datasets
are then concatenated, resulting in a dataset that has been processed
using multiple masks and has gained an extra dimension from the masking.
If weights are given the masking dimension that we gain will be collapsed
using a weighted average.
Args:
cube (iris.cube.Cube):
Cube to be processed.
mask (iris.cube.Cube):
Cube to act as a mask.
weights (iris.cube.Cube, Optional):
Cube containing the weights which are used for collapsing the
dimension gained through masking. (Optional).
coord_for_masking (str):
String matching the name of the coordinate that will be used
for masking.
neighbourhood_shape (str):
Name of the neighbourhood method to use.
Options: "circular", "square".
Default: "square".
radii (list of float):
The radius or a list of radii in metres of the neighbourhood to
apply.
If it is a list, it must be the same length as lead_times, which
defines at which lead time to use which nbhood radius. The radius
will be interpolated for intermediate lead times.
lead_times (list of int):
The lead times in hours that correspond to the radii to be used.
If lead_times are set, radii must be a list the same length as
lead_times. Lead times must be given as integer values.
area_sum (bool):
Return sum rather than fraction over the neighbourhood area.
Returns:
iris.cube.Cube:
A cube after being fully processed.
"""
from improver.nbhood import radius_by_lead_time
from improver.nbhood.use_nbhood import ApplyNeighbourhoodProcessingWithAMask
radius_or_radii, lead_times = radius_by_lead_time(radii, lead_times)
result = ApplyNeighbourhoodProcessingWithAMask(
coord_for_masking,
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
collapse_weights=weights,
sum_only=area_sum,
)(cube, mask)
return result | [
356
] |
def METHOD_NAME(request):
response = render(request, "home/offline.html")
return response | [
8024,
1179
] |
def METHOD_NAME():
"""Try importing Pyspark or display warn message in streamlit"""
try:
import pyspark
from pyspark.sql import SparkSession
except:
print("You need Pyspark installed to run NLU. Run <pip install pyspark==3.0.2>")
try:
import streamlit as st
st.error(
"You need Pyspark, Sklearn, Pyplot, Pandas, Numpy installed to run this app. Run <pip install pyspark==3.0.2 sklearn pyplot numpy pandas>")
except:
return False
return False
return True | [
1365,
512,
9409,
623,
2564
] |
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
User in DataBoxEdge Resource
"""
return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(self, input_shape): | [
56
] |
def METHOD_NAME(name, package_registry, version=None):
"""
Internal function used to install the module
called 'name', using the passed 'package_registry'
to find the package that contains the package
that contains this module
"""
conda = _find_conda()
# ensure that we have the root package name
try:
package = name.split(".")[0]
except Exception:
package = name
if package in package_registry:
package = package_registry[name]
import subprocess
try:
if version is not None:
try:
_v = float(version)
version = "==%s" % version
except Exception:
pass
package = "'%s%s'" % (package, version)
print(
"\nTrying to install %s from package %s using %s...\n"
% (name, package, conda)
)
args = [conda, "install", package, "-y"]
result = subprocess.run(args)
if result.returncode == 0:
# installed ok
return
except Exception:
pass
print(
"\nWARNING: Unable to install '%s' from package '%s'\n"
% (name, package)
) | [
428,
360
] |
def METHOD_NAME():
async def main():
result = []
async def purge(items, attributes):
result.append(dict(items))
async with PoolManager("test", 3,
on_pool_purge=purge,
pass_pool_as_dict=True,
replace_item_on_append=False
) as m:
await m.append(1, key="a")
await m.append(2, key="b")
await m.append(3, key="b")
await m.append(4, key="a")
await m.append(5)
await m.append(5)
await asyncio.sleep(1)
return result
result = asyncio.run(main())
assert result == [{'a': [1], 'b': [2, 3]}, {'a': [4], 'test': [5, 5]}] | [
9,
427,
846,
1567,
1768,
604,
59
] |
def METHOD_NAME(sem_seg_logits):
"""
For each location of the prediction `sem_seg_logits` we estimate uncertainty as the
difference between top first and top second predicted logits.
Args:
sem_seg_logits (Tensor): A tensor of shape (N, C, ...), where N is the minibatch size and
C is the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (N, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
top2_scores = torch.topk(sem_seg_logits, k=2, dim=1)[0]
return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) | [
1593,
15703
] |
def METHOD_NAME(section):
"""Returns the list of segment areas within the section."""
return [mm.segment_area(seg) for seg in iter_segments(section)] | [
4373,
7462
] |
def METHOD_NAME(self):
# Save opt & sch states
self.opt_state = deepcopy(self.opt.state_dict())
self.sch_state = deepcopy(self.sch.state_dict())
# Cleanup opt, sch & models
self.sch = None
self.opt = None
self.model = None
self.logger = None
self.abort_signal = None
# Cleanup GPU cache
torch.cuda.empty_cache() | [
950
] |
def METHOD_NAME(
log_level: str,
is_json_logging: bool,
package_path: str,
command: str,
profiles_dir: str,
profile_name: Optional[str] = None,
command_args: Sequence[str] = None,
package_vars: StrAny = None
) -> Union[Sequence[DBTNodeResult], dbt_results.ExecutionResult]:
# initialize dbt logging, returns global parameters to dbt command
dbt_global_args = initialize_dbt_logging(log_level, is_json_logging)
return run_dbt_command(package_path, command, profiles_dir, profile_name, dbt_global_args, command_args, package_vars) | [
176,
663,
61,
22,
6128,
462
] |
def METHOD_NAME(self) -> discord.Embed:
embed = DefaultEmbed(
text_map.get(753, self.lang),
text_map.get(754, self.lang),
)
return embed | [
370,
596,
347
] |
def METHOD_NAME(self) -> 'outputs.ComponentVersionResponse':
"""
[Required] Additional attributes of the entity.
"""
return pulumi.get(self, "component_version_properties") | [
1007,
281,
748
] |
def METHOD_NAME():
parser = cli_args_parser()
args = parser.parse_args(
["passthrough", "input.sdoc", "--output-file", "output.sdoc"]
)
assert args._get_kwargs() == [
("command", "passthrough"),
("input_file", "input.sdoc"),
("output_file", "output.sdoc"),
] | [
9,
5504,
1509,
5166
] |
def METHOD_NAME(op, iOutput): | [
837,
838,
365,
151
] |
def METHOD_NAME(self):
mock_session = mock.Mock()
mock_response = mock.Mock()
mock_response.content = '"applesKey": "1234",'.encode("utf-8")
mock_session.get.return_value = mock_response
with self.assertRaises(RaceCardError):
self.race_card.login(mock_session)
assert self.race_card.app_key is None
mock_session.get.assert_called_with(self.race_card.login_url)
assert mock_session.get.call_count == 1 | [
9,
273,
168
] |
def METHOD_NAME(name):
return re.sub(r"[\W_]+", "-", name).strip("-") | [
1356,
156
] |
def METHOD_NAME(mocker):
api = mocker.Mock()
api.api.ads_insights_throttle = MyFacebookAdsApi.Throttle(0, 0)
api.api.new_batch.return_value = mocker.MagicMock(spec=FacebookAdsApiBatch)
return api | [
58,
1964
] |
def METHOD_NAME(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
first, second = match.group(1), match.group(8)
first = RE_NUMBER.sub(replace_number, first)
second = RE_NUMBER.sub(replace_number, second)
result = f"{first}到{second}"
return result | [
369,
661
] |
def METHOD_NAME(self):
self.failUnlessEqual(2, self.audio.info.channels) | [
9,
860
] |
def METHOD_NAME(tty):
if tty.startswith('/dev/ttyS'):
return 'COM' + str(int(tty['/dev/ttyS'.__len__():]) + 1)
elif tty.startswith('COM'):
return tty
return None | [
9522,
24,
4430
] |
def METHOD_NAME(self, cur_labels: labels_view) -> labels_view:
"""
Modify labels
Parameters
----------
cur_labels : labels_view
Current labels. The coord can modify them as necessary.
Returns
-------
out : dict
Modified labels. Same object as the input.
"""
return cur_labels | [
415
] |
def METHOD_NAME(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
X = values[0]
y = values[1]
if y <= 0:
return [None, None]
else:
# DX = 2X/y, Dy = -||X||^2_2/y^2
if self.args[0].is_complex():
Dy = -(np.square(X.real) + np.square(X.imag)).sum()/np.square(y)
else:
Dy = -np.square(X).sum()/np.square(y)
Dy = sp.csc_matrix(Dy)
DX = 2.0*X/y
DX = np.reshape(DX, (self.args[0].size, 1))
DX = scipy.sparse.csc_matrix(DX)
return [DX, Dy] | [
140
] |
def METHOD_NAME(self, method, *args, **kwdargs):
"""General method for synchronously calling into the GUI.
This waits until the method has completed before returning.
"""
my_id = threading.get_ident()
if my_id == self.gui_thread_id:
return method(*args, **kwdargs)
else:
future = self.gui_do(method, *args, **kwdargs)
return future.wait() | [
2139,
128
] |
def METHOD_NAME(create_event, dummy_room, start_dt, end_dt, expected_params):
start_dt = pytz.utc.localize(start_dt)
end_dt = pytz.utc.localize(end_dt)
event = create_event(start_dt=start_dt, end_dt=end_dt, room=dummy_room)
params = get_booking_params_for_event(event)
assert params == {
'type': 'same_times',
'params': dict({
'link_id': event.id,
'link_type': 'event',
'text': f'#{dummy_room.id}',
}, **expected_params)
} | [
9,
19,
15081,
434,
43,
417,
1101
] |
def METHOD_NAME(value, nan_strategy, metric_class):
"""Test correct errors are raised."""
metric = metric_class(nan_strategy=nan_strategy)
if nan_strategy == "error":
with pytest.raises(RuntimeError, match="Encounted `nan` values in tensor"):
metric(value.clone())
elif nan_strategy == "warn":
with pytest.warns(UserWarning, match="Encounted `nan` values in tensor"):
metric(value.clone()) | [
9,
4082,
168
] |
def METHOD_NAME(self, pc, viewer=None):
super(GTMGPC, self).METHOD_NAME(pc, viewer)
if hasattr(self, "pc"):
viewer.printfASCII("PC using Gopalakrishnan and Tan algorithm\n")
self.pc.METHOD_NAME(viewer) | [
1179
] |
def METHOD_NAME():
return vyos_provider_spec | [
19,
2275,
15893
] |
def METHOD_NAME(self, param_name): | [
2569,
107
] |
def METHOD_NAME(self):
return getattr(self, "settings_build", self.settings) | [
817,
56
] |
def METHOD_NAME(self):
"""
SQL: test more complex search queries
"""
res1 = self.getUserList(self.y, {"userid": "> 2"})
assert len(res1) == 2
assert set(s["userid"] for s in res1) == set((3, 4))
res2 = self.getUserList(self.y, {"userid": " <= 3 "})
assert len(res2) == 3
res3 = self.getUserList(self.y, {"userid": ">77"})
assert res3 == [] | [
9,
1621,
2587,
1070
] |
def METHOD_NAME(img_spec):
"""Observe sync percentage till image goes to up+replying"""
sync_percent = 0
while mirror2.mirror_status("image", img_spec, "state") == "up+syncing":
curr_sync_percent = int(
mirror2.mirror_status("image", img_spec, "description")[-3:-1]
)
if curr_sync_percent >= sync_percent:
sync_percent = curr_sync_percent
continue
log.info(
f"img_spec current sync percent:{curr_sync_percent} is greater than or eaqual to previous sync_percent"
)
if curr_sync_percent < sync_percent:
log.err("Syncing seemed to be restarted, test case failed")
return 1
time.sleep(2)
return 0 | [
1162,
2785
] |
def METHOD_NAME(self):
gradient = self.create_gradient()
self.set_stops(gradient)
with self.assertTraitChanges(gradient, "updated", count=1):
gradient.stops[0].updated = True | [
9,
6105,
1768,
3758,
3758
] |
def METHOD_NAME(self):
from .connection import Listener
assert self._listener is None
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t | [
447
] |
def METHOD_NAME(self):
x = np.array([5., -3, -.5], np.float32)
y = np.array([2, 1, .5], np.float32)
assert_equal(blas._test_isamax(x), 1)
assert_allclose(blas._test_sasum(x), 8.5, 5)
assert_allclose(blas._test_sdot(x, y), 6.75, 5)
assert_allclose(blas._test_snrm2(x), 5.85234975815, 5)
assert_allclose(blas._test_sasum(x[::2]), 5.5, 5)
assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75, 5)
assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297, 5) | [
9,
1819,
335
] |
def METHOD_NAME():
# Probably test this during the evaluation phase. In SQL, "fusable"
# table operations will be combined together into a single select
# statement
#
# see ibis #71 for more on this
t = ibis.table(
[
("a", "int8"),
("b", "int16"),
("c", "int32"),
("d", "int64"),
("e", "float32"),
("f", "float64"),
("g", "string"),
("h", "boolean"),
],
"foo",
)
proj = t["a", "b", "c"]
# Rewrite a little more aggressively here
expr1 = proj[t.a > 0]
# at one point these yielded different results
filtered = t[t.a > 0]
expr2 = filtered[t.a, t.b, t.c]
expr3 = filtered.select(["a", "b", "c"])
return expr1, expr2, expr3 | [
1958,
2151,
527
] |
def METHOD_NAME(qobj, max_size, qobj_id, seed):
# Check if we don't need to split
if max_size is None or not max_size > 0:
return qobj
num_jobs = ceil(len(qobj.experiments) / max_size)
if num_jobs == 1:
return qobj
qobjs = []
# Check for parameterizations
params = getattr(qobj.config, "parameterizations", None)
for i in range(num_jobs):
sub_id = qobj_id or str(uuid.uuid4())
indices = slice(i * max_size, (i + 1) * max_size)
sub_exp = qobj.experiments[indices]
sub_config = qobj.config
if params is not None:
sub_config.parameterizations = params[indices]
sub_config = copy.copy(qobj.config)
if seed > 0:
if sub_config is qobj.config:
sub_config = copy.copy(qobj.config)
qobjs.append(type(qobj)(sub_id, sub_config, sub_exp, qobj.header))
return qobjs | [
265,
16203
] |
def METHOD_NAME(self):
if self.datadog:
datadog.api.Metric.send(self.metrics)
self.metrics = [] | [
353,
75
] |
def METHOD_NAME(db):
"""
Require an admin user.
Provides a user with the admin flag set to True.
"""
from girder.models.user import User
u = User().createUser(email='[email protected]', login='admin', firstName='Admin',
lastName='Admin', password='password', METHOD_NAME=True)
yield u | [
2870
] |
def METHOD_NAME(self, mask):
module = DropMaskLinear(HIDDEN_DIM, HIDDEN_DIM)
inputs = (torch.rand(HIDDEN_DIM), None)
inputs[0].requires_grad = True
_test_activation_checkpoint(module, *inputs) | [
9,
737,
718,
98
] |
def METHOD_NAME(self, element_tree, val_name):
"""
Returns true if either all or none of the enum items
contain a given value
"""
has_value = 0
total = 0
for enum_item in element_tree.iter():
if enum_item.tag == "item":
total += 1
if val_name in enum_item.keys():
has_value += 1
is_consistent = True
if not has_value in (0, total):
is_consistent = False
return is_consistent | [
137,
309,
1311
] |
def METHOD_NAME(cs: _pytest.python.CallSpec2):
for key, val in enumerate(cs._idlist):
if key not in seen:
return False
if val not in seen[key]:
return False
return True | [
220,
8615,
3959
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self, match_node_name):
"""Quantize AvgPool/MaxPool.
Dequantize + MaxPool + QuantizeV2
Dequantize + MaxPool3D + QuantizeV2
Dequantize + AvgPool + QuantizeV2
"""
skip_node_name = match_node_name[2:]
matched_node = self.node_name_mapping[match_node_name[1]]
control_inputs, normal_inputs = self._get_node_input(matched_node.node.name)
_, q_inputs = self._get_node_input(normal_inputs[0])
all_input_names = q_inputs
skip_node_name.append(normal_inputs[0])
for _, node in enumerate(self.input_graph.node):
if node.name in skip_node_name:
self.logger.debug("skip node {}".format(node.name))
elif node.name == match_node_name[1]:
self.logger.debug("Matched node {} with input {}.".format(node.name, node.input))
quantized_op_name = node.name + "_eightbit_quantized"
quantized_op_type = "Quantized" + node.op
if node.op == "MaxPool3D":
quantized_op_type = "_Quantized" + node.op
quantized_pool_node = helper.create_node(quantized_op_type, quantized_op_name, all_input_names)
self._add_pool_function(node, quantized_pool_node)
self.add_output_graph_node(quantized_pool_node)
deq_type = dtypes.quint8 if self._find_relu_node(node) else dtypes.qint8
self._intel_cpu_add_dequantize_result_node(
quantized_op_name, node.name, dtype=deq_type, performance_only=self.performance_only
)
else:
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
self.add_output_graph_node(new_node) | [
231,
1567,
9293
] |
def METHOD_NAME(self):
'''Wait until all jobs have finished'''
while self.jobs:
pid, code = os.METHOD_NAME()
self.job_done(pid, code)
if self.queue:
self.start_job(self.queue.pop(0)) | [
618
] |
def METHOD_NAME(self) -> str:
"""
The creation date of scope map.
"""
return pulumi.get(self, "creation_date") | [
581,
153
] |
def METHOD_NAME(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsExtractSubFc
"""
return super().METHOD_NAME | [
1461
] |
def METHOD_NAME(op, node, **kwargs):
"""Register a JAX implementation for `ARange`.
`jax.numpy.arange` requires concrete values for its arguments. Here we check
that the arguments are constant, and raise otherwise.
TODO: Handle other situations in which values are concrete (shape of an array).
"""
arange_args = node.inputs
constant_args = []
for arg in arange_args:
if arg.owner and isinstance(arg.owner.op, Shape_i):
constant_args.append(None)
elif isinstance(arg, Constant):
constant_args.append(arg.value)
else:
# TODO: This might be failing without need (e.g., if arg = shape(x)[-1] + 1)!
raise NotImplementedError(ARANGE_CONCRETE_VALUE_ERROR)
constant_start, constant_stop, constant_step = constant_args
def arange(start, stop, step):
start = start if constant_start is None else constant_start
stop = stop if constant_stop is None else constant_stop
step = step if constant_step is None else constant_step
return jnp.arange(start, stop, step, dtype=op.dtype)
return arange | [
757,
2607,
385,
661
] |
def METHOD_NAME(dtype):
q = get_queue_or_skip()
skip_if_dtype_not_supported(dtype, q)
X = dpt.ones((10, 10), dtype=dtype, sycl_queue=q)
dt_kind = X.dtype.kind
if dt_kind in "ui":
X *= int(1)
elif dt_kind == "f":
X *= float(1)
elif dt_kind == "c":
X *= complex(1)
elif dt_kind == "b":
X *= bool(1) | [
9,
6685,
5920,
440,
1997
] |
def METHOD_NAME(
cls,
command: Union[List[str], str],
**kwargs: Optional[Union[bool, Iterable[str], str]],
) -> List[str]:
"""Generate command to be executed and log it.
Args:
command: Command to run.
args: Additional args to pass to the command.
Returns:
The full command to be passed into a subprocess.
"""
cmd = [cls.EXECUTABLE, *(command if isinstance(command, list) else [command])]
cmd.extend(cls._generate_command_handle_kwargs(**kwargs))
LOGGER.debug("generated command: %s", cls.list2cmdline(cmd))
return cmd | [
567,
462
] |
def METHOD_NAME(value):
CORE.vscode = True
actual = config_validation.valid_name(value)
assert actual == value
CORE.vscode = False
with pytest.raises(Invalid):
actual = config_validation.valid_name(value) | [
9,
1205,
156,
4282,
1205
] |
def METHOD_NAME(self, inputs, weights, training: tf.constant):
"""
Apply rb sparsity mask to given weights.
:param inputs: Target weights to sparsify.
:param weights: Operation weights contains
`mask` and param `trainable`.
:param training: True if operation called in training mode
else False
"""
true_fn = lambda: apply_mask(inputs, self._calc_rb_binary_mask(weights))
false_fn = lambda: apply_mask(inputs, binary_mask(weights["mask"]))
return tf_internals.smart_cond(
training,
true_fn=lambda: tf_internals.smart_cond(weights["trainable"], true_fn=true_fn, false_fn=false_fn),
false_fn=false_fn,
) | [
128
] |
def METHOD_NAME(self):
return "DELETE" | [
103
] |
def METHOD_NAME(ref_list, saveplot=False, **options):
"""Runs test for a list of refinements and computes error convergence rate"""
polynomial_degree = options.get('polynomial_degree', 1)
l2_err = []
for r in ref_list:
l2_err.append(run(r, **options))
x_log = numpy.log10(numpy.array(ref_list, dtype=float)**-1)
y_log = numpy.log10(numpy.array(l2_err))
setup_name = 'h-diffusion'
def check_convergence(x_log, y_log, expected_slope, field_str, saveplot):
slope_rtol = 0.20
slope, intercept, r_value, p_value, std_err = stats.linregress(x_log, y_log)
if saveplot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
# plot points
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = 0.05*(x_max - x_min)
npoints = 50
xx = numpy.linspace(x_min - offset, x_max + offset, npoints)
yy = intercept + slope*xx
# plot line
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[2*int(npoints/3)], yy[2*int(npoints/3)], '{:4.2f}'.format(slope),
verticalalignment='top',
horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(' '.join([setup_name, field_str, 'degree={:}'.format(polynomial_degree)]))
ref_str = 'ref-' + '-'.join([str(r) for r in ref_list])
degree_str = 'o{:}'.format(polynomial_degree)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, degree_str])
imgfile += '.png'
imgdir = create_directory('plots')
imgfile = os.path.join(imgdir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if expected_slope is not None:
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert slope > expected_slope*(1 - slope_rtol), err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, polynomial_degree+1, 'salt', saveplot) | [
22,
2240
] |
def METHOD_NAME(self):
return self.cache.lookup(data_path=os.path.join(self.category_name, producer.__name__),
plot=self, producer=lambda: producer(self)) | [
291
] |
def METHOD_NAME():
from . import signals as handlers
signals.post_save.connect(handlers.try_to_close_or_open_user_stories_when_edit_us_status,
sender=apps.get_model("projects", "UserStoryStatus"),
dispatch_uid="try_to_close_or_open_user_stories_when_edit_us_status")
signals.post_save.connect(handlers.create_swimlane_user_story_statuses_on_userstory_status_post_save,
sender=apps.get_model("projects", "UserStoryStatus"),
dispatch_uid="create_swimlane_user_story_statuses_on_userstory_status_post_save") | [
707,
4335,
452,
7958
] |
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
) | [
69,
1072
] |
def METHOD_NAME(args=None):
log=get_logger()
if not isinstance(args, argparse.Namespace):
args = parse(args)
bad_fibers = parse_fibers(args.fibers)
neighboring_fiber = None
h = pyfits.open(args.infile)
nfibers = h["XTRACE"].data.shape[0]
# coordinates of the middle of the traces
x = h["XTRACE"].data[:,0]
y = h["YTRACE"].data[:,0]
# compute offsets
dx_in = []
dx_out = []
for b in range(20) :
dx_in.append(np.median(x[b*25+1:(b+1)*25]-x[b*25:(b+1)*25-1]))
if b>0 :
dx_out.append(x[b*25]-x[b*25-1])
dx_in = np.median(dx_in)
dx_out = np.median(dx_out)
log.info("dx_in={:.3f}".format(dx_in))
log.info("dx_out={:.3f}".format(dx_out))
for bad_fiber in bad_fibers :
# find a neigboring fiber
neighboring_fiber = None
x_of_bad=x[bad_fiber]
bundle = bad_fiber//25
if bad_fiber%25 < 12 : # fiber is to the left of the bundle, so choose a neighbor to the right
step = 1
else :
step = -1
neighboring_fiber = None
neighboring_fiber_right = bad_fiber+1
while neighboring_fiber_right in bad_fibers :
neighboring_fiber_right += 1
bundle_right=neighboring_fiber_right//25
neighboring_fiber_left = bad_fiber-1
while neighboring_fiber_left in bad_fibers :
neighboring_fiber_left -= 1
bundle_left=neighboring_fiber_left//25
# one or the other is off range
if neighboring_fiber_left<0 :
if neighboring_fiber_right < nfibers :
neighboring_fiber = neighboring_fiber_right
else :
log.error("sorry, didn't find a good neighbor for fiber {}".format(bad_fiber))
continue
else :
if neighboring_fiber_right >= nfibers :
neighboring_fiber = neighboring_fiber_left
# both are in the same bundle
if neighboring_fiber is None and bundle_right==bundle and bundle_left==bundle :
if bad_fiber%25 < 12 : # fiber is to the left of the bundle, so choose a neighbor to the right
neighboring_fiber = neighboring_fiber_right
else :
neighboring_fiber = neighboring_fiber_left
# pick the one that is in the same bundle
if neighboring_fiber is None :
if bundle_right==bundle :
neighboring_fiber = neighboring_fiber_right
elif bundle_left==bundle :
neighboring_fiber = neighboring_fiber_left
else :
# none is in the same bundle, pick the nearest
if np.abs(bad_fiber-neighboring_fiber_right) < np.abs(bad_fiber-neighboring_fiber_left) :
neighboring_fiber = neighboring_fiber_right
else :
neighboring_fiber = neighboring_fiber_left
# set default values
if "PSF" in h :
h["PSF"].data["COEFF"][:,bad_fiber,:] = h["PSF"].data["COEFF"][:,neighboring_fiber,:]
h["XTRACE"].data[bad_fiber] = h["XTRACE"].data[neighboring_fiber]
h["YTRACE"].data[bad_fiber] = h["YTRACE"].data[neighboring_fiber]
# adjust x value
delta_out = bad_fiber//25 - neighboring_fiber//25
delta_in = (bad_fiber - neighboring_fiber)-delta_out
x_of_bad = x[neighboring_fiber] + delta_in*dx_in + delta_out*dx_out
h["XTRACE"].data[bad_fiber,0] = x_of_bad
# adjust y value
ii=(np.abs(x-x_of_bad)<dx_in*10)&(x!=x_of_bad)
if np.sum(ii)>2 :
pol = np.poly1d(np.polyfit(x[ii],y[ii],2))
y_of_bad = pol(x_of_bad)
h["YTRACE"].data[bad_fiber,0] = y_of_bad
log.info("Fixed fiber {} using fiber {} as reference.".format(bad_fiber,neighboring_fiber))
h.writeto(args.outfile,overwrite=True)
log.info("wrote {}".format(args.outfile))
return 0 | [
57
] |
def METHOD_NAME(self):
boxcox = BoxCox(lmbda=0.3)
boxcox.fit(self.multi_series)
assert boxcox._fitted_params == [[0.3, 0.3]]
boxcox = BoxCox(lmbda=[0.3, 0.4])
boxcox.fit(self.multi_series)
assert boxcox._fitted_params == [[0.3, 0.4]]
with pytest.raises(ValueError):
boxcox = BoxCox(lmbda=[0.2, 0.4, 0.5])
boxcox.fit(self.multi_series)
boxcox = BoxCox(optim_method="mle")
boxcox.fit(self.multi_series)
lmbda1 = boxcox._fitted_params[0].tolist()
boxcox = BoxCox(optim_method="pearsonr")
boxcox.fit(self.multi_series)
lmbda2 = boxcox._fitted_params[0].tolist()
assert lmbda1 != lmbda2 | [
9,
-1,
1778
] |
def METHOD_NAME(self):
return "Default" | [
19,
235,
52,
854
] |
def METHOD_NAME(self, additional_headers: None | dict[str, Any] = None) -> bool:
import_header = {"Accept": Consts.mediaTypeImportPreview}
return super().METHOD_NAME(additional_headers=import_header) | [
86
] |
def METHOD_NAME(self):
return True | [
576,
4491,
10818
] |
def METHOD_NAME() -> Dict:
run_id = get_flow_run_id()
if run_id is None:
return {}
flow_run = from_sync.call_soon_in_loop_thread(
create_call(_get_flow_run, run_id)
).result()
return flow_run.parameters or {} | [
19,
386
] |
def METHOD_NAME(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=":=") | [
4702,
16654
] |
async def METHOD_NAME(
geoip_service,
fake_geoip_path
):
db = mock.Mock()
geoip_service.db = db
geoip_service.file_path = fake_geoip_path
geoip_service.load_db()
assert geoip_service.db is db
geoip_service.db.close.assert_not_called() | [
9,
557,
1267,
130,
1460,
69,
1423
] |
def METHOD_NAME(hosts):
results = concurrent.tmap(
scan_scsi_host,
hosts,
max_workers=MAX_WORKERS,
name="scan")
if not all(res.value for res in results):
raise Error("Scanning hosts failed") | [
793,
3175
] |
def METHOD_NAME(tracer):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# it should handle the case where a Task is not available
# Note: the @pytest.mark.asyncio is missing to simulate an execution
# without a Task
task = asyncio_current_task()
# the task is not available
assert task is None
ctx = tracer.current_trace_context()
assert ctx is None | [
9,
198,
758,
98
] |
def METHOD_NAME() -> None:
r"""
A.one --> B.one --> C.one | D.one --> E.one
| | <--
| |
\/ |
C.two |
"""
a_one = DummyRelationAttribute(fq_attr_name="A.one")
b_one = DummyRelationAttribute(fq_attr_name="B.one")
c_one = DummyRelationAttribute(fq_attr_name="C.one")
c_two = DummyRelationAttribute(fq_attr_name="C.two")
d_one = DummyRelationAttribute(fq_attr_name="D_one")
e_one = DummyRelationAttribute(fq_attr_name="E.one")
graph = RelationPrecedenceGraph()
graph.add_precedence_rule(first_attribute=a_one, then_attribute=b_one)
graph.add_precedence_rule(first_attribute=b_one, then_attribute=c_one)
graph.add_precedence_rule(first_attribute=b_one, then_attribute=c_two)
graph.add_precedence_rule(first_attribute=d_one, then_attribute=e_one)
graph.add_precedence_rule(first_attribute=e_one, then_attribute=d_one)
with pytest.raises(CycleInRelationPrecedencePolicyError, match="A cycle exists in the relation precedence policy"):
graph.get_freeze_order() | [
9,
44,
8412,
303,
3351,
16727,
2922
] |