text (string, 15–7.82k chars) | ids (sequence, 1–7 items)
---|---
def METHOD_NAME(
caplog: _pytest.logging.LogCaptureFixture,
evaluator: Callable[[Iterable[Any]], bool] = any,
not_present: bool = False,
**tests: Any
) -> None:
"""
Assert that the log contains a record - a logged message - with the given properties. These are
specified as keyword parameters: :py:class:`logging.LogRecord` properties are the allowed names,
and the parameter values are the expected values.
.. code-block:: python
assert_log(message='everything went well', levelno=logging.INFO)
assert_log(message='things broke down', levelno=logging.ERROR)
assert_log(message=MATCH('user .+ logged in'), levelno=logging.INFO)
:param caplog: Pytest's `caplog` fixture.
:param evaluator: a callable reducing a given list of booleans into a single boolean. It is
used to evaluate whether the search for a matching record was successful: each record is
tested, and results of these per-record tests are passed to `evaluator` for the final
decision.
"""
# We are given field_name=expected_value pairs, but we also want to be open to other binary
# operators, like "field_name matches pattern". To protect the actual matching from aspects of
# different possible operators, we will convert the "tests" into basic building blocks: a field
# getter, a field name, a callable accepting two parameters, and the given (expected) value. With
# these, we can reduce the matching to function calls without worrying about which functions we
# work with.
operators: List[Tuple[Callable[[Any, str], Any], str, Callable[[Any, Any], bool], Any]] = []
for field_name, expected_value in tests.items():
if field_name.startswith('details_'):
field_name = field_name.replace('details_', '')
def field_getter(record, name): return record.details.get(name, None)
else:
def field_getter(record, name): return getattr(record, name)
# Special case: if the expected value is a pattern matching instance, it represents a
# regular expression. We don't modify the field name and "expected" value, but the
# function will be a custom lambda calling proper `re` method.
if isinstance(expected_value, PatternMatching):
operators.append((
field_getter,
field_name,
lambda a, b: a.method(b) is not None,
expected_value
))
continue
# Python's `operator` package offers operators - `==` or `!=` - in the form of functions,
# which is exactly what we need here, so we don't have to build our own
# `lambda a, b: a == b`. We might use more than just `eq` in the future, so let's start
# with `operator` right away.
operators.append((
field_getter,
field_name,
operator.eq,
expected_value
))
# Given a logging record, apply all field/operator/value triplets, and make sure all match the
# actual record properties.
def _cmp(record: logging.LogRecord) -> bool:
return all(op(expected_value, field_getter(record, field_name))
for field_getter, field_name, op, expected_value in operators)
# Final step: apply our "make sure field/operator/value triplets match given record" to each
# and every record, and reduce per-record results into a single answer. By default, `any` is
# used, which means that any record matching all field/operator/value triplets yields the final
# "yes, such a record exists".
outcome = evaluator([_cmp(record) for record in caplog.records])
def _report(message: str) -> None:
formatted_fields = [
f' {field} == {value}'
for field, value in tests.items()
]
for record in caplog.records:
for field_getter, field_name, op, expected_value in operators:
print(f'field={field_name}',
f'current=>>>{field_getter(record, field_name)}<<<',
f'expected=>>>{expected_value}<<<',
f'comparison={op(expected_value, field_getter(record, field_name))}')
pytest.fail(f""" | [
638,
390
] |
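The snippet above assumes a `PatternMatching`/`MATCH` helper that wraps a regular expression; a minimal, hypothetical stand-in (illustrative names, not the real fixture API) could look like this:

import re

class PatternMatching:
    """Hypothetical regex wrapper; `method` is the callable applied to the actual field value."""
    def __init__(self, pattern: str) -> None:
        self.pattern = re.compile(pattern)
        self.method = self.pattern.match

def MATCH(pattern: str) -> PatternMatching:
    return PatternMatching(pattern)

# With this stand-in, `a.method(b) is not None` above behaves as a regex test:
assert MATCH('user .+ logged in').method('user alice logged in') is not None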
def METHOD_NAME(self, record: logging.LogRecord):
if record.name == "prefect.flow_runs":
style = self._flow_run_style
elif record.name == "prefect.task_runs":
style = self._task_run_style
else:
style = self._style
return style.format(record) | [
275,
277
] |
def METHOD_NAME(client):
print("COMMIT")
global test_count, test_fails
test_count = test_count + 1
rc = client.commit()
if rc != 0 and rc != -47:
test_fails = test_fails + 1
print("COMMIT TEST FAILED: ", client.error_string(rc)) | [
9,
1160
] |
def METHOD_NAME(self):
"""
Check the lambda for the time format. Raise an exception if the value is wrong
"""
LAMBDA = lambda: 0 # noqa: E731
if self.timeformat_lambda is None or (
isinstance(self.timeformat_lambda, type(LAMBDA)) and self.timeformat_lambda.__name__ == LAMBDA.__name__
):
return True
else:
ValueError("Expected [None] or [lambda function] for argument [timeformat_func]") | [
250,
11428,
1778
] |
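A quick, self-contained illustration of the lambda-detection trick used above (comparing against a throwaway lambda's type and `__name__`); the function names below are made up for the example.

LAMBDA = lambda: 0  # noqa: E731

def named_function():
    return 0

# Both are plain function objects, but only a lambda reports __name__ == "<lambda>".
assert isinstance(named_function, type(LAMBDA))
assert LAMBDA.__name__ == "<lambda>"
assert named_function.__name__ != LAMBDA.__name__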
def METHOD_NAME(self):
skip = ["close", "idle", "noidle"]
cmds = [c for c in self.conn.list_commands() if c not in skip]
for cmd in cmds:
self._cmd(cmd.encode("ascii") + b"\n") | [
9,
2458
] |
def METHOD_NAME(aws_meta):
exists = TransientBounceEmail.objects.filter(
email=aws_meta.email,
timestamp=aws_meta.timestamp,
).exists()
if not exists:
TransientBounceEmail.objects.create(
email=aws_meta.email,
timestamp=aws_meta.timestamp,
headers=aws_meta.headers,
) | [
148,
2603,
1738
] |
def METHOD_NAME(self, tms: m.Bit) -> m.Bits[4]:
self.yield_state = capture
yield self.yield_state.prev()
while True:
if tms == 0:
while True:
self.yield_state = shift
yield self.yield_state.prev()
if tms != 0:
break
self.yield_state = exit_1
yield self.yield_state.prev()
if tms == 0:
while True:
self.yield_state = pause
yield self.yield_state.prev()
if tms != 0:
break
self.yield_state = exit_2
yield self.yield_state.prev()
if tms != 0:
break
else:
break
self.yield_state = update
yield self.yield_state.prev()
return tms | [
793
] |
def METHOD_NAME(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the buy signal for the given dataframe
:param dataframe: DataFrame
:param metadata: Additional information, like the currently traded pair
:return: DataFrame with buy column
"""
dataframe.loc[
(
(dataframe['rsi'] < 35) &
(dataframe['fastd'] < 35) &
(dataframe['adx'] > 30) &
(dataframe['plus_di'] > 0.5)
) |
(
(dataframe['adx'] > 65) &
(dataframe['plus_di'] > 0.5)
),
'buy'] = 1
return dataframe | [
3914,
2007,
8165
] |
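The buy column above is filled with the standard pandas boolean-mask assignment; a toy, self-contained version of that idiom (made-up values, not real indicator data) is sketched below.

import pandas as pd

df = pd.DataFrame({'rsi': [30, 50], 'fastd': [20, 40], 'adx': [35, 70], 'plus_di': [0.6, 0.7]})
df.loc[
    ((df['rsi'] < 35) & (df['fastd'] < 35) & (df['adx'] > 30) & (df['plus_di'] > 0.5))
    | ((df['adx'] > 65) & (df['plus_di'] > 0.5)),
    'buy'] = 1
print(df)  # both toy rows satisfy one of the two conditions, so buy == 1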
def METHOD_NAME(self, name):
"""Get a RegularSurface() instance by name, or return None if name not found"""
logger.info("Asking for a surface with name %s", name)
for surf in self._surfaces:
if surf.name == name:
return surf
return None | [
19,
881
] |
def METHOD_NAME(self, description):
description.append_text('item is not nan') | [
2517,
24
] |
def METHOD_NAME(gt_box_list, fg_anchor_list):
px, py, pw, ph = (
fg_anchor_list[:, 0],
fg_anchor_list[:, 1],
fg_anchor_list[:, 2],
fg_anchor_list[:, 3],
)
gx, gy, gw, gh = (
gt_box_list[:, 0],
gt_box_list[:, 1],
gt_box_list[:, 2],
gt_box_list[:, 3],
)
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = np.log(gw / pw)
dh = np.log(gh / ph)
return np.stack((dx, dy, dw, dh), axis=1) | [
421,
1364
] |
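A quick numeric check of the box-delta encoding above, using one made-up anchor and ground-truth box in (cx, cy, w, h) form.

import numpy as np

fg_anchor_list = np.array([[10.0, 10.0, 4.0, 4.0]])  # cx, cy, w, h
gt_box_list = np.array([[12.0, 11.0, 8.0, 2.0]])

dx = (gt_box_list[:, 0] - fg_anchor_list[:, 0]) / fg_anchor_list[:, 2]  # 0.5
dy = (gt_box_list[:, 1] - fg_anchor_list[:, 1]) / fg_anchor_list[:, 3]  # 0.25
dw = np.log(gt_box_list[:, 2] / fg_anchor_list[:, 2])                   # ln(2)
dh = np.log(gt_box_list[:, 3] / fg_anchor_list[:, 3])                   # ln(0.5)
print(np.stack((dx, dy, dw, dh), axis=1))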
def METHOD_NAME(self, username, token, url):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.jira_cloud(username, token, user_id,url)
VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
RETURNING username, token, url;""",
{"user_id": self._user_id, "username": username,
"token": token, "url": url})
)
w = helper.dict_to_camel_case(cur.fetchone())
return self.get() | [
238
] |
def METHOD_NAME(resource_group_name: Optional[pulumi.Input[str]] = None,
schema_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGlobalSchemaResult]:
"""
Gets the details of the Schema specified by its identifier.
Azure REST API version: 2022-08-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str schema_id: Schema id identifier. Must be unique in the current API Management service instance.
:param str service_name: The name of the API Management service.
"""
... | [
19,
285,
135,
146
] |
def METHOD_NAME(self):
var_opt_list = [
("+pthreads", "GRIB_THREADS"),
("+openmp", "GRIB_OMP_THREADS"),
("+fortran", "FORTRAN"),
("+examples", "EXAMPLES"),
("+test", "TESTS"),
]
args = [
"-DENABLE_%s=%s" % (opt, "ON" if var in self.spec else "OFF")
for var, opt in var_opt_list
]
if "+netcdf" in self.spec:
args.extend(
[
"-DENABLE_NETCDF=ON",
# Prevent overriding by environment variable
# HDF5_ROOT.
"-DHDF5_ROOT=" + self.spec["hdf5"].prefix,
# Prevent possible overriding by environment variables
# NETCDF_ROOT, NETCDF_DIR, and NETCDF_PATH.
"-DNETCDF_PATH=" + self.spec["netcdf-c"].prefix,
]
)
else:
args.append("-DENABLE_NETCDF=OFF")
if self.spec.variants["jp2k"].value == "none":
args.append("-DENABLE_JPG=OFF")
else:
args.append("-DENABLE_JPG=ON")
if self.spec.variants["jp2k"].value == "openjpeg":
args.append("-DOPENJPEG_PATH=" + self.spec["openjpeg"].prefix)
if "+png" in self.spec:
args.extend(["-DENABLE_PNG=ON", "-DZLIB_ROOT=" + self.spec["zlib-api"].prefix])
else:
args.append("-DENABLE_PNG=OFF")
if "+aec" in self.spec:
args.extend(
[
"-DENABLE_AEC=ON",
# Prevent overriding by environment variables
# AEC_DIR and AEC_PATH.
"-DAEC_DIR=" + self.spec["libaec"].prefix,
]
)
else:
args.append("-DENABLE_AEC=OFF")
return args | [
334,
335
] |
def METHOD_NAME(pdict, tmp_path):
dtype, shape, dim, metadata = (
pdict["dtype"],
pdict["shape"],
pdict["dim"],
pdict["metadata"],
)
s = _create_signal(shape=shape, dim=dim, dtype=dtype, metadata=metadata)
filename = _get_filename(s, metadata)
s.save(tmp_path / filename)
s_just_saved = hs.load(tmp_path / filename)
s_ref = hs.load(TEST_DATA_PATH / filename)
try:
for stest in (s_just_saved, s_ref):
npt.assert_array_equal(s.data, stest.data)
assert s.data.dtype == stest.data.dtype
assert s.axes_manager.signal_shape == stest.axes_manager.signal_shape
assert (
s.axes_manager.navigation_shape == stest.axes_manager.navigation_shape
)
assert s.metadata.General.title == stest.metadata.General.title
mdpaths = ("Signal.signal_type",)
if s.metadata.Signal.signal_type == "EELS" and metadata:
mdpaths += (
"Acquisition_instrument.TEM.convergence_angle",
"Acquisition_instrument.TEM.beam_energy",
"Acquisition_instrument.TEM.Detector.EELS.collection_angle",
)
elif "EDS" in s.metadata.Signal.signal_type and metadata:
mdpaths += (
"Acquisition_instrument.TEM.Stage.tilt_alpha",
"Acquisition_instrument.TEM.Detector.EDS.azimuth_angle",
"Acquisition_instrument.TEM.Detector.EDS.elevation_angle",
"Acquisition_instrument.TEM.Detector." "EDS.energy_resolution_MnKa",
"Acquisition_instrument.TEM.Detector.EDS.live_time",
)
if metadata:
mdpaths = (
"General.date",
"General.time",
"General.title",
)
for mdpath in mdpaths:
assert s.metadata.get_item(mdpath) == stest.metadata.get_item(mdpath)
for saxis, taxis in zip(s.axes_manager._axes, stest.axes_manager._axes):
taxis.convert_to_units()
assert saxis.scale == taxis.scale
assert saxis.offset == taxis.offset
assert saxis.units == taxis.units
assert saxis.name == taxis.name
except Exception:
raise
finally:
# As of v0.8.5 the data in the ripple files are loaded as memmaps
# instead of array. In Windows the garbage collector doesn't close
# the file before attempting to delete it making the test fail.
# The following lines simply make sure that the memmap is closed.
# del s_just_saved.data
# del s_ref.data
del s_just_saved
del s_ref
gc.collect() | [
9,
365
] |
def METHOD_NAME(
self,
url,
verb,
retry_id,
query_parts=None,
x_amz_headers=None,
headers=None,
payload=None,
unsigned_payload=False,
ignore_content_encoding=False,
):
# when called under _initiate_multipart_upload and _upload_chunk, add content-encoding to header
if verb is not None and verb in ("POST", "PUT") and headers is not None:
headers["Content-Encoding"] = "gzip"
return orig_send_req(
self,
url,
verb,
retry_id,
query_parts,
x_amz_headers,
headers,
payload,
unsigned_payload,
ignore_content_encoding,
) | [
248,
353,
377
] |
async def METHOD_NAME(interaction: discord.Interaction, current: str) -> list[app_commands.Choice[str]]:
assert interaction.guild is not None
rules = await interaction.guild.fetch_automod_rules()
if current:
choices = []
for rule in rules:
if rule.trigger.type is AutoModRuleTriggerType.keyword and current in rule.name:
choices.append(app_commands.Choice(name=rule.name, value=str(rule.id)))
return choices
else:
return [app_commands.Choice(name=rule.name, value=str(rule.id)) for rule in rules
if rule.trigger.type is AutoModRuleTriggerType.keyword] | [
1634,
8854
] |
def METHOD_NAME(followed_user_id, follower_user_id):
return DBSession. \
query(FollowedUser). \
filter(FollowedUser.followed_user_id == followed_user_id). \
filter(FollowedUser.follower_user_id == follower_user_id). \
first() | [
19,
17614,
2043
] |
def METHOD_NAME(fh):
"""
Read the file handler containing a dep makefile (simple makefile only
containing dependencies) and returns an iterator of the corresponding Rules
it contains. Ignores removal guard rules.
"""
rule = ""
for line in fh.readlines():
line = six.ensure_text(line)
assert not line.startswith("\t")
line = line.strip()
if line.endswith("\\"):
rule += line[:-1]
else:
rule += line
split_rule = _depfilesplitter.split(rule, 1)
if len(split_rule) > 1 and split_rule[1].strip():
yield Rule(split_rule[0].strip().split()).add_dependencies(
split_rule[1].strip().split()
)
rule = ""
if rule:
raise Exception("Makefile finishes with a backslash. Expected more input.") | [
203,
161,
5484
] |
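The heart of the parser above is the backslash-continuation folding; a standalone sketch of just that part is shown below, with a simple partition(':') standing in for the assumed _depfilesplitter and plain tuples standing in for Rule objects.

def iter_dep_rules(lines):
    rule = ""
    for line in lines:
        line = line.strip()
        if line.endswith("\\"):
            rule += line[:-1]
            continue
        rule += line
        target, _, deps = rule.partition(":")
        if deps.strip():
            yield target.strip().split(), deps.strip().split()
        rule = ""
    if rule:
        raise Exception("Makefile finishes with a backslash. Expected more input.")

print(list(iter_dep_rules(["foo.o: foo.c \\", "    foo.h"])))
# [(['foo.o'], ['foo.c', 'foo.h'])]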
def METHOD_NAME(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
ret = func(*args, **kwargs)
except RuntimeError as e:
if support.verbose:
print(func.__name__, 'failed:', e)
else:
if support.verbose:
print(func.__name__, 'returned')
return ret
return wrapper | [
4463,
717
] |
def METHOD_NAME(builder, subscriberAuthrole):
return SubscriberReceivedAddSubscriberAuthrole(builder, subscriberAuthrole) | [
238,
2042,
12896
] |
def METHOD_NAME(ctx: ModuleContext, value: Type, module: ModuleLike):
driver = value.trace()
if driver is None:
raise UnconnectedPortException(value)
_visit_driver(ctx, value, driver, module) | [
716,
362
] |
def METHOD_NAME():
return ImageContainer(Image(720, 480)) | [
129,
660
] |
def METHOD_NAME(self):
body = {
"form": f"http://testserver.com{self.form_url}",
"formUrl": "http://testserver.com/my-form",
}
response = self.client.post(self.endpoint, body, HTTP_HOST="testserver.com")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertFalse(Submission.objects.exists()) | [
9,
447,
1978,
137,
130,
2474
] |
def METHOD_NAME(self):
with mock.patch("warnings.warn") as warn:
self._call_units(units_str="/s")
self.assertEqual(warn.call_count, 0)
self.assertEqual(self.cube.units, "s^-1")
self.assertArrayAlmostEqual(
self.cube.data, np.ones_like(self.cube.data)
)
self.assertEqual(self.cube.data.dtype, np.float32) | [
9,
2735,
3146
] |
def METHOD_NAME(certificate_order_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAppServiceCertificateOrderCertificateResult]:
"""
Get the certificate associated with a certificate order.
:param str certificate_order_name: Name of the certificate order.
:param str name: Name of the certificate.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
... | [
19,
991,
549,
1548,
852,
1548,
146
] |
def METHOD_NAME(self, node: nodes.FunctionDef) -> None:
if self.graph is not None:
# closure
pathnode = self._append_node(node)
self.tail = pathnode
self.dispatch_list(node.body)
bottom = f"{self._bottom_counter}"
self._bottom_counter += 1
self.graph.connect(self.tail, bottom)
self.graph.connect(node, bottom)
self.tail = bottom
else:
self.graph = PathGraph(node)
self.tail = node
self.dispatch_list(node.body)
self.graphs[f"{self.classname}{node.name}"] = self.graph
self.reset() | [
716,
559,
2483
] |
def METHOD_NAME(self, name): | [
19,
401
] |
def METHOD_NAME(self): | [
9,
631,
1148
] |
def METHOD_NAME(self, event): | [
69,
1472
] |
def METHOD_NAME():
# proj necessary since we need to rename indexed value into a proper attribute name
assert Team.proj(car_length="car.length").fetch(
as_dict=True, order_by="car_length"
) == [
{"name": "marketing", "car_length": None},
{"name": "business", "car_length": "100"},
{"name": "engineering", "car_length": "20.5"},
]
assert Team.proj(car_length="car.length:decimal(4, 1)").fetch(
as_dict=True, order_by="car_length"
) == [
{"name": "marketing", "car_length": None},
{"name": "engineering", "car_length": 20.5},
{"name": "business", "car_length": 100.0},
]
assert Team.proj(
car_width="JSON_VALUE(`car`, '$.length' RETURNING float) - 15"
).fetch(as_dict=True, order_by="car_width") == [
{"name": "marketing", "car_width": None},
{"name": "engineering", "car_width": 5.5},
{"name": "business", "car_width": 85.0},
]
assert (
(Team & {"name": "engineering"}).proj(car_tire_pressure="car.tire_pressure")
).fetch1("car_tire_pressure") == "[32, 31, 33, 34]"
assert np.array_equal(
Team.proj(car_inspected="car.inspected").fetch(
"car_inspected", order_by="name"
),
np.array([None, "true", None]),
)
assert np.array_equal(
Team.proj(car_inspected="car.inspected:unsigned").fetch(
"car_inspected", order_by="name"
),
np.array([None, 1, None]),
) | [
9,
5786
] |
def METHOD_NAME():
parser = argparse.ArgumentParser()
parser.add_argument("-D", "--debug", help="Enable debug logging", action="store_true")
parser.add_argument("--version", action="version", help="show program version", version=version.__version__)
parser.add_argument("--site", help="backup site", required=False)
parser.add_argument("--key-id", help="key alias as used with encryption_key_id configuration directive", required=True)
parser.add_argument("--bits", help="length of the generated key in bits, default %(default)d", default=3072, type=int)
parser.add_argument("--config", help="configuration file to store the keys in", default=os.environ.get("PGHOARD_CONFIG"))
args = parser.parse_args()
logutil.configure_logging(level=logging.DEBUG if args.debug else logging.INFO)
rsa_private_key, rsa_public_key = create_keys(args.bits)
try:
if args.config:
return save_keys(args.config, args.site, args.key_id, rsa_private_key, rsa_public_key)
else:
return show_key_config(args.site, args.key_id, rsa_private_key, rsa_public_key)
except (CommandError, InvalidConfigurationError) as ex:
print("FATAL: {}".format(ex))
return 1 | [
57
] |
def METHOD_NAME(name, properties):
for m in [re.match("(.*)=(.*)", p) for p in properties]:
if m and m.group(1) == name:
return m.group(2) | [
19,
1042
] |
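A hypothetical call (under an assumed name) showing what the regex-based property lookup above returns for a present and an absent key.

import re

def find_property(name, properties):  # stand-in name for METHOD_NAME above
    for m in [re.match("(.*)=(.*)", p) for p in properties]:
        if m and m.group(1) == name:
            return m.group(2)

print(find_property("color", ["size=10", "color=red"]))  # -> red
print(find_property("shape", ["size=10"]))                # -> None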
def METHOD_NAME(setup_rpc):
class Dropouts(nn.Module):
def forward(self, x):
for _ in range(100):
x = F.dropout(x, p=0.001)
return x
model = nn.Sequential(Dropouts(), Dropouts())
x = torch.rand(10, 10, requires_grad=True)
model = Pipe(model, chunks=10, checkpoint="always")
y = model(x)
y = y.local_value()
y.norm().backward()
assert y.to(torch.bool).tolist() == x.grad.to(torch.bool).tolist() | [
9,
1498,
-1
] |
def METHOD_NAME() -> None: # noqa: D
# Setup logging.
dev_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=dev_format)
credential_sets_json_str = os.environ.get("MF_TEST_ENGINE_CREDENTIAL_SETS")
if credential_sets_json_str is None:
raise ValueError(
f"Environment variable: MF_TEST_ENGINE_CREDENTIAL_SETS has not been set. Please see the comment in "
f"{__file__} for details on how to set it."
)
credential_sets = MetricFlowTestCredentialSetForAllEngines.parse_raw(credential_sets_json_str)
logger.info(
f"Running the following tests to generate snapshots:\n{pformat_big_objects(SNAPSHOT_GENERATING_TEST_FILES)}"
)
for test_configuration in credential_sets.as_configurations:
logger.info(
f"Running tests for {test_configuration.engine} with URL: {test_configuration.credential_set.engine_url}"
)
run_tests(test_configuration, SNAPSHOT_GENERATING_TEST_FILES) | [
22,
615
] |
def METHOD_NAME(self):
self.assertTrue(strop.atoi(" 1 ") == 1)
self.assertRaises(ValueError, strop.atoi, " 1x")
self.assertRaises(ValueError, strop.atoi, " x1 ") | [
9,
10699
] |
def METHOD_NAME(self):
self.run_permutations(lambda abc, a: abc.startswith(a), ("abc", "a"), True, True)
self.run_permutations(lambda abc, a: abc.startswith(a, 0), ("abc", "a"), True, True)
self.run_permutations(lambda abc, a: abc.startswith(a, 0, 1), ("abc", "a"), True, True) | [
9,
12776
] |
def METHOD_NAME(self, model_view, scoped_session, model_access):
"""Install bindings."""
with scoped_session as session:
for resource_name, bindings in model_view.items():
policy = model_access.get_iam_policy(session, resource_name)
if policy['bindings']:
raise Exception('policy should have been empty')
model_access.set_iam_policy(
session,
resource_name,
{'bindings': bindings, 'etag': policy['etag']},
update_members=True)
model_access.expand_special_members(session) | [
428,
5992
] |
def METHOD_NAME(self, variable_name: str, grids: list[pp.Grid]):
"""Perturbation of a variable from its reference value.
The parameter :code:`variable_name` should be the name of a variable so that
:code:`self.variable_name()` and `self.reference_variable_name()` are valid
calls. These methods will be provided by mixin classes; normally this will be a
subclass of :class:`VariableMixin`.
The returned operator will be of the form
:code:`self.variable_name(grids) - self.reference_variable_name(grids)`.
Parameters:
variable_name: Name of the variable.
grids: List of subdomain or interface grids on which the variable is defined.
Returns:
Operator for the perturbation.
"""
var = getattr(self, variable_name)
var_ref = getattr(self, "reference_" + variable_name)
d_var = var(grids) - var_ref(grids)
d_var.set_name(variable_name + "_perturbation")
return d_var | [
11931,
280,
272
] |
def METHOD_NAME(m1, m2, m3, P=5.0e9, T=2000.0):
composition = m1.formula
assemblage = burnman.Composite([m1, m2, m3])
assemblage.set_state(P, T)
equality_constraints = [
("phase_fraction", (m1, 0.0)),
("phase_fraction", (m2, 0.0)),
]
sol, prm = equilibrate(
composition, assemblage, equality_constraints, store_iterates=False
)
return sol.x[0:2] | [
11016
] |
async def METHOD_NAME(self, amodel: WordlistActionModel, data: List[Tuple[str, int]], args: WordlistSaveFormArgs):
output = asdict(args)
output['Items'] = data
output['wlattr'] = amodel.curr_wlform_args.wlattr
output['pattern'] = amodel.curr_wlform_args.wlpat
output['human_corpname'] = amodel.corp.human_readable_corpname
output['usesubcorp'] = amodel.args.usesubcorp
template = self._template_env.get_template('txt_wlist.jinja2')
self._data = await template.render_async(output) | [
77,
11763
] |
def METHOD_NAME():
"""
Regression test: project-rules.glob rule run from inside an indirect
conditional should report an error as it depends on the 'currently loaded
project' concept and indirect conditional rules get called only after all
the project modules have already finished loading.
"""
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\ | [
9,
1825,
623,
2500,
869
] |
def METHOD_NAME():
pass | [
1288
] |
def METHOD_NAME(**kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pretrained on ImageNet
"""
model = SqueezeNet(version=1.1, **kwargs)
return model | [
5267,
1170
] |
def METHOD_NAME(self):
"""
Returns a protobuf object that contains persistable data representing this object
:return: A protobuf TokenMetadata object
:rtype: qrl_pb2.TokenMetadata
"""
return self._data | [
16857
] |
def METHOD_NAME(self):
"""
Respect setting field templates to an empty string.
This should not be ignored but should make the corresponding task field
an empty string.
https://github.com/ralphbean/bugwarrior/issues/970
"""
raw_values = {'templates': {}, 'project_template': ''}
computed_values = schema.ServiceConfig().compute_templates(raw_values)
self.assertEqual(computed_values['templates'], {'project': ''}) | [
9,
35,
671
] |
def METHOD_NAME(domain_link, upstream_app_id, upstream_version):
url = reverse('linked_domain:app_by_version', args=[domain_link.master_domain,
upstream_app_id,
upstream_version])
response = _do_request_to_remote_hq_json(url, domain_link.remote_details, domain_link.linked_domain)
return response['app'] | [
19,
991,
604,
281
] |
def METHOD_NAME(module, token, room, msg_from, msg, msg_format='text',
color='yellow', notify=False, api=MSG_URI_V1):
'''sending message to hipchat v1 server'''
params = {}
params['room_id'] = room
params['from'] = msg_from[:15] # max length is 15
params['message'] = msg
params['message_format'] = msg_format
params['color'] = color
params['api'] = api
params['notify'] = int(notify)
url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
data = urlencode(params)
if module.check_mode:
# In check mode, exit before actually sending the message
module.exit_json(changed=False)
response, info = fetch_url(module, url, data=data)
if info['status'] == 200:
return response.read()
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) | [
353,
169,
3392
] |
def METHOD_NAME(self, choice):
if hasattr(choice, "vote_count"):
return getattr(choice, "vote_count", -1)
else:
return choice.votes.all().count() | [
19,
29
] |
def METHOD_NAME(self, ref):
assert ref == units.MDANALYSIS_BASE_UNITS | [
9,
-1,
1305,
-1,
1178,
805,
1305
] |
def METHOD_NAME():
# Arrange
result1 = CheckResult(0, 'check1')
result1.conditions_results = [ConditionResult(ConditionCategory.PASS)]
result2 = CheckResult(0, 'check2')
result2.conditions_results = [ConditionResult(ConditionCategory.WARN)]
result3 = CheckResult(0, 'check3')
result3.conditions_results = [ConditionResult(ConditionCategory.FAIL)]
# Act & Assert
not_passed_checks = SuiteResult('test', [result1, result2]).get_not_passed_checks()
assert_that(not_passed_checks, has_length(1))
not_passed_checks = SuiteResult('test', [result1, result2]).get_not_passed_checks(fail_if_warning=False)
assert_that(not_passed_checks, has_length(0))
not_passed_checks = SuiteResult('test', [result1, result2, result3]).get_not_passed_checks()
assert_that(not_passed_checks, has_length(2)) | [
9,
482,
1571,
2676,
130,
7680
] |
def METHOD_NAME():
for T in [float, _AutoDiffXd, _sym.Expression]:
RollPitchYaw_[T].__repr__ = _roll_pitch_yaw_repr
RotationMatrix_[T].__repr__ = _rotation_matrix_repr
RigidTransform_[T].__repr__ = _rigid_transform_repr | [
238,
92,
3194
] |
def METHOD_NAME(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
label_mapping_path = osp.join(self.local_path, 'label_mapping.txt')
with open(label_mapping_path, 'r', encoding='utf-8') as f:
label_mapping = f.readlines()
score = torch.max(inputs['outputs'])
inputs = {
OutputKeys.SCORES: [score.item()],
OutputKeys.LABELS:
[label_mapping[inputs['outputs'].argmax()].split('\t')[1]]
}
return inputs | [
1710
] |
def METHOD_NAME(matrix):
# List to store the sizes of rivers
sizes = []
# Create a visited matrix to keep track of visited nodes
visited = [[False for _ in range(len(matrix[0]))] for _ in range(len(matrix))]
# Iterate over each cell in the matrix
for i in range(len(matrix)):
for j in range(len(matrix[i])):
# If the cell has already been visited, continue to the next iteration
if visited[i][j]:
continue
# Explore the river connected to the current cell and update sizes
sizes = traverseNode(i, j, matrix, visited, sizes)
return sizes | [
15096,
3120
] |
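The traverseNode helper is assumed above; one possible implementation (a plain iterative DFS over 4-connected cells of 1s, not the source's actual code) is sketched here.

def traverseNode(i, j, matrix, visited, sizes):
    size = 0
    stack = [(i, j)]
    while stack:
        r, c = stack.pop()
        if visited[r][c]:
            continue
        visited[r][c] = True
        if matrix[r][c] == 0:
            continue
        size += 1
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if 0 <= nr < len(matrix) and 0 <= nc < len(matrix[0]) and not visited[nr][nc]:
                stack.append((nr, nc))
    if size > 0:
        sizes.append(size)
    return sizes

# With this helper, the outer function would return [3] for the matrix [[1, 0], [1, 1]].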
def METHOD_NAME(self): | [
69,
10412
] |
def METHOD_NAME(self, load_site_user: bool = True):
self.site_user = load_config(self.options.config, load_site_user) | [
557,
200
] |
def METHOD_NAME(con, index):
table = con.table("array_table")
expr = table[table.array_int[index].name("indexed")]
result = expr.execute()
df = table.compile().toPandas()
expected = pd.DataFrame(
{
"indexed": df.array_int.apply(
lambda x: x[index] if -len(x) <= index < len(x) else np.nan
)
}
)
tm.assert_frame_equal(result, expected) | [
9,
877,
724
] |
def METHOD_NAME(self):
import warnings
warnings.warn(
"error.error from azure exceptions is deprecated, just simply use 'error' once",
DeprecationWarning,
)
return self | [
168
] |
def METHOD_NAME(
self, query: str, language: str
) -> list[tuple[float, Document]]:
return _search(
query,
language,
self._vectors[language],
self._idf[language],
self._documents[language],
self._stemmer,
) | [
1070
] |
def METHOD_NAME(admin_client, delete_link, css):
admin_client.post(delete_link)
with pytest.raises(Css.DoesNotExist):
css.refresh_from_db() | [
9,
344,
392,
472,
1108,
14347,
41
] |
def METHOD_NAME(self):
with patch('requests.sessions.Session.post', unittest.mock.Mock(side_effect = lambda url, data, timeout, files=None: mock_netchop_netmhcstabpan(
data,
files,
self.test_data_directory,
'net_chop.fail.html'
))), self.assertRaises(Exception) as context:
output_file = tempfile.NamedTemporaryFile()
NetChop(
os.path.join(self.test_data_directory, 'Test_filtered.tsv'),
self.test_fasta,
output_file.name,
).execute()
self.assertTrue('NetChop encountered an error during processing.' in str(context.exception)) | [
9,
819,
10080,
180
] |
def METHOD_NAME(item):
"""Return True if one of parents of item is a string"""
for parent in item.parents(): # [consider-using-any-or-all]
if isinstance(parent, str):
return True
return False | [
137,
280,
144
] |
def METHOD_NAME(self):
"""Return a collection of movie credits that this :class:`Person` was a
cast or crew member on
"""
if self._movie_credits is None:
data = yield self.ext_movie_credits
self._movie_credits = MovieCredits(**data)
yield self._movie_credits | [
1786,
7559
] |
def METHOD_NAME(self):
m = pyo.ConcreteModel()
m.properties = NaturalGasParameterBlock()
m.state = m.properties.build_state_block()
self.assertEqual(m.state.compress_fact.value, 0.80) | [
9,
2800,
2985
] |
def METHOD_NAME(self):
self.assertEqual(BlockDev.get_plugin_soname(BlockDev.Plugin.LOOP), "libbd_loop.so.3") | [
9,
2793,
281
] |
def METHOD_NAME(parent_context, error_name, lazy_context, message):
if isinstance(lazy_context, LazyTreeContext):
node = lazy_context.data
if node.parent.type == 'argument':
node = node.parent
analysis.add(parent_context, error_name, node, message) | [
238,
1545,
946
] |
def METHOD_NAME(self, archive, obj, symbol):
sections = self.get_sections(archive, obj)
return [s for s in sections if s.endswith(symbol)] | [
590,
1608
] |
def METHOD_NAME(space, index, value, shared_memory):
size = int(np.prod(space.shape))
destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)
np.copyto(
destination[index * size : (index + 1) * size],
np.asarray(value, dtype=space.dtype).flatten(),
) | [
77,
414,
24,
1644,
1645
] |
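A self-contained illustration of the flat shared-memory write above, using a multiprocessing Array and a toy 2x2 float32 "space" as stand-ins (not the real Gymnasium objects).

import multiprocessing as mp
import numpy as np

shape, dtype, n_slots = (2, 2), np.float32, 3
shared_memory = mp.Array('f', n_slots * int(np.prod(shape)))

index = 1
value = np.arange(4, dtype=dtype).reshape(shape)
size = int(np.prod(shape))
destination = np.frombuffer(shared_memory.get_obj(), dtype=dtype)
np.copyto(destination[index * size:(index + 1) * size], np.asarray(value, dtype=dtype).flatten())
print(destination.reshape(n_slots, *shape))  # slot 1 now holds the toy value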
def METHOD_NAME(self, offset, size):
self.f.seek(offset)
return self.f.read(size) | [
772,
12192
] |
def METHOD_NAME(): | [
1239
] |
async def METHOD_NAME(
self,
location: str,
**kwargs: Any
) -> "_models.Quota":
"""Return quota for subscription by region.
:param location: Azure region.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Quota, or the result of cls(response)
:rtype: ~avs_client.models.Quota
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Quota"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-05-01"
accept = "application/json"
# Construct URL
url = self.METHOD_NAME.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Quota', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | [
250,
2268,
6477
] |
def METHOD_NAME(self, content):
try:
json.loads(content)
raise AssertionError("Expected %s... to be non-JSON" % content[:10])
except ValueError:
pass | [
638,
130,
763
] |
def METHOD_NAME(self, parser):
parser.add_argument("directory", type=str)
parser.add_argument("run_all", nargs="?", type=str)
parser.add_argument("run_now", nargs="?", type=str) | [
238,
134
] |
def METHOD_NAME(self) -> None:
api = APIRegistry.api_for(
node_uid=self.node_uid,
user_verify_key=self.syft_client_verify_key,
)
call = SyftAPICall(
node_uid=self.node_uid,
path="queue",
args=[],
kwargs={"uid": self.id},
blocking=True,
)
result = api.make_call(call)
if isinstance(result, QueueItem) and result.resolved:
self.resolved = True
self.result = result.result
self.status = result.status | [
1047
] |
def METHOD_NAME(self):
name = super().METHOD_NAME()
if 'mutators' in self.opt and self.opt['mutators']:
return name + '__' + self.opt['mutators']
return name | [
19,
147
] |
def METHOD_NAME(self, xp, c_dtype, f_dtype):
with pytest.warns(numpy.ComplexWarning):
out = xp.ones((8,), dtype=c_dtype).sum(dtype=f_dtype)
return out | [
9,
6251
] |
def METHOD_NAME():
q0, q1, q2 = cirq.LineQubit.range(3)
ps = cirq.KET_PLUS(q0) * cirq.KET_PLUS(q1) * cirq.KET_ZERO(q2)
assert ps.qubits == [q0, q1, q2]
assert ps[q0] == cirq.KET_PLUS | [
9,
1188,
3603
] |
def METHOD_NAME(m, base="TDS"):
"""
Builds RO model based on the IDAES separator.
Requires prop_TDS property package.
"""
prop = property_models.get_prop(m, base=base)
m.fs.RO = Separator(
property_package=prop,
outlet_list=["retentate", "permeate"],
split_basis=SplittingType.componentFlow,
energy_split_basis=EnergySplittingType.equal_temperature,
)
# specify
if base == "TDS":
m.fs.RO.split_fraction[0, "permeate", "H2O"].fix(0.5)
m.fs.RO.split_fraction[0, "permeate", "TDS"].fix(0.01)
else:
raise ValueError(
"Unexpected property base {base} provided to build_SepRO"
"".format(base=base)
)
# scale
set_scaling_factor(
m.fs.RO.split_fraction, 1
) # TODO: IDAES should set these scaling factors by default
constraint_scaling_transform(m.fs.RO.sum_split_frac[0.0, "H2O"], 1)
constraint_scaling_transform(m.fs.RO.sum_split_frac[0.0, "TDS"], 1) | [
56,
2479,
4072
] |
def METHOD_NAME(cls) -> AnyUrl:
return AnyUrl(scheme="https", url="https://docs.airbyte.com/integrations/sources/test") # type: ignore | [
1200,
274
] |
def METHOD_NAME(self):
mem_value = self.replayhelper.read_phys_mem(0x5000, 0x2)
self.assertEqual(mem_value, b'\xd1\x99') | [
9,
203,
-1,
1279,
66
] |
def METHOD_NAME(self, model: type[Model]) -> bool: ... | [
137,
3024
] |
def METHOD_NAME(context, step):
proc_state = run_docker_proc(context, step)
return copy_docker_ksql_cli_output(context, step, proc_state) | [
223,
5043,
615,
240
] |
def METHOD_NAME():
for wrapped in _all_wrapped:
wrapped.cache_clear() | [
537,
75,
587,
588
] |
def METHOD_NAME(button):
do_import(button, app.library) | [
2859,
905
] |
def METHOD_NAME(self): | [
9,
4593,
298,
137,
4398
] |
def METHOD_NAME(self):
"""Get default vertical code for variable name"""
for pattern, code in self.DEFAULT_VERT_CODE_PATTERNS.items():
if fnmatch.fnmatch(self.var_name, pattern):
return code
raise ValueError(f"No default vertical code could be found for {self.var_name}") | [
19,
235,
4873,
544
] |
def METHOD_NAME(self, cellPointIdsList):
'''convert a list of numpy arrays defining each cell into a flat array defining cells.
This function is the inverse of vmtk.meshtonumpy._ConvertFlatCellsArrayToList(cells, cellLocations)
arguments:
- cellPointIdsList: list of numpy arrays (in same order defined in cells), where each array contains
the cellPointIds for that specific cell. Note: array lengths are not constant,
each cell can have a different number of constituent points.
returns:
- cells: 1D array of format [npointsCell1, cell1PointId_1, .., cell1PointId_npointsCell1,
npointsCell2, cell2PointId_1, .., cell2PointId_npointsCell2,
...
npointsCell(nCells), cell(nCells)PointId_1, .. cell(nCells)PointId_npointsCell(nCells)]
- cellLocations: flat array of size = nCells. each element in the array defines starts a new cell
(a location of npointCellFoo) in the cells array
'''
cellArrayList = []
cellLocationsList = [np.array([0])]
cellIndex = 0
for cellPointIdArray in cellPointIdsList:
numPointsInArray = cellPointIdArray.size
cellArray = np.concatenate((np.array([numPointsInArray]), cellPointIdArray))
cellArrayList.append(cellArray)
cellIndex += cellArray.size
cellLocationsList.append(np.array([cellIndex]))
cellLocations = np.concatenate(cellLocationsList[:-1])
cells = np.concatenate(cellArrayList)
return cells, cellLocations | [
197,
245,
24,
2301,
383,
877
] |
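A compact, standalone equivalent of the flattening above (an illustrative rewrite, not the vmtk source), checked against a toy triangle-plus-line cell list.

import numpy as np

def flatten_cells(cell_point_ids_list):
    cells, locations, offset = [], [], 0
    for point_ids in cell_point_ids_list:
        locations.append(offset)
        cells.extend([point_ids.size, *point_ids.tolist()])
        offset += point_ids.size + 1
    return np.array(cells), np.array(locations)

cells, cell_locations = flatten_cells([np.array([0, 1, 2]), np.array([2, 3])])
print(cells)           # [3 0 1 2 2 2 3]
print(cell_locations)  # [0 4]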
def METHOD_NAME(suite, suite_test, xunit_filename, errtype):
num_errors = 3
for _ in range(num_errors):
suite_test.append_line('slash.add_{}("some message")'.format(errtype))
if errtype == 'error':
suite_test.expect_error()
else:
suite_test.expect_failure()
suite.run()
testcase_xml = _get_testcase_xml(suite_test, xunit_filename)
errors = testcase_xml.findall(errtype)
assert len(errors) == num_errors
for error in errors:
assert error.attrib['message'] == 'some message'
assert error.attrib['type'] == errtype
assert errors | [
9,
12666,
2793,
238,
374,
168
] |
def METHOD_NAME(assert_pixels):
assert_pixels('''
BBBrrrrrr
BBBrrrrrr
BBBrrrrrr
BBBrrrrrr
BBBrrrrrr
''', '''<style>@page { size: 9px 5px; background: linear-gradient(
to right, blue 3px, blue 3px, red 3px, red 3px
)''') | [
9,
1783,
1784,
1327
] |
def METHOD_NAME(self):
source = CPUStream
target = CPUStream
self._test_wait_stream(source, target) | [
9,
618,
919,
2265,
2265
] |
def METHOD_NAME(self):
self.cpp_info.includedirs.append(os.path.join("include", "libsafec"))
self.cpp_info.libs = [f"safec-{self.version}"]
self.cpp_info.set_property("pkg_config_name", "libsafec")
bin_dir = os.path.join(self.package_folder, "bin")
self.output.info(f"Appending PATH environment variable: {bin_dir}")
self.env_info.PATH.append(bin_dir) | [
360,
100
] |
def METHOD_NAME(x, axis=None):
"""
Subtracts the mean of the input array over all but the last dimension
and over all MPI processes from each entry.
Args:
x: Input array
axis: Axis or axes along which the means are computed. The default (None) is to
compute the mean of the flattened array.
Returns:
The resulting array.
"""
# here we keep the dims, since automatic broadcasting of a scalar (shape () )
# to an array produces errors when used inside of a function which is transposed
# with jax.linear_transpose
x_mean = mean(x, axis=axis, keepdims=True)
return x - x_mean # automatic broadcasting of x_mean | [
2096,
314
] |
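A local, single-process illustration of the centering above, with numpy's mean standing in for the MPI-aware mean the module provides.

import numpy as np

x = np.array([[1.0, 2.0],
              [3.0, 4.0]])
# Subtract the mean over all but the last dimension (axis 0 here), keeping dims for broadcasting.
print(x - x.mean(axis=0, keepdims=True))  # each column now has zero mean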
def METHOD_NAME(self):
pass | [
72,
710
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME():
"""
The test network fc1.onnx is used, which has two input variables and two output variables.
The network was trained such that the first output approximates the sum of the absolute
values of the inputs, while the second output approximates the sum of the squares of the inputs
for inputs in the range [-10.0, 10.0].
"""
filename = os.path.join(os.path.dirname(__file__), NETWORK_FILE)
network = Marabou.read_onnx(filename)
# Get the input and output variable numbers; [0] since first dimension is batch size
inputVars = network.inputVars[0][0]
# Set input bounds
network.setLowerBound(inputVars[0],-10.0)
network.setUpperBound(inputVars[0], 10.0)
network.setLowerBound(inputVars[1],-10.0)
network.setUpperBound(inputVars[1], 10.0)
return network | [
557,
1228
] |
def METHOD_NAME(self):
self._test_reproducibility(
"test_reproducibility_fp16",
[
"--fp16",
"--fp16-init-scale",
"4096",
],
delta=0.011,
) | [
9,
8759,
23
] |
async def METHOD_NAME(self):
raw = await self._read_reg16s(REG_CURRENT)
amps = raw * AMPERE_FACTOR
self._logger.log(self._level, "INA260: current raw=%d amps=%+f", raw, amps)
return amps | [
19,
1056
] |
def METHOD_NAME(): # noqa
args = parse_arguments()
for path in args.path:
sys.path.insert(0, path)
try:
if args.pid_file:
setup_pidfile(args.pid_file)
except RuntimeError as e:
logger = setup_logging(args, stream=args.log_file or sys.stderr)
logger.critical(e)
return RET_PIDFILE
logger = setup_logging(args, stream=args.log_file or sys.stderr)
logger.info("Remoulade %r is booting up." % __version__)
if args.pid_file:
atexit.register(remove_pidfile, args.pid_file, logger)
return start_worker(args, logger) | [
57
] |
def METHOD_NAME(server_id, action_id, dry_run=0):
log_debug(3, server_id, action_id)
if dry_run:
raise ShadowAction("dry run requested - skipping")
kickstart_session_id = server_kickstart.get_kickstart_session_id(server_id,
action_id)
if kickstart_session_id is None:
raise InvalidAction("Could not find kickstart session ID")
row = server_kickstart.get_kickstart_session_info(kickstart_session_id, server_id)
deploy_configs = (row['deploy_configs'] == 'Y')
ks_package_profile = server_kickstart.get_kisckstart_session_package_profile(kickstart_session_id)
# if the session doesn't have a pkg profile, try from the ks profile itself
if not ks_package_profile:
ks_package_profile = server_kickstart.get_kickstart_profile_package_profile(kickstart_session_id)
if not ks_package_profile:
log_debug(4, "No kickstart package profile")
# No profile to bring this system to
if deploy_configs:
# We have to deploy configs, so pass in a server profile
server_profile = server_kickstart.get_server_package_profile(server_id)
else:
# No configs to be deployed
server_profile = None
server_kickstart.schedule_config_deploy(server_id,
action_id, kickstart_session_id, server_profile=server_profile)
raise ShadowAction("Package sync not scheduled, missing kickstart "
"package profile; proceeding with configfiles.deploy")
server_profile = server_kickstart.get_server_package_profile(server_id)
installs, removes = server_packages.package_delta(server_profile,
ks_package_profile)
if not (installs or removes):
log_debug(4, "No packages to be installed/removed")
if not deploy_configs:
server_profile = None
server_kickstart.schedule_config_deploy(server_id,
action_id, kickstart_session_id, server_profile=None)
raise ShadowAction("Package sync not scheduled, nothing to do")
log_debug(4, "Scheduling kickstart delta")
server_kickstart.schedule_kickstart_delta(server_id,
kickstart_session_id, installs, removes)
raise ShadowAction("Package sync scheduled") | [
507,
164
] |
def METHOD_NAME(self):
with self.assertRaises(TypeError):
with sqlite.connect(':memory:') as bck:
self.cx.backup(bck, 1) | [
9,
2069,
246,
335
] |