text (stringlengths 15-7.82k) | ids (sequencelengths 1-7) |
---|---|
def METHOD_NAME(v, z, **kwargs):
op = TensorJV(**kwargs)
return op(v, z) | [
6200
] |
def METHOD_NAME(self): | [
129,
2277,
1252
] |
def METHOD_NAME(self):
X = pd.DataFrame(np.random.uniform(low=0.0, high=100, size=(50, 10)))
y = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
selector = NoSelect()
Xselect = selector.evaluate(X=X, y=y, savepath=os.getcwd())
self.assertEqual(Xselect.shape, (50, 10))
return | [
9,
-1
] |
def METHOD_NAME(self):
inputs = self.inputs
outputs = self.outputs
color_ramp_node_name = self._get_color_ramp_node_name()
evaluate = get_evaluator(node_group_name, color_ramp_node_name)
# no outputs, end early.
if not outputs['Color'].is_linked:
return
result = []
floats_in = inputs[0].sv_get(default=[[]], deepcopy=False)
desired_levels = [2]
result = recurse_f_level_control([floats_in], [evaluate, self.use_alpha], color_ramp_mapper, None, desired_levels)
self.outputs[0].sv_set(result) | [
356
] |
async def METHOD_NAME():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
process_output = [
" Test Description with leading and trailing spaces ",
"\n10\t3\tnotebook_without_spaces.ipynb",
"11\t4\tNotebook with spaces.ipynb",
"12\t5\tpath/notebook_without_spaces.ipynb",
"13\t6\tpath/Notebook with spaces.ipynb",
"14\t1\tpath/Notebook with λ.ipynb",
"0\t0\t",
"folder1/file with spaces and λ.py",
"folder2/file with spaces.py",
"-\t-\tbinary_file.png",
]
mock_execute.return_value = maybe_future(
(0, "\x00".join(process_output) + "\x00", "")
)
expected_response = {
"code": 0,
"commit_body": "Test Description with leading and trailing spaces",
"modified_file_note": "7 files changed, 60 insertions(+), 19 deletions(-)",
"modified_files_count": "7",
"number_of_insertions": "60",
"number_of_deletions": "19",
"modified_files": [
{
"modified_file_path": "notebook_without_spaces.ipynb",
"modified_file_name": "notebook_without_spaces.ipynb",
"insertion": "10",
"deletion": "3",
"is_binary": False,
},
{
"modified_file_path": "Notebook with spaces.ipynb",
"modified_file_name": "Notebook with spaces.ipynb",
"insertion": "11",
"deletion": "4",
"is_binary": False,
},
{
"modified_file_path": "path/notebook_without_spaces.ipynb",
"modified_file_name": "notebook_without_spaces.ipynb",
"insertion": "12",
"deletion": "5",
"is_binary": False,
},
{
"modified_file_path": "path/Notebook with spaces.ipynb",
"modified_file_name": "Notebook with spaces.ipynb",
"insertion": "13",
"deletion": "6",
"is_binary": False,
},
{
"modified_file_path": "path/Notebook with λ.ipynb",
"modified_file_name": "Notebook with λ.ipynb",
"insertion": "14",
"deletion": "1",
"is_binary": False,
},
{
"modified_file_path": "folder2/file with spaces.py",
"modified_file_name": "folder1/file with spaces and λ.py => folder2/file with spaces.py",
"previous_file_path": "folder1/file with spaces and λ.py",
"insertion": "0",
"deletion": "0",
"is_binary": False,
},
{
"modified_file_path": "binary_file.png",
"modified_file_name": "binary_file.png",
"insertion": "0",
"deletion": "0",
"is_binary": True,
},
],
}
# When
actual_response = await Git().detailed_log(
selected_hash="f29660a2472e24164906af8653babeb48e4bf2ab",
path=str(Path("/bin/test_curr_path")),
)
# Then
mock_execute.assert_called_once_with(
[
"git",
"log",
"--cc",
"-m",
"-1",
"--oneline",
"--numstat",
"--pretty=format:%b%x00",
"-z",
"f29660a2472e24164906af8653babeb48e4bf2ab",
],
cwd=str(Path("/bin") / "test_curr_path"),
timeout=20,
env=None,
username=None,
password=None,
is_binary=False,
)
assert expected_response == actual_response | [
9,
4814,
390
] |
def METHOD_NAME(gds: GraphDataScience) -> LPTrainingPipeline:
pipe, _ = gds.beta.pipeline.linkPrediction.create(PIPE_NAME)
return pipe | [
4466,
890
] |
def METHOD_NAME(command, **kwargs):
if "is-enabled" in command:
return "enabled", 0
elif "is-active" in command:
return "active", 0
else:
raise Exception("unknown command") | [
22,
462,
142,
4214
] |
def METHOD_NAME() -> None:
first, builder = round_trip_for(AwsSagemakerModel)
assert len(builder.resources_of(AwsSagemakerModel)) == 1 | [
9,
379
] |
def METHOD_NAME(self):
form = FormFactory.create()
FormStepFactory.create(
form=form,
form_definition__configuration={
"components": [
{
"key": "content",
"html": "<p>The raw time now is: {{ now.isoformat }}</p>",
"type": "content",
"label": "Content",
},
]
},
)
FormVariableFactory.create(form=form, source=FormVariableSources.user_defined)
form_path = reverse("api:form-detail", kwargs={"uuid_or_slug": form.uuid})
start_endpoint = reverse("api:submission-list")
body = {
"form": f"http://testserver.com{form_path}",
"formUrl": "http://testserver.com/my-form",
}
# start a submission
response = self.client.post(start_endpoint, body)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
submission = Submission.objects.get()
submission_variables = submission.submissionvaluevariable_set.all()
# Only the user defined variable is initiated after creating the submission
self.assertEqual(1, submission_variables.count())
submission_step_endpoint = reverse(
"api:submission-steps-detail",
kwargs={
"submission_uuid": submission.uuid,
"step_uuid": form.formstep_set.get().uuid,
},
)
with freeze_time("2021-07-29T14:00:00Z"):
response = self.client.get(submission_step_endpoint)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
content_field = response_data["formStep"]["configuration"]["components"][0][
"html"
]
data = response_data["data"]
# The static variable is used in the dynamic configuration ...
self.assertIn("The raw time now is: 2021-07-29T14:00:00+00:00", content_field)
# ... but it is not sent back in the data
self.assertEqual({}, data) | [
9,
447,
1978
] |
def METHOD_NAME(self):
dataset = MAG240MDataset(self.data_dir)
edge_path = f'{dataset.dir}/paper_to_paper_symmetric_pgl'
t = time.perf_counter()
if not osp.exists(edge_path):
log.info('Converting adjacency matrix...')
edge_index = dataset.edge_index('paper', 'cites', 'paper')
edge_index = edge_index.T
edges_new = np.zeros((edge_index.shape[0], 2))
edges_new[:, 0] = edge_index[:, 1]
edges_new[:, 1] = edge_index[:, 0]
edge_index = np.vstack((edge_index, edges_new))
edge_index = np.unique(edge_index, axis=0)
graph = Graph(edge_index, sorted=True)
graph.adj_dst_index
graph.dump(edge_path)
log.info(f'Done! [{time.perf_counter() - t:.2f}s]')
np.random.seed(self.seed)
self.train_idx = dataset.get_idx_split('train')
np.random.shuffle(self.train_idx)
self.val_idx = dataset.get_idx_split('valid')
self.test_idx = dataset.get_idx_split('test')
self.x = dataset.paper_feat
self.y = dataset.all_paper_label
self.graph = Graph.load(edge_path, mmap_mode='r+')
log.info(f'Done! [{time.perf_counter() - t:.2f}s]') | [
123,
365
] |
def METHOD_NAME(framework, config):
""" Defines default setup_cmd and _setup_cmd, interpolate commands if necessary.
The default values are `None`.
In case a setup_cmd is defined, the original definition is saved to `_setup_cmd`.
The new `setup_cmd` will be a list of commands, where each command has the
directories, package manager and python binary interpolated.
`_setup_cmd` will be used for setup inside containers.
`setup_cmd` will be used when running locally (or on an Amazon image).
"""
if "setup_cmd" not in framework:
framework._setup_cmd = None
framework.setup_cmd = None
else:
framework._setup_cmd = framework.setup_cmd
if isinstance(framework.setup_cmd, str):
framework.setup_cmd = [framework.setup_cmd]
framework.setup_cmd = [
cmd.format(pip="{pip}", py="{py}", **config.common_dirs)
for cmd in framework.setup_cmd
] | [
238,
235,
102,
1660
] |
def METHOD_NAME(bitstring):
"""Convert bitstring readouts (memory) to hexadecimal readouts."""
return hex(int(bitstring, 2)) | [
762,
24,
696
] |
def METHOD_NAME(self, obj):
"""
Get latest version.
"""
rv = obj.versions.filter(is_highest=True).first()
href = reverse(
"v2-collection-versions-detail",
kwargs={
"path": self.context["path"],
"namespace": obj.namespace,
"name": obj.name,
"version": rv.version,
},
)
return {"href": href, "version": rv.version} | [
19,
893,
281
] |
def METHOD_NAME(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) | [
2471,
437
] |
def METHOD_NAME(self):
import sys
existing_file = sys.executable
result = examinee.existing_file(existing_file)
self.assertEqual(existing_file, result)
with capture_out() as (stdout, stderr):
with self.assertRaises(Failure):
examinee.existing_file('no such file, I hope')
self.assertIn('not an existing file', stderr.getvalue().strip())
self.assertTrue(len(stdout.getvalue()) == 0)
# should also work with pathlib.Path
existing_file = pathlib.Path(existing_file)
self.assertEqual(examinee.existing_file(existing_file), existing_file) | [
9,
1153,
171
] |
def METHOD_NAME(self):
self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""") | [
9,
1591,
1553,
1600,
534,
654,
173
] |
def METHOD_NAME(self):
"""Test that img-src contains data/blob."""
assert 'blob:' in base_settings.CSP_IMG_SRC
assert 'data:' in base_settings.CSP_IMG_SRC | [
9,
260,
61,
365,
623,
2029,
1339
] |
def METHOD_NAME(text: str, cache: bool = True) -> str:
"""Highlights some TIM code."""
@lru_cache(1048)
def _highlight(txt: str) -> str:
output = ""
cursor = 0
active_tokens: list[Token] = []
def _get_active_markup() -> str:
active_markup = " ".join(tkn.markup for tkn in active_tokens)
if active_markup == "":
return ""
return f"[{active_markup}]"
for matchobj in RE_MARKUP.finditer(txt):
start, end = matchobj.span()
if cursor < start:
if cursor > 0:
output += "]"
output += _get_active_markup()
output += f"{txt[cursor:start]}[/]"
*_, tags = matchobj.groups()
output += "["
for tag in tags.split():
token = consume_tag(tag)
output += f"{token.prettified_markup} "
if Token.is_clear(token):
active_tokens = [
tkn for tkn in active_tokens if not token.targets(tkn)
]
else:
active_tokens.append(token)
output = output.rstrip()
cursor = end
if cursor < len(txt) - 1:
if cursor > 0:
output += "]"
output += _get_active_markup()
output += f"{txt[cursor:]}"
if len(active_tokens) > 0:
output += "[/]"
if output.count("[") != output.count("]"):
output += "]"
return output
if cache:
return _highlight(text)
return _highlight.__wrapped__(text) | [
8186,
14889
] |
def METHOD_NAME(self):
"""
name of the symbol
"""
return self._name | [
156
] |
def METHOD_NAME(self, widget):
try:
val = int(widget.get_text())
except ValueError:
val = -1
if val < 0:
val = 0
widget.set_text(str(val))
elif val >= len(self.img.sprites):
val = len(self.img.sprites) - 1
widget.set_text(str(val))
self._switch_entry() | [
69,
475,
147,
1180
] |
def METHOD_NAME(self) -> "RemoveBlock":
return RemoveBlock() | [
670
] |
def METHOD_NAME(radarr_id, message):
providers = get_notifier_providers()
if not len(providers):
return
movie = get_movie(radarr_id)
if not movie:
return
movie_title = movie['title']
movie_year = movie['year']
if movie_year not in [None, '', '0']:
movie_year = ' ({})'.format(movie_year)
else:
movie_year = ''
asset = apprise.AppriseAsset(async_mode=False)
apobj = apprise.Apprise(asset=asset)
for provider in providers:
if provider['url'] is not None:
apobj.add(provider['url'])
apobj.notify(
title='Bazarr notification',
body="{}{} : {}".format(movie_title, movie_year, message),
) | [
353,
609,
1786
] |
def METHOD_NAME(options):
"""
Run check_cgroup_availability() in a separate thread to detect the following problem:
If "cgexec --sticky" is used to tell cgrulesengd to not interfere
with our child processes, the sticky flag unfortunately works only
for processes spawned by the main thread, not those spawned by other threads
(and this will happen if "benchexec -N" is used).
"""
thread = _CheckCgroupsThread(options)
thread.start()
thread.join()
if thread.error:
raise thread.error | [
250,
11354,
6477,
623,
600
] |
def METHOD_NAME(self, tensor: deeplake.Tensor):
meta = tensor.meta
return {
"name": tensor.key,
"num_samples": len(tensor),
"htype": tensor.htype,
"dtype": str(tensor.dtype) if tensor.dtype else None,
"compression_type": "sample_compression"
if meta.sample_compression
else ("chunk_compression" if meta.chunk_compression else None),
"compression_format": meta.sample_compression or meta.chunk_compression,
"info": dict(tensor.info),
} | [
768,
1094
] |
def METHOD_NAME(self):
return self.realm | [
19,
2440
] |
def METHOD_NAME(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import MinkowskiDistance
>>> metric = MinkowskiDistance(p=3)
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import MinkowskiDistance
>>> metric = MinkowskiDistance(p=3)
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax) | [
1288
] |
def METHOD_NAME(self):
result = LookupMethods().property_in("abcde", "bcd")
self.assertTrue(result) | [
9,
623
] |
def METHOD_NAME(center_x, center_y, r, x_grid, y_grid):
"""
:param center_x: x-coordinate of center position of circular mask
:param center_y: y-coordinate of center position of circular mask
:param r: radius of mask in pixel values
:param x_grid: x-coordinate grid
:param y_grid: y-coordinate grid
:return: mask array of shape x_grid with =0 inside the radius and =1 outside
:rtype: array of size of input grid with integers 0 or 1
"""
x_shift = x_grid - center_x
y_shift = y_grid - center_y
R = np.sqrt(x_shift * x_shift + y_shift * y_shift)
mask = np.empty_like(R, dtype="int")
mask[R > r] = 1
mask[R <= r] = 0
return mask | [
361,
1262,
1085
] |
def METHOD_NAME(self):
resources_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources", "modules-and-vars")
runner = Runner(db_connector=self.db_connector())
report = runner.run(root_folder=resources_path)
self.assertLessEqual(2, len(report.failed_checks))
self.assertLessEqual(12, len(report.passed_checks))
self.assertEqual(0, len(report.skipped_checks))
found_versioning_failure = False
for record in report.failed_checks:
if record.check_id != 'CKV_AWS_40':
self.assertIsNotNone(record.breadcrumbs)
if record.check_id == 'CKV_AWS_21':
found_versioning_failure = True
bc = record.breadcrumbs.get('versioning.enabled')
self.assertEqual(len(bc), 2)
bc = bc[0]
self.assertEqual(bc.get('type'), 'module')
self.assertEqual(os.path.relpath(bc.get('path'), resources_path), 'examples/complete/main.tf')
self.assertEqual(record.resource, 'module.s3_bucket.aws_s3_bucket.default')
self.assertTrue(found_versioning_failure) | [
9,
298,
61,
2045
] |
def METHOD_NAME(self, property_id=None):
if property_id is not None:
analysis_property_views = self.analysispropertyview_set.filter(property=property_id)
else:
analysis_property_views = self.analysispropertyview_set
return {
'number_of_analysis_property_views': self.analysispropertyview_set.count(),
'views': list(analysis_property_views.values_list('id', flat=True).distinct()),
'cycles': list(analysis_property_views.values_list('cycle', flat=True).distinct())
} | [
19,
1042,
1179,
100
] |
def METHOD_NAME(*args, **kwargs):
"""Return extension."""
return SmartSymbolsExtension(*args, **kwargs) | [
93,
2916
] |
def METHOD_NAME(self): | [
9,
35
] |
def METHOD_NAME(parser):
"""
Adds common command line arguments to a parser.
"""
# Logging levels
logging_group = parser.add_mutually_exclusive_group()
logging_group.add_argument(
'--log-level',
action=SetLogLevel,
choices=['FATAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
help="Set logging level of current script. Only one of 'log-level', 'verbose',"
" 'quiet' can be set at a time.")
logging_group.add_argument(
'--quiet',
'-q',
action=SetLogLevel,
nargs=0,
help="Display less outputs to console. Only one of 'log-level', 'verbose',"
" 'quiet' can be set at a time.")
logging_group.add_argument(
'--verbose',
'-v',
action=SetLogLevel,
nargs=0,
help="Increase logging verbosity to include DEBUG messages. Only one of "
"'log-level', 'verbose', 'quiet' can be set at a time.") | [
238,
67,
434
] |
def METHOD_NAME(self):
sleep( 5 )
amountRecharge = 2 * 10 ** 18
self.blockchain.recharge_user_wallet(self.config.mainnet_key, self.config.schain_name, amountRecharge)
sleep( 5 )
# deploy token
self.erc721 = self.blockchain.deploy_erc721_on_mainnet(self.config.mainnet_key, 'elv721', 'ELV')
# mint
address = self.blockchain.key_to_address(self.config.mainnet_key)
mint_txn = self.erc721.functions.mint(address, self.token_id)\
.buildTransaction({
'gas': 8000000,
'nonce': self.blockchain.get_transactions_count_on_mainnet(address)})
signed_txn = self.blockchain.web3_mainnet.eth.account\
.signTransaction(mint_txn, private_key=self.config.mainnet_key)
self.blockchain.web3_mainnet.eth.sendRawTransaction(signed_txn.rawTransaction)
self.blockchain.disableWhitelistERC721(self.config.mainnet_key, self.config.schain_name)
self.blockchain.enableAutomaticDeployERC721(self.config.schain_key, "Mainnet")
# send to schain
self.agent.transfer_erc721_from_mainnet_to_schain(self.erc721,
self.config.mainnet_key,
self.config.schain_key,
self.token_id,
self.timeout)
sleep( 5 )
#
amount_eth = 90 * 10 ** 15
#
self.agent.transfer_eth_from_mainnet_to_schain(self.config.mainnet_key,
self.config.schain_key,
amount_eth,
self.timeout)
#
sleep( 5 )
self.erc721_clone = self.blockchain.get_erc721_on_schain("Mainnet", self.erc721.address) | [
123
] |
def METHOD_NAME(
rules: Sequence[_RuleType], out_docs_dir: str, template_dir: str
) -> None:
# TODO: Add doc generation in a follow-up PR.
pass | [
370,
7643,
672
] |
def METHOD_NAME(self):
# One job with different execution_id
self.mock_api.data_job_execution_list.return_value = [self.data_job_1]
result = self.checker.is_job_execution_running(
"test_job", "test_team", "test_id"
)
assert result is True | [
9,
58,
128,
955,
147
] |
def METHOD_NAME(self):
"""Testing MDF robot."""
robot = abilab.MdfRobot.from_dir(os.path.join(abidata.dirpath, "refs", "si_bse_kpoints"))
assert len(robot) == 3
robot = abilab.MdfRobot()
robot.scan_dir(os.path.join(abidata.dirpath, "refs", "si_bse_kpoints"))
assert len(robot) == 3
repr(robot); str(robot)
df = robot.get_dataframe(with_geo=True)
assert df is not None
df_params = robot.get_params_dataframe()
assert "nsppol" in df_params
plotter = robot.get_multimdf_plotter()
if self.has_matplotlib():
assert plotter.plot(show=False)
assert robot.plot(show=False)
#robot.plot_conv_mdf(self, hue, mdf_type="exc_mdf", **kwargs):
if self.has_nbformat():
robot.write_notebook(nbpath=self.get_tmpname(text=True))
robot.close() | [
9,
14297,
4705
] |
def METHOD_NAME(tmpdir_factory, request):
"""Copy example archive to temp directory into an extension-less file for test"""
archive_file_stub = os.path.join(datadir, "Foo")
extension, add_extension = request.param
tmpdir = tmpdir_factory.mktemp("compression")
tmp_archive_file = os.path.join(
str(tmpdir), "Foo" + (("." + extension) if add_extension else "")
)
shutil.copy(archive_file_stub + "." + extension, tmp_archive_file)
return (tmp_archive_file, extension) | [
1622,
171,
61,
2916
] |
def METHOD_NAME():
ports = [0, 1, 2, 3]
config = ExploitationOptionsConfiguration(http_ports=ports)
assert config.http_ports == tuple(ports) | [
9,
15260,
1881,
830,
135
] |
def METHOD_NAME(self) -> int: ... | [
2904
] |
def METHOD_NAME(self):
for xp in (numpy, cupy):
x = testing.shaped_random((2, 3, 4), xp, numpy.complex128)
assert xp.shares_memory(x, x.imag) is True | [
9,
2587,
44,
331
] |
def METHOD_NAME(self):
logger.info("Starting...")
with open(
settings.TEST_DATA_DIR / "flashtext/keyword_remover_test_cases.json"
) as f:
self.test_cases = json.load(f) | [
0,
1
] |
def METHOD_NAME(x, data_format, keepdims):
steps_axis = [1] if data_format == "channels_last" else [2]
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res | [
2212,
285,
232,
9004
] |
def METHOD_NAME(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
library.write('BLOB', blob)
assert library.get_info('BLOB')['handler'] == 'PickleStore' | [
9,
19,
100,
15852,
279
] |
def METHOD_NAME(step):
selector = '.edit-term-submit'
elt = world.browser.find_element_by_css_selector(selector)
elt.click() | [
497,
73,
983,
3108
] |
def METHOD_NAME(launcher_id: str, address: str) -> None:
import asyncio
from .plotnft_funcs import change_payout_instructions
asyncio.run(change_payout_instructions(launcher_id, address)) | [
194,
7811,
897,
1660
] |
def METHOD_NAME(self):
self.num_nodes = 4 | [
0,
9,
434
] |
def METHOD_NAME(self):
obj = self.obj_factory()
obj.foo = 3
self.flush_event_loop()
notifications = self.notifications
self.assertEqual(len(notifications), 1)
thread_id, event = notifications[0]
self.assertEqual(event, (obj, "foo", 0, 3))
ui_thread = trait_notifiers.ui_thread
self.assertEqual(thread_id, ui_thread) | [
9,
857,
280,
57,
600
] |
def METHOD_NAME() -> None:
"""
The purpose of this test is to exercise a selection of rdflib features under "mypy --strict" review.
"""
graph = rdflib.Graph()
literal_one = rdflib.Literal("1", datatype=rdflib.XSD.integer)
literal_two = rdflib.Literal(2)
literal_true = rdflib.Literal(True)
# Set up predicate nodes, using hash IRI pattern.
predicate_p = rdflib.URIRef("http://example.org/predicates#p")
predicate_q = rdflib.URIRef("http://example.org/predicates#q")
predicate_r = rdflib.URIRef("http://example.org/predicates#r")
# Set up knowledge base nodes, using URN namespace, slash IRI pattern, and/or blank node.
kb_bnode = rdflib.BNode()
kb_http_uriref = rdflib.URIRef("http://example.org/kb/x")
kb_https_uriref = rdflib.URIRef("https://example.org/kb/y")
kb_urn_uriref = rdflib.URIRef("urn:example:kb:z")
graph.add((kb_urn_uriref, predicate_p, literal_one))
graph.add((kb_http_uriref, predicate_p, literal_one))
graph.add((kb_https_uriref, predicate_p, literal_one))
graph.add((kb_https_uriref, predicate_p, literal_two))
graph.add((kb_https_uriref, predicate_q, literal_two))
graph.add((kb_bnode, predicate_p, literal_one))
expected_nodes_using_predicate_q: Set[Node] = {kb_https_uriref}
computed_nodes_using_predicate_q: Set[Node] = set()
for triple in graph.triples((None, predicate_q, None)):
computed_nodes_using_predicate_q.add(triple[0])
assert expected_nodes_using_predicate_q == computed_nodes_using_predicate_q
one_usage_query = """\ | [
9,
-1,
539,
3446
] |
def METHOD_NAME(self):
return 17 | [
1835,
4372
] |
def METHOD_NAME(variant_scalar_rgb):
p = mi.Properties()
p.set_plugin_name('some_plugin')
p.set_id('some_id')
p['prop_1'] = 1
p['prop_2'] = 'hello'
assert str(p) == \ | [
5296,
1219
] |
def METHOD_NAME(self, msg, error_mode):
if error_mode > 1:
raise InterfaceError(msg)
if error_mode == 1:
logger.warning(msg)
return False | [
276,
168
] |
async def METHOD_NAME(instrument, elasticapm_client, redis_conn):
# The PING command is sent as a byte string, so this tests if we can handle
# the command both as a str and as bytes. See #1307
elasticapm_client.begin_transaction("transaction.test")
redis_conn.ping()
elasticapm_client.end_transaction("test")
transaction = elasticapm_client.events[TRANSACTION][0]
span = elasticapm_client.spans_for_transaction(transaction)[0]
assert span["name"] == "PING" | [
9,
1677
] |
def METHOD_NAME(self, mock_os):
mpathcount.get_root_dev_major()
assert(mock_os.major.called) | [
9,
19,
1563,
828,
3901
] |
def METHOD_NAME(self, path): | [
2423,
217,
157,
130,
954
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(o: object) -> str:
return type(o).__name__.lower() | [
44,
156
] |
def METHOD_NAME(connected_app, connected_user=None):
app = frappe.get_doc("Connected App", connected_app)
token_cache = app.get_token_cache(connected_user or frappe.session.user)
return bool(token_cache and token_cache.get_password("access_token", False)) | [
220,
466
] |
def METHOD_NAME(self, data):
"""
Adds data to the buffer.
Arguments:
data(str,Buffer): Data to add
"""
# Fast path for ''
if not data: return
if isinstance(data, Buffer):
self.size += data.size
self.data += data.data
else:
self.size += len(data)
self.data.append(data) | [
238
] |
def METHOD_NAME(self):
couch_row = {
'id': 'an-id',
'doc': {'a': 'document'},
'seq': '21',
'deleted': False
}
ways_to_make_change_object = [
change_from_couch_row(couch_row),
]
for change in ways_to_make_change_object:
self.assertTrue(isinstance(change, Change))
self.assertEqual('an-id', change.id)
self.assertEqual(couch_row['doc'], change.document)
self.assertEqual('21', change.sequence_id)
self.assertEqual(False, change.deleted)
for key in couch_row:
self.assertEqual(couch_row[key], change[key]) | [
9,
197,
24,
61,
280,
4400,
843
] |
def METHOD_NAME(self):
assert MyCallback.__prefix__ == "test"
with pytest.raises(ValueError, match="prefix required.+"):
class MyInvalidCallback(CallbackData):
pass | [
9,
176,
9260,
426,
984
] |
def METHOD_NAME(self, answer): | [
353,
1365,
5519,
10782,
857
] |
def METHOD_NAME(codeString, filename, reporter):
"""Check the Python source given by codeString} for flakes."""
# First, compile into an AST and handle syntax errors.
try:
tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(filename, 'problem decoding source')
else:
reporter.syntaxError(filename, msg, lineno, offset, text)
return
except Exception:
reporter.unexpectedError(filename, 'problem decoding source')
return
# Okay, it's syntactically valid. Now check it.
w = checker.Checker(tree, filename)
w.messages.sort(key=lambda m: m.lineno)
for warning in w.messages:
reporter.flake(warning) | [
250,
-1
] |
def METHOD_NAME(self, video):
matches = guess_matches(video, {
'title': self.title,
'year': self.year,
'season': self.season,
'episode': self.episode
})
video_type = 'episode' if isinstance(video, Episode) else 'movie'
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': video_type}))
return matches | [
19,
855
] |
def METHOD_NAME(self, method_uri):
"""
Retrieve the requests.Session like object associated with a security
method URI.
Parameters
----------
method_uri : str
URI representing the security method
"""
return self.credentials[method_uri] | [
19
] |
def METHOD_NAME(self):
pass | [
72,
710
] |
def METHOD_NAME(self):
# Parse custom arguments and append to self.args
self.parse_args()
if self.args.accuracy_only and self.args.calibration_only:
self.run_calibration()
else:
self.run_benchmark_or_accuracy() | [
22
] |
def METHOD_NAME(self, table):
assert isinstance(table, BaseTable)
source = {}
callbackKey = (type(table),)
if isinstance(table, FormatSwitchingBaseTable):
source["Format"] = int(table.Format)
callbackKey += (table.Format,)
for converter in table.getConverters():
if isinstance(converter, ComputedInt):
continue
value = getattr(table, converter.name)
enumClass = getattr(converter, "enumClass", None)
if enumClass:
source[converter.name] = value.name.lower()
elif isinstance(converter, Struct):
if converter.repeat:
source[converter.name] = [self.METHOD_NAME(v) for v in value]
else:
source[converter.name] = self.METHOD_NAME(value)
elif isinstance(converter, SimpleValue):
# "simple" values (e.g. int, float, str) need no further un-building
source[converter.name] = value
else:
raise NotImplementedError(
"Don't know how unbuild {value!r} with {converter!r}"
)
source = self._callbackTable.get(callbackKey, lambda s: s)(source)
return source | [
10572
] |
def METHOD_NAME(self, code): | [
19,
3224
] |
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.disk_name = AAZStrArg(
options=["-n", "--name", "--disk-name"],
help="The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema | [
56,
134,
135
] |
async def METHOD_NAME(use_hosted_api_server):
"""
Runs an agent listening to all queues.
Yields:
The anyio.Process.
"""
out = tempfile.TemporaryFile() # capture output for test assertions
# Will connect to the same database as normal test clients
async with open_process(
command=[
"prefect",
"agent",
"start",
"--match=nonexist",
],
stdout=out,
stderr=out,
env={**os.environ, **get_current_settings().to_environment_variables()},
) as process:
process.out = out
for _ in range(int(STARTUP_TIMEOUT / POLL_INTERVAL)):
await anyio.sleep(POLL_INTERVAL)
if out.tell() > 400:
# Sleep to allow startup to complete
# TODO: Replace with a healthcheck endpoint
await anyio.sleep(4)
break
assert out.tell() > 400, "The agent did not start up in time"
assert process.returncode is None, "The agent failed to start up"
# Yield to the consuming tests
yield process
# Then shutdown the process
try:
process.terminate()
except ProcessLookupError:
pass
out.close() | [
1849,
356
] |
def METHOD_NAME(args):
project_path = args.project_path
output_path = args.output_path
template_name = args.template_name
submission_id = args.submission_id
allowed_hosts = args.allowed_hosts
token = args.token
print(
f"Exporting {project_path=} {template_name=} {submission_id=} {allowed_hosts=} {token=}"
)
with tempfile.TemporaryDirectory() as tmpdirname:
# write the project to a file
contents = open(project_path, "r").read()
# replace URLs to Django by internal calls
for allowed_host in allowed_hosts.split(","):
escaped_allowed_host = re.escape(allowed_host)
pattern = rf"http(s)?://{escaped_allowed_host}(:[0-9]+)?/"
replacement = f"http://web:9000/"
contents = re.sub(pattern, replacement, contents)
# rewrite the permits layer url to only show the current feature
# TODO: In principle, filtering should not be necessary if the wfs3 endpoint performance was
# good enough for QGIS to load the whole layer. Filtering should be done only if desired
# (e.g. if a filter is set on the QGIS layer, somehow using the atlas id if needed).
# Also in theory, this should be set as a layer filter rather than on the provider URL,
# but this seems not supported by QGIS 3.24 (!!).
pattern = r"url='http://web:9000/wfs3/'"
replacement = rf"url='http://web:9000/wfs3/?submission_id={submission_id}'"
contents = contents.replace(pattern, replacement)
input_path = os.path.join(tmpdirname, "project.qgs")
open(input_path, "w").write(contents)
# start QGIS
qgs = QgsApplication([], False)
qgs.initQgis()
# prepare auth configuration
qgs.authManager().setMasterPassword("master", verify=True)
config = QgsAuthMethodConfig()
config.setId("geocity")
config.setName("geocity")
config.setMethod("APIHeader")
config.setConfigMap({"Authorization": f"Token {token}"})
qgs.authManager().storeAuthenticationConfig(config)
# load the conf once (seems to be required otherwise it's not available)
qgs.authManager().loadAuthenticationConfig("geocity", QgsAuthMethodConfig())
# open the project
project = QgsProject.instance()
project.read(input_path)
# iterate over all layers
for layer in QgsProject.instance().mapLayers().values():
print(f"checking layer {layer.name()}... ", end="")
if layer.isValid():
print("ok")
else:
print("invalid !")
# print layer information
print(f" {layer.dataProvider().uri()=}")
print(f" {layer.featureCount()=}")
print(f" {layer.dataProvider().error().message()=}")
# get the atlas
layout = project.layoutManager().layoutByName(template_name)
# TODO: Print a message when the atlas name is wrong, to make configuration mistakes easier to find
atlas = layout.atlas()
# configure the atlas
atlas.setEnabled(True)
atlas.setFilenameExpression("'export_'||@atlas_featurenumber")
# move to the requested feature (workaround using filter if the above does not work)
atlas.setFilterFeatures(True)
atlas.setFilterExpression(f"submission_id={submission_id}")
atlas.seekTo(0)
atlas.refreshCurrentFeature()
# show the coverage layer (for debugging)
coverage_layer = atlas.coverageLayer()
print(f"coverage layer {coverage_layer.name()}...", end="")
if coverage_layer.isValid():
print(" ok")
else:
print(" invalid !")
# show contents of the response
r = requests.get(
f"http://web:9000/wfs3/collections/submissions/items/{submission_id}",
headers={"Authorization": f"Token {token}"},
)
print(f"response code: {r.status_code}")
print(f"response content: {r.content}")
# export
settings = QgsLayoutExporter.ImageExportSettings()
QgsLayoutExporter.exportToImage(
layout.atlas(), os.path.join(tmpdirname, "export.png"), "png", settings
)
# exit QGIS
qgs.exitQgis()
# show exported files (for debugging)
print(f"exported {os.listdir(tmpdirname)}")
# return the file
export_image_path = os.path.join(tmpdirname, "export_1.png")
os.rename(export_image_path, output_path)
print(f"saved file to {output_path}")
exit(0) | [
294
] |
def METHOD_NAME(self) -> int | None:
return self._pyarrow_df.METHOD_NAME() | [
181,
1346
] |
def METHOD_NAME(self, cmd, *args, **kwargs):
if args:
cmd = cmd.format(*args)
args = ()
return self.cmd(cmd + ' --auth-mode login --backup-intent', *args, **kwargs) | [
171,
499,
1660
] |
def METHOD_NAME(inplanes, outplanes, stride):
"""downsample_basic_block.
:param inplanes: int, number of channels in the input sequence.
:param outplanes: int, number of channels produced by the convolution.
:param stride: int, stride of the convolution.
"""
return nn.Sequential(
nn.Conv1d(
inplanes,
outplanes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm1d(outplanes),
) | [
3305,
756,
573
] |
def METHOD_NAME(self):
"""Check if needs to reposition the window, and if so - do it."""
curline = int(self.widget.index("insert").split('.')[0])
if curline == self.lastline:
return
self.lastline = curline
self.widget.see("insert")
if curline == self.parenline:
box = self.widget.bbox("%d.%d" % (self.parenline,
self.parencol))
else:
box = self.widget.bbox("%d.0" % curline)
if not box:
box = list(self.widget.bbox("insert"))
# align to left of window
box[0] = 0
box[2] = 0
x = box[0] + self.widget.winfo_rootx() + 2
y = box[1] + box[3] + self.widget.winfo_rooty()
self.tipwindow.wm_geometry("+%d+%d" % (x, y)) | [
195,
1092
] |
def METHOD_NAME(self, request, *args, **kwargs):
return super(OrganizationAPI, self).METHOD_NAME(request, *args, **kwargs) | [
1276
] |
def METHOD_NAME(vertex: KubernetesBlock, vertices: list[KubernetesBlock]) -> list[int]:
"""
A connection is defined by a match between the reference definitions of a vertex (of a certain type) and a
potential vertex (of any type).
example:
A Pod with the property 'spec.serviceAccountName' set to 'service-123' will match a resource of type
'ServiceAccount' whose 'metadata.name' property equals 'service-123'
"""
connections: list[int] = []
for potential_vertex_index, potential_vertex in enumerate(vertices):
if potential_vertex.id == vertex.id:
continue
resource_references_definitions: list[dict[str, str] | list[dict[str, dict[str, str]]]] = ResourceKeywordIdentifier.KINDS_KEYWORDS_MAP[vertex.attributes["kind"]] # type: ignore[assignment]
# check that resource items comply with all reference definitions defined in ResourceKeywordIdentifier
for references_definition in resource_references_definitions:
match = True
if isinstance(references_definition, dict):
for potential_vertex_key, vertex_key in references_definition.items():
match = KeywordEdgeBuilder._find_match_in_attributes(vertex, potential_vertex, potential_vertex_key, vertex_key, match)
if match:
connections.append(potential_vertex_index)
# some items are nested in lists and their value in the vertex is concatenated with their index,
# like so: subjects.0.name
elif isinstance(references_definition, list):
# not really a loop, just extracting the dict's key
for base_key_attribute, reference_definitions_items in references_definition[0].items():
vertex_attribute_references_list: list[dict[str, str]] = vertex.attributes.get(base_key_attribute) # type: ignore[assignment]
if not vertex_attribute_references_list:
continue
# iterate every item on the list as a separate resource
for i in range(len(vertex_attribute_references_list)):
match = True
for potential_vertex_key, vertex_key in reference_definitions_items.items():
vertex_key = f"{base_key_attribute}.{i}.{vertex_key}"
match = KeywordEdgeBuilder._find_match_in_attributes(vertex, potential_vertex, potential_vertex_key, vertex_key, match)
if match:
connections.append(potential_vertex_index)
return connections | [
416,
560
] |
def METHOD_NAME(data):
"""Removes in place the $schema values.
Removes the root and the sources $schema.
:param data: the data representation of the current contribution as a dict.
:returns: the modified data.
:rtype: dict.
"""
data.pop('$schema', None)
for source in current_app.config.get('RERO_ILS_AGENTS_SOURCES', []):
if source in data:
data[source].pop('$schema', None)
return data | [
188,
135
] |
def METHOD_NAME():
response = _fetch_software_updates()
with transaction.atomic():
seen_software_updates = []
for kwargs, supported_devices in _iter_software_updates(response):
su, _ = SoftwareUpdate.objects.update_or_create(**kwargs)
seen_software_updates.append(su.pk)
for device_id in supported_devices:
sd, _ = SoftwareUpdateDeviceID.objects.get_or_create(software_update=su, device_id=device_id)
(SoftwareUpdateDeviceID.objects.filter(software_update=su)
.exclude(device_id__in=supported_devices).delete())
SoftwareUpdate.objects.exclude(pk__in=seen_software_updates).delete() | [
164,
2733,
682
] |
def METHOD_NAME(self):
# test bottomup training
dataset = self.build_crowdpose_dataset(data_mode='bottomup')
self.assertEqual(len(dataset), 2)
self.check_data_info_keys(dataset[0], data_mode='bottomup')
# test bottomup testing
dataset = self.build_crowdpose_dataset(
data_mode='bottomup', test_mode=True)
self.assertEqual(len(dataset), 2)
self.check_data_info_keys(dataset[0], data_mode='bottomup') | [
9,
6795
] |
def METHOD_NAME(self):
self.require_output_path()
labels = list(
filter(
lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
)
)
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path) | [
215,
415
] |
def METHOD_NAME(self, command_name, para_list, ctx, op=operator.eq, expect_result=0):
runner = CliRunner()
result = runner.invoke(config.config.commands["interface"].commands[command_name], para_list, obj = ctx)
print(result.output)
assert op(result.exit_code, expect_result)
return result | [
756,
250
] |
def METHOD_NAME(self): | [
203,
2846,
146,
2228
] |
def METHOD_NAME (arguments, log, progress=True):
# setup a connection to the Slycat Web Server.
connection = slycat.web.client.connect(arguments)
# create a new project to contain our model.
pid = connection.find_or_create_project(arguments.project_name, arguments.project_description)
# create the new, empty model.
mid = connection.post_project_models(pid, "DAC", arguments.model_name, arguments.marking,
arguments.model_description)
# assign model UI defaults
connection.put_model_parameter(mid, "dac-ui-parms", dac_model_defaults())
# upload data to new model
connection.upload_files(mid, [arguments.dac_gen_zip],
"dac-gen-zip-parser", [["Null"], ["DAC"]], progress)
# mark model as finished
connection.post_model_finish(mid)
# wait until the model is ready
connection.join_model(mid)
return mid | [
172,
578
] |
def METHOD_NAME(ms_lib, symbol):
# Lower level checks to ensure end_time is set correctly
start_time = 'start'
metadata = None
for item in ms_lib.find({'symbol': symbol}, sort=[('start_time', 1)]):
if start_time != 'start' and item['start_time'] != start_time:
raise ValueError('end_time not set correctly')
start_time = item.get('end_time')
if item['metadata'] == metadata:
raise ValueError('consecutive duplicate metadata')
metadata = item['metadata']
assert start_time == None, 'end_time of the last entry should be unset' | [
7700,
250
] |
def METHOD_NAME(self):
self.patch = create_patches()[0]
self.user = create_user() | [
0,
1
] |
def METHOD_NAME(self):
f, _, _ = self.make_frames()
with self.assertRaises(AttributeError):
del f.f_lineno | [
9,
474,
4155,
1269,
10750
] |
def METHOD_NAME(attack, dataset, attack_train_ratio):
(x_train, y_train), _ = dataset
attack_train_size = int(len(x_train) * attack_train_ratio)
kwargs = {
"norm": 2,
"max_iter": 2,
"max_eval": 4,
"init_eval": 1,
"init_size": 1,
"verbose": False,
}
# infer attacked feature on remainder of data
inferred_train_prob = attack.infer(
x_train[attack_train_size:], y_train[attack_train_size:], probabilities=True, **kwargs
)
# check accuracy
backend_check_probabilities(inferred_train_prob) | [
3127,
250,
2397,
3318
] |
def METHOD_NAME(local_ds: Dataset):
# TODO: when chunk-wise compression is done, `labels` should be compressed using lz4, so this test needs to be updated
images = local_ds.create_tensor("images", htype="image", sample_compression="png")
labels = local_ds.create_tensor("labels", htype="class_label")
images.extend(np.ones((16, 10, 10, 3), dtype="uint8"))
labels.extend(np.ones((16, 1), dtype="uint32"))
for batch in local_ds.tensorflow():
# converting tf Tensors to numpy
X = batch["images"].numpy()
T = batch["labels"].numpy()
assert X.shape == (10, 10, 3)
assert T.shape == (1,) | [
9,
1696,
41,
4483
] |
def METHOD_NAME():
"""Parses the command line arguments"""
parser = argparse.ArgumentParser(
description="Outputs entity hierarchy graph from a device's "
"ENTITY-MIB::entPhysicalTable response",
usage="%(prog)s device",
)
parser.add_argument(
'device',
type=device,
help="The NAV-monitored IP device to query. Must be either "
"a sysname prefix or an IP address.",
)
parser.add_argument(
"--port",
"-p",
type=int,
help="change the portnumber, [default: 161]",
metavar="PORT",
default=161,
)
return parser.METHOD_NAME() | [
214,
335
] |
def METHOD_NAME(self, queryset, name, value):
if not value: # pragma: no cover
return queryset
return queryset.filter(
Q(
pk__in=models.ProjectAssignee.objects.filter(
is_manager=True, user_id=value
).values("project_id"),
)
| Q(
customer_id__in=models.CustomerAssignee.objects.filter(
is_manager=True, user_id=value
).values("customer_id"),
)
) | [
527,
220,
722
] |
def METHOD_NAME(self):
if not self.already_saved and self.device.current_index > 0:
reply = QMessageBox.question(self, self.tr("Save data?"),
self.tr("Do you want to save the data you have captured so far?"),
QMessageBox.Yes | QMessageBox.No | QMessageBox.Abort)
if reply == QMessageBox.Yes:
self.on_save_clicked()
elif reply == QMessageBox.Abort:
return False
try:
sample_rate = self.device.sample_rate
except:
sample_rate = 1e6
self.files_recorded.emit(self.recorded_files, sample_rate)
return True | [
73,
1553,
1462
] |
def METHOD_NAME(dash_dcc):
app = Dash(__name__)
min_date = pd.to_datetime("2010-01-01")
max_date = pd.to_datetime("2099-01-01")
disabled_days = [
x for x in pd.date_range(min_date, max_date, freq="D") if x.day != 1
]
app.layout = html.Div(
[
html.Label("Operating Date"),
dcc.DatePickerSingle(
id="dps",
min_date_allowed=min_date,
max_date_allowed=max_date,
disabled_days=disabled_days,
),
]
)
dash_dcc.start_server(app)
date = dash_dcc.wait_for_element("#dps", timeout=5)
"""
The WebDriver click() function hangs while the React code is
executing, so it is necessary to check the execution time.
"""
start_time = time.time()
date.click()
assert time.time() - start_time < 5
dash_dcc.wait_for_element(".SingleDatePicker_picker", timeout=5)
assert dash_dcc.get_logs() == [] | [
9,
-1,
-1,
1463,
659
] |
def METHOD_NAME(self, _flag):
"""Set blocking."""
return None | [
9141
] |
def METHOD_NAME(self):
self._copy_file_without_generated_keywords(KEYWORD_FILE, TEST_PY_FILE)
self.addCleanup(support.unlink, TEST_PY_FILE)
self.assertFalse(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))
self.assertEqual((0, b''), self._generate_keywords(GRAMMAR_FILE,
TEST_PY_FILE))
self.assertTrue(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE)) | [
9,
1866,
11504,
61,
2069,
171
] |
def METHOD_NAME(self):
"""test redirect with appending slash"""
# login testuser
self.client.login(
username='testuser_analysisstatus_api', password='aCTVRIdJ4cyVSkYiJKrM'
)
# create url
destination = urllib.parse.quote('/api/analysisstatus/', safe='/')
# get response
response = self.client.get('/api/analysisstatus', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
) | [
9,
9335,
245,
58,
1736
] |
def METHOD_NAME(a: t.Any, b: t.Any) -> t.Any:
return (a > b) - (a < b) | [
2014
] |
def METHOD_NAME(
data_fixture,
):
user = data_fixture.create_user()
unrelated_table = data_fixture.create_database_table(user=user)
table_a = data_fixture.create_database_table(user=user)
table_b = data_fixture.create_database_table(user=user, database=table_a.database)
table_a_text_field = data_fixture.create_text_field(table=table_a)
table_a.refresh_from_db()
table_b.refresh_from_db()
old_table_a_version = table_a.version
old_table_b_version = table_b.version
old_unrelated_table_version = unrelated_table.version
assert get_cached_model_field_attrs(table_a) is None
assert get_cached_model_field_attrs(table_b) is None
assert get_cached_model_field_attrs(unrelated_table) is None
FieldHandler().update_field(
user, table_a_text_field, "link_row", link_row_table=table_b, name="new"
)
table_a.refresh_from_db()
table_b.refresh_from_db()
unrelated_table.refresh_from_db()
assert old_table_a_version == table_a.version
assert old_table_b_version == table_b.version
assert old_unrelated_table_version == unrelated_table.version
assert get_cached_model_field_attrs(table_a) is None
assert get_cached_model_field_attrs(table_b) is None
assert get_cached_model_field_attrs(unrelated_table) is None | [
9,
1046,
193,
578,
596
] |
def METHOD_NAME(self, model_name: str, name: str, app_label: str) -> bool: ... | [
1004,
101
] |