text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME() -> Path:
return _get_data_folder().joinpath("ens.json") | [
19,
157
] |
def METHOD_NAME(self, reference=None):
"""
Returns (:py:class:`.Placeable`, :py:class:`.Vector`) tuple where
the vector points to the center of the placeable, in ``x``, ``y``,
and ``z``. This can be passed into any :py:class:`.Robot` or
:py:class:`.Pipette` method ``location`` argument.
If ``reference`` (a :py:class:`.Placeable`) is provided, the return
value will be in that placeable's coordinate system.
:param reference: An optional placeable for the vector to be relative
to.
:returns: A tuple of the placeable and the offset. This can be passed
into any :py:class:`.Robot` or :py:class:`.Pipette` method
``location`` argument.
"""
pass | [
1262
] |
def METHOD_NAME(path, exit_on_err=False, logger=None):
"""
Try creating required directory structure
ignore EEXIST and raise exception for rest of the errors.
Print error in stderr and exit if exit_on_err is set, else
raise exception.
"""
try:
os.makedirs(path)
except (OSError, IOError) as e:
if e.errno == EEXIST and os.path.isdir(path):
pass
else:
if exit_on_err:
fail("Fail to create dir %s: %s" % (path, e), logger=logger)
else:
raise | [
16613
] |
def METHOD_NAME(prompt='Password: ', stream=None):
"""Prompt for a password, with echo turned off.
Args:
prompt: Written on stream to ask for the input. Default: 'Password: '
stream: A writable file object to display the prompt. Defaults to
the tty. If no tty is available defaults to sys.stderr.
Returns:
The seKr3t input.
Raises:
EOFError: If our input tty or stdin was closed.
GetPassWarning: When we were unable to turn echo off on the input.
Always restores terminal settings before returning.
"""
fd = None
tty = None
try:
# Always try reading and writing directly on the tty first.
fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
tty = os.fdopen(fd, 'w+', 1)
input = tty
if not stream:
stream = tty
except EnvironmentError, e:
# If that fails, see if stdin can be controlled.
try:
fd = sys.stdin.fileno()
except (AttributeError, ValueError):
passwd = fallback_getpass(prompt, stream)
input = sys.stdin
if not stream:
stream = sys.stderr
if fd is not None:
passwd = None
try:
old = termios.tcgetattr(fd) # a copy to save
new = old[:]
new[3] &= ~termios.ECHO # 3 == 'lflags'
tcsetattr_flags = termios.TCSAFLUSH
if hasattr(termios, 'TCSASOFT'):
tcsetattr_flags |= termios.TCSASOFT
try:
termios.tcsetattr(fd, tcsetattr_flags, new)
passwd = _raw_input(prompt, stream, input=input)
finally:
termios.tcsetattr(fd, tcsetattr_flags, old)
stream.flush() # issue7208
except termios.error, e:
if passwd is not None:
# _raw_input succeeded. The final tcsetattr failed. Reraise
# instead of leaving the terminal in an unknown state.
raise
# We can't control the tty or stdin. Give up and use normal IO.
# fallback_getpass() raises an appropriate warning.
del input, tty # clean up unused file objects before blocking
passwd = fallback_getpass(prompt, stream)
stream.write('\n')
return passwd | [
1226,
1009
] |
def METHOD_NAME(self):
notebook = self.guake.get_notebook()
current_page_index = notebook.get_current_page()
terminals = notebook.get_terminals_for_page(current_page_index)
return len(terminals) | [
19,
3108,
29
] |
def METHOD_NAME(self, params=None, headers=None):
"""
Returns all alerts.
"""
return self.transport.perform_request(
"GET",
_make_path("_plugins", "_alerting", "monitors", "alerts"),
params=params,
headers=headers,
) | [
19,
5468
] |
def METHOD_NAME(ctx: click.Context, **kibana_kwargs):
"""Commands for integrating with Kibana."""
ctx.ensure_object(dict)
# only initialize an kibana client if the subcommand is invoked without help (hacky)
if sys.argv[-1] in ctx.help_option_names:
click.echo('Kibana client:')
click.echo(format_command_options(ctx))
else:
ctx.obj['kibana'] = get_kibana_client(**kibana_kwargs) | [
3840,
846
] |
def METHOD_NAME(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
) | [
314,
2491
] |
def METHOD_NAME(self):
self.task_config.config["options"] = {
"object": "Customizable_Rollup_Setings__c",
"field": "Rollups_Account_Batch_Size__c",
"value": 200,
"poll_interval": 1,
}
task, url = self._get_url_and_task()
responses.add(
responses.GET,
url,
status=400,
json=[
{
"message": "\nSELECT SetupOwnerId FROM npe5__Affiliation__c\n ^\nERROR at Row:1:Column:8\nNo such column 'SetupOwnerId' on entity 'npe5__Affiliation__c'. If you are attempting to use a custom field, be sure to append the '__c' after the custom field name. Please reference your WSDL or the describe call for the appropriate names.",
"errorCode": "INVALID_FIELD",
}
],
)
with pytest.raises(TaskOptionsError) as e:
task()
assert "supported" in str(e.value) | [
9,
22,
343,
817,
618,
130,
817
] |
def METHOD_NAME(self, **kwargs: Any) -> bool:
"""Return 204 status code if successful.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_head204_request(
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299 | [
1543
] |
def METHOD_NAME(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = False
e1 = threading.Event()
e2 = threading.Event()
def f():
nonlocal passed
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed) | [
9,
1684,
3351,
11277
] |
def METHOD_NAME(server: Server, timeout: Optional[float] = 10):
server.start()
server.wait_is_up(timeout)
try:
yield server
finally:
server.shutdown() | [
163,
198
] |
def METHOD_NAME(self):
validate_intrinsic_if_items(["Condition", "Then", "Else"]) | [
9,
187,
8464,
217,
1768,
1205
] |
async def METHOD_NAME(self, nativeId: str) -> ArloDeviceBase:
if not nativeId.endswith("siren"):
return None
return self.get_or_create_siren() | [
19,
398
] |
def METHOD_NAME(self, experiment, path=None, destination_dir=None):
pass | [
136,
1831
] |
def METHOD_NAME(session):
"""
This function is used to add an extra header for the User-Agent in the Botocore session,
as described in the pull request: https://github.com/boto/botocore/pull/2682
Parameters
----------
session : botocore.session.Session
The Botocore session to which the user-agent function will be registered.
Raises
------
Exception
If there is an issue while adding the extra header for the User-Agent.
"""
try:
session.register(TARGET_SDK_EVENT, _create_feature_function(DEFAULT_FEATURE))
except Exception:
logger.debug("Can't add extra header User-Agent") | [
1796,
4312,
240
] |
def METHOD_NAME(self, action="GET", url="/", data=None, query_params=None):
# Not use for tests
pass | [
377
] |
def METHOD_NAME(self):
return bool(int(self._raw_data.get('nomodels'))) | [
-1
] |
def METHOD_NAME(self, mocked_om):
"""test flatten method returns expected results
Args:
mocked_om (_type_):
"""
data = {
"DashboardService": {
"Chart": {
"None": {
"None": {
"missingOwner": 12,
"completedDescriptions": 12,
"hasOwner": 0,
"missingDescriptions": 0,
},
"Tier.Tier1": {
"missingOwner": 5,
"completedDescriptions": 1,
"hasOwner": 3,
"missingDescriptions": 8,
},
},
"Marketing": {
"Tier.Tier1": {
"missingOwner": 0,
"completedDescriptions": 0,
"hasOwner": 7,
"missingDescriptions": 5,
}
},
},
},
"TableService": {
"Table": {
"None": {
"None": {
"missingOwner": 12,
"completedDescriptions": 12,
"hasOwner": 0,
"missingDescriptions": 0,
}
}
},
},
}
expected = [
ReportData(
timestamp=None,
reportDataType=ReportDataType.EntityReportData.value,
data=EntityReportData(
entityType="Chart",
serviceName="DashboardService",
team=None,
entityTier=None,
missingOwner=12,
completedDescriptions=12,
hasOwner=0,
missingDescriptions=0,
), # type: ignore
),
ReportData(
timestamp=None,
reportDataType=ReportDataType.EntityReportData.value,
data=EntityReportData(
entityType="Chart",
serviceName="DashboardService",
team=None,
entityTier="Tier.Tier1",
missingOwner=5,
completedDescriptions=1,
hasOwner=3,
missingDescriptions=8,
), # type: ignore
),
ReportData(
timestamp=None,
reportDataType=ReportDataType.EntityReportData.value,
data=EntityReportData(
entityType="Chart",
serviceName="DashboardService",
team="Marketing",
entityTier="Tier.Tier1",
missingOwner=0,
completedDescriptions=0,
hasOwner=7,
missingDescriptions=5,
), # type: ignore
),
ReportData(
timestamp=None,
reportDataType=ReportDataType.EntityReportData.value,
data=EntityReportData(
entityType="Table",
serviceName="TableService",
team=None,
entityTier=None,
missingOwner=12,
completedDescriptions=12,
hasOwner=0,
missingDescriptions=0,
), # type: ignore
),
]
processed = []
for flat_result in EntityReportDataProcessor(mocked_om)._flatten_results(data):
flat_result.timestamp = None
processed.append(flat_result)
assert all(
k in flat_result.data.dict()
for k in [
"entityType",
"entityTier",
"team",
"organization",
"completedDescriptions",
"missingDescriptions",
"hasOwner",
"missingOwner",
]
)
self.assertListEqual(expected, processed) | [
9,
247,
51
] |
def METHOD_NAME(self, row_index:int):
return self.rows[row_index] if (row_index >= 0) and (row_index < self.length()) else None | [
19
] |
def METHOD_NAME(self):
with self._cond:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks.get_value() == 0:
self._cond.notify_all() | [
758,
1658
] |
def METHOD_NAME():
assert parsed_items_prev[0]["description"] == "" | [
9,
1067,
1491
] |
def METHOD_NAME(back=0):
import traceback
tb = traceback.extract_stack(limit=3+back)
tb.reverse()
callee = tb[1][:3]
caller_bases[callee] = caller_bases.get(callee, 0) + 1
for caller in tb[2:]:
caller = callee + caller[:3]
try:
entry = caller_dicts[callee]
except KeyError:
caller_dicts[callee] = entry = {}
entry[caller] = entry.get(caller, 0) + 1
callee = caller | [
2575,
2576
] |
def METHOD_NAME(c_op, index):
"""Returns a wrapped TF_Output with specified operation and index.
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output
"""
ret = c_api.TF_Output()
ret.oper = c_op
ret.index = index
return ret | [
554,
146
] |
def METHOD_NAME(self):
self.assertTagsEqual('foo', 0, 0, 0) | [
9,
654,
82
] |
def METHOD_NAME(fileinfo): # specify file
# Accept a file name or a file descriptor; make sure mode is 'rb' (read binary)
fd = open(fileinfo, mode='rb')
text = fd.read()
tris = [] # list of triangles to compound
keywords = [b'outer', b'endloop', b'endfacet', b'solid', b'endsolid']
if False: # prevent executing code for binary file
pass
# The following code for binary files must be updated: | [
10277,
24,
7916
] |
def METHOD_NAME(candidate, cycle, district=None):
if cycle:
if candidate['office'] == 'H':
if candidate.get('state') and candidate.get('district'):
district_url = '/' + str(candidate['state']) + '/' + candidate['district']
else:
return None
elif candidate['office'] == 'S':
if candidate.get('state'):
district_url = '/' + str(candidate['state'])
else:
return None
else:
district_url = ''
return '/data/elections/' + candidate['office_full'].lower() + district_url + '/' + str(cycle)
else:
return None | [
19,
6315,
274
] |
def METHOD_NAME(tensor, prefix, *, tsfc_parameters=None):
"""Compiles the TSFC form associated with a Slate :class:`~.Tensor`
object. This function will return a :class:`ContextKernel`
which stores information about the original tensor, integral types
and the corresponding TSFC kernels.
:arg tensor: A Slate `~.Tensor`.
:arg prefix: An optional `string` indicating the prefix for the
subkernel.
:arg tsfc_parameters: An optional `dict` of parameters to provide
TSFC.
Returns: A `ContextKernel` containing all relevant information.
"""
assert tensor.terminal, (
"Only terminal tensors have forms associated with them!"
)
# Sets a default name for the subkernel prefix.
mapper = RemoveRestrictions()
integrals = map(partial(map_integrand_dags, mapper),
tensor.form.integrals())
transformed_integrals = transform_integrals(integrals)
cxt_kernels = []
assert prefix is not None
for orig_it_type, integrals in transformed_integrals.items():
subkernel_prefix = prefix + "%s_to_" % orig_it_type
form = Form(integrals)
kernels = tsfc_compile(form,
subkernel_prefix,
parameters=tsfc_parameters,
split=False, diagonal=tensor.diagonal)
if kernels:
cxt_k = ContextKernel(tensor=tensor,
coefficients=form.coefficients(),
constants=extract_firedrake_constants(form),
original_integral_type=orig_it_type,
tsfc_kernels=kernels)
cxt_kernels.append(cxt_k)
cxt_kernels = tuple(cxt_kernels)
return cxt_kernels | [
296,
1019,
1029
] |
def METHOD_NAME():
import sentry
base = os.path.abspath(os.path.join(sentry.__file__, "../../.."))
emit_dot(os.path.join(base, "import-graph.dot"))
emit_ascii_tree(os.path.join(base, "import-graph.txt")) | [
77,
1537
] |
def METHOD_NAME(examples):
return tokenizer(examples["text"]) | [
667
] |
def METHOD_NAME(wire_in_grid):
return '{}/{}'.format(wire_in_grid.tile, wire_in_grid.wire) | [
324,
8915,
156
] |
def METHOD_NAME(self):
responses.add(
responses.POST, "https://example.org/oauth/token", json={"token": "a-fake-token"}
)
pipeline = IdentityProviderPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" in result
assert "a-fake-token" == result["token"]
assert len(responses.calls) == 1
assert responses.calls[0].request.url == "https://example.org/oauth/token"
data = dict(parse_qsl(responses.calls[0].request.body))
assert data == {
"client_id": "123456",
"client_secret": "secret-value",
"code": "auth-code",
"grant_type": "authorization_code",
"redirect_uri": "http://testserver/extensions/default/setup/",
} | [
9,
2088,
466,
1434
] |
def METHOD_NAME(self):
return self._typename | [
5541
] |
def METHOD_NAME(tag, keys):
"""Register the ctags-complete command for the newly-created commander."""
c = keys.get('c')
if c:
c.k.registerCommand('ctags-complete', start) | [
69,
129
] |
def METHOD_NAME(ifp, ofp, xml=0, autoclose=(), verbatims=()):
if xml:
autoclose = ()
attrs = {}
lastopened = None
knownempties = []
knownempty = 0
lastempty = 0
inverbatim = 0
while 1:
line = ifp.readline()
if not line:
break
type = line[0]
data = line[1:]
if data and data[-1] == "\n":
data = data[:-1]
if type == "-":
data = esistools.decode(data)
data = escape(data)
if not inverbatim:
data = data.replace("---", "—")
ofp.write(data)
if "\n" in data:
lastopened = None
knownempty = 0
lastempty = 0
elif type == "(":
if data == "COMMENT":
ofp.write("<!--")
continue
data = map_gi(data, _elem_map)
if knownempty and xml:
ofp.write("<%s%s/>" % (data, format_attrs(attrs, xml)))
else:
ofp.write("<%s%s>" % (data, format_attrs(attrs, xml)))
if knownempty and data not in knownempties:
# accumulate knowledge!
knownempties.append(data)
attrs = {}
lastopened = data
lastempty = knownempty
knownempty = 0
inverbatim = data in verbatims
elif type == ")":
if data == "COMMENT":
ofp.write("-->")
continue
data = map_gi(data, _elem_map)
if xml:
if not lastempty:
ofp.write("</%s>" % data)
elif data not in knownempties:
if data in autoclose:
pass
elif lastopened == data:
ofp.write("</>")
else:
ofp.write("</%s>" % data)
lastopened = None
lastempty = 0
inverbatim = 0
elif type == "A":
name, type, value = data.split(" ", 2)
name = map_gi(name, _attr_map)
attrs[name] = esistools.decode(value)
elif type == "e":
knownempty = 1
elif type == "&":
ofp.write("&%s;" % data)
knownempty = 0
else:
raise RuntimeError, "unrecognized ESIS event type: '%s'" % type
if LIST_EMPTIES:
dump_empty_element_names(knownempties) | [
197
] |
def METHOD_NAME(
data: pd.Series,
n_fast: int = 12,
n_slow: int = 26,
n_signal: int = 9,
) -> pd.DataFrame:
"""Moving average convergence divergence
Parameters
----------
data: pd.Series
Values for calculation
n_fast : int
Fast period
n_slow : int
Slow period
n_signal : int
Signal period
Returns
-------
pd.DataFrame
Dataframe of technical indicator
"""
if isinstance(data, pd.DataFrame):
console.print("[red]Please send a series and not a DataFrame.[/red]\n")
return pd.DataFrame()
return pd.DataFrame(
ta.METHOD_NAME(data, fast=n_fast, slow=n_slow, signal=n_signal).dropna()
) | [
-1
] |
def METHOD_NAME(self):
# mock fetch_timeseries() because 'keys_ts' has been pre-populated
self.log_stats_parser.fetch_timeseries = MagicMock()
# condition: evaluate_expression
cond1 = Condition('cond-1')
cond1 = TimeSeriesCondition.create(cond1)
cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
cond1.set_parameter('behavior', 'evaluate_expression')
keys = [
'rocksdb.manifest.file.sync.micros.p99',
'rocksdb.db.get.micros.p50'
]
cond1.set_parameter('keys', keys)
cond1.set_parameter('aggregation_op', 'latest')
# condition evaluates to FALSE
cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)>200')
self.log_stats_parser.check_and_trigger_conditions([cond1])
expected_cond_trigger = {NO_ENTITY: [1792.0, 15.9638]}
self.assertIsNone(cond1.get_trigger())
# condition evaluates to TRUE
cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)<200')
self.log_stats_parser.check_and_trigger_conditions([cond1])
expected_cond_trigger = {NO_ENTITY: [1792.0, 15.9638]}
self.assertDictEqual(expected_cond_trigger, cond1.get_trigger())
# ensure that fetch_timeseries() was called
self.log_stats_parser.fetch_timeseries.assert_called() | [
9,
250,
61,
2117,
1626,
1171,
1371
] |
def METHOD_NAME(self):
dateString=time.strftime("%Y_%m_%d_%H", time.localtime())
for i in range(26):
path="%s_%s_%s.log"%(self.path, dateString, chr(i+97))
if not os.path.exists(path) or os.stat(path)[6] < self.sizeLimit:
return path
# Hmm, 26 files are full? throw the rest in z:
# Maybe we should clear the self.sizeLimit here... maybe.
return path | [
171,
157
] |
def METHOD_NAME(self):
s = "abcdefghijklmnopqrstuvwxyz\0"
dll = CDLL(_ctypes_test.__file__)
dll.my_wcsdup.restype = POINTER(c_wchar)
dll.my_wcsdup.argtypes = POINTER(c_wchar),
dll.my_free.restype = None
res = dll.my_wcsdup(s[:-1])
self.assertEqual(res[:len(s)], s)
self.assertEqual(res[:len(s):], s)
self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
import operator
self.assertRaises(TypeError, operator.setitem,
res, slice(0, 5), "abcde")
dll.my_free(res)
if sizeof(c_wchar) == sizeof(c_short):
dll.my_wcsdup.restype = POINTER(c_short)
elif sizeof(c_wchar) == sizeof(c_int):
dll.my_wcsdup.restype = POINTER(c_int)
elif sizeof(c_wchar) == sizeof(c_long):
dll.my_wcsdup.restype = POINTER(c_long)
else:
self.skipTest('Pointers to c_wchar are not supported')
res = dll.my_wcsdup(s[:-1])
tmpl = list(range(ord("a"), ord("z")+1))
self.assertEqual(res[:len(s)-1], tmpl)
self.assertEqual(res[:len(s)-1:], tmpl)
self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
dll.my_free(res) | [
9,
1373,
2980
] |
def METHOD_NAME(item):
if item.startswith("//"):
return item[2:].replace("/", "_").replace(":", "_")
return item | [
24,
334,
1030
] |
def METHOD_NAME(tmp_path, any_grid):
if version.parse(xtgeo_version) < version.parse("2.16"):
pytest.skip()
any_grid.to_file(tmp_path / "grid.roff", fformat="roff")
with pytest.warns(DeprecationWarning, match="from_file is deprecated"):
any_grid.from_file(tmp_path / "grid.roff", fformat="roff") | [
9,
753,
280,
171,
6251
] |
def METHOD_NAME(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't update, override method in child classes
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored | [
527,
1894,
604,
1318
] |
def METHOD_NAME(matches):
extracted = []
for match in matches:
value = extract_value(match)
entity = {"start": match["start"],
"end": match["end"],
"text": match.get("body", match.get("text", None)),
"value": value,
"confidence": 1.0,
"additional_info": match["value"],
"entity": match["dim"]}
extracted.append(entity)
return extracted | [
197,
17256,
275,
24,
15515
] |
def METHOD_NAME(cls, mod_path: str, package: str = None) -> ModuleType:
"""Load a module by its absolute path.
Args:
mod_path: the absolute or relative module path
package: the parent package to search for the module
Returns:
The resolved module or `None` if the module cannot be found
Raises:
ModuleLoadError: If there was an error loading the module
"""
if package:
# preload parent package
if not cls.METHOD_NAME(package):
return None
# must treat as a relative import
if not mod_path.startswith("."):
mod_path = f".{mod_path}"
full_path = resolve_name(mod_path, package)
if full_path in sys.modules:
return sys.modules[full_path]
if "." in mod_path:
parent_mod_path, mod_name = mod_path.rsplit(".", 1)
if parent_mod_path and parent_mod_path[-1] != ".":
parent_mod = cls.METHOD_NAME(parent_mod_path, package)
if not parent_mod:
return None
package = parent_mod.__name__
mod_path = f".{mod_name}"
# Load the module spec first
# this means that a later ModuleNotFoundError indicates a code issue
spec = find_spec(mod_path, package)
if not spec:
return None
try:
return import_module(mod_path, package)
except ModuleNotFoundError as e:
raise ModuleLoadError(
f"Unable to import module {full_path}: {str(e)}"
) from e | [
557,
298
] |
def METHOD_NAME(self):
return None | [
6987,
146,
2851
] |
def METHOD_NAME(self, data_root_path, subdir):
_index = 1
_indes_str = "%04d" % _index
fpath = os.path.join(data_root_path, f"{subdir}_{_indes_str}")
while os.path.exists(fpath):
_index += 1
_indes_str = "%04d" % _index
fpath = os.path.join(data_root_path, f"{subdir}_{_indes_str}")
return fpath | [
416,
243,
-1,
1190
] |
def METHOD_NAME(self, record):
"""Write a single tab line to the file."""
assert self._header_written
assert not self._footer_written
self._record_written = True
self.handle.write(as_tab(record)) | [
77,
148
] |
def METHOD_NAME(self):
self.assertHolidayName(
"Velikonočni ponedeljek",
"2019-04-22",
"2020-04-13",
"2021-04-05",
"2022-04-18",
"2023-04-10",
) | [
9,
7696,
606
] |
def METHOD_NAME(self, rev: str = "head") -> None:
alembic_config = get_alembic_config(__file__)
with self.connect() as conn:
run_alembic_downgrade(alembic_config, conn, rev=rev) | [
8171,
1502
] |
def METHOD_NAME(self, data: Mapping[K, T], force: bool = False) -> None:
# Populate the cache with the specified values
if self.cache:
for key, value in data.items():
if not self.cache_map.get(key) or force:
future: Future = Future(loop=self.loop)
future.set_result(value)
self.cache_map.set(key, future)
# For keys that are pending on the current batch, but the
# batch hasn't started fetching yet: Remove it from the
# batch and set to the specified value
if self.batch is not None and not self.batch.dispatched:
batch_updated = False
for task in self.batch.tasks:
if task.key in data:
batch_updated = True
task.future.set_result(data[task.key])
if batch_updated:
self.batch.tasks = [
task for task in self.batch.tasks if not task.future.done()
] | [
7322,
1401
] |
def METHOD_NAME(max_retries: int = 5):
def retry_request(func):
@wraps(func)
def wrapper(*args, **kwargs):
retries = max_retries
while True:
try:
return func(*args, **kwargs)
except RequestException as exception:
logger.warning(exception)
retries -= 1
if retries <= 0:
break
sleep(2.0)
return None
return wrapper
return retry_request | [
2052,
972
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
self._execute_operations()
return self._output() | [
1519
] |
def METHOD_NAME(storage_connection_str, table_name):
fully_qualified_namespace = "test_namespace"
eventhub_name = "eventhub"
consumer_group = "$default"
ownership_cnt = 8
checkpoint_store = TableCheckpointStore.from_connection_string(
storage_connection_str, table_name
)
ownership_list = []
for i in range(ownership_cnt):
ownership = _create_ownership(str(i), "owner_id", None, None)
ownership_list.append(ownership)
result_ownership_list = checkpoint_store.claim_ownership(ownership_list)
assert result_ownership_list[0]["owner_id"] == "owner_id"
single_ownership = [result_ownership_list[0].copy()]
single_ownership[0]["owner_id"] = "Bill"
ownership_list = checkpoint_store.claim_ownership(single_ownership)
assert ownership_list[0]["owner_id"] == "Bill"
single_ownership = [result_ownership_list[0].copy()]
single_ownership[0]["etag"] = "W/\"datetime'2021-08-02T00%3A46%3A51.7645424Z'\""
single_ownership[0]["owner_id"] = "Jack"
single_ownership[0]["partition_id"] = "10"
result_ownership = checkpoint_store.claim_ownership(single_ownership)
list_ownership = checkpoint_store.list_ownership(
fully_qualified_namespace, eventhub_name, consumer_group
)
assert result_ownership[0] in list_ownership
single_ownership = [result_ownership_list[0].copy()]
single_ownership[0]["etag"] = "W/\"datetime'2021-08-02T00%3A46%3A51.7645424Z'\""
with pytest.raises(OwnershipLostError) as e_info:
checkpoint_store.claim_ownership(single_ownership) | [
2556,
3982,
442,
9
] |
def METHOD_NAME(self,
group_key=default_group,
module_name=None,
module_cls=None,
force=False):
assert isinstance(group_key,
str), 'group_key is required and must be str'
if group_key not in self._modules:
self._modules[group_key] = dict()
# Some registered module_cls can be function type.
# if not inspect.isclass(module_cls):
# raise TypeError(f'module is not a class type: {type(module_cls)}')
if module_name is None:
module_name = module_cls.__name__
if module_name in self._modules[group_key] and not force:
raise KeyError(f'{module_name} is already registered in '
f'{self._name}[{group_key}]')
self._modules[group_key][module_name] = module_cls
module_cls.group_key = group_key | [
372,
298
] |
def METHOD_NAME():
"""Tests adding a var that already exists in the root."""
with raises(AttributeError):
AddConfigVar('sample_rate', "doc", IntParam(1), root=msaf.config) | [
9,
909,
-1,
1563
] |
def METHOD_NAME(self):
# Create pre-existing pickle file.
stored_model = Model(count=52)
filename = os.path.join(self.tmpdir, "model.pkl")
with open(filename, "wb") as pickled_object:
pickle.dump(stored_model, pickled_object)
model = Model(count=19)
with mock.patch.object(self.toolkit, "view_application"):
with self.assertWarns(DeprecationWarning):
model.configure_traits(filename=filename)
self.assertEqual(model.count, 52) | [
9,
1147,
41,
1153,
171
] |
def METHOD_NAME(self): | [
9,
146
] |
def METHOD_NAME(self, data=None, metadata=None, buffers=None):
"""Send a message to the frontend-side version of this comm"""
self._publish_msg('comm_msg',
data=data, metadata=metadata, buffers=buffers,
) | [
353
] |
def METHOD_NAME(self, tag: str, attrs: Dict[str, str]):
if tag == "div" and get_attr_value(attrs, "class") == "download":
data_id = get_attr_value(attrs, "data-id")
self.variants.append(
{
"_id": get_attr_value(attrs, "data-id"),
"vendor": get_attr_value(attrs, "data-manufacturer"),
"model": get_attr_value(attrs, "data-name"),
"family": get_attr_value(attrs, "data-mcufamily"),
"info_url": f"https://circuitpython.org/board/{data_id}/",
}
) | [
276,
12992
] |
def METHOD_NAME(self, logic):
"""
Tests the default 1D set
"""
interval = numpy.linspace(start=1, stop=10, num=10, endpoint=True)
logic.createDefault1dData(interval=interval)
assert logic.data.id == ('0 data')
assert logic.data.group_id == ('0 Model1D')
assert not logic.data.is_data
assert logic.data._xaxis == ('\\rm{Q}')
assert logic.data._xunit == ('A^{-1}')
assert logic.data._yaxis == ('\\rm{Intensity}')
assert logic.data._yunit == ('cm^{-1}') | [
9,
129,
-1,
365
] |
def METHOD_NAME(self):
tracker = NnmClubTracker(u"9876543", u'2' * 32)
tracker.tracker_settings = self.tracker_settings
self.assertFalse(tracker.verify()) | [
9,
1162,
180
] |
def METHOD_NAME(self, delay):
self.keydelay = delay | [
659
] |
def METHOD_NAME(name):
try:
with open(os.devnull, 'w') as fnull:
subprocess.check_call(['which', name], stdout=fnull, stderr=fnull)
except Exception:
pytest.skip('required %s not found' % (name)) | [
208,
2777
] |
def METHOD_NAME(self): | [
9,
555,
559
] |
def METHOD_NAME(self) -> pd.DataFrame:
"""Generates the raw CPS dataset."""
# Files are named for a year after the year the survey represents.
# For example, the 2020 CPS was administered in March 2021, so it's
# named 2021.
file_year = int(self.time_period) + 1
file_year_code = str(file_year)[-2:]
CPS_URL_BY_YEAR = {
2020: "https://www2.census.gov/programs-surveys/cps/datasets/2021/march/asecpub21csv.zip",
2021: "https://www2.census.gov/programs-surveys/cps/datasets/2022/march/asecpub22csv.zip",
2022: "https://www2.census.gov/programs-surveys/cps/datasets/2023/march/asecpub23csv.zip",
}
if self.time_period not in CPS_URL_BY_YEAR:
raise ValueError(
f"No raw CPS data URL known for year {self.time_period}."
)
url = CPS_URL_BY_YEAR[self.time_period]
response = requests.get(url, stream=True)
total_size_in_bytes = int(
response.headers.get("content-length", 200e6)
)
progress_bar = tqdm(
total=total_size_in_bytes,
unit="iB",
unit_scale=True,
desc="Downloading ASEC",
)
if response.status_code == 404:
raise FileNotFoundError(
"Received a 404 response when fetching the data."
)
try:
with BytesIO() as file, pd.HDFStore(
self.file_path, mode="w"
) as storage:
content_length_actual = 0
for data in response.iter_content(int(1e6)):
progress_bar.update(len(data))
content_length_actual += len(data)
file.write(data)
progress_bar.set_description("Downloaded ASEC")
progress_bar.total = content_length_actual
progress_bar.close()
zipfile = ZipFile(file)
with zipfile.open(f"pppub{file_year_code}.csv") as f:
storage["person"] = pd.read_csv(
f,
usecols=PERSON_COLUMNS
+ SPM_UNIT_COLUMNS
+ TAX_UNIT_COLUMNS,
).fillna(0)
person = storage["person"]
with zipfile.open(f"ffpub{file_year_code}.csv") as f:
person_family_id = person.PH_SEQ * 10 + person.PF_SEQ
family = pd.read_csv(f).fillna(0)
family_id = family.FH_SEQ * 10 + family.FFPOS
family = family[family_id.isin(person_family_id)]
storage["family"] = family
with zipfile.open(f"hhpub{file_year_code}.csv") as f:
person_household_id = person.PH_SEQ
household = pd.read_csv(f).fillna(0)
household_id = household.H_SEQ
household = household[
household_id.isin(person_household_id)
]
storage["household"] = household
storage["tax_unit"] = RawCPS._create_tax_unit_table(person)
storage["spm_unit"] = RawCPS._create_spm_unit_table(person)
except Exception as e:
raise ValueError(
f"Attempted to extract and save the CSV files, but encountered an error: {e} (removed the intermediate dataset)."
) | [
567
] |
def METHOD_NAME(self):
"""
This is an OPTIONAL method that may be implemented.
Returns a map of key-value pairs that can be sent with the
SASL/OAUTHBEARER initial client request. If not implemented, the values
are ignored
This feature is only available in Kafka >= 2.1.0.
"""
return {} | [
583
] |
async def METHOD_NAME(self, resource_attributes: AttributeDict):
await self.nova.init_api(timeout=60)
response = await self.nova.servers.force_delete(
resource_attributes.remote_resource_uuid
)
logger.debug(f"{self.site_name} servers terminate returned {response}")
return response | [
1602,
191
] |
def METHOD_NAME(self, df: PandasData):
"""Creates placeholder column names for dataframes without column names.
Args:
df (pd.DataFrame):
Required. This is the dataframe to serialize.
"""
if isinstance(df.columns, pd.RangeIndex):
df.columns = [str(x) for x in df.columns]
self.restore_df_actions.append("remove_placeholder_col_names")
self.restore_df_actions_args.append([]) | [
129,
4126,
2618,
83
] |
def METHOD_NAME(self):
"""Test protection against MemoryError when replacing in RegexDetector"""
multiplier = 30
entity_name = 'abab'
tag = '__{}__'.format(entity_name)
pattern = '\\bab\\b'
text = ' '.join(['ab'] * multiplier)
regex_detector = RegexDetector(entity_name=entity_name, pattern=pattern)
expected_values = ['ab'] * multiplier
expected_original_texts = ['ab'] * multiplier
expected_tagged_text = ' '.join(['{t}'.format(t=tag)] * multiplier)
values, original_texts = regex_detector.detect_entity(text)
self.assertEqual(regex_detector.tagged_text, expected_tagged_text)
self.assertEqual(values, expected_values)
self.assertEqual(original_texts, expected_original_texts) | [
9,
2203,
369
] |
def METHOD_NAME():
"""Adds consumption and peak load to load areas for households"""
print("Add consumption and peak loads to load areas for households...")
execute_sql_script("loadareas_add_demand_hh.sql") | [
14059,
238,
6946,
-1
] |
def METHOD_NAME(self, tree): | [
278
] |
def METHOD_NAME(seq1, seq2):
''' | [
5508,
445
] |
def METHOD_NAME(self, high):
max = 3 * len(high)
for word, n in zip(high, range(max, 0, -3)):
# print(word[0],word[1],n)
self.cards[10 ** n] = word[1] | [
0,
5020,
18017
] |
def METHOD_NAME():
check_production_install()
from rez.cli._main import run
return run("bind") | [
22,
2177,
287
] |
def METHOD_NAME(self): | [
9,
692,
294,
281
] |
def METHOD_NAME(self):
dirname = os.path.dirname(__file__)
self.m = Manticore(os.path.join(dirname, "binaries", "arguments_linux_amd64")) | [
0,
1
] |
def METHOD_NAME(client):
print("pinging node...")
ping_ok = False
for _i in range(10):
try:
subprocess.check_call(["ping", "-q", "-c1", "-w1", client])
ping_ok = True
break
except subprocess.CalledProcessError:
pass
if not ping_ok:
print("pinging node failed. aborting test.")
sys.exit(1)
else:
print("pinging node succeeded.")
return ping_ok | [
7160
] |
def METHOD_NAME(self):
for (addr, error, locs) in INVALID_DATA:
self.check_invalid(addr, error, locs)
for (addr, spk) in VALID_DATA:
self.check_valid(addr, spk) | [
9,
14670
] |
def METHOD_NAME(self):
self.assertIsInstance(self.es_list[1].__str__(), str)
self.assertTrue(" Is a metal: True" in self.es_list[1].__str__()) | [
9,
3
] |
def METHOD_NAME(self, hash, value):
if hash not in self.value_dict:
self.value_dict[hash] = value
else:
self.repeated_hash.append(hash) | [
86,
99,
553
] |
def METHOD_NAME(self):
import _testcapi
self.assertRaises(OverflowError, fcntl.flock, _testcapi.INT_MAX+1,
fcntl.LOCK_SH) | [
9,
10052,
1482
] |
def METHOD_NAME(self, pytester: Pytester, pastebinlist) -> None:
from _pytest.pytester import LineMatcher
testpath = pytester.makepyfile(
"""
import pytest
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
pytest.skip("")
"""
)
reprec = pytester.inline_run(testpath, "--pastebin=all", "-v")
assert reprec.countoutcomes() == [1, 1, 1]
assert len(pastebinlist) == 1
contents = pastebinlist[0].decode("utf-8")
matcher = LineMatcher(contents.splitlines())
matcher.fnmatch_lines(
[
"*test_pass PASSED*",
"*test_fail FAILED*",
"*test_skip SKIPPED*",
"*== 1 failed, 1 passed, 1 skipped in *",
]
) | [
9,
75
] |
def METHOD_NAME(self):
"""Test: Create inventory, background & no import."""
def test(client):
"""API test callback."""
progress = None
inventory_index = None
for progress in client.inventory.create(background=True,
import_as=''):
continue
while True:
# Give background running time to complete.
time.sleep(5)
if [x for x in client.inventory.list()]:
break
self.assertGreater(len([x for x in client.inventory.list()]),
0,
'Assert list not empty')
for inventory_index in client.inventory.list():
self.assertTrue(inventory_index.id == progress.id)
self.assertEqual(inventory_index.id,
(client.inventory.get(inventory_index.id)
.inventory.id))
self.assertEqual(inventory_index.id,
(client.inventory.delete(inventory_index.id)
.inventory.id))
self.assertEqual([], [i for i in client.inventory.list()])
with gcp_api_mocks.mock_gcp():
setup = create_tester()
setup.run(test) | [
9,
756,
2272
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME():
ap = argparse.ArgumentParser(description="ONL PKI Management")
ap.add_argument("--init", action='store_true', help="Initialize PKI files (if necessary)")
ap.add_argument("--regen-cert", action='store_true', help="Regenerate certificate.")
ap.add_argument("--force", "-f", action='store_true', help="Force regeneration of the key and certificate during initialization (--init)")
ap.add_argument("--quiet", "-q", action='store_true', help="Quiet output.")
ap.add_argument("--verbose", "-v", action='store_true', help="Verbose output.")
ops = ap.parse_args()
logging.basicConfig()
logger = logging.getLogger("PKI")
if ops.verbose:
logger.setLevel(logging.DEBUG)
elif ops.quiet:
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.INFO)
pki = OnlPki(logger)
if ops.init:
pki.init_key(force=ops.force)
pki.init_cert(force=ops.force)
elif ops.regen_cert:
pki.init_cert(force=True) | [
57
] |
def METHOD_NAME():
n = 1024
l = 128
m = 235
bias = te.var("bias", dtype="float32")
A = te.placeholder((l,), name="A")
B = te.placeholder((m, l), name="B")
C = nnpack.fully_connected_inference(A, B)
D = te.compute(C.shape, lambda i: C[i] + bias, name="D")
s = te.create_schedule(D.op)
def verify(target="llvm"):
if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
pytest.skip("extern function is not available")
if not nnpack.is_available():
pytest.skip("nnpack is not available")
dev = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], target)
a = tvm.nd.array(np.random.uniform(size=(l)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(m, l)).astype(B.dtype), dev)
d = tvm.nd.array(np.zeros((m,), dtype=D.dtype), dev)
bb = 10.0
f(a, b, d, bb)
tvm.testing.assert_allclose(d.numpy(), np.dot(a.numpy(), b.numpy().T) + bb, rtol=1e-5)
verify() | [
9,
495,
2261,
1748
] |
def METHOD_NAME(self):
for w in self._listbox.body:
w.data.clearcache()
super().METHOD_NAME() | [
537
] |
def METHOD_NAME(self):
try:
return redirect(self.get_success_url())
except NoReverseMatch:
return redirect(self.get_success_url(ignore_next=True)) | [
19,
1736
] |
def METHOD_NAME(v):
"""Returns an orthogonal vector to v
v -- geometry_msgs Vector3 or Point
returns the orthogonal vector of v
"""
normalized = normalize(v)
t = type(v)
x = abs(normalized.x)
y = abs(normalized.y)
z = abs(normalized.z)
basis = None
if x < y:
basis = t(1, 0, 0) if x < z else t(0, 0, 1)
else:
basis = t(0, 1, 0) if y < z else t(0, 0, 1)
return cross(normalized, basis) | [
5329
] |
def METHOD_NAME(ql: Qiling, key: int, msgflg: int):
def __create_msq(key: int, flags: int) -> int:
"""Create a new message queue for the specified key.
Returns: msqid of the newly created message queue, -1 if an error has occurred
"""
if len(ql.os.msq) >= MSGMNI:
return -1 # ENOSPC
mode = flags & ((1 << 9) - 1)
msqid = ql.os.msq.add(QlMsqId(key, ql.os.uid, ql.os.gid, mode))
ql.log.debug(f'created a new msg queue: key = {key:#x}, mode = 0{mode:o}. assigned id: {msqid:#x}')
return msqid
# create new message queue
if key == IPC_PRIVATE:
msqid = __create_msq(key, msgflg)
else:
msqid, msq = ql.os.msq.get_by_key(key)
# a message queue with the specified key does not exist
if msq is None:
# the user asked to create a new one?
if msgflg & IPC_CREAT:
msqid = __create_msq(key, msgflg)
else:
return -1 # ENOENT
# a message queue with the specified key exists
else:
# the user asked to create a new one?
if msgflg & (IPC_CREAT | IPC_EXCL):
return -1 # EEXIST
if __perms(ql, msq, msgflg):
return -1 # EACCES
return msqid | [
6509,
6830,
-1
] |
def METHOD_NAME(t):
if t is NaN:
return t
try:
return int(
LOCAL_ZONE.dst(datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=t // 1000)).seconds) * 1000
except:
warnings.warn(
'Invalid datetime date, assumed DST time, may be inaccurate...',
Warning)
return 1
#raise MakeError('TypeError', 'date not supported by python.datetime. I will solve it in future versions') | [
1167,
6837,
6838
] |
def METHOD_NAME(_dict, chunk_size=1):
dict_size = len(_dict)
if dict_size <= chunk_size:
return [_dict]
chunks = []
chunk = {}
counter = 0
keys = list(_dict.keys())
for key in keys:
chunk[key] = _dict.pop(key)
counter += 1
if counter == chunk_size:
chunks.append(chunk)
counter = 0
chunk = {}
if len(chunk):
chunks.append(chunk)
return chunks | [
265,
553
] |
def METHOD_NAME(self, slider_val):
self._check_max_and_min()
return self.min + slider_val * self._step_size() | [
197,
280,
4684
] |
def METHOD_NAME(insights_config, insights_client):
insights_config.return_value.load_all.return_value.content_type = 'bz2'
insights_config.return_value.load_all.return_value.payload = 'testct.tgz'
try:
collect_and_output()
except SystemExit:
pass
insights_client.return_value.collect.assert_not_called()
insights_client.return_value.upload.assert_not_called() | [
9,
1444,
61,
146,
955,
288,
44
] |
def METHOD_NAME(framework, schema_name, pattern, data_name, value):
"""
Tests behavior of pattern slots.
Pattern slots allow for regular expression constraints.
Currently not supported for validation by python frameworks.
:param framework: not supported by python frameworks
:param schema_name: the name reflects which constraints are implementd
:param pattern: regular expression
:param value: value to check
:return:
"""
classes = {
CLASS_C: {
"attributes": {
SLOT_S1: {
"pattern": pattern,
},
}
}
}
schema = validated_schema(
METHOD_NAME, schema_name, framework, classes=classes, core_elements=["pattern"]
)
implementation_status = ValidationBehavior.IMPLEMENTS
is_valid = bool(re.match(pattern, value))
if framework in [PYDANTIC, PYTHON_DATACLASSES, SQL_DDL_SQLITE]:
if not is_valid:
implementation_status = ValidationBehavior.INCOMPLETE
check_data(
schema,
data_name,
framework,
{SLOT_S1: value},
is_valid,
expected_behavior=implementation_status,
target_class=CLASS_C,
description=f"matching {value} to {pattern} expected={is_valid}",
) | [
9,
652
] |
def METHOD_NAME(self):
"""
Print the layer information.
"""
print(self._layer) | [
697
] |
def METHOD_NAME(gitlab_cli, project, format, loader):
cmd = ["-o", format, "project", "get", "--id", project.id]
ret = gitlab_cli(cmd)
assert ret.success
content = loader(ret.stdout.strip())
assert content["id"] == project.id | [
9,
615,
52
] |
def METHOD_NAME(self):
self.logIt("Copying scim.war into jetty webapps folder...")
self.installJettyService(self.jetty_app_configuration[self.service_name], True)
jettyServiceWebapps = os.path.join(self.jetty_base, self.service_name, 'webapps')
self.copyFile(self.source_files[0][0], jettyServiceWebapps)
self.enable() | [
428
] |
def METHOD_NAME(self):
"""
Test that theme title is used instead of parent title.
"""
self._login()
dashboard_url = reverse('dashboard')
resp = self.client.get(dashboard_url)
assert resp.status_code == 200
# This string comes from the 'pagetitle' block of the overriding theme.
self.assertContains(resp, "Overridden Title!") | [
9,
345,
573,
623,
935
] |
def METHOD_NAME(self):
self.assertEqual(
controller.get_cloud(),
self.cloud)
self.Controller_mock.get_cloud.assert_called_once() | [
9,
19,
4054
] |