text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self):
a = InlineQueryResultAudio(self.id_, self.audio_url, self.title)
b = InlineQueryResultAudio(self.id_, self.title, self.title)
c = InlineQueryResultAudio(self.id_, "", self.title)
d = InlineQueryResultAudio("", self.audio_url, self.title)
e = InlineQueryResultVoice(self.id_, "", "")
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e) | [
9,
1392
] |
def METHOD_NAME(data):
'''Populates the data field of execute with context information if
not present.'''
if data is None:
data = {}
if '_context' not in data:
data['_context'] = Window.get_foreground()
return data | [
602,
2046,
198
] |
def METHOD_NAME(self, request: HttpRequest, *, stream: bool = False, **kwargs) -> PipelineResponse:
return self._client._pipeline.run(request, stream=stream, **kwargs) # pylint: disable=protected-access | [
353,
377
] |
def METHOD_NAME():
"""In addition to stop tracking all modules it also clears the tracking flag for all individual modules."""
global _trackAll
global _trackModule
_trackAll = False
_trackModule = {} | [
10330,
75,
468
] |
def METHOD_NAME(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
else:
break
return _sentinel | [
250,
2
] |
def METHOD_NAME(self, u, x, y, q, n_sersic, R_sersic, keff, centerx, centery):
ellip_coord = self._elliptical_coord_u(x, y, u, q)
def_angle_circular = self._sersic.alpha_abs(
ellip_coord, 0, n_sersic, R_sersic, keff, centerx, centery
)
return (
ellip_coord * def_angle_circular * (1 - (1 - q**2) * u) ** -0.5 * u**-1
) | [
6797,
497
] |
def METHOD_NAME(self):
if self.release() is None:
release = None
else:
release = self.release()
return rpmquery.RpmQuery.filename(self.name(), None, self.version(), release, self.arch()) | [
-1
] |
def METHOD_NAME(cls, column_list, **kwargs):
columns_to_sum = column_list[0:-1]
sqlalchemy_columns_to_sum = columns_to_sum[0]
if len(columns_to_sum) > 1:
for column in columns_to_sum[1:]:
sqlalchemy_columns_to_sum += column
column_to_equal = column_list[-1]
return sqlalchemy_columns_to_sum == column_to_equal | [
4267
] |
def METHOD_NAME(self, label):
for i in range(50):
resp = self.kphp_server.http_post(uri="/test_shared_memory_piece_in_response", json={
"label": label,
})
self.assertEqual(resp.status_code, 200)
for ans in resp.json()['jobs-result']:
self.assertEqual(ans, 42) | [
9,
1644,
1645,
10399,
8066,
2581
] |
def METHOD_NAME(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, double_iter)):
float_tensors, float_tensor_params, float_params = float_values
int_tensors, int_tensor_params, int_params = int_values
assert_dicts_equal(float_params, int_params)
assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"])
float_measurement, int_measurement = (
Timer(
stmt,
globals=tensors,
).blocked_autorange(min_run_time=_MEASURE_TIME)
for tensors in (float_tensors, int_tensors)
)
descriptions = []
for name in float_tensors:
shape_str = "(" + ", ".join([
f"2 ** {int(np.log2(i))}"
if 2 ** int(np.log2(i)) == i and i > 1
else str(i)
for i in float_tensors[name].shape
]) + ")"
sparse_dim = float_tensor_params[name]["sparse_dim"]
sparse_dim_str = str(sparse_dim)
is_coalesced = float_tensor_params[name]["is_coalesced"]
is_coalesced_str = "True" if is_coalesced else "False"
descriptions.append((name, shape_str, sparse_dim_str, is_coalesced_str))
raw_results.append((float_measurement, int_measurement, descriptions))
print(f"\r{i + 1} / {n}", end="")
print()
parsed_results, name_len, shape_len, sparse_dim_len, is_coalesced_len = [], 0, 0, 0, 0
for float_measurement, int_measurement, descriptions in raw_results:
t_float = float_measurement.median * 1e6
t_int = int_measurement.median * 1e6
rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2
parsed_results.append((t_float, t_int, rel_diff, descriptions))
for name, shape, sparse_dim, is_coalesced in descriptions:
name_len = max(name_len, len(name))
shape_len = max(shape_len, len(shape))
sparse_dim_len = max(sparse_dim_len, len(sparse_dim))
is_coalesced_len = max(is_coalesced_len, len(is_coalesced))
parsed_results.sort(key=lambda x: x[2])
print(f"stmt: {stmt}")
print(f" diff faster{'':>17}{' ' * name_len} ", end="")
print(f"{'shape'.ljust(shape_len)}{'':>12}{'sparse_dim'.ljust(sparse_dim_len)}", end="")
print(f" is_coalesced\n{'-' * 100}")
for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]:
for t_float, t_int, rel_diff, descriptions in results:
time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"]
time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]])
for t_str, (name, shape, sparse_dim, is_coalesced) in zip(time_str, descriptions):
name = f"{name}:".ljust(name_len + 1)
shape = shape.ljust(shape_len + 10)
sparse_dim = sparse_dim.ljust(sparse_dim_len)
print(f"{t_str} {name} {shape}| {sparse_dim} | {is_coalesced}")
print(spacer) | [
22
] |
def METHOD_NAME(self):
s11 = self.app.data.s11
maximums = sorted(
at.maxima([d.impedance().real for d in s11], threshold=500)
)
extended_data = {}
logger.info("TO DO: find near data")
for lowest in self.crossings:
my_data = self._get_data(lowest)
if lowest in extended_data:
extended_data[lowest].update(my_data)
else:
extended_data[lowest] = my_data
logger.debug("maximumx %s of type %s", maximums, type(maximums))
for m in maximums:
logger.debug("m %s of type %s", m, type(m))
my_data = self._get_data(m)
if m in extended_data:
extended_data[m].update(my_data)
else:
extended_data[m] = my_data
fields = [
("freq", format_frequency_short),
("r", format_resistence_neg),
("lambda", lambda x: round(x, 2)),
]
if self.old_data:
diff = self.compare(self.old_data[-1], extended_data, fields=fields)
else:
diff = self.compare({}, extended_data, fields=fields)
self.old_data.append(extended_data)
for i, idx in enumerate(sorted(extended_data.keys())):
self.layout.addRow(
f"{format_frequency_short(s11[idx].freq)}",
QtWidgets.QLabel(
f" ({diff[i]['freq']})"
f" {format_complex_imp(s11[idx].impedance())}"
f" ({diff[i]['r']}) {diff[i]['lambda']} m"
),
)
if self.filename and extended_data:
with open(
self.filename, "w", newline="", encoding="utf-8"
) as csvfile:
fieldnames = extended_data[
sorted(extended_data.keys())[0]
].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for idx in sorted(extended_data.keys()):
writer.writerow(extended_data[idx]) | [
74,
10516,
689
] |
def METHOD_NAME(self):
from scipy import special
y = np.linspace(-1.0, 1.0, num=10)
x = special.erfinv(y)
y2 = jt.array(y)
x2 = jt.erfinv(y2)
np.testing.assert_allclose(y.data, y2.data)
y = np.linspace(-0.9, 0.9, num=10)
x = special.erfinv(y)
y2 = jt.array(y)
x2 = jt.erfinv(y2)
np.testing.assert_allclose(y.data, y2.data)
d = jt.grad(x2, y2)
_, (dn,) = ngrad(lambda y: special.erfinv(y).sum(), [y], 1e-8)
tol = 1e-3 if jt.flags.amp_reg & 2 else 1e-6
np.testing.assert_allclose(d.data, dn, atol=tol, rtol=tol) | [
9,
11885
] |
def METHOD_NAME(self):
"""Allows to connect mesh input to the operator.
Parameters
----------
my_mesh : MeshedRegion
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.wireframe()
>>> op.inputs.mesh.connect(my_mesh)
>>> # or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh | [
1949
] |
def METHOD_NAME(self):
return not hasattr(self._connections, 'conn') or \
self._connections.conn() is None or \
self._connections.conn().METHOD_NAME | [
4703
] |
def METHOD_NAME(request, feature_flag_name):
"""
Adapt feature_flags.flag_is_active() to be used as a request method.
This enables things to call request.feature("my_feature") and it'll
call feature_flags.flag_is_active("my_feature").
"""
return feature_flags.flag_is_active(request, feature_flag_name) | [
964
] |
def METHOD_NAME(
model_card: Optional[Model] = None,
metadata: Optional[dict] = None,
minimal_metadata_path: Optional[str] = None,
binary_file: Optional[str] = None,
code_file: Optional[str] = None,
):
"""Validate the model and files provided for upload
Args:
model_card: Model card of the model to update. Defaults to None.
metadata: Metadata required for uploading a new model. Must
match the minimal metadata. Defaults to None.
minimal_metadata_path: something
binary_file: File path to model binary. Defaults to None.
code_file: File path to model code. Defaults to None.
Raises:
DataInvalid: Invalid model
DataInvalid: Binary or code file does not exist
InvalidMetadata: Metadata does not meet the minimal metadata
"""
if model_card:
validate_model_card(model_card)
if metadata:
validate_metadata(metadata, minimal_metadata_path)
if binary_file and code_file:
validate_file_paths(binary_file, code_file) | [
187,
3349
] |
def METHOD_NAME(info, image_id, class_names):
image_file = info['image_file']
single_image = dict()
single_image['file_name'] = os.path.split(image_file)[-1]
single_image['id'] = image_id
image = cv2.imread(image_file)
height, width, _ = image.shape
single_image['width'] = width
single_image['height'] = height
# process annotation field
single_objs = []
objects = info['annotation']
for obj in objects:
poly, name, difficult = obj['poly'], obj['name'], obj['difficult']
if difficult == '2':
continue
single_obj = dict()
single_obj['category_id'] = class_names.index(name) + 1
single_obj['segmentation'] = [poly]
single_obj['iscrowd'] = 0
xmin, ymin, xmax, ymax = min(poly[0::2]), min(poly[1::2]), max(poly[
0::2]), max(poly[1::2])
width, height = xmax - xmin, ymax - ymin
single_obj['bbox'] = [xmin, ymin, width, height]
single_obj['area'] = height * width
single_obj['image_id'] = image_id
single_objs.append(single_obj)
return (single_image, single_objs) | [
356,
97,
734
] |
def METHOD_NAME(timeseries, units):
b = Beaching(active_range, units, timeseries, name='test_beaching')
assert b.name == 'test_beaching'
assert b.units == units
assert b.active_range[0] == active_range[0]
assert b.timeseries[-1][0] == b.active_range[1]
ts = np.asarray(timeseries, dtype=datetime_value_1d)
assert all(b.timeseries['time'] == ts['time'])
assert all(b.timeseries['value'] == ts['value']) | [
9,
176
] |
def METHOD_NAME(sampler, machine, parameters, state):
pdf = jnp.absolute(
to_array(sampler.hilbert, machine.apply, parameters) ** sampler.machine_pow
)
pdf = pdf / pdf.sum()
return state.replace(pdf=pdf) | [
656
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The type of identity that last modified the resource
"""
return pulumi.get(self, "last_modified_by_type") | [
679,
680,
604,
44
] |
def METHOD_NAME(self):
return self.template_name | [
19,
671,
83
] |
async def METHOD_NAME(
model_id: str,
currency: str = "USD",
):
wallet = await utils.database.get_object(models.Wallet, model_id)
rate = await utils.wallets.get_rate(wallet, currency.upper(), extra_fallback=False)
if math.isnan(rate):
raise HTTPException(422, "Unsupported fiat currency")
return rate | [
19,
2945,
1585
] |
def METHOD_NAME(inputs: Optional['InputType'] = None, **kwargs) -> None:
"""Validate the inputs and print the first request if success.
:param inputs: the inputs
:param kwargs: keyword arguments
"""
if inputs is None:
# empty inputs is considered as valid
return
if hasattr(inputs, '__call__'):
# it is a function
inputs = inputs()
kwargs['data'] = inputs
kwargs['exec_endpoint'] = '/'
if inspect.isasyncgenfunction(inputs) or inspect.isasyncgen(inputs):
raise BadClientInput(
'checking the validity of an async generator is not implemented yet'
)
try:
from jina.clients.request import request_generator
r = next(request_generator(**kwargs))
from jina.types.request import Request
if not isinstance(r, Request):
raise TypeError(f'{typename(r)} is not a valid Request')
except Exception as ex:
default_logger.error(f'inputs is not valid!')
raise BadClientInput from ex | [
250,
362
] |
def METHOD_NAME(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt | [
447,
80,
600
] |
def METHOD_NAME(*, repo_tag):
try:
result = _run_docker_command(
"image", "inspect", "--format", "{{json .}}", repo_tag
)
return json.loads(result.stdout)
except CalledProcessError as error:
if ": No such image" in error.stderr:
raise ObjectDoesNotExist from error
else:
raise | [
225,
660
] |
def METHOD_NAME(dictionary):
"""Test the equality comparison for the ``Dict`` type.
A node should compare equal to a the plain dictionary that has the same value, as well as any other ``Dict`` node
that has the same content. For context, the discussion on whether to compare nodes by content was started in the
following issue:
https://github.com/aiidateam/aiida-core/issues/1917
A summary and the final conclusion can be found in this discussion:
https://github.com/aiidateam/aiida-core/discussions/5187
"""
different_dict = {'I': {'am': 'different'}}
node = Dict(dictionary)
different_node = Dict(different_dict)
clone = Dict(dictionary)
# Test equality comparison with Python base type
assert node == dictionary
assert node != different_dict
# Test equality comparison between `Dict` nodes
assert node is node # pylint: disable=comparison-with-itself
assert node == clone
assert node != different_node | [
9,
1392
] |
def METHOD_NAME(self, file_path: str, error_msg: str) -> None:
"""
Writes error message to the file.
"""
try:
with open(file_path, "w") as fp:
fp.write(error_msg)
except Exception as e:
warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}") | [
77,
168,
171
] |
def METHOD_NAME(self):
return self._async_queue("get_limit", "table_method") | [
19,
1467
] |
def METHOD_NAME(self):
self._is_showed = False
self._timer_stopped = False
self._refresh_context()
self.hide() | [
1462,
706
] |
def METHOD_NAME(self, value):
qs = Team.objects.filter(slug=value, organization=self.instance.organization).exclude(
id=self.instance.id
)
if qs.exists():
raise serializers.ValidationError(f'The slug "{value}" is already in use.')
super().METHOD_NAME(value)
return value | [
187,
1231
] |
def METHOD_NAME(parent=None):
randomNumber = QtCore.QRandomGenerator.global_().bounded(0, 100) + 1
if randomNumber < 50:
if randomNumber < 33:
return MyWidget1(parent)
return MyWidget3(parent)
return MyWidget2(parent) | [
80,
1192,
706
] |
def METHOD_NAME(path: str) -> str:
"""Turn a path string (task 19) from the original format 's,w' to a verbal model-friendly format 'south west'"""
steps: List[str] = path.split(",")
directions = {"s": "south", "n": "north", "e": "east", "w": "west"}
path = " ".join([directions[step] for step in steps])
return path | [
356,
157
] |
def METHOD_NAME(self, data):
def boolcheck(lst):
return tuple([elem for elem in lst if elem is not None])
category_mapping = {
obj["@id"]: boolcheck(
obj.get("category", {}).get("categoryPath", [])
+ [obj.get("category", {}).get("name")]
+ [obj["name"]]
)
for obj in data["categories"].values()
}
return [
{
"code": obj["@id"],
"name": obj["name"],
"categories": category_mapping[obj["category"]["@id"]],
"location": obj["location"]["name"] if "location" in obj else None,
"exchanges": [],
"unit": "",
"type": "product",
}
for obj in data["flows"].values()
if obj["flowType"] == "PRODUCT_FLOW"
] | [
7923,
947,
4866
] |
def METHOD_NAME():
return {
"name": {
"preferred_name": "John Doe"
},
"bai": "John.Doe.1"
} | [
365,
654,
7723
] |
def METHOD_NAME(sheet):
ridx = sheet.visibleRowAtY(sheet.mouseY)
if ridx is not None:
sheet.cursorRowIndex = ridx
cidx = sheet.visibleColAtX(sheet.mouseX)
if cidx is not None:
sheet.cursorVisibleColIndex = cidx | [
1515,
2571
] |
def METHOD_NAME(self):
"""
Removes all temporary files created during the scheme execution
"""
self._serializer.METHOD_NAME() | [
1356,
1
] |
def METHOD_NAME(self):
"""The group containing this process."""
return self._group | [
846
] |
def METHOD_NAME(mock_get_subparser):
mock_subparsers = mock.Mock()
sysdig.add_subparser(mock_subparsers)
assert mock_get_subparser.called | [
9,
238,
3509
] |
def METHOD_NAME (self, objects):
""" Given a list of objects, reorder them so that the constains specified
by 'add_pair' are satisfied.
The algorithm was adopted from an awk script by Nikita Youshchenko
(yoush at cs dot msu dot su)
"""
# The algorithm used is the same is standard transitive closure,
# except that we're not keeping in-degree for all vertices, but
# rather removing edges.
result = []
if not objects:
return result
constraints = self.__eliminate_unused_constraits (objects)
# Find some library that nobody depends upon and add it to
# the 'result' array.
obj = None
while objects:
new_objects = []
while objects:
obj = objects [0]
if self.__has_no_dependents (obj, constraints):
# Emulate break ;
new_objects.extend (objects [1:])
objects = []
else:
new_objects.append (obj)
obj = None
objects = objects [1:]
if not obj:
raise BaseException ("Circular order dependencies")
# No problem with placing first.
result.append (obj)
# Remove all contains where 'obj' comes first,
# since they are already satisfied.
constraints = self.__remove_satisfied (constraints, obj)
# Add the remaining objects for further processing
# on the next iteration
objects = new_objects
return result | [
852
] |
def METHOD_NAME(x: c64) -> c64:
pass | [
4743,
-1
] |
def METHOD_NAME(ocIRI, osIRI):
query = """PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX oc: <http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
PREFIX os: <http://www.theworldavatar.com/ontology/ontospecies/OntoSpecies.owl#>
PREFIX gc: <http://purl.org/gc/>
SELECT ?spin_mult ?frequencies ?rot_constants ?sym_number
WHERE {
<#ocIRI#> oc:hasUniqueSpecies <#osIRI#> ;
gc:isCalculationOn ?geomOpt ;
gc:isCalculationOn ?geomType ;
oc:hasInitialization ?init .
?init a oc:InitializationModule ;
gc:hasParameter ?method ;
gc:hasParameter ?basisSet .
?geomOpt a gc:GeometryOptimization ;
gc:hasMolecule ?mol .
?mol oc:hasSpinMultiplicity ?spin_mult .
?geomType a oc:GeometryType .
?geomType oc:hasGeometryType ?geomTypeValue .
OPTIONAL {
<#ocIRI#> gc:isCalculationOn ?vibAnal ;
gc:isCalculationOn ?rotConsts ;
gc:isCalculationOn ?rotSym .
?vibAnal a gc:VibrationalAnalysis ;
gc:hasResult ?freqResult .
?freqResult oc:hasFrequencies ?frequencies .
?rotConsts a oc:RotationalConstants ;
oc:hasRotationalConstants ?rot_constants .
?rotSym a oc:RotationalSymmetry ;
oc:hasRotationalSymmetryNumber ?sym_number .
}
}""".replace('#ocIRI#',ocIRI).replace('#osIRI#',osIRI)
return query | [
14809,
365,
539
] |
def METHOD_NAME(connector: str, test_type: str) -> MetadataWorkflow:
config_file = Path(
PATH_TO_RESOURCES + f"/{test_type}/{connector}/{connector}.yaml"
)
config_dict = load_config_file(config_file)
return MetadataWorkflow.create(config_dict) | [
19,
3855
] |
def METHOD_NAME(df):
pd.testing.assert_frame_equal(describe(df), Description(df).frame) | [
9,
2517
] |
def METHOD_NAME(cls, value: int) -> str:
try:
return codes(value).phrase # type: ignore
except ValueError:
return "" | [
19,
2293,
4576
] |
def METHOD_NAME(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=typeCallTransformer(), | [
1112,
1537
] |
def METHOD_NAME():
xarray_obj = xr.DataArray([1, 2, 3])
# xarray out arguments should raise
with pytest.raises(NotImplementedError, match=r"`out` argument"):
np.add(xarray_obj, 1, out=xarray_obj)
# but non-xarray should be OK
other = np.zeros((3,))
np.add(other, xarray_obj, out=other)
assert_identical(other, np.array([1, 2, 3])) | [
9,
1737
] |
def METHOD_NAME(self):
return "GET" | [
103
] |
def METHOD_NAME(
root: ProjectPackage, provider: Provider, repo: Repository
) -> None:
root.add_dependency(Factory.create_dependency("foo", "<=1.0.2"))
root.add_dependency(Factory.create_dependency("bar", "1.0.0"))
add_to_repo(repo, "foo", "1.0.0")
add_to_repo(repo, "foo", "1.0.1", deps={"bang": "1.0.0"})
add_to_repo(repo, "foo", "1.0.2", deps={"whoop": "1.0.0"})
add_to_repo(repo, "foo", "1.0.3", deps={"zoop": "1.0.0"})
add_to_repo(repo, "bar", "1.0.0", deps={"foo": "<=1.0.1"})
add_to_repo(repo, "bang", "1.0.0")
add_to_repo(repo, "whoop", "1.0.0")
add_to_repo(repo, "zoop", "1.0.0")
check_solver_result(
root, provider, {"foo": "1.0.1", "bar": "1.0.0", "bang": "1.0.0"}
) | [
9,
1644,
2913,
5337,
1604,
281,
10085
] |
def METHOD_NAME(matrix: npt.NDArray) -> np.ndarray:
"""
Finds the original vector from its skew-symmetric cross product. Finds the
reverse of :meth:`mil_tools.skew_symmetric_cross`.
Args:
matrix (List[float]): The matrix to find the original vector from.
Return:
numpy.typing.NDArray: The original vector.
"""
return np.array([matrix[2, 1], matrix[0, 2], matrix[1, 0]], dtype=np.float32) | [
-1
] |
def METHOD_NAME(logger_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLoggerResult]:
"""
Gets the details of the logger specified by its identifier.
:param str logger_id: Logger identifier. Must be unique in the API Management service instance.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
... | [
19,
2034,
146
] |
def METHOD_NAME(self):
return self._init_opts | [
176,
2766
] |
def METHOD_NAME(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
"""
Tracing friendly way to cast tensor to another tensor's device. Device will be treated
as constant during tracing, scripting the casting process as whole can workaround this issue.
"""
return src.to(dst.device) | [
132,
398,
2307
] |
def METHOD_NAME(self, errorKey):
return 0 | [
1462
] |
def METHOD_NAME(filters=None):
if not filters:
filters = {}
float_precision = frappe.db.get_default("float_precision")
avg_daily_outgoing = 0
diff = ((getdate(filters.get("to_date")) - getdate(filters.get("from_date"))).days) + 1
if diff <= 0:
frappe.throw(_("'From Date' must be after 'To Date'"))
columns = get_columns()
items = get_item_info(filters)
consumed_item_map = get_consumed_items(filters)
delivered_item_map = get_delivered_items(filters)
data = []
for item in items:
total_outgoing = flt(consumed_item_map.get(item.name, 0)) + flt(
delivered_item_map.get(item.name, 0)
)
avg_daily_outgoing = flt(total_outgoing / diff, float_precision)
reorder_level = (avg_daily_outgoing * flt(item.lead_time_days)) + flt(item.safety_stock)
data.append(
[
item.name,
item.item_name,
item.item_group,
item.brand,
item.description,
item.safety_stock,
item.lead_time_days,
consumed_item_map.get(item.name, 0),
delivered_item_map.get(item.name, 0),
total_outgoing,
avg_daily_outgoing,
reorder_level,
]
)
return columns, data | [
750
] |
def METHOD_NAME(self, ancestry):
"""
The ancestry contains a <choice/>
@param ancestry: A list of ancestors.
@type ancestry: list
@return: True if contains <choice/>
@rtype: boolean
"""
for x in ancestry:
if x.choice():
return True
return False | [
-1
] |
def METHOD_NAME():
bind = op.get_bind()
session = db.Session(bind=bind)
for chart in session.query(Slice):
params = json.loads(chart.params or "{}")
if "time_compare" in params or "comparison_type" in params:
params.pop("time_compare", None)
params.pop("comparison_type", None)
chart.params = json.dumps(params, sort_keys=True)
session.commit()
session.close() | [
1502
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(module, array, eg):
changed = False
eg_name = eg['name']
if module.check_mode:
module.exit_json(changed=changed)
try:
ok = array.METHOD_NAME(
eg['id'])
if ok:
module.log(msg='Export group {0} deleted.'.format(eg_name))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
module.exit_json(changed=changed) | [
34,
3935
] |
def METHOD_NAME(request, salt_factories):
config_defaults = {
"open_mode": True,
"transport": request.config.getoption("--transport"),
}
config_overrides = {
"interface": "127.0.0.1",
}
factory = salt_factories.salt_master_daemon(
"mm-master-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=info"],
)
with factory.started(start_timeout=120):
yield factory | [
2229,
3074,
2614,
1170
] |
def METHOD_NAME(mar_file_path, model_store, torchserve):
"""
Register the model in torchserve
"""
shutil.copy(mar_file_path, model_store)
file_name = Path(mar_file_path).name
model_name = Path(file_name).stem
test_utils.reg_resp = test_utils.METHOD_NAME(model_name, file_name)
yield model_name
test_utils.unregister_model(model_name) | [
372,
578
] |
def METHOD_NAME(self, area: QtCore.Qt.DockWidgetArea) -> None:
"""
Adjust tab icons to always orient upward on dock area move.
"""
if area == QtCore.Qt.LeftDockWidgetArea:
tab_pos = QtWidgets.QTabWidget.East
else:
tab_pos = QtWidgets.QTabWidget.West
self.tabs.setTabPosition(tab_pos)
# Rotate tab icons so they are always oriented upward
for tab_idx in range(self.tabs.count()):
widget = self.tabs.widget(tab_idx)
# Get previously stored, un-rotated, icon from widget
icon = self._tab_icons.get(id(widget))
if icon is not None:
upright_icon = self._rotate_icon(icon, tab_pos)
self.tabs.setTabIcon(tab_idx, upright_icon) | [
69,
5134,
708,
1180
] |
def METHOD_NAME(self, style=None): | [
0,
641
] |
def METHOD_NAME(self):
args = [
self.define("BUILD_UNIT_TESTS", self.run_tests),
self.define_from_variant("BUILD_PYTHON_MODULE", "python"),
self.define_from_variant("BUILD_CUDA_MODULE", "cuda"),
# https://github.com/isl-org/Open3D/issues/4570
# self.define('BUILD_FILAMENT_FROM_SOURCE', 'ON'),
# Use Spack-installed dependencies instead of vendored dependencies
# Numerous issues with using externally installed dependencies:
# https://github.com/isl-org/Open3D/issues/4333
# https://github.com/isl-org/Open3D/issues/4360
self.define("USE_SYSTEM_EIGEN3", True),
self.define("USE_SYSTEM_FLANN", True),
self.define("USE_SYSTEM_FMT", True),
self.define("USE_SYSTEM_GLEW", True),
self.define("USE_SYSTEM_GLFW", True),
# self.define('USE_SYSTEM_IMGUI', True),
self.define("USE_SYSTEM_JPEG", True),
# self.define('USE_SYSTEM_LIBLZF', True),
self.define("USE_SYSTEM_PNG", True),
self.define("USE_SYSTEM_PYBIND11", True),
self.define("USE_SYSTEM_QHULL", True),
# self.define('USE_SYSTEM_TINYGLTF', True),
# self.define('USE_SYSTEM_TINYOBJLOADER', True),
]
if "+python" in self.spec:
args.append(self.define("PYTHON_EXECUTABLE", self.spec["python"].command.path))
return args | [
334,
335
] |
def METHOD_NAME(c):
return (
not isinstance(c, ComplexTensor) and is_torch_1_9_plus and torch.is_complex(c)
) | [
137,
3296,
2587,
768
] |
def METHOD_NAME(self):
self.client.login(
username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility
) | [
0,
1
] |
def METHOD_NAME(task):
"""Try to start a task. This only succeeds if the task hasn't already
run, and no jobs are currently running that is excluded by the task."""
if jobs[task].state != 'waiting':
return
if all(is_not_running(i) for i in jobs[task].exclude):
jobs[task].state = 'running'
key, job = jobs[task].key, jobs[task].job
job_sink.send((key, job)) | [
1365,
24,
447
] |
def METHOD_NAME(opts):
"""
Prepare a connection to the Splunk HTTP event collector.
"""
http_event_collector_key = opts["token"]
http_event_collector_host = opts["indexer"]
http_event_collector_verify_ssl = opts["verify_ssl"]
# Return the collector
return http_event_collector(
http_event_collector_key,
http_event_collector_host,
verify_ssl=http_event_collector_verify_ssl,
) | [
129,
721,
417,
4523
] |
def METHOD_NAME(self):
with self.assertRaisesRegex(
EnvironmentError,
"hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
):
_ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model") | [
9,
578,
171,
130,
622
] |
def METHOD_NAME(target_indexes: TargetIndexes, data: NamedPoints) -> IndexedPoints:
"""Return an indexed version of the given data (arcs or points)."""
results: IndexedPoints = {}
for path, points in data.items():
result_points = results[path] = {}
for point, target_names in points.items():
result_point = result_points[point] = set()
for target_name in target_names:
result_point.add(get_target_index(target_name, target_indexes))
return results | [
567,
1882
] |
def METHOD_NAME(self, obj):
return RoleSerializer(obj.role).data | [
19,
1018
] |
def METHOD_NAME(self, request, *args, **kwargs):
q = request.GET.METHOD_NAME("q", None)
stype = request.GET.METHOD_NAME("type", None)
manuscript = request.GET.METHOD_NAME("manuscript", None)
rows = request.GET.METHOD_NAME("rows", "100")
start = request.GET.METHOD_NAME("start", "0")
# Give a 400 if there's a notation exception, and let
# anything else give a 500
results = self.do_query(manuscript, stype, q)
return Response({"numFound": len(results), "results": results}) | [
19
] |
def METHOD_NAME(state_name):
"""
Test - latest_active when a new kernel is available
"""
reboot = MagicMock(return_value=True)
latest = MagicMock(return_value=1)
with patch.dict(
kernelpkg.__salt__,
{"kernelpkg.needs_reboot": reboot, "kernelpkg.latest_installed": latest},
), patch.dict(kernelpkg.__opts__, {"test": False}):
kernelpkg.__salt__["system.reboot"].reset_mock()
ret = kernelpkg.latest_active(name=state_name)
assert ret["name"] == state_name
assert ret["result"]
assert isinstance(ret["changes"], dict)
assert isinstance(ret["comment"], str)
kernelpkg.__salt__["system.reboot"].assert_called_once()
with patch.dict(kernelpkg.__opts__, {"test": True}):
kernelpkg.__salt__["system.reboot"].reset_mock()
ret = kernelpkg.latest_active(name=state_name)
assert ret["name"] == state_name
assert ret["result"] is None
assert ret["changes"] == {"kernel": {"new": 1, "old": 0}}
assert isinstance(ret["comment"], str)
kernelpkg.__salt__["system.reboot"].assert_not_called() | [
9,
893,
923,
41,
1103
] |
def METHOD_NAME(
native_type: str,
sqla_type: type[types.TypeEngine],
attrs: Optional[dict[str, Any]],
generic_type: GenericDataType,
is_dttm: bool,
) -> None:
from superset.db_engine_specs.presto import PrestoEngineSpec as spec
assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm) | [
9,
19,
105,
1457
] |
def METHOD_NAME(self, cursor, statement, parameters, context=None):
# kinterbase does not accept a None, but wants an empty list
# when there are no arguments.
cursor.execute(statement, parameters or []) | [
74,
750
] |
def METHOD_NAME(self):
x = ModelOutputTest(a=30)
x["a"] = 10
self.assertEqual(x.a, 10)
self.assertEqual(x["a"], 10) | [
9,
0,
219
] |
def METHOD_NAME(df, encoding):
try:
x_values, y_values = GraphBase._get_x_y_values_aggregated(
df, encoding.x, encoding.y, encoding.y_aggregation
)
except ValueError:
x_values = GraphBase._get_x_values(df, encoding)
y_values = GraphBase._get_y_values(df, encoding)
return x_values, y_values | [
19,
1104,
320,
199
] |
def METHOD_NAME(self, prefix):
"""
Args:
prefix (str): an address prefix under which to look for leaves
Returns:
dict of str,bytes: the state entries at the leaves
"""
return self._tree.METHOD_NAME(prefix) | [
6363
] |
def METHOD_NAME(self):
"""Cleanup after test case execution"""
self._s1ap_wrapper.cleanup() | [
531,
481
] |
def METHOD_NAME(self):
clone_clf = clone(self.clf)
clone_clf = clone(self.clf_ae) | [
9,
578,
670
] |
def METHOD_NAME(result, target, delta=0):
"""Compare counts dictionary to targets."""
# Don't use get_counts method which converts hex
output = result.data(0)["counts"]
assertDictAlmostEqual(output, target, delta=delta) | [
979,
2496
] |
def METHOD_NAME(self, i=None):
A = self.A[i]
return A | [
19,
690,
604,
1042,
724
] |
def METHOD_NAME(self) -> dict[str, dict[str, EncryptedType]]:
"""
Iterates over SqlAlchemy's metadata, looking for EncryptedType
columns along the way. Builds up a dict of
table_name -> dict of col_name: enc type instance
:return:
"""
meta_info: dict[str, Any] = {}
for table_name, table in self._db.metadata.tables.items():
for col_name, col in table.columns.items():
if isinstance(col.type, EncryptedType):
cols = meta_info.get(table_name, {})
cols[col_name] = col.type
meta_info[table_name] = cols
return meta_info | [
1765,
9213,
342
] |
def METHOD_NAME(
graphene_info: "ResolveInfo",
external_pipeline: ExternalJob,
run_config: Mapping[str, object],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
) -> ExternalExecutionPlan:
return graphene_info.context.get_external_execution_plan(
external_job=external_pipeline,
run_config=run_config,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
) | [
19,
751,
2046,
145,
894,
241
] |
def METHOD_NAME( filename ):
if IsHeaderFile( filename ):
basename = p.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if p.exists( replacement_file ):
return replacement_file
return filename | [
416,
14894,
1458,
171
] |
def METHOD_NAME(type_proto: pb.Type) -> computation_types.Type:
"""Deserializes 'type_proto' as a `tff.Type`.
Note: Currently only deserialization for tensor, named tuple, sequence, and
function types is implemented.
Args:
type_proto: A `pb.Type` to deserialize.
Returns:
The corresponding instance of `tff.Type`.
Raises:
TypeError: If the argument is of the wrong type.
NotImplementedError: For type variants for which deserialization is not
implemented.
"""
type_variant = type_proto.WhichOneof('type')
if type_variant == 'tensor':
tensor_proto = type_proto.tensor
return computation_types.TensorType(
dtype=tf.dtypes.as_dtype(tensor_proto.dtype),
shape=_to_tensor_shape(tensor_proto),
)
elif type_variant == 'sequence':
return computation_types.SequenceType(
METHOD_NAME(type_proto.sequence.element)
)
elif type_variant == 'struct':
def empty_str_to_none(s):
if not s:
return None
return s
return computation_types.StructType(
[
(empty_str_to_none(e.name), METHOD_NAME(e.value))
for e in type_proto.struct.element
],
convert=False,
)
elif type_variant == 'function':
if type_proto.function.HasField('parameter'):
parameter_type = METHOD_NAME(type_proto.function.parameter)
else:
parameter_type = None
result_type = METHOD_NAME(type_proto.function.result)
return computation_types.FunctionType(
parameter=parameter_type, result=result_type
)
elif type_variant == 'placement':
return computation_types.PlacementType()
elif type_variant == 'federated':
placement_oneof = type_proto.federated.placement.WhichOneof('placement')
if placement_oneof == 'value':
return computation_types.FederatedType(
member=METHOD_NAME(type_proto.federated.member),
placement=placements.uri_to_placement_literal(
type_proto.federated.placement.value.uri
),
all_equal=type_proto.federated.all_equal,
)
else:
raise NotImplementedError(
'Deserialization of federated types with placement spec as {} '
'is not currently implemented yet.'.format(placement_oneof)
)
else:
raise NotImplementedError('Unknown type variant {}.'.format(type_variant)) | [
2696,
44
] |
def METHOD_NAME(self):
self.auth.servers.running.return_value = {
1: "server1",
2: "server2",
}
self.auth.served["server1"].add("chall1")
self.auth.served["server2"].update(["chall2", "chall3"])
self.auth.cleanup(["chall1"])
assert self.auth.served == {
"server1": set(), "server2": {"chall2", "chall3"}}
self.auth.servers.stop.assert_called_once_with(1)
self.auth.servers.running.return_value = {
2: "server2",
}
self.auth.cleanup(["chall2"])
assert self.auth.served == {
"server1": set(), "server2": {"chall3"}}
assert 1 == self.auth.servers.stop.call_count
self.auth.cleanup(["chall3"])
assert self.auth.served == {
"server1": set(), "server2": set()}
self.auth.servers.stop.assert_called_with(2) | [
9,
950
] |
def METHOD_NAME(march):
if len(march) < 5:
return None
march = march.replace("rv64g", "rv64imafd").replace("rv32g", "rv32imafd")
if march[0:5] not in ['rv64i', 'rv32i', 'rv32e']:
print (march[0:5])
return None
ext_str = march[4:]
idx = 0
extstrlens = len(ext_str)
exts = dict()
while idx < extstrlens:
if ext_str[idx] in SUPPORTTED_EXTS:
idx, ext_name, major, minor = parse_version(ext_str, idx)
elif ext_str[idx] in MC_EXT_PREFIX:
idx, ext_name, major, minor = parse_mc_ext(ext_str, idx)
elif ext_str[idx] == '_':
idx = idx + 1
continue
else:
raise Exception("Unrecognized ext : `%s`, %s" %
(ext_str[idx], ext_str))
exts[ext_name] = (major, minor)
return exts | [
214,
10246
] |
def METHOD_NAME(self, msg, info_only=False):
if not info_only:
self.errors = True
if self.raise_first_error and not info_only:
raise ValidationError(msg)
self.messages.append(msg) | [
276,
168
] |
def METHOD_NAME(apx):
"""Convert the old style xml approximant name to a name
and phase_order.
"""
import lalsimulation as lalsim
apx = str(apx)
try:
order = lalsim.GetOrderFromString(apx)
except:
print("Warning: Could not read phase order from string, using default")
order = -1
name = lalsim.GetStringFromApproximant(lalsim.GetApproximantFromString(apx))
return name, order | [
3116,
-1,
156
] |
def METHOD_NAME(self):
return self.resource_type | [
19,
578
] |
def METHOD_NAME():
"""Check if the rebootmgrd is running and active or not.
CLI Example:
.. code-block:: bash
salt microos rebootmgr is_active
"""
cmd = ["rebootmgrctl", "is_active", "--quiet"]
return _cmd(cmd, retcode=True) == 0 | [
137,
923
] |
def METHOD_NAME():
parser = argparse.ArgumentParser()
parser.add_argument("--run-id", help="Name of the run-id folder", type=str)
parser.add_argument(
"--local-dir",
help="Path of the run_id folder that contains the trained model",
type=str,
default="./",
)
parser.add_argument(
"--repo-id",
help="Repo id of the model repository from the Hugging Face Hub",
type=str,
)
parser.add_argument(
"--commit-message", help="Commit message", type=str, default="Push to Hub"
)
parser.add_argument(
"--configfile-name",
help="Name of the configuration yaml file",
type=str,
default="configuration.yaml",
)
args = parser.parse_args()
# Push model to hub
package_to_hub(
args.run_id,
args.local_dir,
args.repo_id,
args.commit_message,
args.configfile_name,
) | [
57
] |
def METHOD_NAME(tmp_dir, dvc):
fs = DataFileSystem(index=dvc.index.data["repo"])
for _ in fs.walk("dir"):
pass | [
9,
4716,
1038
] |
def METHOD_NAME(parent): # htest #
from Tkinter import Tk, Text
import re
root = Tk()
root.title("Test WidgetRedirector")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = Text(root)
text.pack()
text.focus_set()
redir = WidgetRedirector(text)
def my_insert(*args):
print("insert", args)
original_insert(*args)
original_insert = redir.register("insert", my_insert)
root.mainloop() | [
706,
9510
] |
def METHOD_NAME():
X = matrix("X")
Y = matrix_inverse(X)
Z = Y.transpose()
f = aesara.function([X], Z)
if config.mode != "FAST_COMPILE":
for node in f.maker.fgraph.toposort():
if isinstance(node.op, MatrixInverse):
assert isinstance(node.inputs[0].owner.op, DimShuffle)
if isinstance(node.op, DimShuffle):
assert node.inputs[0].name == "X" | [
9,
7296,
24,
7297
] |
def METHOD_NAME(self):
stdout_file = f"{TEST_FILES_DIR}/critic2/MoS2_critic2_stdout.txt"
stdout_file_new_format = f"{TEST_FILES_DIR}/critic2/MoS2_critic2_stdout_new_format.txt"
with open(stdout_file) as f:
reference_stdout = f.read()
with open(stdout_file_new_format) as f:
reference_stdout_new_format = f.read()
structure = Structure.from_file(f"{TEST_FILES_DIR}/critic2/MoS2.cif")
self.c2o = Critic2Analysis(structure, reference_stdout)
self.c2o_new_format = Critic2Analysis(structure, reference_stdout_new_format) | [
0,
1
] |
def METHOD_NAME(self) -> int:
return self._weight | [
1336
] |
def METHOD_NAME(image1, image2):
"""
Superimposes two inverted images on top of each other. At least one of the
images must have mode "1".
.. code-block:: python
out = MAX - ((MAX - image1) * (MAX - image2) / MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im)) | [
5099
] |
def METHOD_NAME(self):
"""This is neural_compressor function include tuning, export and benchmark option."""
from neural_compressor import set_random_seed
set_random_seed(9527)
if args.tune:
from neural_compressor import quantization
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.utils.create_obj_from_config import create_dataloader
calib_dataloader_args = {
'batch_size': 10,
'dataset': {"ImageRecord": {'root':args.dataset_location}},
'transform': {'ResizeCropImagenet':
{'height': 224, 'width': 224, 'scale': 0.017,
'mean_value': [123.68, 116.78, 103.94]}},
'filter': None
}
calib_dataloader = create_dataloader('tensorflow', calib_dataloader_args)
eval_dataloader_args = {
'batch_size': 32,
'dataset': {"ImageRecord": {'root':args.dataset_location}},
'transform': {'ResizeCropImagenet':
{'height': 224, 'width': 224, 'scale': 0.017,
'mean_value': [123.68, 116.78, 103.94]}},
'filter': None
}
eval_dataloader = create_dataloader('tensorflow', eval_dataloader_args)
conf = PostTrainingQuantConfig(calibration_sampling_size=[50, 100])
from neural_compressor import METRICS
metrics = METRICS('tensorflow')
top1 = metrics['topk']()
from neural_compressor.data import LabelShift
postprocess = LabelShift(label_shift=1)
def eval(model):
return evaluate(model, eval_dataloader, top1, postprocess)
q_model = quantization.fit(args.input_graph, conf=conf, calib_dataloader=calib_dataloader,
eval_func=eval)
q_model.save(args.output_graph)
if args.benchmark:
from neural_compressor.utils.create_obj_from_config import create_dataloader
dataloader_args = {
'batch_size': args.batch_size,
'dataset': {"ImageRecord": {'root':args.dataset_location}},
'transform': {'ResizeCropImagenet': {'height': 224, 'width': 224, 'scale': 0.017,
'mean_value': [123.68, 116.78, 103.94]}},
'filter': None
}
dataloader = create_dataloader('tensorflow', dataloader_args)
from neural_compressor import METRICS
metrics = METRICS('tensorflow')
top1 = metrics['topk']()
from neural_compressor.data import LabelShift
postprocess = LabelShift(label_shift=1)
def eval(model):
return evaluate(model, dataloader, top1, postprocess)
if args.mode == 'performance':
from neural_compressor.benchmark import fit
from neural_compressor.config import BenchmarkConfig
conf = BenchmarkConfig(warmup=10, iteration=100, cores_per_instance=4, num_of_instance=1)
fit(args.input_graph, conf, b_dataloader=dataloader)
elif args.mode == 'accuracy':
acc_result = eval(args.input_graph)
print("Batch size = %d" % dataloader.batch_size)
print("Accuracy: %.5f" % acc_result) | [
22
] |
def METHOD_NAME(mocker):
test_create_app(mocker, redirect_uris='http://example.net')
kwargs = requests.post.call_args[1]
assert kwargs['data']['redirect_uris'] == 'http://example.net' | [
9,
129,
991,
1736,
8489
] |