text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
def METHOD_NAME():
pass | [
6373,
19,
395,
573,
1318
] |
def METHOD_NAME(
parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
parser.add_argument(
dest="session_name",
metavar="session-name",
nargs="?",
action="store",
)
parser.add_argument(
"-S", dest="socket_path", metavar="socket-path", help="pass-through for tmux -S"
)
parser.add_argument(
"-L", dest="socket_name", metavar="socket-name", help="pass-through for tmux -L"
)
parser.add_argument(
"-f",
"--workspace-format",
choices=["yaml", "json"],
help="format to save in",
)
parser.add_argument(
"-o",
"--save-to",
metavar="output-path",
type=pathlib.Path,
help="file to save to",
)
parser.add_argument(
"--yes",
"-y",
dest="answer_yes",
action="store_true",
help="always answer yes",
)
parser.add_argument(
"--quiet",
"-q",
dest="quiet",
action="store_true",
help="don't prompt for confirmation",
)
parser.add_argument(
"--force",
dest="force",
action="store_true",
help="overwrite the workspace file",
)
return parser | [
129,
3125,
3509
] |
def METHOD_NAME(error_message, from_beginning):
"""Restart all the workflows in ERROR matching the given error message."""
errors = WorkflowObjectModel.query.filter_by(status=ObjectStatus.ERROR).all()
to_restart = [e.id for e in errors if error_message in e.extra_data['_error_msg']]
click.secho("Found {} workflows to restart from {}".format(
len(to_restart),
"first step" if from_beginning else "current step"
))
for wf_id in to_restart:
obj = workflow_object_class.get(wf_id)
if from_beginning:
obj.callback_pos = [0]
obj.status = ObjectStatus.INITIAL
else:
obj.status = ObjectStatus.RUNNING
obj.save()
db.session.commit()
obj.continue_workflow('restart_task', True)
click.secho("Workflow {} restarted successfully".format(wf_id)) | [
1141,
604,
168
] |
def METHOD_NAME(self, container: Size, viewport: Size, width: int) -> int:
return 1 | [
19,
459,
1877
] |
def METHOD_NAME(self):
ret = str(self.next_id)
self.next_id += 1
return ret | [
19,
147
] |
def METHOD_NAME(self):
"""Verify absence of signals outside known injection times"""
clear_times = [
self.injections[0].end_time - 86400,
self.injections[-1].end_time + 86400
]
injections = InjectionSet(self.inj_file.name)
for det in self.detectors:
for epoch in clear_times:
ts = TimeSeries(numpy.zeros(int(10 * self.sample_rate)),
delta_t=1/self.sample_rate,
epoch=lal.LIGOTimeGPS(epoch),
dtype=numpy.float64)
injections.apply(ts, det.name)
max_amp, max_loc = ts.abs_max_loc()
self.assertEqual(max_amp, 0) | [
9,
7147,
10649
] |
def METHOD_NAME(self, strategy, verbose=True):
"""
Apply the specified strategy transform to the importer's data.
This method applies a given strategy to the importer's data and logs the applied strategy's name to
`self.applied_strategies`. If the strategy raises a `StrategyError`, the error message is printed but
not raised.
Parameters
----------
strategy : callable
The strategy function to apply to the importer's data.
verbose : bool, optional
If True, print a message indicating which strategy is being applied. Defaults to True.
Returns
-------
None
Modifies the importer's data in place.
Raises
------
None
If the strategy raises a `StrategyError`, the error message is printed but not raised.
Notes
-----
Strategies should not partially modify data before raising a `StrategyError`.
"""
if not hasattr(self, "applied_strategies"):
self.applied_strategies = []
try:
func_name = strategy.__name__
except AttributeError: # Curried function
func_name = strategy.func.__name__
if verbose:
print("Applying strategy: {}".format(func_name))
try:
self.data = strategy(self.data)
self.applied_strategies.append(func_name)
except StrategyError as err:
print("Couldn't apply strategy {}:\n\t{}".format(func_name, err)) | [
231,
1554
] |
def METHOD_NAME(self): | [
9,
437,
4201
] |
def METHOD_NAME(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.74*(r-y) - 0.27*(b-y)
q = 0.48*(r-y) + 0.41*(b-y)
return (y, i, q) | [
2310,
24,
4613
] |
def METHOD_NAME(ceph_cluster, **kw):
"""
CEPH-83571453-RADOS:
Corrupt an object in ec pool followed by
list-inconsistent-* commands
1. create a jerasure ec pool with k=4,m=2
2. create an object in the pool
3. chose primary osd from the acting set and go to the backend
4. corrupt object attrib from the backend
5. run deep-scrub on the pool
6. rados list-inconsistent-pg <pool>
7. rados list-inconsistent-obj <pg>
Args:
ceph_cluster (ceph.ceph.Ceph): ceph cluster
"""
log.info("Running CEPH-83571453")
log.info(METHOD_NAME.__doc__)
ceph_nodes = kw.get("ceph_nodes")
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
mons = []
role = "client"
for mnode in ceph_nodes:
if mnode.role == role:
mons.append(mnode)
ctrlr = mons[0]
log.info("chosing mon {cmon} as ctrlrmon".format(cmon=ctrlr.hostname))
helper = RadosHelper(ctrlr, config, log)
"""create ec pool with k=4, m=2"""
k = 4
m = 2
pname = "eccorrupt_{rand}_{k}_{m}".format(rand=random.randint(0, 10000), k=k, m=m)
profile = pname
if build.startswith("4"):
prof_cmd = "osd erasure-code-profile set {profile} k={k} m={m} \
crush-failure-domain=osd".format(
profile=profile, k=k, m=m
)
else:
prof_cmd = "osd erasure-code-profile set {profile} k={k} m={m} \
ruleset-failure-domain=osd crush-failure-domain=osd".format(
profile=profile, k=k, m=m
)
try:
(outbuf, err) = helper.raw_cluster_cmd(prof_cmd)
log.info(outbuf)
log.info("created profile {ec}".format(ec=profile))
except Exception:
log.error("ec profile creation failed")
log.error(traceback.format_exc())
return 1
"""create ec pool"""
try:
helper.create_pool(pname, 1, profile)
log.info("Pool {pname} is create".format(pname=pname))
except Exception:
log.error("failed to create pool")
log.error(traceback.format_exc())
return 1
"""check whether pool exists"""
try:
helper.get_pool_num(pname)
except Exception:
log.error("Unable to find pool")
log.error(traceback.format_exc())
return 1
time.sleep(10)
oname = "OBJ_{pname}".format(pname=pname)
cmd = "osd map {pname} {obj} --format json".format(pname=pname, obj=oname)
(outbuf, err) = helper.raw_cluster_cmd(cmd)
log.info(outbuf)
cmdout = json.loads(outbuf)
targt_pg = cmdout["pgid"]
"""considering primary only as of now because of bug
1544680
"""
targt_osd_id = cmdout["up"][0]
"""write data and take snaps"""
putobj = "sudo rados -p {pool} put {obj} {path}".format(
pool=pname, obj=oname, path="/etc/hosts"
)
for i in range(10):
(out, err) = ctrlr.exec_command(cmd=putobj)
snapcmd = "sudo rados mksnap -p {pool} {sname}".format(
pool=pname, sname="snap" + str(i)
)
(out, err) = ctrlr.exec_command(cmd=snapcmd)
log.info("put {obj}, snap {snap}".format(obj=oname, snap="snap" + str(i)))
"""
Goto destination osd, stop the osd
use ceph-objectstore-tool to corrupt
snap info
"""
# target_osd = ceph_cluster.get_osd_by_id(targt_osd_id)
# target_osd_node = target_osd.node
target_osd_hostname = ceph_cluster.get_osd_metadata(targt_osd_id).get("hostname")
log.info(target_osd_hostname)
target_osd_node = ceph_cluster.get_node_by_hostname(target_osd_hostname)
cot_environment = target_osd_node
osd_service = ceph_cluster.get_osd_service_name(targt_osd_id)
partition_path = ceph_cluster.get_osd_metadata(targt_osd_id).get("osd_data")
helper.kill_osd(target_osd_node, osd_service)
time.sleep(10)
osd_metadata = ceph_cluster.get_osd_metadata(targt_osd_id)
osd_data = osd_metadata.get("osd_data")
osd_journal = osd_metadata.get("osd_journal")
if ceph_cluster.containerized:
docker_image_string = "{docker_registry}/{docker_image}:{docker_tag}".format(
docker_registry=ceph_cluster.ansible_config.get("ceph_docker_registry"),
docker_image=ceph_cluster.ansible_config.get("ceph_docker_image"),
docker_tag=ceph_cluster.ansible_config.get("ceph_docker_image_tag"),
)
cot_environment = helper.get_mgr_proxy_container(
target_osd_node, docker_image_string
)
device_mount_data, err = cot_environment.exec_command(
cmd='mount | grep "{partition_path} "'.format(
partition_path=partition_path
),
check_ec=False,
)
if not device_mount_data:
cot_environment.exec_command(
cmd="sudo mount {partition_path} {directory}".format(
partition_path=partition_path, directory=osd_data
)
)
slist_cmd = "sudo ceph-objectstore-tool --data-path \
{osd_data} --journal-path \
{osd_journal} \
--head --op list {obj}".format(
osd_data=osd_data, osd_journal=osd_journal, obj=oname
)
(outbuf, err) = cot_environment.exec_command(cmd=slist_cmd)
log.info(outbuf)
corrupt_cmd = "sudo ceph-objectstore-tool --data-path \
{osd_data} --journal-path \
{osd_journal} \
{outbuf} clear-snapset \
corrupt".format(
osd_data=osd_data, osd_journal=osd_journal, outbuf="'" + (outbuf) + "'"
)
(outbuf, err) = cot_environment.exec_command(cmd=corrupt_cmd)
log.info(outbuf)
helper.revive_osd(target_osd_node, osd_service)
time.sleep(10)
run_scrub = "pg deep-scrub {pgid}".format(pgid=targt_pg)
(outbuf, err) = helper.raw_cluster_cmd(run_scrub)
log.info(outbuf)
while "HEALTH_ERR" and "active+clean+inconsistent" not in outbuf:
status = "-s --format json"
(outbuf, err) = helper.raw_cluster_cmd(status)
log.info("HEALTH_ERR found as expected")
log.info("inconsistent foud as expected")
timeout = 300
found = 0
while timeout:
incon_pg = "sudo rados list-inconsistent-pg {pname}".format(pname=pname)
(outbuf, err) = ctrlr.exec_command(cmd=incon_pg)
log.info(outbuf)
if targt_pg not in outbuf:
time.sleep(1)
timeout = timeout - 1
else:
found = 1
break
if timeout == 0 and found == 0:
log.error("pg not listed as inconsistent")
return 1
timeout = 300
found = 0
while timeout:
incon_obj = "sudo rados list-inconsistent-snapset \
{pg}".format(
pg=targt_pg
)
(outbuf, err) = ctrlr.exec_command(cmd=incon_obj)
log.info(outbuf)
if oname not in outbuf:
time.sleep(1)
timeout = timeout - 1
else:
found = 1
break
if timeout == 0 and found == 0:
log.error("object is not listed in inconsistent obj")
return 1
return 0 | [
22
] |
def METHOD_NAME(self, breakdown, df):
demo_col = std_col.RACE_CATEGORY_ID_COL if breakdown == RACE else breakdown
unknown = Race.UNKNOWN.value if breakdown == RACE else UNKNOWN
df = df.rename(columns={'demographic_category': demo_col})
demo_rows = set(BREAKDOWN_MAP[breakdown].keys())
df = df.loc[df[demo_col].isin(demo_rows)].reset_index(drop=True)
df = df.replace(BREAKDOWN_MAP[breakdown])
known_df = df.loc[df[demo_col] != unknown].reset_index(drop=True)
unknown_df = df.loc[df[demo_col] == unknown].reset_index(drop=True)
known_df = known_df.rename(columns={'administered_dose1_pct_known': std_col.VACCINATED_PCT_SHARE})
unknown_df = unknown_df.rename(columns={'administered_dose1_pct_us': std_col.VACCINATED_PCT_SHARE})
df = pd.concat([known_df, unknown_df])
df[std_col.VACCINATED_PER_100K] = df['administered_dose1_pct'].apply(calc_per_100k)
df.loc[df[demo_col].isin(ALLS), std_col.VACCINATED_PCT_SHARE] = 100.0
if breakdown == AGE:
df[std_col.VACCINATED_POP_PCT] = df[demo_col].map(AGE_GROUPS_TO_POP_PCT)
else:
df = merge_pop_numbers(df, breakdown, NATIONAL_LEVEL)
df = df.rename(columns={std_col.POPULATION_PCT_COL: std_col.VACCINATED_POP_PCT})
df[std_col.STATE_FIPS_COL] = US_FIPS
df[std_col.STATE_NAME_COL] = US_NAME
df = df[[std_col.STATE_NAME_COL, std_col.STATE_FIPS_COL, demo_col,
std_col.VACCINATED_PCT_SHARE, std_col.VACCINATED_POP_PCT,
std_col.VACCINATED_PER_100K]]
if breakdown == RACE:
std_col.add_race_columns_from_category_id(df)
return df | [
567,
11316
] |
def METHOD_NAME(self):
"""Receive layer model track line width change event and update slider."""
with self.layer.events.head_length.blocker():
value = self.layer.head_length
self.head_length_slider.setValue(value) | [
69,
373,
799,
194
] |
def METHOD_NAME(self, group):
group_statistics = self.groups_statistics[group]
self.course_statistics['summ_scores'] += group_statistics['group_summ_scores']
self.course_statistics['number_students'] += group_statistics['number_group_students']
self.course_statistics['number_students_with_tasks'] += group_statistics['number_students_with_tasks'] | [
86,
1122,
68
] |
def METHOD_NAME(self):
with capture_connection_state(self.mock_location):
raise errors.NetworkLocationResponseFailure()
self.assertEqual(
self.mock_location.connection_status, ConnectionStatus.ResponseFailure
)
self.assertEqual(self.mock_location.connection_faults, 1) | [
9,
17,
374
] |
def METHOD_NAME(path):
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (module, e))
except ValueError as e:
raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
try:
getattr(cls, 'supports_object_permissions')
except AttributeError:
warn("Authentication backends without a `supports_object_permissions` attribute are deprecated. Please define it in %s." % cls,
PendingDeprecationWarning)
cls.supports_object_permissions = False
try:
getattr(cls, 'supports_anonymous_user')
except AttributeError:
warn("Authentication backends without a `supports_anonymous_user` attribute are deprecated. Please define it in %s." % cls,
PendingDeprecationWarning)
cls.supports_anonymous_user = False
return cls() | [
557,
3127
] |
def METHOD_NAME(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=securitycenterCallTransformer(), | [
1112,
1537
] |
def METHOD_NAME(self):
data_refs = {}
for elem in self.xpath('./bpmn:ioSpecification/bpmn:dataInput'):
ref = self.create_data_spec(elem, TaskDataReference)
data_refs[ref.bpmn_id] = ref
for elem in self.xpath('./bpmn:ioSpecification/bpmn:dataOutput'):
ref = self.create_data_spec(elem, TaskDataReference)
data_refs[ref.bpmn_id] = ref
inputs, outputs = [], []
for ref in self.xpath('./bpmn:ioSpecification/bpmn:inputSet/bpmn:dataInputRefs'):
if ref.text in data_refs:
inputs.append(data_refs[ref.text])
for ref in self.xpath('./bpmn:ioSpecification/bpmn:outputSet/bpmn:dataOutputRefs'):
if ref.text in data_refs:
outputs.append(data_refs[ref.text])
return BpmnIoSpecification(inputs, outputs) | [
214,
249,
1457
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
self._execute_operations()
return self._output() | [
1519
] |
def METHOD_NAME(dtype):
image = np.zeros((10, 10), dtype=dtype)
label_image = np.zeros((10, 10), dtype=np.uint8)
label_image[2:7, 2:7] = 1
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
marked = mark_boundaries(image, label_image, color=white, mode='thick')
assert marked.dtype == _supported_float_type(dtype)
result = np.mean(marked, axis=-1)
assert_array_equal(result, ref)
ref = np.array([[0, 2, 2, 2, 2, 2, 2, 2, 0, 0],
[2, 2, 1, 1, 1, 1, 1, 2, 2, 0],
[2, 1, 1, 1, 1, 1, 1, 1, 2, 0],
[2, 1, 1, 2, 2, 2, 1, 1, 2, 0],
[2, 1, 1, 2, 0, 2, 1, 1, 2, 0],
[2, 1, 1, 2, 2, 2, 1, 1, 2, 0],
[2, 1, 1, 1, 1, 1, 1, 1, 2, 0],
[2, 2, 1, 1, 1, 1, 1, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
marked = mark_boundaries(image, label_image, color=white,
outline_color=(2, 2, 2), mode='thick')
result = np.mean(marked, axis=-1)
assert_array_equal(result, ref) | [
9,
1743,
1744
] |
def METHOD_NAME():
mlflow.statsmodels.autolog()
with mlflow.start_run() as run:
ols_model()
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id | [
9,
6831,
6832,
15778,
8077,
152,
22
] |
def METHOD_NAME(DT_list, mesh_dir):
if not os.path.exists(mesh_dir):
os.makedirs(mesh_dir)
mesh_files = []
xml_filename = mesh_dir + "temp.xml"
for input_file in DT_list:
print(' ' + get_prefix(input_file))
output_vtk = mesh_dir + "original_" + get_prefix(input_file) + ".vtk"
image = sw.Image(input_file)
image.toMesh(isovalue=0).write(output_vtk)
mesh_files.append(output_vtk)
return sorted(mesh_files) | [
19,
1949,
280,
3475
] |
def METHOD_NAME(plugin_config):
return plugin_config or {} | [
187,
2793,
200
] |
def METHOD_NAME(
script_folder: Folder, level_list: Optional[LevelListBin] = None
) -> ScriptFiles:
"""Returns information about the files used by the script engine in an 'introspectable' way."""
script_files = ScriptFiles(common=[], maps=OrderedDict())
for map_or_common_name, folder in script_folder.folders:
if map_or_common_name == COMMON_DIR:
# Common script directory
for filename in folder.files:
script_files["common"].append(filename)
else:
# Map directory
map = MapEntry(
name=map_or_common_name,
enter_sse=None,
enter_ssbs=[],
subscripts=OrderedDict(),
lsd=None,
ssas=[],
)
ssa_stems = []
ssbs = []
script_files["maps"][map_or_common_name] = map
for filename in folder.files:
if filename == ENTER_SSE:
# Enter SSE
map["enter_sse"] = filename
elif ENTER_SSB_PATTERN.match(filename):
# Enter SSB
map["enter_ssbs"].append(filename)
elif filename == map_or_common_name.lower() + LSD_EXT:
# LSD file
map["lsd"] = filename
elif filename.endswith(SSS_EXT):
# Subscript SSS
map["subscripts"][filename] = []
elif filename.endswith(SSA_EXT):
# Acting SSA file
ssa_stems.append(filename[: -(len(SSA_EXT))])
elif filename.endswith(SSB_EXT):
# Acting or Subscript SSB:
ssbs.append(filename)
# Process ssbs
for ssb in ssbs:
ssb_stem = ssb[: -(len(SSB_EXT))]
if ssb_stem in ssa_stems:
# SSB is for SSA file:
map["ssas"].append((ssb_stem + SSA_EXT, ssb))
for subscript_name, list_of_ssbs_for_subscript in map[
"subscripts"
].items():
# SSB is for subscript
if ssb_stem.startswith(subscript_name[: -len(SSS_EXT)]):
list_of_ssbs_for_subscript.append(ssb)
break
if level_list:
# Add all empty levels
for level in level_list.list:
if level.name not in script_files["maps"]:
script_files["maps"][level.name] = MapEntry(
name=level.name,
enter_sse=None,
enter_ssbs=[],
subscripts=OrderedDict(),
lsd=None,
ssas=[],
)
return script_files | [
557,
782,
1537
] |
def METHOD_NAME(arg_parser: ArgumentParser) -> None:
arg_parser.add_argument(
"--psk",
help="Pre-shared key",
type=lambda x: x if len(x) > 0 else None,
default=None,
dest="psk",
) | [
238,
335
] |
def METHOD_NAME(self, qinsn, event: "QGraphicsSceneMouseEvent"):
return False | [
276,
212,
13844
] |
def METHOD_NAME(self, mock_event_watcher):
# NOTE: We only test kwarg plumbing for watch_dir since watcher_class
# selection is tested extensively in test_watch_file, and the two
# functions are otherwise identical.
on_file_changed = Mock()
watching_dir = watch_dir(
"some/dir/path",
on_file_changed,
watcher_type="watchdog",
glob_pattern="*.py",
allow_nonexistent=True,
)
self.assertTrue(watching_dir)
mock_event_watcher.assert_called_with(
"some/dir/path",
on_file_changed,
glob_pattern="*.py",
allow_nonexistent=True,
) | [
9,
1619,
1190,
2001,
-1
] |
def METHOD_NAME(cls, method: t.Callable[P, R]) -> t.Callable[P, Paginator[R]]:
"""
This is an alternate method for getting a paginator for a paginated method which
correctly preserves the type signature of the paginated method.
It should be used on instances of clients and only passed bound methods of those
clients. For example, given usage
>>> tc = TransferClient()
>>> paginator = tc.paginated.endpoint_search(...)
a well-typed paginator can be acquired with
>>> tc = TransferClient()
>>> paginated_call = Paginator.wrap(tc.endpoint_search)
>>> paginator = paginated_call(...)
Although the syntax is slightly more verbose, this allows `mypy` and other type
checkers to more accurately infer the type of the paginator.
:param method: The method to convert to a paginator
:type method: callable
"""
if not inspect.ismethod(method):
raise TypeError(f"Paginator.wrap can only be used on methods, not {method}")
if not getattr(method, "_has_paginator", False):
raise ValueError(f"'{method}' is not a paginated method")
as_paginated = t.cast(_PaginatedFunc[PageT], method)
paginator_class = as_paginated._paginator_class
paginator_params = as_paginated._paginator_params
paginator_items_key = as_paginated._paginator_items_key
@functools.wraps(method)
def paginated_method(*args: t.Any, **kwargs: t.Any) -> Paginator[PageT]:
return paginator_class(
method,
client_args=list(args),
client_kwargs=kwargs,
items_key=paginator_items_key,
**paginator_params,
)
return t.cast(t.Callable[P, Paginator[R]], paginated_method) | [
503
] |
def METHOD_NAME(monkeypatch):
mock_agent = MagicMock()
monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
invoke_and_assert(
command=[
"agent",
"start",
"--prefetch-seconds",
"30",
"-q",
"test",
"--run-once",
],
expected_code=0,
)
mock_agent.assert_called_once_with(
work_queues=["test"],
work_queue_prefix=ANY,
work_pool_name=None,
prefetch_seconds=30,
limit=None,
) | [
9,
447,
1849,
41,
518,
633
] |
def METHOD_NAME(self) -> bool:
return self._inner.version == 4 | [
137,
8046
] |
def METHOD_NAME():
parser = argparse.ArgumentParser()
parser.add_argument(
"--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path"
)
parser.add_argument(
"--imgsz",
"--img",
"--img-size",
type=int,
default=640,
help="inference size (pixels)",
)
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument(
"--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path"
)
parser.add_argument(
"--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
)
parser.add_argument(
"--half", action="store_true", help="use FP16 half-precision inference"
)
parser.add_argument("--test", action="store_true", help="test exports only")
parser.add_argument("--pt-only", action="store_true", help="test PyTorch only")
parser.add_argument(
"--hard-fail", action="store_true", help="throw error on benchmark failure"
)
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
print_args(vars(opt))
return opt | [
214,
1671
] |
def METHOD_NAME():
"""Test that failing to specify a measurement
raises an exception"""
dev = qml.device("default.qubit.legacy", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.PauliY(0)
with pytest.raises(qml.QuantumFunctionError, match="must return either a single measurement"):
_ = circuit(0.65) | [
9,
654,
599
] |
def METHOD_NAME(n):
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
return ("%.3f" % n).rstrip("0").rstrip(".") | [
-1
] |
def METHOD_NAME():
return mock.Mock(traits.CacheAware, rest=mock.AsyncMock()) | [
248,
991
] |
def METHOD_NAME(
blob_reference: oci.model.OciBlobRef,
image_reference: typing.Union[str, oci.model.OciImageReference],
oci_client: oci.client.Client,
clamav_client: clamav.client.ClamAVClient,
) -> typing.Generator[clamav.model.ScanResult, None, None]:
try:
yield from scan_oci_blob_filewise(
blob_reference=blob_reference,
image_reference=image_reference,
oci_client=oci_client,
clamav_client=clamav_client,
)
except tarfile.TarError as te:
logger.warning(f'{image_reference=} {te=} - falling back to layerwise scan')
yield from scan_oci_blob_layerwise(
blob_reference=blob_reference,
image_reference=image_reference,
oci_client=oci_client,
clamav_client=clamav_client,
) | [
793,
10358,
260
] |
def METHOD_NAME(opt):
"""
Error verification can be enabled by default (not just on ``waf -v``) by adding to the user script options
"""
enhance_lib() | [
1881
] |
def METHOD_NAME(self, parent):
self.sync_value(self.factory.choices, 'choices', 'from', is_list=True)
self.selected = self.value
return self.edit_traits(parent=parent, kind='subpanel') | [
176,
882
] |
def METHOD_NAME(self):
self._test_plan_create_new(is_active=False) | [
9,
145,
129,
80,
3439
] |
def METHOD_NAME():
img_str = 'wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg'
if not os.path.exists('000000014439.jpg'):
print(img_str)
run(img_str, shell=True)
prepare('000000014439.jpg', [320, 320])
cp_npz_str = 'cp ./inputs.npz ./picodet'
print(cp_npz_str)
run(cp_npz_str, shell=True) | [
660,
123
] |
def METHOD_NAME():
return _formatDate(getEnigmaVersionString().replace("-", "")) | [
19,
679,
86
] |
def METHOD_NAME():
mnist.run_keras_single_device('cpu', 0) | [
9,
4098,
97,
2265
] |
def METHOD_NAME():
"""Index of subclass of tuple is None"""
class TupleTest(tuple):
"""Subclass of tuple"""
pass
return TupleTest()[None] # [invalid-sequence-index] | [
18191
] |
def METHOD_NAME(reason):
rospy.signal_shutdown(reason)
roscpp_shutdown() | [
631,
12016
] |
def METHOD_NAME(state: State, title: str | None, suppress_warning: bool) -> str:
if title is not None:
return title
if state.file:
return state.file.title
if not suppress_warning:
warn("save() called but no title was supplied and output_file(...) was never called, using default title 'Bokeh Plot'")
return DEFAULT_TITLE | [
19,
73,
2893
] |
def METHOD_NAME(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self, model):
model.fs.unit.report() | [
9,
339
] |
def METHOD_NAME():
"""
Acquires Pebble's ephemeral root certificate that when trusted implies trust
to the ACME client's certificates. We can use the response of this function
when we make web requests with the requests library.
requests.get(..., verify=<True|False|path_to_certificate>)
If HUB_URL is http:// return False since no certificate is required
"""
if os.getenv("HUB_URL", "").startswith("http://"):
return False
# 'localhost' may resolve to an ipv6 address which may not be supported on older K3S
# 127.0.0.1 is more reliable
response = requests.get("https://127.0.0.1:32444/roots/0", verify=False, timeout=10)
if not response.ok:
return True
base_dir = os.path.dirname(os.path.dirname(__file__))
cert_path = os.path.join(base_dir, "ci/ephemeral-pebble-acme-ca.crt")
with open(cert_path, "w+") as f:
f.write(response.text)
return cert_path | [
15745,
13115,
1246,
1941
] |
def METHOD_NAME(self, content, **blob_meta_args):
"""Put a blob in persistent storage
:param content: A file-like object in binary read mode.
:param **blob_meta_args: A single `"meta"` argument (`BlobMeta`
object) or arguments used to construct a `BlobMeta` object:
- domain - (required, text) domain name.
- parent_id - (required, text) parent identifier, used for
sharding.
- type_code - (required, int) blob type code. See
`corehq.blobs.CODES`.
- key - (optional, text) globally unique blob identifier. A
new key will be generated with `uuid4().hex` if missing or
`None`. This is the key used to store the blob in the external
blob store.
- name - (optional, text) blob name.
- content_length - (optional, int) content length. Will be
calculated from the given content if not given.
- content_type - (optional, text) content type.
- timeout - minimum number of minutes the object will live in
the blobdb. `None` means forever. There are no guarantees on the
maximum time it may live in blob storage.
NOTE: it is important to delete any blobs saved with this method
if it is called within a database transaction that ends up being
rolled back. Otherwise those blobs will be orphaned, meaning
they will be stored in the blob db backend indefinitely, but
their metadata will be lost.
:returns: A `BlobMeta` object. The returned object has a
`key` attribute that may be used to get or delete the blob.
"""
raise NotImplementedError | [
1276
] |
def METHOD_NAME(self, entry):
return {
'id': entry.id,
'input_id': entry.input.id,
'description': entry.description,
'lhs': entry.lhs,
} | [
362,
475,
24,
553
] |
def METHOD_NAME(self):
return self.network_traffic | [
19,
1228,
2219
] |
def METHOD_NAME(stream, data):
try:
stream.write(data)
stream.flush()
except IOError:
pass | [
77,
61,
1579
] |
def METHOD_NAME(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(lbann, current_file, 'get_sample',
'num_samples', 'sample_dims', 'train')
])
message.reader.extend([
tools.create_python_data_reader(lbann, current_file, 'get_sample',
'num_samples', 'sample_dims', 'test')
])
return message | [
363,
365,
781
] |
def METHOD_NAME(self, feats, curr_sample_rate):
def resample(x, factor):
return F.interpolate(x.view(1, 1, -1), scale_factor=factor).squeeze()
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
factor = self.sample_rate / curr_sample_rate
feats = resample(feats, factor)
assert feats.dim() == 1, feats.dim()
return feats | [
1710
] |
def METHOD_NAME(step):
date_txt.value = _dt_step(date_txt.value, step)
on_date_change(date_txt) | [
104,
8175
] |
def METHOD_NAME():
cfg = {"model": DEFAULT_NER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
ner1 = EntityRecognizer(Vocab(), model)
ner1.add_label("C")
ner1.add_label("B")
ner1.add_label("A")
ner1.initialize(lambda: [_ner_example(ner1)])
ner2 = EntityRecognizer(Vocab(), model)
# the second model needs to be resized before we can call from_bytes
ner2.model.attrs["resize_output"](ner2.model, ner1.moves.n_moves)
ner2.from_bytes(ner1.to_bytes())
assert ner1.moves.n_moves == ner2.moves.n_moves
for i in range(ner1.moves.n_moves):
assert ner1.moves.get_class_name(i) == ner2.moves.get_class_name(i) | [
9,
238,
636,
14666,
3705
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME():
return False | [
721,
10902,
1295
] |
def METHOD_NAME(self):
iplt.contourf(self.cube, self.few)
self.check_graphic()
iplt.contourf(self.cube, self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, self.many_levels)
self.check_graphic() | [
9,
434
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(self):
return self['Summary'] + [''] | [
3,
2718
] |
def METHOD_NAME(self, widget):
for i in widget.parent().findChildren(QLayout):
if i.indexOf(widget) > -1:
#print i.layout(), widget.objectName(), i.objectName()
return i.layout(), i.indexOf(widget)
print(widget.parent())
LOG.error('No layout found for {}'.format(widget))
return widget.parent(),None | [
1070
] |
def METHOD_NAME(self):
cleaned_data = super().METHOD_NAME()
apt_number = cleaned_data.get("apt_number")
no_apt_number = cleaned_data.get("no_apt_number")
if apt_number and no_apt_number:
raise ValidationError(
_(
"Please either provide an apartment number or check the "
'"I have no apartment number" checkbox (but not both).'
)
)
if not apt_number and not no_apt_number:
raise ValidationError(
_(
"Please either provide an apartment number or check the "
'"I have no apartment number" checkbox.'
)
)
if "no_apt_number" in cleaned_data:
cleaned_data.pop("no_apt_number")
return cleaned_data | [
1356
] |
def METHOD_NAME(self):
"""
Have the stream unsubscribe from the streaming channel
"""
self.stream.queue(
"/openc3-api/cable",
{"command": "unsubscribe", "identifier": self.streaming_id()},
)
self.stream.unsubscribe("/openc3-api/cable") | [
3560,
307,
5002
] |
def METHOD_NAME(self) -> DagsterRun:
"""DagsterRun: The Dagster run that was executed."""
return self._dagster_run | [
1686,
22
] |
def METHOD_NAME(self, path: Text) -> None:
path = os.path.abspath(path)
if os.path.exists(path) and rasa.shared.data.is_config_file(path):
config = rasa.shared.utils.io.read_config_file(path)
parent_directory = os.path.dirname(path)
self._init_from_dict(config, parent_directory)
else:
rasa.shared.utils.io.raise_warning(
f"'{path}' does not exist or is not a valid config file."
) | [
176,
280,
171
] |
def METHOD_NAME(separator=None):
"""
Returning running OCP semantic version from cluster.
Args:
separator (str): String that would separate major and
minor version numbers
Returns:
semantic_version.base.Version: Object of semantic version for OCP.
"""
# Importing here to avoid circular import
from ocs_ci.utility.utils import get_running_ocp_version
return get_semantic_version(get_running_ocp_version(separator), True) | [
19,
6123,
6124,
1340,
281
] |
def METHOD_NAME(config_file):
# Setup logging
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return create_models(config_file) | [
57
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(self, little_endian):
"""Test the outcome for different endianness."""
qform = QuadraticForm(2, linear=[0, 1], little_endian=little_endian)
circuit = QuantumCircuit(4)
circuit.x(1)
circuit.compose(qform, inplace=True)
# the result is x_0 linear_0 + x_1 linear_1 = 1 = '0b01'
result = "01"
# the state is encoded as |q(x)>|x>, |x> = |x_1 x_0> = |10>
index = (result if little_endian else result[::-1]) + "10"
ref = np.zeros(2**4, dtype=complex)
ref[int(index, 2)] = 1
self.assertTrue(Statevector.from_instruction(circuit).equiv(ref)) | [
9,
8814
] |
def METHOD_NAME(self):
self.object_type, self.event, self.object = get_object_from_args()
if self.object is None:
raise NotFound | [
356,
335
] |
def METHOD_NAME(environ, s3):
s3.create_bucket(Bucket='test-bucket')
s3.put_object(Bucket='test-bucket', Key='test-key', Body=new_cfn_json)
return 'test-bucket', 'test-key', new_cfn_json | [
172,
763
] |
def METHOD_NAME(app: Sphinx) -> None:
"""Synchronize external contents.
Args:
app: Sphinx application instance.
"""
srcdir = Path(app.srcdir).resolve()
to_copy = []
to_delete = set(f for f in srcdir.glob("**/*") if not f.is_dir())
to_keep = set(
f
for k in app.config.external_content_keep
for f in srcdir.glob(k)
if not f.is_dir()
)
for content in app.config.external_content_contents:
prefix_src, glob = content
for src in prefix_src.glob(glob):
if src.is_dir():
to_copy.extend(
[(f, prefix_src) for f in src.glob("**/*") if not f.is_dir()]
)
else:
to_copy.append((src, prefix_src))
for entry in to_copy:
src, prefix_src = entry
dst = (srcdir / src.relative_to(prefix_src)).resolve()
if dst in to_delete:
to_delete.remove(dst)
if not dst.parent.exists():
dst.parent.mkdir(parents=True)
# just copy if it does not exist
if not dst.exists():
shutil.copy(src, dst)
adjust_includes(
dst,
src.parent,
app.config.external_content_directives,
app.config.source_encoding,
)
# if origin file is modified only copy if different
elif src.stat().st_mtime > dst.stat().st_mtime:
with tempfile.TemporaryDirectory() as td:
# adjust origin includes before comparing
src_adjusted = Path(td) / src.name
shutil.copy(src, src_adjusted)
adjust_includes(
src_adjusted,
src.parent,
app.config.external_content_directives,
app.config.source_encoding,
dstpath=dst.parent,
)
if not filecmp.cmp(src_adjusted, dst):
dst.unlink()
shutil.move(os.fspath(src_adjusted), os.fspath(dst))
# remove any previously copied file not present in the origin folder,
# excepting those marked to be kept.
for file in to_delete - to_keep:
file.unlink() | [
164,
192
] |
def METHOD_NAME(instrument, elasticapm_client):
query_string = """
mutation {
createPost(text: "Try this out") {
result {
__typename
}
}
}
"""
schema = graphene.Schema(query=Query, mutation=Mutations)
elasticapm_client.begin_transaction("transaction.test")
with capture_span("test_graphene", "test"):
result = schema.execute(query_string)
assert not result.errors
assert result.data["createPost"]["result"]["__typename"] == "Success"
elasticapm_client.end_transaction("BillingView")
transactions = elasticapm_client.events[TRANSACTION]
spans = elasticapm_client.spans_for_transaction(transactions[0])
expected_signatures = {
"GraphQL.mutation createPost",
"GraphQL.mutation result",
"test_graphene",
}
assert {t["name"] for t in spans} == expected_signatures
assert transactions[0]["name"] == "GraphQL MUTATION createPost" | [
9,
129,
72
] |
def METHOD_NAME(self, msg):
"""
Allocates one chunk of memory for the entire message including
the header which needs to be prepended. This avoids extra allocations
and copying.
Args:
msg: message to be encapsulated by IPA
Returns:
memoryview: Allocated buf of header + length bytes.
"""
buf = memoryview(bytearray(msg.length))
return buf | [
19,
77,
6107
] |
def METHOD_NAME(stream: AbstractStream) -> torch.cuda.Stream:
"""Casts the given stream as :class:`torch.cuda.Stream`."""
return cast(torch.cuda.Stream, stream) | [
947,
2590
] |
def METHOD_NAME(image, mntpnt, check_files, debugdir):
"""
Mount the images and check the coredump files
:return: Format as Bool, List
If the checked file exists
return True, ["checked file name"]
If not, return False []
"""
found_coredump = False
msgs_return = []
try:
error_context.context("Mount the guest image %s to host mount point" %
image,
LOG_JOB.info)
status = lgf.guestmount(image, mntpnt,
True, True, debug=True, is_disk=True)
if status.exit_status:
msgs_return.append("Could not mount guest image %s." % image)
error_context.context(msgs_return[0], LOG_JOB.error)
else:
found_coredump, msgs_return = coredump_exists(mntpnt,
check_files,
debugdir)
finally:
if os.path.ismount(mntpnt):
error_context.context("guestunmount host mount point")
lgf.lgf_command("guestunmount %s" % mntpnt)
return found_coredump, msgs_return | [
250,
3669,
17448
] |
def METHOD_NAME(self):
self.assertReduce('(foo / 2) * 2', 'foo')
self.assertReduce('(foo / 2) * 10', 'foo * 5') | [
9,
9674,
332,
441,
-1
] |
def METHOD_NAME(
self,
process_index: int,
label: str,
internal_address: str,
external_address: Union[str, List[str]],
env: Dict = None,
modules: List[str] = None,
suspend_sigint: bool = False,
use_uvloop: bool = False,
logging_conf: Dict = None,
kwargs: Dict = None,
):
pools: Dict = self._conf["pools"]
if not isinstance(external_address, list):
external_address = [external_address]
pools[process_index] = {
"label": label,
"internal_address": internal_address,
"external_address": external_address,
"env": env,
"modules": modules,
"suspend_sigint": suspend_sigint,
"use_uvloop": use_uvloop,
"logging_conf": logging_conf,
"kwargs": kwargs or {},
}
mapping: Dict = self._conf["mapping"]
for addr in external_address:
mapping[addr] = internal_address | [
238,
1567,
2546
] |
def METHOD_NAME(self, context='dcp') -> bool:
if context.lower() == 'dcp':
return self.is_dcp(dpp=True)
elif context.lower() == 'dgp':
return self.is_dgp(dpp=True)
else:
raise ValueError("Unsupported context ", context) | [
137,
6815
] |
def METHOD_NAME():
with mock.patch.dict(
os.environ,
{
"VDK_AUDIT_HOOK_ENABLED": "True",
"VDK_AUDIT_HOOK_FORBIDDEN_EVENTS_LIST": "os.system",
"VDK_AUDIT_HOOK_EXIT_CODE": "0",
},
):
os._exit = mock.MagicMock()
runner = CliEntryBasedTestRunner(audit_plugin)
result: Result = runner.invoke(
["run", jobs_path_from_caller_directory("os-system-command-job")]
)
print(result.output)
os._exit.assert_called_with(0) | [
9,
1422,
97,
417,
1111,
61,
4422
] |
def METHOD_NAME(cls, string_or_stream: typing.Union[str, typing.IO]) -> 'SeverityRules':
"""Instantiates a new SeverityRules object from YAML rule definitions"""
raw_data = yaml.safe_load(string_or_stream) or {}
rules = cls._parse_raw_severity_rules(raw_data)
return cls(rules) | [
557
] |
def METHOD_NAME(os_, compiler):
if tools.is_apple_os(os_):
return "darwin"
if compiler == "Visual Studio":
return "msvc"
# Assume gn knows about the os
return str(os_).lower() | [
24,
9597,
2773
] |
def METHOD_NAME(x):
return type(x) in [ type(1), type(1.0), type(1j), numpy.ndarray, int, numpy.int_, numpy.int8, numpy.int16, numpy.int32, float, numpy.float_, numpy.float32, numpy.float64, complex, numpy.complex_, numpy.complex64, numpy.complex128 ] | [
137,
1997
] |
def METHOD_NAME(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None | [
211,
590
] |
def METHOD_NAME(self, files, pattern):
""" Takes a file dict {"filepath": "...", "last_modified": "..."} and
a regex pattern string, and returns
files matching that pattern. """
matcher = re.compile(pattern)
LOGGER.info(f"Searching for files for matching pattern: {pattern}")
return [f for f in files if matcher.search(f["filepath"])] | [
19,
1537,
3626,
652
] |
def METHOD_NAME(path, mtime):
os.utime(path, (mtime, mtime)) | [
194,
8406
] |
def METHOD_NAME(self, msg):
"""Given a response from the Perspective server, resolve the Future
with the response or an exception."""
if not msg.get("data"):
return
handler = self._handlers.get(msg["data"].get("id"))
if handler:
future = handler.get("future", None)
keep_alive = handler.get("keep_alive", False)
if keep_alive and handler.get("callback_id"):
# Must look up callback function and execute it, and then
# return without re-setting the result of the Future.
callback = self._callback_id_cache.get(handler["callback_id"])
data = msg["data"]["data"]
if data and isinstance(data, dict):
callback(**data)
elif data:
callback(data)
else:
callback()
return
elif future:
if msg["data"].get("error"):
future.set_exception(PerspectiveError(msg["data"]["error"]))
else:
cmd = handler.get("cmd", None)
if cmd == "view":
future.set_result(PerspectiveViewProxy(self, msg["data"]["data"]))
elif cmd == "table":
future.set_result(PerspectiveTableProxy(self, msg["data"]["data"]))
else:
future.set_result(msg["data"]["data"])
if not keep_alive:
del self._handlers[msg["data"]["id"]] | [
276
] |
def METHOD_NAME(make_organization_and_user_with_plugin_token, make_user_auth_headers):
organization, user, token = make_organization_and_user_with_plugin_token()
client = APIClient()
url = reverse("api-internal:api-organization")
data = {"is_resolution_note_required": True}
assert organization.is_resolution_note_required is False
response = client.put(url, format="json", data=data, **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
organization.refresh_from_db()
assert organization.is_resolution_note_required is True | [
9,
86,
1044,
817
] |
def METHOD_NAME(self):
"""
Return the host from my session's transport.
"""
return self.session.transport.METHOD_NAME() | [
19,
1806
] |
def METHOD_NAME( self, name = defaultName, direction = Gaffer.Plug.Direction.In,
defaultValue = valueType(), flags = Gaffer.Plug.Flags.Default ): | [
176
] |
def METHOD_NAME(self, plain=False, box_style: box.Box = None):
"""
Print this table to the global Console using console.print().
"""
orig_box = self._table.box
if plain:
self._table.box = box.ASCII2
else:
self._table.box = box_style or orig_box
console = rich.get_console()
console.print(self._table)
self._table.box = orig_box | [
1605
] |
def METHOD_NAME():
asset_category = frappe.new_doc("Asset Category")
asset_category.asset_category_name = "Equipment"
asset_category.total_number_of_depreciations = 3
asset_category.frequency_of_depreciation = 3
asset_category.append(
"accounts",
{
"company_name": "_Test Company",
"fixed_asset_account": "_Test Fixed Asset - _TC",
"accumulated_depreciation_account": "_Test Accumulated Depreciations - _TC",
"depreciation_expense_account": "_Test Depreciations - _TC",
},
)
asset_category.insert() | [
129,
3455,
253
] |
def METHOD_NAME():
"""Test that small discrete grid does not lead to duplicates"""
dim = Integer("yolo", "uniform", -2, 4)
assert discrete_grid(dim, 3) == [-2, 0, 2]
assert discrete_grid(dim, 5) == [-2, -1, 0, 1, 2]
assert discrete_grid(dim, 50) == [-2, -1, 0, 1, 2] | [
9,
564,
565,
-1,
753
] |
def METHOD_NAME(self, pad):
return super().METHOD_NAME(pad)[0], 'center', 'right' | [
19,
11240,
-1,
1053
] |
def METHOD_NAME(self, parser):
parser.add_argument(
"--dry-run",
help="Prints what it would do, without doing any of it.",
action="store_true",
) | [
238,
134
] |
def METHOD_NAME(emr_cluster_config):
context = create_test_pipeline_execution_context()
cluster = EmrJobRunner(region=REGION)
cluster_id = cluster.run_job_flow(context.log, emr_cluster_config)
assert cluster.cluster_id_from_name("test-emr") == cluster_id
with pytest.raises(EmrError) as exc_info:
cluster.cluster_id_from_name("cluster-doesnt-exist")
assert "cluster cluster-doesnt-exist not found in region us-west-1" in str(exc_info.value) | [
9,
14076,
147,
280,
156
] |
def METHOD_NAME(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close() | [
214,
171
] |
def METHOD_NAME(
mock_request,
mock_fetch,
webhook_plugin,
tax_data_response,
order,
tax_app,
):
# given
mock_request.return_value = tax_data_response
plugin = webhook_plugin()
webhook = Webhook.objects.create(
name="Tax checkout webhook",
app=tax_app,
target_url="https://localhost:8888/tax-order",
subscription_query=(
"subscription{event{... on CalculateTaxes{taxBase{currency}}}}"
),
)
webhook.events.create(event_type=WebhookEventSyncType.ORDER_CALCULATE_TAXES)
# when
tax_data = plugin.get_taxes_for_order(order, None)
# then
payload = EventPayload.objects.get()
assert payload.payload == json.dumps({"taxBase": {"currency": "USD"}})
delivery = EventDelivery.objects.get()
assert delivery.status == EventDeliveryStatus.PENDING
assert delivery.event_type == WebhookEventSyncType.ORDER_CALCULATE_TAXES
assert delivery.payload == payload
assert delivery.webhook == webhook
mock_request.assert_called_once_with(delivery)
mock_fetch.assert_not_called()
assert tax_data == parse_tax_data(tax_data_response) | [
9,
19,
13913,
43,
852,
41,
164
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"authorizationRuleName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters | [
274,
386
] |
def METHOD_NAME(self):
async def main():
await asyncio.sleep(0)
raise ValueError('spam')
with self.assertRaisesRegex(ValueError, 'spam'):
asyncio.run(main()) | [
9,
1769,
22,
45
] |
def METHOD_NAME(last_price, steepness, i):
return last_price + 1/(2*np.sqrt(steepness*(i+1))) | [
-1,
10090,
2567,
806
] |