text (stringlengths 15-7.82k) | ids (sequencelengths 1-7) |
---|---|
def METHOD_NAME(default_profile):
wrapper = webenginesettings._SettingsWrapper()
return webenginesettings.WebEngineSettings(wrapper) | [
817
] |
def METHOD_NAME() -> Any:
"""Prep-parsed schema for testing."""
path2 = get_data("tests/metaschema-pre.yml")
assert path2
with open(path2) as f:
pre = json.load(f)
return pre | [
13593,
709
] |
def METHOD_NAME(self):
return reverse('article', args=[self.slug]) | [
19,
4653,
274
] |
def METHOD_NAME(exceptions=Exception, tries=-1, delay=0, max_delay=None, backoff=1, jitter=0, logger=logging_logger):
"""Returns a retry decorator.
:param exceptions: an exception or a tuple of exceptions to catch. default: Exception.
:param tries: the maximum number of attempts. default: -1 (infinite).
:param delay: initial delay between attempts. default: 0.
:param max_delay: the maximum value of delay. default: None (no limit).
:param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
:param jitter: extra seconds added to delay between attempts. default: 0.
fixed if a number, random if a range tuple (min, max)
:param logger: logger.warning(fmt, error, delay) will be called on failed attempts.
default: retry.logging_logger. if None, logging is disabled.
:returns: a retry decorator.
"""
@decorator
def retry_decorator(f, *fargs, **fkwargs):
args = fargs if fargs else list()
kwargs = fkwargs if fkwargs else dict()
return __retry_internal(partial(f, *args, **kwargs), exceptions, tries, delay, max_delay, backoff, jitter,
logger)
return retry_decorator | [
2052
] |
def METHOD_NAME(data_import_name):
data_import = frappe.get_doc("Bank Statement Import", data_import_name)
data_import.export_errored_rows() | [
136,
3733,
671
] |
def METHOD_NAME(argspec, constructor, *args, **kwargs):
"""
Return (args, kwargs) matching the argspec object
:param argspec: argspec to use
:type argspec: argspec
:param constructor: is it a constructor?
:type constructor: bool
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: (args, kwargs) matching the function signature
:rtype: tuple
"""
if argspec.varkw:
call_kwarg = kwargs
else:
call_kwarg = dict((k, kwargs[k]) for k in kwargs if k in argspec.args) # pylint:disable=consider-using-dict-items
if argspec.varargs:
call_args = args
else:
call_args = args[:len(argspec.args) - (1 if constructor else 0)]
return call_args, call_kwarg | [
15893,
335
] |
def METHOD_NAME(self):
"""Returns the height of the keyboard plate.
"""
return (self.rows * self.key_width) + self.key_width / 2 | [
1877
] |
def METHOD_NAME(self):
"""Is History allowed currently?"""
return self._block | [
137,
2822
] |
def METHOD_NAME(registry_name: Optional[str] = None,
replication_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationResult:
"""
Gets the properties of the specified replication.
Azure REST API version: 2022-12-01.
:param str registry_name: The name of the container registry.
:param str replication_name: The name of the replication.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['replicationName'] = replication_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry:getReplication', __args__, opts=opts, typ=GetReplicationResult).value
return AwaitableGetReplicationResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
region_endpoint_enabled=pulumi.get(__ret__, 'region_endpoint_enabled'),
status=pulumi.get(__ret__, 'status'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'),
zone_redundancy=pulumi.get(__ret__, 'zone_redundancy')) | [
19,
3185
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(self, type, plugin):
if self.config.get("use_{}_list".format(type)):
plugin_list = self.config.get("{}_list".format(type))
plugin_list = plugin_list.replace(" ", "").replace("\\", "")
plugin_list = plugin_list.replace("|", ",").replace(";", ",")
plugin_list = plugin_list.lower().split(",")
plugin_set = set(plugin_list)
if self.config.get("use_builtin_list"):
builtin_list = getattr(self, "BUILTIN_{}S".format(type.upper()))
plugin_set.update(builtin_list)
plugin_set.difference_update(("", ""))
if not plugin_set:
self.log_info(self._("No {} to handle").format(type))
return
match_list = "|".join(sorted(plugin_set)).replace(".", r"\.")
pattern = self._regexmap[type][1].format(match_list)
self.log_info(
self._("Handle {} {}{}: {}").format(
len(plugin_set),
type,
"" if len(plugin_set) == 1 else "s",
match_list.replace(r"\.", ".").replace("|", ", "),
)
)
else:
plugin_list = []
is_xfs = lambda klass: any(
k.__name__.startswith("XFS") for k in inspect.getmro(klass)
)
for p in self.pyload.plugin_manager.plugins[type].values():
try:
klass = self.pyload.plugin_manager.load_class(type, p["name"])
except AttributeError as exc:
self.log_debug(
exc,
exc_info=self.pyload.debug > 1,
stack_info=self.pyload.debug > 2,
)
continue
if (
hasattr(klass, "PLUGIN_DOMAIN")
and klass.PLUGIN_DOMAIN
and is_xfs(klass)
):
plugin_list.append(klass.PLUGIN_DOMAIN)
unmatch_list = "|".join(sorted(plugin_list)).replace(".", r"\.")
pattern = self._regexmap[type][0].format(unmatch_list)
self.log_info(self._("Auto-discover new {}s").format(type))
return pattern | [
19,
652
] |
def METHOD_NAME(stages: List[WarmupStage]) -> List[WarmupStage]:
last_stage = WarmupStage(policy=WarmupPolicy.NONE, max_iters=1 << 63, value=1.0)
if len(stages) == 0:
return [last_stage]
start_iter = 0
for stage in stages:
assert stage.max_iters > start_iter, (
f"Max iter of the stage {stage} must be greater than the previous "
f"max iter {start_iter}"
)
start_iter = stage.max_iters
if stage.decay_iters <= 0:
if stage.policy == WarmupPolicy.STEP:
stage.decay_iters = 1
else:
stage.decay_iters = stage.max_iters
return stages + [last_stage] | [
6941,
7308
] |
def METHOD_NAME(self):
enrollment = CourseEnrollment.enroll(self.user, self.course.id, CourseMode.VERIFIED)
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
enrollment.schedule.save()
# Test body with incorrect body param (course_key is required)
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course': self.course.id})
assert response.status_code == 400
assert enrollment.schedule == Schedule.objects.get(id=enrollment.schedule.id)
self.assert_no_events_were_emitted()
# Test correct post body
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 200
assert enrollment.schedule.start_date < Schedule.objects.get(id=enrollment.schedule.id).start_date
self.assert_event_emitted(
'edx.ui.lms.reset_deadlines.clicked',
courserun_key=str(self.course.id),
is_masquerading=False,
is_staff=False,
org_key=self.course.org,
user_id=self.user.id,
) | [
9,
656,
10702
] |
def METHOD_NAME(self):
self.assertThat(ExtensionImpl.get_supported_bases(), Equals(("core20",))) | [
9,
616,
7346
] |
def METHOD_NAME(C, rrup, mag):
"""
Returns the distance scaling term
"""
rscale1 = rrup + C["c2"] * (10.0 ** (C["c3"] * mag))
return -np.log10(rscale1) - (C["c4"] * rrup) | [
226,
1886,
500
] |
def METHOD_NAME(self, value):
if self.mode not in ["rw", "wo"]:
raise KeyError(self.name + " register not writable")
datas = []
for i in range(self.length):
datas.append((value >> ((self.length-1-i)*self.data_width)) & (2**self.data_width-1))
self.writefn(self.addr, datas) | [
77
] |
def METHOD_NAME(environment, args):
print(color_notice("\nPreparing to deploy Formplayer to: "), end="")
print(f"{environment.name}\n")
tag_commits = environment.fab_settings_config.tag_deploy_commits
repo = github_repo('dimagi/formplayer', require_write_permissions=tag_commits)
diff = get_deploy_diff(environment, repo)
diff.print_deployer_diff()
context = DeployContext(
service_name="Formplayer",
revision=args.commcare_rev,
diff=diff,
start_time=datetime.utcnow()
)
if not ask('Continue with deploy?', quiet=args.quiet):
return 1
record_deploy_start(environment, context)
rc = run_ansible_playbook_command(environment, args)
if rc != 0:
record_deploy_failed(environment, context)
return rc
rc = commcare_cloud(
args.env_name, 'run-shell-command', 'formplayer',
('supervisorctl reread; '
'supervisorctl update {project}-{deploy_env}-formsplayer-spring; '
'supervisorctl restart {project}-{deploy_env}-formsplayer-spring').format(
project='commcare-hq',
deploy_env=environment.meta_config.deploy_env,
), '-b',
)
if rc != 0:
record_deploy_failed(environment, context)
return rc
record_deploy_success(environment, context)
return 0 | [
2749,
16953
] |
def METHOD_NAME(tup):
"""sort a list of tuples by its second item."""
lst = len(tup)
for i in range(0, lst):
for j in range(0, lst - i - 1):
if tup[j][1] > tup[j + 1][1]:
temp = tup[j]
tup[j] = tup[j + 1]
tup[j + 1] = temp
return tup | [
266,
1815
] |
async def METHOD_NAME(self):
return | [
537,
5067
] |
def METHOD_NAME() -> Optional[str]:
value = os.getenv("LIGHTNING_CLOUD_QUEUE_TYPE", None)
if value is None and enable_interruptible_works():
value = "http"
return value | [
19,
4054,
651,
44
] |
def METHOD_NAME():
METHOD_NAME = unittest.TestSuite()
METHOD_NAME.addTest(unittest.makeSuite(TestInitialAssignment))
return METHOD_NAME | [
482
] |
def METHOD_NAME(self, auth: Authentication, request: DecodeRequest) -> DecodeRequestResult:
request_json: str = json.dumps(asdict(request))
params = {
"auth": json.dumps(asdict(auth)),
"request": request_json,
}
response = requests.get(f"{self.base_url}/api/decode?{urllib.parse.urlencode(params)}").json()
RemoteService._check_response(response, request_json)
return from_dict(DecodeRequestResult, response) | [
1268
] |
def METHOD_NAME(self, asn: int) -> AutonomousSystem:
"""!
@brief Retrieve an existing AutonomousSystem.
@param asn ASN of the AS.
@returns AS.
@throws AssertionError if asn does not exist.
"""
assert asn in self.__ases, "as{} does not exist.".format(asn)
return self.__ases[asn] | [
19,
5759,
112
] |
def METHOD_NAME(self):
pass | [
1032,
280,
200
] |
def METHOD_NAME(
conference_factory,
day_factory,
slot_factory,
room,
admin_graphql_client,
schedule_item_factory,
):
conference = conference_factory(
start=datetime(2020, 4, 2, tzinfo=pytz.UTC),
end=datetime(2020, 4, 2, tzinfo=pytz.UTC),
)
day = day_factory(conference=conference, day=date(2020, 4, 2))
slot = slot_factory(day=day, hour=time(8, 45), duration=60)
slot_2 = slot_factory(day=day, hour=time(8, 45), duration=60)
item = schedule_item_factory(slot=slot, submission=None, type="submission")
resp = admin_graphql_client.query(
"""
mutation($input: UpdateOrCreateSlotItemInput!) {
updateOrCreateSlotItem(input: $input) {
... on UpdateOrCreateSlotItemResult {
updatedSlots {
id
items {
id
type
title
}
}
}
}
}
""",
variables={
"input": {"slotId": slot_2.id, "itemId": item.id, "rooms": [room.id]}
},
)
assert "errors" not in resp
updated_slots = resp["data"]["updateOrCreateSlotItem"]["updatedSlots"]
assert updated_slots[0]["id"] == str(slot.id)
assert updated_slots[1]["id"] == str(slot_2.id)
assert updated_slots[1]["items"] == [
{"id": str(item.id), "title": item.title, "type": item.type}
] | [
9,
2004,
1024
] |
def METHOD_NAME(self) -> InsightsUserRolesList:
warn(
"insights_user_roles is deprecated. Use v1.insights_user_roles instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.METHOD_NAME | [
1689,
21,
2173
] |
def METHOD_NAME(
lam: Any,
shape: Any,
seed: Any = None,
dtype: tf.DType = ztypes.float,
name: Any = None,
):
if seed is None:
seed = get_prng().make_seeds(1)[:, 0]
return tf.random.stateless_poisson(
lam=lam, seed=seed, shape=shape, dtype=dtype, name=name
) | [
1638
] |
def METHOD_NAME(self, N): | [
9,
2003,
1154
] |
def METHOD_NAME(self, method, path, postdata):
'''
Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response() | [
377
] |
def METHOD_NAME(request, when, line):
"""Write line instantly."""
terminal = request.config.pluginmanager.getplugin('terminalreporter')
if terminal.verbosity <= 0:
return
capman = request.config.pluginmanager.getplugin('capturemanager')
outerr = capman.suspend_global_capture()
try:
if outerr is not None:
out, err = outerr
request.node.add_report_section(when, 'out', out)
request.node.add_report_section(when, 'err', err)
terminal.METHOD_NAME(line)
finally:
capman.resume_global_capture() | [
77,
534
] |
def METHOD_NAME(self):
m = SluggedTestModel(title="foo")
m.save()
m = SluggedTestModel(title="foo")
m.save()
self.assertEqual(m.slug, "foo-2") | [
9,
803,
129,
243,
1231
] |
def METHOD_NAME(s: str, length: int, end: str = '...') -> str:
if len(s) <= length:
return s
return s[:length - len(end)] + end | [
5419
] |
def METHOD_NAME(
client: Client,
cell: NotebookCell,
proxy_url: str = None,
reactivate: bool = False,
**kwargs: t.Dict[str, t.Union[str, bool]],
) -> None:
iframe = IPython.display.IFrame(
focx.get_url(
cell.address,
cell.port,
notebook=True,
proxy_url=proxy_url,
subscription=cell.subscription,
**kwargs,
),
height=cell.height,
width="100%",
)
if reactivate:
cell.handle.update(iframe)
else:
cell.handle.display(iframe) | [
52,
4741
] |
def METHOD_NAME(monkeypatch, mocker, app):
"""
Given an handler for the /get-identity endpoint
When authentication is enabled
When identity token is missing some user attributes
Then it should fallback to the attributes contained in the access token
"""
monkeypatch.setattr('api.PclusterApiHandler.USER_ROLES_CLAIM', 'user_roles_claim')
accessTokenDecoded = {
'user_roles_claim': ['access-token-group'],
'username': 'access-token-username',
'email': 'access-token-email'
}
identityTokenDecoded = {
'email': 'id-token-email'
}
with app.test_request_context(headers={'Cookie': 'accessToken=access-token; idToken=identity-token'}):
mocker.patch('api.PclusterApiHandler.jwt_decode', side_effect=[accessTokenDecoded, identityTokenDecoded])
assert get_identity() == {
'attributes': {'email': 'id-token-email'},
'user_roles': ['access-token-group'],
'username': 'access-token-username'
} | [
9,
19,
2989,
2433,
1111,
1089,
1735
] |
def METHOD_NAME(self):
pass | [
72,
710
] |
def METHOD_NAME(
L: ArrayLike,
constants: Structure = CONSTANTS_DAVINCI_INTERMEDIATE,
) -> NDArrayFloat:
"""
Define the *DaVinci Intermediate* opto-electronic transfer function.
Parameters
----------
L
Linear light value :math:`L`.
constants
*DaVinci Intermediate* colour component transfer function constants.
Returns
-------
:class:`numpy.ndarray`
Encoded value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``L`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`BlackmagicDesign2020a`
Examples
--------
>>> oetf_DaVinciIntermediate(0.18) # doctest: +ELLIPSIS
0.3360432...
"""
L = to_domain_1(L)
DI_LIN_CUT = constants.DI_LIN_CUT
DI_A = constants.DI_A
DI_B = constants.DI_B
DI_C = constants.DI_C
DI_M = constants.DI_M
V_out = np.where(
L <= DI_LIN_CUT,
L * DI_M,
DI_C * (np.log2(L + DI_A) + DI_B),
)
return as_float(from_range_1(V_out)) | [
3580,
8224,
16604,
5086
] |
def METHOD_NAME(self, cleanup_at_exit=True):
if self.is_running():
if self.tor_daemon_proc is None:
raise Exception(
"Tor Daemon running but not via this Controller instance."
)
return
self.tor_daemon_proc = subprocess.Popen(
f'{"exec " if platform.system() != "Windows" else ""}"{self.tor_daemon_path}" --defaults-torrc {self.tor_config_path}',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
logger.debug(
"Running Tor daemon process with pid {}".format(self.tor_daemon_proc.pid)
) | [
447,
9783,
1687
] |
async def METHOD_NAME(client):
resp = await client.get("/fail")
assert resp.status == web.HTTPServiceUnavailable.status_code
assert_healthy_app(client.app) | [
9,
13874,
69,
374
] |
def METHOD_NAME(api_url: str, openapi_dict: dict) -> list[inference.PluginOpenAPIEndpoint]:
endpoints = []
base_url = openapi_dict.get("servers", [{}])[0].get("url")
if base_url is not None:
parsed_link = urlsplit(api_url)
base_url = (
f"{parsed_link.scheme}://{parsed_link.netloc}{base_url}" if not urlsplit(base_url).scheme else base_url
)
for path, methods in openapi_dict.get("paths", {}).items():
for method, details in methods.items():
endpoints.append(parse_plugin_endpoint(api_url, method, details, base_url, path, openapi_dict))
return endpoints | [
19,
2793,
1197
] |
def METHOD_NAME(contents):
pass | [
-1
] |
def METHOD_NAME(project, ticket_event_list):
for ticket_event_dict in ticket_event_list:
ticket_event = models.TicketEvent.upsert(
project,
current_title=ticket_event_dict.get('title'),
title=ticket_event_dict.get('title'),
project=project,
)
for ticket_type_title in ticket_event_dict.get('ticket_types'):
ticket_type = models.TicketType.upsert(
project,
current_name=None,
current_title=ticket_type_title,
project=project,
title=ticket_type_title,
)
ticket_event.ticket_types.append(ticket_type) | [
2278,
5592
] |
def METHOD_NAME(self) -> bool:
"""Is the atom log-log convex?
"""
return True | [
137,
4637,
390,
390,
5066
] |
def METHOD_NAME(cls, lib):
match = re.search(r"lib\S*\.so\.\d+\.\d+\.(\d)(\d\d)(\d\d)", lib)
if match:
ver = "{0}.{1}.{2}".format(
int(match.group(1)), int(match.group(2)), int(match.group(3))
)
else:
ver = None
return ver | [
2982,
281
] |
def METHOD_NAME(data):
for marker in GENERATED_GO_MARKERS:
if marker in data:
return True
return False | [
137,
4207
] |
def METHOD_NAME(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self, store, load_fn, target_name, idx, parameters, is_placeholder=False):
was_loading = self._loading
sprite, x, y, w, h = load_fn(*parameters,
lambda: GLib.idle_add(
partial(self._reload_icon, parameters, idx, store, load_fn, target_name, was_loading)
))
if is_placeholder:
ctx = cairo.Context(sprite)
ctx.set_source_rgb(*ORANGE_RGB)
ctx.rectangle(0, 0, w, h)
ctx.set_operator(cairo.OPERATOR_IN)
ctx.fill()
data = bytes(sprite.get_data())
# this is painful.
new_data = bytearray()
for b, g, r, a in grouper(data, 4):
new_data += bytes([r, g, b, a])
self._icon_pixbufs[target_name] = GdkPixbuf.Pixbuf.new_from_data(
new_data, GdkPixbuf.Colorspace.RGB, True, 8, w, h, sprite.get_stride()
)
return self._icon_pixbufs[target_name] | [
19,
875
] |
def METHOD_NAME(tag_expression_parts):
"""Parse old style tag-expressions and build a TagExpression object."""
# -- HINT: DEPRECATING
if isinstance(tag_expression_parts, six.string_types):
tag_expression_parts = tag_expression_parts.split()
elif not isinstance(tag_expression_parts, (list, tuple)):
raise TypeError("EXPECTED: string, sequence<string>", tag_expression_parts)
# print("parse_tag_expression_v1: %s" % " ".join(tag_expression_parts))
return TagExpression(tag_expression_parts) | [
214,
82,
1120,
3392
] |
def METHOD_NAME(user_factory, post_factory):
user1 = user_factory()
user2 = user_factory()
db.session.add(user1)
db.session.flush()
assert user1.comment_count == 0
post1 = post_factory()
post2 = post_factory()
db.session.add_all(
[
model.PostFavorite(post=post1, time=datetime.utcnow(), user=user1),
model.PostFavorite(post=post2, time=datetime.utcnow(), user=user2),
]
)
db.session.flush()
db.session.refresh(user1)
assert user1.favorite_post_count == 1 | [
9,
8065,
29
] |
def METHOD_NAME(infrastructure: dict) -> str:
return infrastructure.get("VersionedBucket", "") | [
8969,
2538,
156
] |
def METHOD_NAME(self):
"""Test initialization."""
assert self.request.label == TestConfig.test_label
assert self.request.did == TestConfig.test_did
assert self.request.goal_code == TestConfig.goal_code
assert self.request.goal == TestConfig.goal | [
9,
176
] |
def METHOD_NAME(self): | [
136,
277
] |
def METHOD_NAME(self) -> list[bytes]:
"""
The application layer protocols offered by the client as part of the
[ALPN](https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation) TLS extension.
"""
if ext := getattr(self._client_hello, "extensions", None):
for extension in ext.extensions:
if extension.type == 0x10:
return list(x.name for x in extension.body.METHOD_NAME)
return [] | [
11974,
1312
] |
def METHOD_NAME(self) -> tuple[int, int]: ... | [
-1
] |
def METHOD_NAME(self):
non_org1_users = ["user2", "worker3"]
for user in non_org1_users:
self._test_cannot_see_memberships(user, org_id=1) | [
9,
256,
3563,
2286,
2049,
3563,
2397
] |
def METHOD_NAME(self):
return self.column_family | [
19,
105,
6498
] |
def METHOD_NAME(self):
responses.add(
method=responses.POST,
url="https://example.com/sentry/alert-rule",
status=200,
body=bytes(self.success_message, encoding="utf-8"),
)
result = AlertRuleActionRequester.run(
install=self.install,
uri="/sentry/alert-rule",
fields=self.fields,
)
assert result["success"]
assert result["message"] == f"{self.sentry_app.name}: {DEFAULT_SUCCESS_MESSAGE}" | [
9,
1670,
3163,
377,
41,
1193,
277
] |
def METHOD_NAME(i: int, tokens: list[Token]) -> None:
j = find_op(tokens, i, '(')
if (
tokens[j + 1].name == 'STRING' and
tokens[j + 1].src.isascii() and
tokens[j + 2].src == ')'
):
func_args, end = parse_call_args(tokens, j)
replace_call(tokens, i, end, func_args, 'b{args[0]}') | [
1112,
26,
1484
] |
def METHOD_NAME(
*, db_session, project_id: int, case_severity_in=CaseSeverityRead
) -> CaseSeverity:
"""Returns the case severity specified or raises ValidationError."""
case_severity = get_by_name(
db_session=db_session, project_id=project_id, name=case_severity_in.name
)
if not case_severity:
raise ValidationError(
[
ErrorWrapper(
NotFoundError(
msg="Case severity not found.",
case_severity=case_severity_in.name,
),
loc="case_severity",
)
],
model=CaseSeverityRead,
)
return case_severity | [
19,
604,
156,
894,
241
] |
def METHOD_NAME(self):
"""Number of items in the queue."""
return len(self._queue) | [
10723
] |
def METHOD_NAME(self, name):
return self._var(name).as_dict(meta_only=True) | [
19,
1094
] |
def METHOD_NAME(self, data_shape):
return finalize_image_shape(self.dst_height, self.dst_width, data_shape) | [
1593,
1737,
97,
555
] |
def METHOD_NAME(func):
# type: (F) -> F
def _sentry_execute(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(HueyIntegration) is None:
return func(*args, **kwargs)
try:
result = func(*args, **kwargs)
except Exception:
exc_info = sys.exc_info()
_capture_exception(exc_info)
reraise(*exc_info)
return result
return _sentry_execute # type: ignore | [
503,
758,
750
] |
def METHOD_NAME(self):
"""Removes the drawn cursor position."""
self.textEdit().setExtraSelections([]) | [
537,
3230
] |
def METHOD_NAME(self, env: BuildEnvironment) -> str:
"""Read content from source and do post-process."""
content = self.source.read()
# emit "source-read" event
arg = [content]
env.events.emit('source-read', env.docname, arg)
return arg[0] | [
203,
1458
] |
def METHOD_NAME(wandb_init):
"""Ensure that `wandb.init(dir=DIR)` creates the proper directory and nothing else."""
default_path = os.path.join(os.getcwd(), "wandb")
dir_name = "dir_custom"
custom_dir_path = os.path.join(os.getcwd(), dir_name)
# test for the case that the dir is set
reload_fn(wandb)
_remove_dir_if_exists(default_path)
if not os.path.isdir(custom_dir_path):
os.makedirs(custom_dir_path)
run = wandb_init(dir="./" + dir_name)
run.finish()
assert not os.path.isdir(default_path), f"Unexpected directory at {default_path}"
assert os.path.isdir(custom_dir_path), f"Expected directory at {custom_dir_path}"
# And for the duplicate-run case
_remove_dir_if_exists(default_path)
run = wandb_init(dir="./" + dir_name)
run.finish()
assert not os.path.isdir(default_path), f"Unexpected directory at {default_path}"
assert os.path.isdir(custom_dir_path), f"Expected directory at {custom_dir_path}" | [
9,
1190,
69,
176,
1190
] |
def METHOD_NAME(self):
try:
self.assertEqual(np.float32(3.52),3.5)
except AssertionError as e:
if not str(e).startswith("Floats not almost equal to 6 decimals"):
raise self.failureException("Numpy float mismatch error not raised.") | [
9,
1819,
5565,
17212
] |
def METHOD_NAME(
plasmod_solver: Solver,
show: bool = True,
f: Optional[plt.Figure] = None,
ax: Optional[plt.Axes] = None,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot a default set of profiles from a PLASMOD solver.
Parameters
----------
plasmod_solver:
Solver for which to plot profiles
show:
Whether or not to show the plot
f:
Matplotlib figure
ax:
Array of matplotlib Axes
Returns
-------
f:
Matplotlib figure
ax:
Array of matplotlib Axes
"""
plot_defaults()
if f is None and ax is None:
f, ax = plt.subplots(2, 3, figsize=(18, 10))
rho = plasmod_solver.get_profile("x")
R_0 = plasmod_solver.params.R_0.value
# Corrected flux function profiles (used as output profiles)
pprime = plasmod_solver.get_profile("pprime")
ffprime = plasmod_solver.get_profile("ffprime")
# Current density profile reconstruction from flux functions
jpar_recon = 2 * np.pi * (R_0 * pprime + ffprime / (MU_0 * R_0))
# Temperature profiles
ti = plasmod_solver.get_profile("Ti")
te = plasmod_solver.get_profile("Te")
ax[0, 0].plot(rho, ti, label="$T_{i}$")
ax[0, 0].plot(rho, te, label="$T_{e}$")
ax[0, 0].set_ylabel("Temperature [keV]")
# Current profiles
jpar = plasmod_solver.get_profile("jpar")
jbs = plasmod_solver.get_profile("jbs")
jcd = plasmod_solver.get_profile("jcd")
ax[0, 1].plot(rho, jpar, label="$j_{||}$")
ax[0, 1].plot(rho, jbs, label="$j_{BS}$")
ax[0, 1].plot(rho, jcd, label="$j_{CD}$")
ax[0, 1].plot(rho, jpar_recon, linestyle="--", label="$j_{p', FF'}$")
ax[0, 1].set_ylabel("Current density [A/m²]")
# Density profiles
ni = plasmod_solver.get_profile("n_ion")
ne = plasmod_solver.get_profile("n_e")
ax[1, 0].plot(rho, ni, label="$n_{i}$")
ax[1, 0].plot(rho, ne, label="$n_{e}$")
ax[1, 0].set_ylabel("Density [10¹⁹/m³]")
# q profile
qprof = plasmod_solver.get_profile("q")
ax[1, 1].plot(rho, qprof, label="$q$")
ax[1, 1].set_ylabel("Safety factor")
# Flux functions
ax[0, 2].plot(rho, pprime, label="$p'_{*corr}$")
ax[0, 2].set_ylabel("[Pa/Wb]")
axi: plt.Axes = ax[0, 2]
axi.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
ax[1, 2].plot(rho, ffprime, label="$FF'_{*corr}$")
ax[1, 2].set_ylabel("[T]")
for axe in ax.flat:
axe.grid()
axe.set_xlabel("$\\rho$")
axe.set_xlim([0.0, 1.0])
axe.legend(loc="best")
plt.subplots_adjust(hspace=0.3, wspace=0.3)
if show:
plt.show()
return f, ax | [
1288,
235,
1348
] |
def METHOD_NAME(self):
layer_with_1_group = layers.GroupNormalization(
groups=1, axis=-1, scale=False, center=False
)
layer_with_2_groups = layers.GroupNormalization(
groups=2, axis=2, scale=False, center=False
)
inputs = np.array([[[-1.0, -1.0, 2.0, 2.0], [1.0, 1.0, 0, -2.0]]])
expected_output_1_group = np.array(
[[[-0.898, -0.898, 1.257, 1.257], [0.539, 0.539, -0.180, -1.616]]]
)
self.assertAllClose(
layer_with_1_group(inputs),
expected_output_1_group,
atol=1e-3,
)
expected_output_2_groups = np.array(
[[[-1.0, -1.0, 0.904, 0.904], [1.0, 1.0, -0.301, -1.507]]]
)
self.assertAllClose(
layer_with_2_groups(inputs),
expected_output_2_groups,
atol=1e-3,
) | [
9,
7050,
1085
] |
def METHOD_NAME(dicom_image, dicom_struct,
struct_name_sequence,
spacing_override=None,
interrupt_flag=None):
"""Converts a set of points from a DICOM RTSTRUCT into a mask array.
This function is modified from the function
platipy.dicom.io.transform_point_set_from_dicom_struct to align with
the specific usage of OnkoDICOM.
Args:
dicom_image (sitk.Image): The reference image
dicom_struct (pydicom.Dataset): The DICOM RTSTRUCT
struct_name_sequence: the name of ROIs to be transformed
spacing_override (list): The spacing to override. Defaults to None
interrupt_flag: interrupt flag to stop the process
Returns:
tuple: Returns a list of masks and a list of structure names
"""
if spacing_override:
current_spacing = list(dicom_image.GetSpacing())
new_spacing = tuple(
[
current_spacing[k] if spacing_override[k] == 0 else
spacing_override[k]
for k in range(3)
]
)
dicom_image.SetSpacing(new_spacing)
struct_point_sequence = dicom_struct.ROIContourSequence
all_name_sequence = [
"_".join(i.ROIName.split()) for i in
dicom_struct.StructureSetROISequence
]
# find corresponding rois in roi contour sequence
roi_indexes = {}
for index, roi_name in enumerate(all_name_sequence):
if roi_name in struct_name_sequence:
roi_indexes[roi_name] = index
struct_list = []
final_struct_name_sequence = []
for struct_name in roi_indexes.keys():
if interrupt_flag is not None and \
not check_interrupt_flag(interrupt_flag):
return [], []
struct_index = roi_indexes[struct_name]
image_blank = np.zeros(dicom_image.GetSize()[::-1], dtype=np.uint8)
logging.debug(
"Converting structure {0} with name: {1}".format(struct_index,
struct_name))
if not hasattr(struct_point_sequence[struct_index], "ContourSequence"):
logging.debug(
"No contour sequence found for this structure, skipping.")
continue
if len(struct_point_sequence[struct_index].ContourSequence) == 0:
logging.debug(
"Contour sequence empty for this structure, skipping.")
continue
for sl in range(
len(struct_point_sequence[struct_index].ContourSequence)):
if interrupt_flag is not None and \
not check_interrupt_flag(interrupt_flag):
return [], []
contour_data = fix_missing_data(
struct_point_sequence[struct_index].ContourSequence[
sl].ContourData
)
struct_slice_contour_data = np.array(contour_data, dtype=np.double)
vertex_arr_physical = struct_slice_contour_data.reshape(
struct_slice_contour_data.shape[0] // 3, 3
)
point_arr = np.array(
[dicom_image.TransformPhysicalPointToIndex(i) for i in
vertex_arr_physical]
).T
[x_vertex_arr_image, y_vertex_arr_image] = point_arr[[0, 1]]
z_index = point_arr[2][0]
if np.any(point_arr[2] != z_index):
logging.debug(
"Error: axial slice index varies in contour. Quitting now.")
logging.debug("Structure: {0}".format(struct_name))
logging.debug("Slice index: {0}".format(z_index))
quit()
if z_index >= dicom_image.GetSize()[2]:
logging.debug(
"Warning: Slice index greater than image size. Skipping "
"slice.")
logging.debug("Structure: {0}".format(struct_name))
logging.debug("Slice index: {0}".format(z_index))
continue
slice_arr = np.zeros(image_blank.shape[-2:], dtype=np.uint8)
filled_indices_x, filled_indices_y = polygon(
x_vertex_arr_image, y_vertex_arr_image, shape=slice_arr.shape
)
slice_arr[filled_indices_y, filled_indices_x] = 1
image_blank[z_index] += slice_arr
struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
struct_image.CopyInformation(dicom_image)
struct_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
final_struct_name_sequence.append(struct_name)
return struct_list, final_struct_name_sequence | [
1053,
1669,
0,
280,
7087,
1755
] |
def METHOD_NAME(self):
pass | [
72,
710
] |
def METHOD_NAME(self):
"""
Handle POST request, especially replying to a chat message.
"""
if self.path == '/interact':
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
self._interactive_running(body)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
model_response = {'id': 'Model', 'episode_done': False}
message_available.wait()
model_response['text'] = new_message
message_available.clear()
json_str = json.dumps(model_response)
self.wfile.write(bytes(json_str, 'utf-8'))
elif self.path == '/reset':
self._interactive_running(b"[RESET]")
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{}", 'utf-8'))
message_available.wait()
message_available.clear()
else:
return self._respond({'status': 500}) | [
74,
72
] |
def METHOD_NAME(
module, conda, installed, name, version, installed_version):
"""
Install a package at a specific version, or install a missing package at
the latest version if no version is specified.
"""
if installed and (version is None or installed_version == version):
module.exit_json(changed=False, name=name, version=version)
if module.check_mode:
if not installed or (installed and installed_version != version):
module.exit_json(changed=True)
if version:
install_str = name + '=' + version
else:
install_str = name
command = [
conda,
'install',
'--yes',
install_str
]
command = _add_channels_to_command(command, module.params['channels'])
command = _add_extras_to_command(command, module.params['extra_args'])
rc, stdout, stderr = module.run_command(command)
if rc != 0:
module.fail_json(msg='failed to install package ' + name)
module.exit_json(
changed=True, name=name, version=version, stdout=stdout, stderr=stderr) | [
428,
360
] |
def METHOD_NAME( self ) : | [
9,
748
] |
def METHOD_NAME(self, key):
if key == self.minimum or key == self.maximum:
return True
if self.universe_size == 2:
return False
return self.cluster[key // int(math.sqrt(self.universe_size))].METHOD_NAME(
key % int(math.sqrt(self.universe_size))) | [
1992
] |
def METHOD_NAME(microstates, lifetimes, out, ax_prop=None, ax_distrib=None):
states = np.unique(microstates)
# Plot
if ax_prop is None and ax_distrib is None:
fig, axes = plt.subplots(ncols=2)
ax_prop = axes[0]
ax_distrib = axes[1]
else:
fig = None
for s in states:
ax_prop.bar(s, out[str(s) + "_Proportion"])
ax_distrib.plot(lifetimes[s], label=str(s))
plt.legend()
ax_prop.set_xticks(range(len(states)))
ax_prop.set_title("Proportion")
ax_distrib.set_title("Lifetime Distribution")
return fig | [
10228,
-1,
1288
] |
def METHOD_NAME(self, node): | [
84
] |
def METHOD_NAME(self):
super(NnmClubTrackerTest, self).METHOD_NAME()
cloudflare_challenge_solver_settings = CloudflareChallengeSolverSettings(False, 10000, False, False, 0)
self.tracker_settings = TrackerSettings(10, None, cloudflare_challenge_solver_settings)
self.tracker = NnmClubTracker()
self.tracker.tracker_settings = self.tracker_settings | [
0,
1
] |
def METHOD_NAME(self, config: Mapping[str, Any], amount_of_days_allowed_for_lookup: int = 89):
config = copy.deepcopy(config)
today = pendulum.today()
latest_date_allowed_by_api = today.subtract(days=amount_of_days_allowed_for_lookup)
start_date = config["start_date"]
if not start_date:
config["start_date"] = latest_date_allowed_by_api
else:
try:
config["start_date"] = pendulum.from_format(config["start_date"], "YYYY-MM-DD")
except ValueError:
message = "Entered `Start Date` does not match format YYYY-MM-DD"
raise AirbyteTracedException(
message=message,
internal_message=message,
failure_type=FailureType.config_error,
)
if (today - config["start_date"]).days > amount_of_days_allowed_for_lookup:
config["start_date"] = latest_date_allowed_by_api
return config | [
187,
61,
1053
] |
def METHOD_NAME(tmp_path):
"""This test requires the package in tests/resources/mlflow-test-plugin to be installed"""
from mlflow_test_plugin.dummy_dataset_source import DummyDatasetSource
registry = DatasetSourceRegistry()
registry.register_entrypoints()
uri = "dummy:" + str(tmp_path)
resolved_source = registry.resolve(uri)
assert isinstance(resolved_source, DummyDatasetSource)
# Verify that the DummyDatasetSource is constructed with the correct URI
assert resolved_source.uri == uri | [
9,
372,
3641,
61,
1014
] |
def METHOD_NAME(self, o: object) -> bool:
if self == o:
return True
if not isinstance(o, ModbusPDUWriteSingleCoilRequest):
return False
that: ModbusPDUWriteSingleCoilRequest = ModbusPDUWriteSingleCoilRequest(o)
return (
(self.address == that.address)
and (self.value == that.value)
and super().METHOD_NAME(that)
and True
) | [
816
] |
def METHOD_NAME(self,lineno,line):
"""
read a tooltable line
if an entry was parsed successfully, return a Tool() instance
"""
line = line.rstrip("\n")
if re.match(r'\A\s*T\d+', line):
semi = line.find(";")
if semi != -1:
comment = line[semi+1:]
else:
comment = None
entry = line.split(';')[0]
result = dict()
for field in entry.split():
(name,value) = re.search(r'([a-zA-Z])([+-]?\d*\.?\d*)',field).groups()
if name:
key = name.upper()
result[key] = EmcToolTable.ttype[key](value)
else:
print("%s:%d bad line: '%s' " % (self.filename, lineno, entry))
result['comment'] = comment
return result
print("%s:%d: unrecognized tool table entry '%s'" % (self.filename,lineno,line)) | [
11448
] |
def METHOD_NAME(self):
return self._keys(self, '') | [
219
] |
def METHOD_NAME( model, opts ):
casadi_version = CasadiMeta.version()
casadi_opts = dict(mex=False, casadi_int='int', casadi_real='double')
if casadi_version not in (ALLOWED_CASADI_VERSIONS):
casadi_version_warning(casadi_version)
generate_hess = opts["generate_hess"]
code_export_dir = opts["code_export_directory"]
# load model
x = model.x
u = model.u
p = model.p
f_expl = model.f_expl_expr
model_name = model.name
## get model dimensions
nx = x.size()[0]
nu = u.size()[0]
if isinstance(f_expl, casadi.MX):
symbol = MX.sym
elif isinstance(f_expl, casadi.SX):
symbol = SX.sym
else:
raise Exception("Invalid type for f_expl! Possible types are 'SX' and 'MX'. Exiting.")
## set up functions to be exported
Sx = symbol('Sx', nx, nx)
Sp = symbol('Sp', nx, nu)
lambdaX = symbol('lambdaX', nx, 1)
fun_name = model_name + '_expl_ode_fun'
## Set up functions
expl_ode_fun = Function(fun_name, [x, u, p], [f_expl])
vdeX = jtimes(f_expl,x,Sx)
vdeP = jacobian(f_expl,u) + jtimes(f_expl,x,Sp)
fun_name = model_name + '_expl_vde_forw'
expl_vde_forw = Function(fun_name, [x, Sx, Sp, u, p], [f_expl, vdeX, vdeP])
adj = jtimes(f_expl, vertcat(x, u), lambdaX, True)
fun_name = model_name + '_expl_vde_adj'
expl_vde_adj = Function(fun_name, [x, lambdaX, u, p], [adj])
if generate_hess:
S_forw = vertcat(horzcat(Sx, Sp), horzcat(DM.zeros(nu,nx), DM.eye(nu)))
hess = mtimes(transpose(S_forw),jtimes(adj, vertcat(x,u), S_forw))
hess2 = []
for j in range(nx+nu):
for i in range(j,nx+nu):
hess2 = vertcat(hess2, hess[i,j])
fun_name = model_name + '_expl_ode_hess'
expl_ode_hess = Function(fun_name, [x, Sx, Sp, lambdaX, u, p], [adj, hess2])
## generate C code
if not os.path.exists(code_export_dir):
os.makedirs(code_export_dir)
cwd = os.getcwd()
os.chdir(code_export_dir)
model_dir = model_name + '_model'
if not os.path.exists(model_dir):
os.mkdir(model_dir)
model_dir_location = os.path.join('.', model_dir)
os.chdir(model_dir_location)
fun_name = model_name + '_expl_ode_fun'
expl_ode_fun.generate(fun_name, casadi_opts)
fun_name = model_name + '_expl_vde_forw'
expl_vde_forw.generate(fun_name, casadi_opts)
fun_name = model_name + '_expl_vde_adj'
expl_vde_adj.generate(fun_name, casadi_opts)
if generate_hess:
fun_name = model_name + '_expl_ode_hess'
expl_ode_hess.generate(fun_name, casadi_opts)
os.chdir(cwd)
return | [
567,
2629,
544,
1086,
9940
] |
def METHOD_NAME(self, dy_model, metrics_list, batch_data, config):
sentence_left_size = config.get("hyper_parameters.sentence_left_size")
sentence_right_size = config.get(
"hyper_parameters.sentence_right_size")
batch_size = config.get("runner.train_batch_size", 128)
inputs = self.create_feeds(batch_data, sentence_left_size,
sentence_right_size)
prediction = dy_model.forward(inputs)
loss = self.create_loss(prediction)
# update metrics
print_dict = {"loss": loss}
return loss, metrics_list, print_dict | [
849,
76
] |
def METHOD_NAME(self, masterid, name):
log.msg(f"doing housekeeping for master {masterid} {name}")
# common code for deactivating a master
yield self.master.data.rtypes.worker._masterDeactivated(
masterid=masterid)
yield self.master.data.rtypes.builder._masterDeactivated(
masterid=masterid)
yield self.master.data.rtypes.scheduler._masterDeactivated(
masterid=masterid)
yield self.master.data.rtypes.changesource._masterDeactivated(
masterid=masterid)
# for each build running on that instance..
builds = yield self.master.data.get(('builds',),
filters=[resultspec.Filter('masterid', 'eq',
[masterid]),
resultspec.Filter('complete', 'eq', [False])])
for build in builds:
# stop any running steps..
steps = yield self.master.data.get(
('builds', build['buildid'], 'steps'),
filters=[resultspec.Filter('results', 'eq', [None])])
for step in steps:
# finish remaining logs for those steps..
logs = yield self.master.data.get(
('steps', step['stepid'], 'logs'),
filters=[resultspec.Filter('complete', 'eq',
[False])])
for _log in logs:
yield self.master.data.updates.finishLog(
logid=_log['logid'])
yield self.master.data.updates.finishStep(
stepid=step['stepid'], results=RETRY, hidden=False)
# then stop the build itself
yield self.master.data.updates.finishBuild(
buildid=build['buildid'], results=RETRY)
# unclaim all of the build requests owned by the deactivated instance
buildrequests = yield self.master.db.buildrequests.getBuildRequests(
complete=False, claimed=masterid)
yield self.master.db.buildrequests.unclaimBuildRequests(
brids=[br['buildrequestid'] for br in buildrequests]) | [
2614,
9238,
-1
] |
def METHOD_NAME(self):
"""get_input_molecule()
Return a |Molecule| instance with initial coordinates.
All data used by this method is taken from ``$JN.runkf`` file. The ``molecule`` attribute of the corresponding job is ignored.
"""
if ('History', 'nr of geometries') in self._kf:
ret = self.get_molecule(section='History', variable='xyz0', unit='bohr', internal=False)
lattice = self.readkf('History', 'lattice0')
lattice = Units.convert(lattice, 'bohr', 'angstrom')
lattice = [lattice[i:i+3] for i in range(0,len(lattice),3)]
while len(lattice) > 0 and not any(lattice[-1]):
lattice.pop()
ret.lattice = [tuple(i) for i in lattice]
return ret
return self.get_main_molecule() | [
19,
362,
6955
] |
def METHOD_NAME(self):
# TODO: Return only the allowed instruments for at least one contained
# analysis
bsc = getToolByName(self, 'senaite_catalog_setup')
items = [('', '')] + [(o.UID, o.Title) for o in
bsc(portal_type='Instrument',
is_active=True)]
o = self.context.getInstrument()
if o and o.UID() not in [i[0] for i in items]:
items.append((o.UID(), o.Title()))
items.sort(lambda x, y: cmp(x[1].lower(), y[1].lower()))
return DisplayList(list(items)) | [
19,
7180
] |
def METHOD_NAME(self, *_args):
OptionDialog(
parent=self,
title=_("Clear Wishlist?"),
message=_("Do you really want to clear your wishlist?"),
destructive_response_id="ok",
callback=self.clear_wishlist_response
).show() | [
69,
537,
8871
] |
async def METHOD_NAME(self, check_interval: float = 0.1) -> None:
"""Block execution until the client disconnects."""
if not self.has_socket_connection:
await self.connected()
self.is_waiting_for_disconnect = True
while self.id in globals.clients:
await asyncio.sleep(check_interval)
self.is_waiting_for_disconnect = False | [
7959
] |
def METHOD_NAME():
return [{"title": m} for m in MUNICIPALITIES] | [
9255,
385,
100
] |
def METHOD_NAME(constant, config):
if constant in config:
return config[constant]
else:
if constant not in config_aliases:
return None
else:
value = config_aliases[constant]
for v in value:
if v in config and config[v] != "":
return config[v]
return None | [
297,
99
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at") | [
679,
680,
1541
] |
async def METHOD_NAME(
self, colors: List[Color], transition_time_ms: int = 250
) -> bool:
"""Cycle through a list of colors, leaving the status bar on the last color."""
steps = [
ColorStep(
transition_type=LightTransitionType.linear,
transition_time_ms=transition_time_ms,
color=c,
)
for c in colors
]
return await self.start_animation(
steps=steps, type=LightAnimationType.single_shot
) | [
3351,
424
] |
def METHOD_NAME(self, url, *args, **kwargs):
url = f"{API_PATH}{url}"
return original_patch(self, url, *args, **kwargs) | [
14940,
1575
] |
def METHOD_NAME(self) -> str:
"""
Type of this resource.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(mocker, caplog):
"""
Verify that `mqttwarn.commands.run_mqttwarn` works as expected.
Here, a configuration file is obtained using the `--config-file` command line option.
"""
mocker.patch("sys.argv", ["mqttwarn-custom"])
mocker.patch("mqttwarn.commands.subscribe_forever")
mqttwarn.commands.run_mqttwarn(configfile="tests/etc/no-functions.ini")
assert caplog.messages == [
"Starting mqttwarn-custom",
"Log level is DEBUG",
] | [
9,
22,
13137,
41,
830,
280,
171
] |
def METHOD_NAME():
"""Test that line endings are correct"""
temp = utils.tempdir()
src_path = temp / "src.sh"
dest_path = temp / "dest.py"
with open(src_path, "w") as src_f:
src_f.write("# start\n")
src_f.write("# end\n")
bash_to_python(src_path, dest_path)
with open(dest_path, "r") as dest_f:
generated_cmd = dest_f.read()
expected_cmd = "# start\n" "# end\n"
assert generated_cmd == expected_cmd | [
9,
679,
534,
699
] |
def METHOD_NAME(self, mock_get_variants, mock_get_logger):
mock_get_variants.side_effect = lambda families, variant_ids, **kwargs: \
[{'variantId': variant_id, 'familyGuids': [family.guid for family in families]}
for variant_id in variant_ids]
mock_logger = mock_get_logger.return_value
# Test with a specific project and a family id.
call_command('reload_saved_variant_json',
PROJECT_NAME,
'--family-id={}'.format(FAMILY_ID))
family_1 = Family.objects.get(id=1)
mock_get_variants.assert_called_with(
[family_1], ['1-1562437-G-C', '1-46859832-G-A','21-3343353-GAGA-G'], user=None)
logger_info_calls = [
mock.call('Project: 1kg project n\xe5me with uni\xe7\xf8de'),
mock.call('Updated 3 variants for project 1kg project n\xe5me with uni\xe7\xf8de'),
mock.call('Done'),
mock.call('Summary: '),
mock.call(' 1kg project n\xe5me with uni\xe7\xf8de: Updated 3 variants')
]
mock_logger.info.assert_has_calls(logger_info_calls)
mock_get_variants.reset_mock()
mock_logger.reset_mock()
# Test for all projects and no specific family ids
call_command('reload_saved_variant_json')
self.assertEqual(mock_get_variants.call_count, 3)
family_2 = Family.objects.get(id=2)
mock_get_variants.assert_has_calls([
mock.call(
[family_1, family_2], ['1-1562437-G-C', '1-46859832-G-A', '12-48367227-TC-T', '21-3343353-GAGA-G'], user=None,
),
mock.call([Family.objects.get(id=12)], ['12-48367227-TC-T', 'prefix_19107_DEL'], user=None),
mock.call([Family.objects.get(id=14)], ['12-48367227-TC-T'], user=None)
], any_order=True)
logger_info_calls = [
mock.call('Project: 1kg project n\xe5me with uni\xe7\xf8de'),
mock.call('Updated 4 variants for project 1kg project n\xe5me with uni\xe7\xf8de'),
mock.call('Project: Empty Project'),
mock.call('Updated 0 variants for project Empty Project'),
mock.call('Project: Test Reprocessed Project'),
mock.call('Updated 2 variants for project Test Reprocessed Project'),
mock.call('Project: Non-Analyst Project'),
mock.call('Updated 1 variants for project Non-Analyst Project'),
mock.call('Done'),
mock.call('Summary: '),
mock.call(' 1kg project n\xe5me with uni\xe7\xf8de: Updated 4 variants'),
mock.call(' Test Reprocessed Project: Updated 2 variants'),
mock.call(' Non-Analyst Project: Updated 1 variants'),
]
mock_logger.info.assert_has_calls(logger_info_calls)
mock_get_variants.reset_mock()
mock_logger.reset_mock()
# Test with an exception.
mock_get_variants.side_effect = Exception("Database error.")
call_command('reload_saved_variant_json',
PROJECT_GUID,
'--family-id={}'.format(FAMILY_ID))
mock_get_variants.assert_called_with([family_1], ['1-1562437-G-C', '1-46859832-G-A', '21-3343353-GAGA-G'], user=None)
logger_info_calls = [
mock.call('Project: 1kg project n\xe5me with uni\xe7\xf8de'),
mock.call('Done'),
mock.call('Summary: '),
mock.call('1 failed projects'),
mock.call(' 1kg project n\xe5me with uni\xe7\xf8de: Database error.')
]
mock_logger.info.assert_has_calls(logger_info_calls)
mock_logger.error.assert_called_with('Error in project 1kg project n\xe5me with uni\xe7\xf8de: Database error.') | [
9,
41,
49,
462
] |
def METHOD_NAME(self, attribute, value):
"""
Notifies the GUI that the IPCServer's status has changed
:param attribute: the attribute that has changed ("port", "running")
:type attribute: str
:param value: the value that the attribute has changed to
:type value: any
"""
if attribute == "port":
pass
elif attribute == "running":
if value: # server is running now
self.ui.serverStatus.setText("running: %s" % self.server_status["port"])
self.ui.serverStatus.setStyleSheet(self.status_style % "lime")
self.ui.toggleStatus.setText("Stop Server")
else:
self.ui.serverStatus.setText("shutdown")
self.ui.serverStatus.setStyleSheet(self.status_style % "red")
self.ui.toggleStatus.setText("Start Server")
self.change_port_action.setEnabled(not value)
self.ui.toggleStatus.setEnabled(True)
else:
logger.warning("'%s' is no valid server status attribute ")
return
self.server_status[attribute] = value | [
959,
163,
452,
86
] |
def METHOD_NAME(self, number_of_items):
list_to_use = self.session.db[self.name]
if number_of_items == 0 and self.session.settings["general"]["persist_size"] == 0: return
log.debug("The list contains %d items " % (self.buffer.list.get_count(),))
log.debug("Putting %d items on the list" % (number_of_items,))
safe = True
if self.session.settings["general"]["read_preferences_from_instance"]:
safe = self.session.expand_spoilers == False
if self.buffer.list.get_count() == 0:
for i in list_to_use:
post = self.compose_function(i.status, self.session.db, self.session.settings, self.session.settings["general"]["relative_times"], self.session.settings["general"]["show_screen_names"], safe=safe)
self.buffer.list.insert_item(False, *post)
self.buffer.set_position(self.session.settings["general"]["reverse_timelines"])
elif self.buffer.list.get_count() > 0 and number_of_items > 0:
if self.session.settings["general"]["reverse_timelines"] == False:
items = list_to_use[len(list_to_use)-number_of_items:]
for i in items:
post = self.compose_function(i.status, self.session.db, self.session.settings, self.session.settings["general"]["relative_times"], self.session.settings["general"]["show_screen_names"], safe=safe)
self.buffer.list.insert_item(False, *post)
else:
items = list_to_use[0:number_of_items]
items.reverse()
for i in items:
post = self.compose_function(i.status, self.session.db, self.session.settings, self.session.settings["general"]["relative_times"], self.session.settings["general"]["show_screen_names"], safe=safe)
self.buffer.list.insert_item(True, *post)
log.debug("Now the list contains %d items " % (self.buffer.list.get_count(),)) | [
1276,
1768,
69,
245
] |