text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(f, name_extra, config):
f.write(""" | [
77,
1944,
450
] |
def METHOD_NAME(buffer):
return np.frombuffer(buffer, dtype=np.int8).reshape((-1, 2), order="C") | [
321,
24,
4265
] |
def METHOD_NAME() -> List[str]:
return ["customer", "listing"] | [
19,
3403,
2253
] |
def METHOD_NAME(self):
num_categories = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if (
hasattr(form, "cleaned_data")
and form.cleaned_data.get("category", None)
and not form.cleaned_data.get("DELETE", False)
):
num_categories += 1
return num_categories | [
19,
181,
2065
] |
def METHOD_NAME(self, session):
pass | [
69,
2333
] |
def METHOD_NAME(requests, yaml_file):
yaml_result = []
with open(yaml_file, 'a') as y_f:
id_ev = 0
for req, event in requests:
type_ev = event['data']['type']
stage_ev = type_ev.title()
mode = None
agent_id = callback_analysisd_agent_id(req) or '000'
del event['data']['mode']
del event['data']['type']
if 'tags' in event['data']:
del event['data']['tags']
if type_ev == 'added':
mode = 'save2'
output_ev = json.dumps(event['data'])
elif type_ev == 'deleted':
mode = 'delete'
output_ev = json.dumps(event['data']['path']).replace('"', '')
elif type_ev == 'modified':
mode = 'save2'
for field in ['old_attributes', 'changed_attributes', 'content_changes']:
if field in event['data']:
del event['data'][field]
output_ev = json.dumps(event['data'])
yaml_result.append({
'name': f"{stage_ev}{id_ev}",
'test_case': [
{
'input': f"{req}",
'output': f"agent {agent_id} syscheck {mode} {output_ev}",
'stage': f"{stage_ev}"
}
]
})
id_ev += 1
y_f.write(yaml.safe_dump(yaml_result)) | [
214,
239,
409,
406
] |
def METHOD_NAME():
global mol, mf_lda, mf_gga, nstates
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['H' , (0. , 0. , 1.804)],
['F' , (0. , 0. , 0.)], ]
mol.unit = 'B'
mol.basis = '631g'
mol.build()
mf_lda = dft.RKS(mol).set(xc='LDA,')
mf_lda.grids.prune = False
mf_lda.conv_tol = 1e-10
mf_lda.kernel()
mf_gga = dft.RKS(mol).set(xc='b88,')
mf_gga.grids.prune = False
mf_gga.conv_tol = 1e-10
mf_gga.kernel()
nstates = 5 # to ensure the first 3 TDSCF states are converged | [
0,
1,
298
] |
def METHOD_NAME(step, key, value):
assert check_key_value(world.responses[world.response_count].json(), key,
value) is False, 'The key {key} is in the response and has the value {value}. \
Response: {response}'.format(
key=key, value=value, response=world.responses[world.response_count]) | [
250,
983,
17,
220,
130,
983,
59
] |
def METHOD_NAME(self, addr, value, depth=0):
try:
value = self.dev.ctrl_transfer(bmRequestType=0x43, bRequest=0x00,
wValue=addr & 0xffff,
wIndex=(addr >> 16) & 0xffff,
data_or_wLength=bytes([(value >> 0) & 0xff,
(value >> 8) & 0xff,
(value >> 16) & 0xff,
(value >> 24) & 0xff]
), timeout=None)
except usb.core.USBError as e:
if e.errno == 13:
print("Access Denied. Maybe try using sudo?")
self.close()
self.open()
if depth < self.max_recursion_count:
return self.METHOD_NAME(addr, value, depth+1) | [
8323,
77
] |
def METHOD_NAME(vertical, horizontal):
top = int(vertical / 2)
bottom = vertical - top
right = int(horizontal / 2)
left = horizontal - right
return (top, right, bottom, left) | [
19,
712,
2459,
771
] |
def METHOD_NAME(self):
if "AWS_DEFAULT_REGION" not in os.environ:
# We need to provide a region to boto3 to avoid no region exception.
# Which region to provide is arbitrary.
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
status_manager = JsonComputeFleetStatusManager("cluster-name")
return status_manager | [
226,
5187,
452,
722
] |
def METHOD_NAME(self):
self.check_filter(TrackerFilter,
filter_names=('status', 'st'),
items=({'id': 1, 'status': 'stopped'},
{'id': 2, 'status': 'idle'},
{'id': 3, 'status': 'queued'},
{'id': 4, 'status': 'announcing'},
{'id': 5, 'status': 'scraping'}),
test_cases=(('{name}', (1, 2, 3, 4, 5)),
('!{name}', ()),
('{name}=queued', (3,))))
with self.assertRaises(ValueError) as cm:
TrackerFilter('status=foo')
self.assertEqual(str(cm.exception), "Invalid value for filter 'status': 'foo'") | [
9,
452
] |
def METHOD_NAME(graphql_client, user, grant_factory):
graphql_client.force_login(user)
grant = grant_factory(user_id=user.id, status=Grant.Status.pending)
response = _send_grant_reply(graphql_client, grant, status="refused")
assert response["data"]["sendGrantReply"]["__typename"] == "SendGrantReplyError"
assert (
response["data"]["sendGrantReply"]["message"]
== "You cannot reply to this grant"
) | [
9,
21,
2286,
1922,
217,
452,
137
] |
def METHOD_NAME(key, value, depth):
chain = tab * (bool(depth - 1)) + list_tag + \
str(key) + ": " + inline_code + str(value) + inline_code + "\n"
return chain | [
56,
99,
357
] |
def METHOD_NAME(self, completion):
try:
sig = completion.get_signatures()
return self.callback(completion, sig)
except Exception as e: # pylint: disable=broad-except
log.warning(f'Something went wrong when resolving label for {completion}: {e}')
return self.resolve_on_error | [
1014
] |
def METHOD_NAME(
client: "opensearchpy.OpenSearch",
index: Optional[str] = "_all",
search_body: Optional[Dict[str, Any]] = None,
doc_type: Optional[str] = None,
is_scroll: Optional[bool] = False,
filter_path: Optional[Union[str, Collection[str]]] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""Return results matching query DSL as pandas DataFrame.
Parameters
----------
client : OpenSearch
instance of opensearchpy.OpenSearch to use.
index : str, optional
A comma-separated list of index names to search.
use `_all` or empty string to perform the operation on all indices.
search_body : Dict[str, Any], optional
The search definition using the `Query DSL <https://opensearch.org/docs/opensearch/query-dsl/full-text/>`_.
doc_type : str, optional
Name of the document type (for Elasticsearch versions 5.x and earlier).
is_scroll : bool, optional
Allows to retrieve a large numbers of results from a single search request using
`scroll <https://opensearch.org/docs/opensearch/rest-api/scroll/>`_
for example, for machine learning jobs.
Because scroll search contexts consume a lot of memory, we suggest you don’t use the scroll operation
for frequent user queries.
filter_path : Union[str, Collection[str]], optional
Use the filter_path parameter to reduce the size of the OpenSearch Service response \ | [
1070
] |
def METHOD_NAME(self):
session_factory = self.replay_flight_data('test_sfn_untag_resource')
p = self.load_policy(
{
'name': 'test-untag-sfn',
'resource': 'step-machine',
'actions': [
{
'type': 'remove-tag',
'tags': [
'test'
]
}
]
},
config={'account_id': '101010101111'},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client('stepfunctions')
tags = client.list_tags_for_resource(resourceArn=resources[0]['stateMachineArn'])
self.assertTrue([t for t in tags['tags'] if t['key'] != 'test']) | [
9,
14981,
6911,
191
] |
def METHOD_NAME():
return app_common.METHOD_NAME() | [
1553,
377
] |
def METHOD_NAME(self, index):
if index.isValid():
return 0
return len(self.model) | [
843,
29
] |
def METHOD_NAME():
val = {'init': 'epsg:4326', 'no_defs': True}
assert crs.CRS.from_user_input(val).to_string() == "EPSG:4326" | [
9,
24,
144,
7396
] |
def METHOD_NAME(filesystem_id: str) -> werkzeug.Response:
"""deleting a single collection from its /col page"""
source = get_source(filesystem_id)
try:
delete_collection(filesystem_id)
except GpgKeyNotFoundError as e:
current_app.logger.error("error deleting collection: %s", e)
abort(500)
flash(
Markup(
"<b>{}</b> {}".format(
# Translators: Precedes a message confirming the success of an operation.
escape(gettext("Success!")),
escape(
gettext("The account and data for the source {} have been deleted.").format(
source.journalist_designation
)
),
)
),
"success",
)
return redirect(url_for("main.index")) | [
34,
97
] |
def METHOD_NAME(self) -> Tuple[torch.Tensor]:
result = None
with torch.no_grad():
for i, x in enumerate(self.x_l):
y_pred = self.xlmr.extract_features(x)
result = y_pred
return (result, | [
1171
] |
def METHOD_NAME(self):
f, outer, inner = self.make_frames()
outer_locals = outer.f_locals
self.assertIsInstance(outer_locals.pop('inner'), types.FunctionType)
self.assertEqual(outer_locals, {'x': 5, 'y': 6})
inner_locals = inner.f_locals
self.assertEqual(inner_locals, {'x': 5, 'z': 7}) | [
9,
2048
] |
def METHOD_NAME(cls):
searx.search.initialize(TEST_ENGINES) | [
0,
1,
2
] |
def METHOD_NAME(
expr: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool
) -> sympy.Basic:
e = expr
op = type(expr)
if isinstance(e, sympy.Rel):
# Move any constants in the left-hand side to the right-hand side.
lhs_not_thing = (
sum([a for a in e.lhs.args if not a.has(thing)])
if isinstance(e.lhs, sympy.Add)
else 0
)
e = op(expr.lhs - lhs_not_thing, expr.rhs - lhs_not_thing) # type: ignore[attr-defined]
# Divide both sides by the factors that don't contain thing.
if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul):
lhs, rhs = e.args
other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)])
# If we can't tell whether 'other' is negative or positive, we do nothing.
# That is because we don't know whether we have mirror the operation or not.
if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None):
# Divide both sides by 'other'.
lhs = lhs / other
rhs = rhs / other
# If 'e' is an inequality and 'other' is negative, we have to
# mirror the expression.
if isinstance(e, INEQUALITY_TYPES) and other.is_negative:
op = mirror_rel_op(op) # type: ignore[assignment]
assert op is not None
e = op(lhs, rhs)
################################################################################
# left-hand side is FloorDiv
################################################################################
#
# Given the expression: a // b op c
# where 'op' is a relational operation, these rules only work if:
# - b > 0
# - c is an integer
if (
floordiv_inequality
and isinstance(e, sympy.Rel)
and isinstance(e.lhs, FloorDiv)
and e.lhs.divisor.is_positive
and e.rhs.is_integer
):
# a // b == expr
# => a >= (b * expr) and a < (b * (expr + 1))
if isinstance(expr, sympy.Eq):
numerator, denominator = e.lhs.args
return sympy.And(
sympy.Ge(numerator, (e.rhs * denominator)), # type: ignore[arg-type]
sympy.Lt(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type]
)
# a // b != expr
# => a < (b * expr) or a >= (b * (expr + 1))
if isinstance(expr, sympy.Ne):
numerator, denominator = e.lhs.args
return sympy.Or(
sympy.Lt(numerator, (e.rhs * denominator)), # type: ignore[arg-type]
sympy.Ge(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type]
)
# The transformations below only work if b is positive.
# Note: we only have this information for constants.
# a // b > expr => a >= b * (expr + 1)
# a // b >= expr => a >= b * expr
if isinstance(expr, (sympy.Gt, sympy.Ge)):
quotient = e.rhs if isinstance(expr, sympy.Ge) else (e.rhs + 1) # type: ignore[arg-type]
return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type]
# a // b < expr => a < b * expr
# a // b <= expr => a < b * (expr + 1)
if isinstance(expr, (sympy.Lt, sympy.Le)):
quotient = e.rhs if isinstance(expr, sympy.Lt) else (e.rhs + 1) # type: ignore[arg-type]
return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type]
return e | [
1365,
10832,
7455
] |
def METHOD_NAME(self):
G = nx.Graph()
with pytest.raises(nx.NetworkXError):
nx.average_degree_connectivity(G, target="bogus")
with pytest.raises(nx.NetworkXError):
nx.average_degree_connectivity(G, source="bogus") | [
9,
532,
1930,
303
] |
def METHOD_NAME(managed_zone: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetManagedZoneIamPolicyResult]:
"""
Retrieves the current IAM policy data for managedzone
## example
```python
import pulumi
import pulumi_gcp as gcp
policy = gcp.dns.get_managed_zone_iam_policy(project=google_dns_managed_zone["default"]["project"],
managed_zone=google_dns_managed_zone["default"]["name"])
```
:param str managed_zone: Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
... | [
19,
3627,
2456,
1694,
54,
146
] |
def METHOD_NAME(self, config):
return [PingEndpoints(self.matched.guest1.vlan0, self.matched.guest2.vlan0)] | [
567,
1677,
1197
] |
def METHOD_NAME(exc_type, exc_value, exc_tb):
enriched_tb = _add_missing_frames(exc_tb) if exc_tb else exc_tb
ExceptionDispatcher().add(exc_type, exc_value, enriched_tb) | [
6921
] |
def METHOD_NAME(self):
appstream_client = mock.MagicMock
appstream_client.fleets = []
with mock.patch(
"prowler.providers.aws.services.appstream.appstream_service.AppStream",
new=appstream_client,
):
# Test Check
from prowler.providers.aws.services.appstream.appstream_fleet_session_idle_disconnect_timeout.appstream_fleet_session_idle_disconnect_timeout import (
appstream_fleet_session_idle_disconnect_timeout,
)
check = appstream_fleet_session_idle_disconnect_timeout()
result = check.execute()
assert len(result) == 0 | [
9,
654,
-1
] |
def METHOD_NAME(self, key=None, reverse=False):
is_immutable(self) | [
266
] |
def METHOD_NAME(database_name: Optional[str] = None,
geo_backup_policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGeoBackupPolicyResult:
"""
Gets a Geo backup policy for the given database resource.
:param str database_name: The name of the database.
:param str geo_backup_policy_name: The name of the Geo backup policy. This should always be 'Default'.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['databaseName'] = database_name
__args__['geoBackupPolicyName'] = geo_backup_policy_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20230201preview:getGeoBackupPolicy', __args__, opts=opts, typ=GetGeoBackupPolicyResult).value
return AwaitableGetGeoBackupPolicyResult(
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
state=pulumi.get(__ret__, 'state'),
storage_type=pulumi.get(__ret__, 'storage_type'),
type=pulumi.get(__ret__, 'type')) | [
19,
6278,
1001,
54
] |
def METHOD_NAME(full_name):
module_name, unit_name = full_name.rsplit('.', 1)
mod = importlib.import_module(module_name)
return getattr(mod, unit_name) | [
512,
604,
144
] |
def METHOD_NAME(hcx_enterprise_site_name: Optional[str] = None,
private_cloud_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHcxEnterpriseSiteResult:
"""
An HCX Enterprise Site resource
:param str hcx_enterprise_site_name: Name of the HCX Enterprise Site in the private cloud
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['hcxEnterpriseSiteName'] = hcx_enterprise_site_name
__args__['privateCloudName'] = private_cloud_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:avs/v20220501:getHcxEnterpriseSite', __args__, opts=opts, typ=GetHcxEnterpriseSiteResult).value
return AwaitableGetHcxEnterpriseSiteResult(
activation_key=pulumi.get(__ret__, 'activation_key'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
status=pulumi.get(__ret__, 'status'),
type=pulumi.get(__ret__, 'type')) | [
19,
15790,
4108,
1055
] |
def METHOD_NAME():
sdfg = make_sdfg(False, 'nonsqueezed')
N = 20
M = 10
A = np.random.randint(0, 100, size=[N + 1, M]).astype(np.int64)
expected = np.copy(A)
expected[0:N - 2, 1:M] = np.reshape(np.arange(N - 2) + 99, (N - 2, 1))
expected[2:N, 0:M - 1] = np.reshape(np.arange(N - 2) + 101, (N - 2, 1))
sdfg(A=A, N=N, M=M)
assert np.allclose(A, expected)
# Check the propagated memlet out of the nested SDFG.
N = dace.symbolic.symbol('N')
j = dace.symbolic.symbol('j')
main_state = sdfg.nodes()[0]
out_memlet = main_state.edges()[1].data
assert out_memlet.volume == 2 * N - 4
assert out_memlet.dynamic == False
assert out_memlet.subset[0] == (0, N - 1, 1)
assert out_memlet.subset[1] == (j - 1, j, 1) | [
9,
13894,
654,
3822
] |
def METHOD_NAME():
selection_parser = ArgumentParser(add_help=False)
selection_parser.add_argument(
'selection', type=valid_entry, help='Entity ID or \'all\' to approve all pending entries'
)
filter_parser = ArgumentParser(add_help=False)
filter_parser.add_argument('--task-name', help='Filter by task name')
parser = options.register_command('pending', do_cli, help='View and manage pending entries')
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='action')
list_parser = subparsers.add_parser(
'list', help='Shows all existing pending entries', parents=[table_parser, filter_parser]
)
list_group = list_parser.add_mutually_exclusive_group()
list_group.add_argument(
'--pending',
action='store_false',
help='Show only pending entries',
dest='approved',
default=None,
)
list_group.add_argument(
'--approved',
action='store_true',
help='Show only approved entries',
dest='approved',
default=None,
)
subparsers.add_parser('approve', help='Approve pending entries', parents=[selection_parser])
subparsers.add_parser('reject', help='Reject pending entries', parents=[selection_parser])
subparsers.add_parser('clear', help='Clear all pending entries', parents=[filter_parser]) | [
372,
1319,
134
] |
def METHOD_NAME(self):
return self.server.addr['ipaddr'] | [
1368
] |
def METHOD_NAME(self, train_split=0.6):
"""generate the train_list.txt and val_list.txt"""
txtname = [
join_paths(self.phase_path, 'train_list.txt'),
join_paths(self.phase_path, 'val_list.txt')
]
self.write_txt(txtname[0], self.train_image_files_npy,
self.train_label_files_npy)
self.write_txt(txtname[1], self.val_image_files_npy,
self.val_label_files_npy) | [
567,
310
] |
def METHOD_NAME():
ds = data_dir_load(magneticum, kwargs=mag_kwargs)
for test in sph_answer(ds, "snap_132", 3718111, mag_fields, center="max"):
METHOD_NAME.__name__ = test.description
yield test | [
9,
-1
] |
def METHOD_NAME(self) -> Any:
"""Get client.
Returns:
Any: DeepLake vectorstore dataset.
"""
return self.vectorstore.dataset | [
340
] |
def METHOD_NAME(self):
font = self.get_font(SpyderFontType.Monospace)
self.get_widget().set_font(font) | [
86,
2584
] |
def METHOD_NAME(client):
"""Tests getting the global tasks list."""
tasks = client.get_tasks()
assert len(tasks.results) >= 1 | [
9,
19,
620,
235
] |
def METHOD_NAME(
dir_path: str,
algo: str,
env: str,
repo_id: str,
rewards: list = None,
enjoy_name: str = None,
train_name: str = None,
):
readme_path = os.path.join(dir_path, "README.md")
repo_name = repo_id.split("/")[1]
readme = f""" | [
567,
578,
5427
] |
def METHOD_NAME(
x: AnyArray | None,
items: Sequence[Label] | Index,
major_axis: Sequence[Label] | Index,
minor_axis: Sequence[Label] | Index,
swap: bool = False,
) -> DataFrame:
"""
Construct a multiindex DataFrame using Panel-like arguments
Parameters
----------
x : ndarray
3-d array with size nite, nmajor, nminor
items : list-like
List like object with item labels
major_axis : list-like
List like object with major_axis labels
minor_axis : list-like
List like object with minor_axis labels
swap : bool
Swap is major and minor axes
Notes
-----
This function is equivalent to
Panel(x, items, major_axis, minor_axis).to_frame()
if `swap` is True, it is equivalent to
Panel(x, items, major_axis, minor_axis).swapaxes(1,2).to_frame()
"""
nmajor = np.arange(len(major_axis))
nminor = np.arange(len(minor_axis))
final_levels = [major_axis, minor_axis]
mi = MultiIndex.from_product([nmajor, nminor])
if x is not None:
shape = x.shape
x = x.reshape((shape[0], shape[1] * shape[2])).T
df = DataFrame(x, columns=Index(items), index=mi)
if swap:
df.index = mi.swaplevel()
df.sort_index(inplace=True)
final_levels = [minor_axis, major_axis]
mi_index = cast(MultiIndex, df.index)
df.index = mi_index.set_levels(levels=final_levels, level=[0, 1])
df.index.names = ["major", "minor"]
return df | [
519,
24,
896
] |
def METHOD_NAME(self, app):
super().METHOD_NAME(app)
with app.app_context():
self._check_default_provider() | [
176,
991
] |
def METHOD_NAME(self):
super(MemoryLeaksTestCase, self).METHOD_NAME()
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
os.environ["TEST_VAR"] = "foobar"
# There are always some uncollectable objects which are part of the runners which we
# are not interested in.
gc.collect()
self.base_gargage = self._garbage_to_set(gc.garbage) | [
0,
1
] |
def METHOD_NAME(engine_traits: EngineTraits):
"""Fetch locales & languages from dailymotion.
Locales fetched from `api/locales <https://api.dailymotion.com/locales>`_.
There are duplications in the locale codes returned from Dailymotion which
can be ignored::
en_EN --> en_GB, en_US
ar_AA --> ar_EG, ar_AE, ar_SA
The language list `api/languages <https://api.dailymotion.com/languages>`_
contains over 7000 *languages* codes (see PR1071_). We use only those
language codes that are used in the locales.
.. _PR1071: https://github.com/searxng/searxng/pull/1071
"""
resp = get('https://api.dailymotion.com/locales')
if not resp.ok: # type: ignore
print("ERROR: response from dailymotion/locales is not OK.")
for item in resp.json()['list']: # type: ignore
eng_tag = item['locale']
if eng_tag in ('en_EN', 'ar_AA'):
continue
try:
sxng_tag = region_tag(babel.Locale.parse(eng_tag))
except babel.UnknownLocaleError:
print("ERROR: item unknown --> %s" % item)
continue
conflict = engine_traits.regions.get(sxng_tag)
if conflict:
if conflict != eng_tag:
print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
continue
engine_traits.regions[sxng_tag] = eng_tag
locale_lang_list = [x.split('_')[0] for x in engine_traits.regions.values()]
resp = get('https://api.dailymotion.com/languages')
if not resp.ok: # type: ignore
print("ERROR: response from dailymotion/languages is not OK.")
for item in resp.json()['list']: # type: ignore
eng_tag = item['code']
if eng_tag in locale_lang_list:
sxng_tag = language_tag(babel.Locale.parse(eng_tag))
engine_traits.languages[sxng_tag] = eng_tag | [
1047,
4718
] |
def METHOD_NAME(msg):
if DEBUG:
print(f"DEBUG [{EXENAME}]: {msg}",file=sys.stderr) | [
290
] |
def METHOD_NAME(self):
"""
:return: path to jfr directory
"""
return os.path.join(self.persistent_root, "jfr") | [
-1,
1190
] |
def METHOD_NAME(bucket):
s3 = boto3.Session().resource('s3')
return s3.Bucket(bucket) | [
176,
2538
] |
def METHOD_NAME(self):
return "DELETE" | [
103
] |
def METHOD_NAME(body, headers):
"""Wrapper for a callback function for responses.add_callback"""
def request_callback(request):
if request.headers["content-type"] == "application/json":
assert json.loads(request.body) == body
else:
assert request.body == body
assert (request.headers[key] == headers[key] for key in headers)
return 200, {}, json.dumps({"proxy": True})
return request_callback | [
1162,
377,
2829
] |
def METHOD_NAME(f_of_X, f_of_Y):
loss = 0.0
delta = f_of_X - f_of_Y
#loss = torch.mean((delta[:-1] * delta[1:]).sum(1))
loss = torch.mean(torch.mm(delta, torch.transpose(delta, 0, 1)))
#delta = f_of_X - f_of_Y
#loss = torch.mean((delta * delta).sum(1))
#print(loss)
return loss | [
1783,
7888
] |
def METHOD_NAME(depth = 3, maxparam = 0):
def recursion():
return METHOD_NAME(depth = depth-1, maxparam = maxparam)
if depth == 0:
if maxparam != 0 and random.randint(0, 1) != 0:
return 'p%02d' % random.randint(0, maxparam-1)
return random.choice([ '%e', '%f', '%g' ]) % random.uniform(-2, +2)
if random.randint(0, 4) == 0:
return recursion() + random.choice([ ' < ', ' <= ', ' == ', ' != ', ' >= ', ' > ' ]) + recursion() + ' ? ' + recursion() + ' : ' + recursion()
op_prefix = [ '+(', '-(' ]
op_infix = [ ' + ', ' - ', ' * ', ' / ' ]
op_func1 = [ '$ln', '$log10', '$exp', '$sqrt', '$floor', '$ceil', '$sin', '$cos', '$tan', '$asin', '$acos', '$atan', '$sinh', '$cosh', '$tanh', '$asinh', '$acosh', '$atanh' ]
op_func2 = [ '$pow', '$atan2', '$hypot' ]
op = random.choice(op_prefix + op_infix + op_func1 + op_func2)
if op in op_prefix:
return op + recursion() + ')'
if op in op_infix:
return '(' + recursion() + op + recursion() + ')'
if op in op_func1:
return op + '(' + recursion() + ')'
if op in op_func2:
return op + '(' + recursion() + ', ' + recursion() + ')'
raise | [
236,
1120
] |
def METHOD_NAME(i3):
data = i3.statusline()
assert data["blocks"] == []
assert data["suffix"] == "," | [
9,
35,
452,
534
] |
def METHOD_NAME(self, remote_cmd: str = "") -> bool:
"""Check if the dest_file already exists on the file system (return boolean)."""
return self._check_file_exists_unix(remote_cmd=remote_cmd) | [
250,
171,
954
] |
def METHOD_NAME(self):
self.manager.request_product('name')
assert self.manager.product_requests == ['product:name'] | [
9,
377,
1188
] |
def METHOD_NAME(self) -> str:
"""
A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/).
"""
return pulumi.get(self, "storage_container_path") | [
948,
224,
157
] |
def METHOD_NAME(self): | [
531,
481
] |
def METHOD_NAME(self, basis):
x = Image(randn(*basis.sz, seed=self.seed), dtype=basis.dtype)
# Express in an FB basis
v1 = basis.expand(x)
# Convert real FB coef to complex coef,
cv = basis.to_complex(v1)
# then convert back to real coef representation.
v2 = basis.to_real(cv)
# The round trip should be equivalent up to machine precision
assert np.allclose(v1, v2) | [
9,
2587,
-1
] |
def METHOD_NAME():
ursula._availability_tracker.start()
assert ursula._availability_tracker.score == 10
ursula._availability_tracker.record(False)
assert ursula._availability_tracker.score == 9.0
for i in range(7):
ursula._availability_tracker.record(True)
assert ursula._availability_tracker.score > 9.5 | [
599
] |
def METHOD_NAME(theme, index, color=None, title_filter=None):
"""Update coloring filter rule at index, on active theme"""
tfr = theme.TabFilterRules[index]
if color:
tfr.Brush = hex_to_brush(color)
if title_filter:
tfr.TitleFilter = Regex(title_filter) | [
86,
5678,
-1
] |
def METHOD_NAME(status, signal=True):
return MockMessage(is_signal=signal, body=("", {"PlaybackStatus": obj(status)}, [])) | [
2494,
452
] |
def METHOD_NAME(self, formula):
"""
Args
-----
formula : str
Returns
-----
Tuple[np.ndarray, np.ndarray]
"""
if self.analysis_version < 2:
cpyMSpec = cpyMSpec_0_3_5 # pylint: disable=invalid-name
else:
# noinspection PyPep8Naming
cpyMSpec = cpyMSpec_0_4_2 # pylint: disable=invalid-name
try:
iso_pattern = cpyMSpec.isotopePattern(str(formula))
iso_pattern.addCharge(int(self.charge))
fwhm = self.sigma * SIGMA_TO_FWHM
if self.analysis_version < 2:
resolving_power = iso_pattern.masses[0] / fwhm
instrument_model = cpyMSpec.InstrumentModel('tof', resolving_power)
else:
resolving_power = BASE_MZ / fwhm
instrument_model = cpyMSpec.InstrumentModel(
self.instrument.lower(), resolving_power, at_mz=BASE_MZ
)
centr = iso_pattern.centroids(instrument_model)
mzs_ = np.array(centr.masses)
ints_ = 100.0 * np.array(centr.intensities)
mzs_, ints_ = self._trim(mzs_, ints_, self.n_peaks)
n = len(mzs_)
mzs = np.zeros(self.n_peaks)
mzs[:n] = np.array(mzs_)
ints = np.zeros(self.n_peaks)
ints[:n] = ints_
return mzs, ints
except Exception as e:
logger.warning(f'{formula} - {e}')
return None, None | [
12532,
10507
] |
def METHOD_NAME(self):
"""
Whether to drop late spikes.
:rtype: bool
"""
return self.__drop_late_spikes | [
1050,
2890,
2238
] |
def METHOD_NAME(solc_binary_path) -> None:
solc_path = solc_binary_path("0.8.15")
slither = Slither(
Path(USING_FOR_TEST_DATA_DIR, "using-for-3-0.8.0.sol").as_posix(), solc=solc_path
)
contract_c = slither.get_contract_from_name("C")[0]
libCall = contract_c.get_function_from_full_name("libCall(uint256)")
for ir in libCall.all_slithir_operations():
if isinstance(ir, LibraryCall) and ir.destination == "Lib" and ir.function_name == "a":
return
assert False | [
9,
626,
43,
1635,
33,
1101,
156
] |
def METHOD_NAME(self, module, method, wrapped, instance, args, kwargs):
if "method" in kwargs:
method = kwargs["method"]
else:
method = args[0]
host = instance.host
if instance.port != default_ports.get(instance.scheme):
host += ":" + str(instance.port)
if "url" in kwargs:
url = kwargs["url"]
else:
url = args[1]
signature = method.upper() + " " + host
if url.startswith("/"):
url = "%s://%s%s" % (instance.scheme, host, url)
transaction = execution_context.get_transaction()
with capture_span(
signature,
span_type="external",
span_subtype="http",
extra={"http": {"url": url}},
leaf=True,
) as span:
# if urllib3 has been called in a leaf span, this span might be a DroppedSpan.
leaf_span = span
while isinstance(leaf_span, DroppedSpan):
leaf_span = leaf_span.parent
parent_id = leaf_span.id if leaf_span else transaction.id
trace_parent = transaction.trace_parent.copy_from(
span_id=parent_id, trace_options=TracingOptions(recorded=True)
)
args, kwargs = update_headers(args, kwargs, instance, transaction, trace_parent)
if leaf_span:
leaf_span.dist_tracing_propagated = True
response = wrapped(*args, **kwargs)
if response:
if span.context:
span.context["http"]["status_code"] = response.status
span.set_success() if response.status < 400 else span.set_failure()
return response | [
128
] |
def METHOD_NAME(self) -> Check_Report_AWS:
findings = []
for role in iam_client.roles:
if (
not role.is_service_role
): # Avoid service roles since they cannot be modified by the user
report = Check_Report_AWS(self.metadata())
report.region = iam_client.region
report.resource_arn = role.arn
report.resource_id = role.name
report.resource_tags = role.tags
report.status = "PASS"
report.status_extended = (
f"IAM Role {role.name} does not have ReadOnlyAccess policy."
)
for policy in role.attached_policies:
if policy["PolicyName"] == "ReadOnlyAccess":
report.status_extended = f"IAM Role {role.name} has read-only access but is not cross account."
cross_account_access = False
if isinstance(role.assume_role_policy["Statement"], list):
for statement in role.assume_role_policy["Statement"]:
if not cross_account_access:
if (
statement["Effect"] == "Allow"
and "AWS" in statement["Principal"]
):
if isinstance(
statement["Principal"]["AWS"], list
):
for aws_account in statement["Principal"][
"AWS"
]:
if (
iam_client.audited_account
not in aws_account
or "*" == aws_account
):
cross_account_access = True
break
else:
if (
iam_client.audited_account
not in statement["Principal"]["AWS"]
or "*" == statement["Principal"]["AWS"]
):
cross_account_access = True
else:
break
else:
statement = role.assume_role_policy["Statement"]
if (
statement["Effect"] == "Allow"
and "AWS" in statement["Principal"]
):
if isinstance(statement["Principal"]["AWS"], list):
for aws_account in statement["Principal"]["AWS"]:
if (
iam_client.audited_account
not in aws_account
or "*" == aws_account
):
cross_account_access = True
break
else:
if (
iam_client.audited_account
not in statement["Principal"]["AWS"]
or "*" == statement["Principal"]["AWS"]
):
cross_account_access = True
if cross_account_access:
report.status = "FAIL"
report.status_extended = f"IAM Role {role.name} gives cross account read-only access."
findings.append(report)
return findings | [
750
] |
def METHOD_NAME(self):
return cookies.get_sponsorship_selected_benefits(self.request) | [
19,
2471
] |
def METHOD_NAME(self, raz_requests, doAsUser=None): | [
250,
7969
] |
def METHOD_NAME(service_path, modify_info):
"""
Modify libvirtd or virtqemud service
:param service_path: service path
:param modify_info: service modify info
"""
ori_value = process.getoutput("cat %s | grep LimitNOFILE" % service_path, shell=True)
with open(service_path, 'r+') as f:
content = f.read()
content = re.sub(ori_value, modify_info, content)
f.seek(0)
f.write(content)
f.truncate() | [
2444,
162,
1687
] |
def METHOD_NAME(self, req, resp, worker, dataset):
# Make sure load balancers and other proxies do not cache this
resp.cache_control = cache_control
resp.set_header('Expires', expires)
resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
if not _check_git_access(req, dataset):
return _handle_failed_access(req, resp)
if dataset:
dataset_path = self.store.get_dataset_path(dataset)
service = req.get_param('service', required=True)
if service == 'git-receive-pack' or service == 'git-upload-pack':
# Prefix is line length as hex followed by service name to start
if service == 'git-receive-pack':
prefix = b'001f# service=git-receive-pack\n0000'
elif service == 'git-upload-pack':
prefix = b'001e# service=git-upload-pack\n0000'
# git-receive-pack or git-upload-pack handle the other requests
with subprocess.Popen([service, '--advertise-refs', '--stateless-rpc', dataset_path], stdout=subprocess.PIPE) as process:
resp.content_type = 'application/x-{}-advertisement'.format(
service)
resp.body = prefix + process.stdout.read()
resp.status = falcon.HTTP_OK
else:
resp.status = falcon.HTTP_UNPROCESSABLE_ENTITY
else:
resp.status = falcon.HTTP_UNPROCESSABLE_ENTITY | [
69,
19
] |
def METHOD_NAME(self):
return "ODataV4Format" | [
168,
275
] |
def METHOD_NAME(self, csvfile):
outf=open(csvfile, 'wb')
outf.write(codecs.BOM_UTF8)
out = unicodecsv.writer(outf)
#Header
out.writerow([
"Department Name English / Nom du ministère en anglais",
"Department Name French / Nom du ministère en français",
"Openness report (score:count) / Rapport d'ouverture (score: compter)",
])
for k,v in self.reports.iteritems():
names = map(lambda x: x.strip(), k.split('|'))
line=[names[0], names[1], dict(v)]
out.writerow(line)
outf.close() | [
278
] |
def METHOD_NAME(self, class_p3m):
p3m = class_p3m(prefactor=2., accuracy=1e-3, mesh=16, cao=6, r_cut=7.5)
self.system.electrostatics.solver = p3m
skin = self.system.cell_system.tune_skin(
min_skin=0.0, max_skin=2.5, tol=0.05, int_steps=100)
print(f"Tuned skin: {skin}")
pressures_via_virial = []
num_samples = 25
pressure_via_volume_scaling = pressureViaVolumeScaling(
self.system, self.kT)
for _ in range(num_samples):
self.system.integrator.run(50)
pressures_via_virial.append(
self.system.analysis.pressure()['total'])
pressure_via_volume_scaling.measure_pressure_via_volume_scaling()
pressure_virial = np.mean(pressures_via_virial)
# deviation should be below 5%
abs_deviation_in_percent = 100 * abs(
pressure_virial / pressure_via_volume_scaling.get_result() - 1.0)
np.testing.assert_array_less(abs_deviation_in_percent, 5.0) | [
250,
17915,
4710
] |
def METHOD_NAME( self ) : | [
9,
1300,
171
] |
def METHOD_NAME(string_operation, markers, stack):
def check_marker_format(marker):
"""
Checks if marker has proper suffix.
"""
for s in OperationFactory.__suffix:
if marker.endswith(s):
return
raise NotSupportedOperationException(
"Incorrect marker format {}, suffix is missing.".format(marker)
)
def check_pair_consistency(stack, marker):
"""
Checks if markers do not cross.
You can pop from stack only if end
marker match previous one.
Example OK:
MACRO1.BEGIN
MACRO2.BEGIN
MACRO2.END
MACRO1.END
Example NOT OK:
MACRO1.BEGIN
MACRO2.BEGIN
MACRO1.END
MACRO2.END
"""
top = stack[-1][0]
if top.endswith(OperationFactory.__suffix[0]):
top = top[: -len(OperationFactory.__suffix[0])]
if marker.endswith(OperationFactory.__suffix[-1]):
marker = marker[: -len(OperationFactory.__suffix[-1])]
if top != marker:
raise NotSupportedOperationException(
"Cannot cross markers: {0}, {1}".format(top, marker)
)
"""
Creates the object based on the pre-formatted string.
The string needs to be in the specific format. Each specific value
in the string has to be separated with a `;`. The first field
has to be the name of the operation, the rest are operation
specific values.
:param string_operation: The string describing the operation.
:param markers: The dict describing the pair marker-engine.
:param stack: The stack describing the order of engine changes.
:return: The specific object instantiated based on the string.
"""
id_ = string_operation.split(";")[0]
id_case_sensitive = id_.lower().capitalize()
# checks if id_ is one of memoryoperation classes
mem_ops = getattr(memoryoperations, id_case_sensitive, None)
# if class is not one of memoryoperations
# it means it can be user defined marker
if mem_ops is None:
check_marker_format(id_)
# if id_ is section BEGIN
if id_.endswith(OperationFactory.__suffix[0]):
# BEGIN defined by user
marker_name = id_.partition(".")[0]
if markers is not None and marker_name in markers:
engine = markers[marker_name]
try:
mem_ops = getattr(memoryoperations, engine)
except AttributeError:
raise NotSupportedOperationException(
"Not supported reorder engine: {}".format(engine)
)
# BEGIN but not defined by user
else:
mem_ops = stack[-1][1]
if issubclass(mem_ops, memoryoperations.ReorderBase):
stack.append((id_, mem_ops))
# END section
elif id_.endswith(OperationFactory.__suffix[-1]):
check_pair_consistency(stack, id_)
stack.pop()
mem_ops = stack[-1][1]
# here we have proper memory operation to perform,
# it can be Store, Fence, ReorderDefault etc.
id_ = mem_ops.__name__
if id_ not in OperationFactory.__factories:
OperationFactory.__factories[id_] = mem_ops.Factory()
return OperationFactory.__factories[id_].create(string_operation) | [
129,
2206
] |
def METHOD_NAME(lines, outfile):
outfile.write("\n_Default Value (complex option)_:\n\n....\n")
for line in lines:
outfile.write((" " * 4) + line)
outfile.write("....\n") | [
77,
2587,
1335
] |
def METHOD_NAME(transaction: sqlalchemy.engine.Connection) -> None:
"""Migrate to schema version 1.
This migration adds the following nullable columns to the run table:
- Column("state_summary, sqlalchemy.PickleType, nullable=True)
- Column("commands", sqlalchemy.PickleType, nullable=True)
- Column("engine_status", sqlalchemy.String, nullable=True)
- Column("_updated_at", sqlalchemy.DateTime, nullable=True)
"""
add_summary_column = sqlalchemy.text("ALTER TABLE run ADD state_summary BLOB")
add_commands_column = sqlalchemy.text("ALTER TABLE run ADD commands BLOB")
# NOTE: The column type of `STRING` here is mistaken. SQLite won't recognize it,
# so this column's type affinity will mistakenly default to `NUMERIC`.
# It should be `VARCHAR`, to match what SQLAlchemy uses when creating a new
# database from scratch. Fortunately, for this particular column, this inconsistency
# is harmless in practice because of SQLite's weak typing.
add_status_column = sqlalchemy.text("ALTER TABLE run ADD engine_status STRING")
add_updated_at_column = sqlalchemy.text("ALTER TABLE run ADD _updated_at DATETIME")
transaction.execute(add_summary_column)
transaction.execute(add_commands_column)
transaction.execute(add_status_column)
transaction.execute(add_updated_at_column) | [
2744,
1506,
24,
1170
] |
def METHOD_NAME(eta, u, v, h, f, nu, nonlin=True):
# evaluate equations
if nonlin:
depth = eta + h
else:
depth = h
div_hu = sympy.diff(depth*u, x) + sympy.diff(depth*v, y)
res_elev = sympy.diff(eta, t) + div_hu
u_x = sympy.diff(u, x)
u_y = sympy.diff(u, y)
v_x = sympy.diff(v, x)
v_y = sympy.diff(v, y)
if nonlin:
adv_u = u*u_x + v*u_y
adv_v = u*v_x + v*v_y
else:
adv_u = adv_v = 0
cori_u = -f*v
cori_v = f*u
pg_u = g*sympy.diff(eta, x)
pg_v = g*sympy.diff(eta, y)
visc_u = -(2*sympy.diff(nu*sympy.diff(u, x), x)
+ sympy.diff(nu*sympy.diff(u, y), y)
+ sympy.diff(nu*sympy.diff(v, x), y))
visc_v = -(2*sympy.diff(nu*sympy.diff(v, y), y)
+ sympy.diff(nu*sympy.diff(v, x), x)
+ sympy.diff(nu*sympy.diff(u, y), x))
visc_u += -sympy.diff(depth, x)/depth * nu * 2 * sympy.diff(u, x)
visc_v += -sympy.diff(depth, y)/depth * nu * 2 * sympy.diff(v, y)
res_u = sympy.diff(u, t) + adv_u + cori_u + pg_u + visc_u
res_v = sympy.diff(v, t) + adv_v + cori_v + pg_v + visc_v
return res_elev, res_u, res_v | [
1195,
1458,
3108
] |
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name") | [
156
] |
def METHOD_NAME(serialize):
initial_learning_rate = 0.1
maximal_learning_rate = 1
step_size = 4000
def scale_fn(x):
return 1 / (5 ** (x * 0.0001))
custom_cyclical_lr = cyclical_learning_rate.CyclicalLearningRate(
initial_learning_rate=initial_learning_rate,
maximal_learning_rate=maximal_learning_rate,
step_size=step_size,
scale_fn=scale_fn,
)
custom_cyclical_lr = _maybe_serialized(custom_cyclical_lr, serialize)
for step in range(1, 8001):
cycle = np.floor(1 + step / (2 * step_size))
non_bounded_value = np.abs(step / step_size - 2 * cycle + 1)
expected = initial_learning_rate + (
maximal_learning_rate - initial_learning_rate
) * np.maximum(0, 1 - non_bounded_value) * scale_fn(cycle)
np.testing.assert_allclose(
custom_cyclical_lr(step).numpy(), expected, 1e-6, 1e-6
) | [
9,
343,
7356,
4960,
1585
] |
def METHOD_NAME(self, targs: Iterable[TypedArg]) -> Sequence[Tuple[str, str]]:
'''[override] Post-process function call arguments values to
determine how to display them.
Args:
targs: an iterable of typed args (3-tuples: type, name, value)
Returns: a sequence of arguments (2-tuples: name, string representation of arg value)
'''
def fallback(v):
'''Use original processing method for other types.
'''
# the original method accepts a list and returns a list, so here we
# craft a list containing one 3-tuple, and extracting the single element
# the result list contains. that element is a 2-tuple, from which we
# only need the value
return super(QlOsUefi, self).METHOD_NAME([(None, '', v)])[0][1]
ahandlers: Mapping[Any, Callable[[Any], str]] = {
POINTER : lambda v: f'{v:#010x}' if v else 'NULL',
STRING : lambda v: QlOsUtils.stringify(v),
WSTRING : lambda v: f'L{QlOsUtils.stringify(v)}',
GUID : lambda v: guids_db.get(v.upper(), v) if v else 'NULL'
}
return tuple((aname, ahandlers.get(atype, fallback)(avalue)) for atype, aname, avalue in targs) | [
356,
17018,
434
] |
def METHOD_NAME(self, mean, covariance, measurement):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, w, h), where (x, y)
is the center position, w the width, and h the height of the
bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
self.x = mean.T
self.P = covariance
std = [
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
]
innovation_cov = np.diag(np.square(std))
super().METHOD_NAME(measurement, R=innovation_cov)
return self.x.T, self.P | [
86
] |
def METHOD_NAME():
"""A serving input fn."""
text_features = tf.compat.v1.placeholder(dtype=tf.string, shape=[None])
return tf_estimator.export.ServingInputReceiver(
features={_TEXT_FEATURE_NAME: text_features},
receiver_tensors=text_features) | [
6565,
362,
667
] |
def METHOD_NAME(decomposed_cls):
op = HasKrausWhenDecomposed(decomposed_cls).on(cirq.NamedQubit('test'))
assert cirq.has_kraus(op)
assert not cirq.has_kraus(op, allow_decompose=False) | [
9,
220,
17129,
1646,
9978
] |
def METHOD_NAME(self):
return RandBin(RandNum(0, self.max_length() or 1200)) | [
-1
] |
def METHOD_NAME() -> None:
param = ng.p.Scalar(init=12.0)
v = utils.MultiValue(param, 4, reference=param)
np.testing.assert_equal(v.count, 1)
v.add_evaluation(3)
np.testing.assert_equal(v.count, 2)
np.testing.assert_equal(v.mean, 3.5)
np.testing.assert_equal(v.square, 12.5)
np.testing.assert_almost_equal(v.variance, 0.3536, decimal=4)
assert v.optimistic_confidence_bound < v.pessimistic_confidence_bound
assert v.get_estimation("optimistic") < v.get_estimation("pessimistic")
assert v.get_estimation("minimum") == 3
np.testing.assert_raises(NotImplementedError, v.get_estimation, "blublu")
repr(v)
assert v.parameter.value == 12 | [
9,
99,
61,
1669
] |
def METHOD_NAME(type_, *trans):
"""Return a new type transformed according to the given rules.
Applies each of the transformation rules in trans in order.
If an element of trans is a string, return it.
If an element of trans is a function, call it with type_ as its only
argument.
If an element of trans is a dict, search type_ in its keys. If type_ is
a key, use the value as a transformation rule for type_. Otherwise, if
None is a key use the value as a transformation rule for type_.
Otherwise, return type_.
Parameters
----------
type_ : str
Type to transform.
trans : list of function or dict
Type transformation rules.
"""
if len(trans) == 0:
raise ValueError
res = type_
for t in trans:
res = _transform_type(res, t)
return res | [
1053,
44
] |
def METHOD_NAME(config: ConvexConfig):
setup_good_responses(config)
destination = DestinationConvex()
logger = logging.getLogger("airbyte")
destination.check(logger, config) | [
9,
250
] |
def METHOD_NAME():
"""Get IP."""
result = subprocess.run([
'hostname',
'-i',
], capture_output=True, check=True)
ip = result.stdout.decode().strip()
print('Got cloudbuild host', ip)
return ip | [
19,
1213
] |
def METHOD_NAME(self):
G = nx.path_graph(4)
top_nodes = [0, 2]
X, Y = bipartite.sets(G, top_nodes)
assert X == {0, 2}
assert Y == {1, 3} | [
9,
3625,
2757,
1393,
1635,
480
] |
def METHOD_NAME(self) -> None: ... | [
1462
] |
def METHOD_NAME(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.METHOD_NAME(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.METHOD_NAME(key, node, None)
self.METHOD_NAME(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver() | [
183,
1716
] |
def METHOD_NAME(self, signal):
if signal == "hardware_object_name,stateChanged":
self.motor_state_changed()
elif signal == "position_changed":
self.motor_position_changed()
elif signal == "limitsChanged":
self.distance_min_changed()
self.set_is_ready(True) | [
707,
959
] |
def METHOD_NAME(no_warnings, url):
with pytest.raises(ModuleNotFoundError):
APIClient().get(url) | [
9,
3590,
9942,
298,
245,
512,
168
] |
def METHOD_NAME(self, message):
pass | [
921,
353
] |
def METHOD_NAME(v: Union[str, bytes]) -> bytes:
if isinstance(v, str):
v = v.encode('ascii')
return v | [
2825,
362
] |
def METHOD_NAME(filename):
from sfepy.discrete import Problem
problem = Problem.from_conf_file(filename,
init_equations=False, init_solvers=False)
return problem | [
129,
3095
] |
async def METHOD_NAME():
class Test(object):
def __init__(self):
self.in_call = False
self._my_lock = asyncio.Lock()
@synchronizedmethod("_my_lock")
async def sleep_for_1s(self):
assert self._my_lock.locked(), \
"Lock was not held during function execution!"
assert self.in_call is False, "Multiple concurrent executions!"
self.in_call = True
await asyncio.sleep(1)
self.in_call = False
a = Test()
b = Test()
# Calls to different instances should not block eachother
await asyncio.wait(
[asyncio.create_task(a.sleep_for_1s()) for _ in range(500)] +
[asyncio.create_task(b.sleep_for_1s()) for _ in range(500)],
timeout=502
) | [
9,
9599,
-1
] |