text (string, lengths 15 to 7.82k) | ids (sequence, lengths 1 to 7) |
---|---|
async def METHOD_NAME(QueryComponents):
with dependencies.temporary_query_components(QueryComponents()):
db = dependencies.provide_database_interface()
assert type(db.queries) == QueryComponents | [
9,
7072,
1153,
539,
811
] |
def METHOD_NAME(self, connection):
if connection is not None:
for channel in connection.channels.values():
channel.connection = None
try:
os.close(connection.fileno())
except (OSError, ValueError):
pass
connection.channels.clear()
connection.callbacks.clear()
self.client.drain_events = None
self.client = None | [
1444
] |
def METHOD_NAME(_conda_exe_type: str) -> PathLike:
kwargs = dict(
mamba=False,
micromamba=False,
conda=False,
METHOD_NAME=False,
)
if platform.system().lower() == "windows":
if _conda_exe_type == "micromamba":
pytest.skip(reason="micromamba tests are failing on windows")
kwargs[_conda_exe_type] = True
_conda_exe = _ensureconda(**kwargs)
if _conda_exe is not None:
return _conda_exe
pytest.skip(f"{_conda_exe_type} is not installed") | [
4542,
3198
] |
def METHOD_NAME(self) -> None:
parser = ArgumentParser(prog='chipsec_util iommu', usage=__doc__)
subparsers = parser.add_subparsers()
parser_list = subparsers.add_parser('list')
parser_list.set_defaults(func=self.iommu_list)
parser_config = subparsers.add_parser('config')
parser_config.add_argument('engine', type=str, default='', nargs='?', help='IOMMU Engine')
parser_config.set_defaults(func=self.iommu_config)
parser_status = subparsers.add_parser('status')
parser_status.add_argument('engine', type=str, default='', nargs='?', help='IOMMU Engine')
parser_status.set_defaults(func=self.iommu_status)
parser_enable = subparsers.add_parser('enable')
parser_enable.add_argument('engine', type=str, help='IOMMU Engine')
parser_enable.set_defaults(func=self.iommu_enable)
parser_disable = subparsers.add_parser('disable')
parser_disable.add_argument('engine', type=str, help='IOMMU Engine')
parser_disable.set_defaults(func=self.iommu_disable)
parser_pt = subparsers.add_parser('pt')
parser_pt.add_argument('engine', type=str, default='', nargs='?', help='IOMMU Engine')
parser_pt.set_defaults(func=self.iommu_pt)
parser.parse_args(self.argv, namespace=self) | [
214,
134
] |
def METHOD_NAME():
if request.config.getoption("--skip-fixture-teardown") == "no":
print("Clean up the Disable IPV6 Application:")
delete_common_app(kube_apis, "simple", test_namespace)
delete_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace)
delete_secret(kube_apis.v1, secret_name, test_namespace) | [
3221
] |
def METHOD_NAME(self):
self.overwrite = True
return S_OK() | [
0,
3345
] |
def METHOD_NAME(self):
##in range
self.assert_cmd('{"source": "lbl-pt1.es.net", "dest": "10.0.0.1", "ip-tos": 128}')
##out of range
self.assert_cmd('{"source": "lbl-pt1.es.net", "dest": "10.0.0.1", "ip-tos": "blah"}', expected_valid=False) | [
9,
1213,
3181
] |
def METHOD_NAME(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_head = SOLOHead(num_classes=4, in_channels=1)
cls_scores = torch.empty(0, 80)
mask_preds = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
cls_scores=cls_scores,
mask_preds=mask_preds,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0) | [
9,
2033,
373,
35,
1571
] |
def METHOD_NAME(pointer, expected):
assert resolve_pointer(DOCUMENT, pointer) == expected | [
9,
4347
] |
def METHOD_NAME(host):
"""Normalize a host string."""
if misc.IPv6_MATCHER.match(host):
percent = host.find("%")
if percent != -1:
percent_25 = host.find("%25")
# Replace RFC 4007 IPv6 Zone ID delimiter '%' with '%25'
# from RFC 6874. If the host is '[<IPv6 addr>%25]' then we
# assume RFC 4007 and normalize to '[<IPV6 addr>%2525]'
if (
percent_25 == -1
or percent < percent_25
or (percent == percent_25 and percent_25 == len(host) - 4)
):
host = host.replace("%", "%25", 1)
# Don't normalize the casing of the Zone ID
return host[:percent].lower() + host[percent:]
return host.lower() | [
1137,
1806
] |
def METHOD_NAME(self):
workspaces_client = mock.MagicMock
workspaces_client.workspaces = []
workspaces_client.workspaces.append(
WorkSpace(
id=workspace_id,
region=AWS_REGION,
user_volume_encryption_enabled=True,
root_volume_encryption_enabled=True,
subnet_id="subnet-12345678",
)
)
with mock.patch(
"prowler.providers.aws.services.workspaces.workspaces_service.WorkSpaces",
workspaces_client,
), mock.patch(
"prowler.providers.aws.services.workspaces.workspaces_client.workspaces_client",
workspaces_client,
):
from prowler.providers.aws.services.workspaces.workspaces_volume_encryption_enabled.workspaces_volume_encryption_enabled import (
workspaces_volume_encryption_enabled,
)
check = workspaces_volume_encryption_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert search(
"without root or user unencrypted volumes", result[0].status_extended
)
assert result[0].resource_id == workspace_id
assert result[0].resource_arn == "" | [
9,
5826,
9213
] |
def METHOD_NAME(exctype, exc, tb):
try:
from edb.common import markup
markup.dump(exc, file=sys.stderr)
if _is_internal_error(exc):
print(
f'This is most likely a bug in EdgeDB. '
f'Please consider opening an issue ticket '
f'at https://github.com/edgedb/edgedb/issues/new'
f'?template=bug_report.md'
)
except Exception as ex:
print('!!! exception in edb.excepthook !!!', file=sys.stderr)
# Attach the original exception as a context to top of the new chain,
# but only if it's not already there. Take some care to avoid looping
# forever.
visited = set()
parent = ex
while parent.__cause__ or (
not parent.__suppress_context__ and parent.__context__):
if (parent in visited or parent.__context__ is exc or
parent.__cause__ is exc):
break
visited.add(parent)
parent = parent.__cause__ or parent.__context__
parent.__context__ = exc
parent.__cause__ = None
_old_excepthook(type(ex), ex, ex.__traceback__) | [
6921
] |
def METHOD_NAME(self):
for y in range(self.h):
for x in range(self.w):
if self.get(x, y).edge_right.is_solid():
print(" |", end="")
elif self.get(x, y).edge_right.get_open_range():
print(" ", end="")
else:
print(" ?", end="")
print()
for x in range(self.w):
if self.get(x, y).edge_down.is_solid():
print("-+", end="")
elif self.get(x, y).edge_down.get_open_range():
print(" +", end="")
else:
print("?+", end="")
print()
print() | [
278
] |
def METHOD_NAME():
from desktop.conf import AUDIT_EVENT_LOG_DIR, AUDIT_LOG_MAX_FILE_SIZE
audit_logger = logging.getLogger('audit')
if not filter(lambda hclass: isinstance(hclass, AuditHandler), audit_logger.handlers): # Don't add handler twice
size, unit = int(AUDIT_LOG_MAX_FILE_SIZE.get()[:-2]), AUDIT_LOG_MAX_FILE_SIZE.get()[-2:]
maxBytes = size * 1024 ** (1 if unit == 'KB' else 2 if unit == 'MB' else 3)
audit_handler = AuditHandler(AUDIT_EVENT_LOG_DIR.get(), maxBytes=maxBytes, backupCount=50)
audit_handler.setFormatter(MessageOnlyFormatter())
audit_logger.addHandler(audit_handler)
return audit_logger | [
19,
1422,
2034
] |
def METHOD_NAME():
plat = platform.platform()
if platform.system() == "Linux":
try:
distro = platform.freedesktop_os_release()["PRETTY_NAME"]
except OSError:
# play it safe
distro = "unknown distribution"
return f"{plat} ({distro})"
else:
return plat | [
6213,
112
] |
def METHOD_NAME(self):
self.world = None
self.worldNP.remove_node() | [
950
] |
def METHOD_NAME(input_size):
"""Constructs a ir_se-152 model.
"""
model = Backbone(input_size, 152, 'ir_se')
return model | [
497,
3264,
1305,
1178,
12177
] |
def METHOD_NAME(self, level):
return super().METHOD_NAME(level) | [
0,
5167,
33
] |
def METHOD_NAME(self, sitecheck_logger, serialised_php, key, val):
sitecheck_logger.parse_serialised_php(serialised_php)
fetched_val = sitecheck_logger.prefixed_attr(key)
assert fetched_val == val | [
9,
1047,
6430,
59
] |
def METHOD_NAME(self, the_api, method, unused_resource): | [
1380,
103
] |
def METHOD_NAME():
nn = 64
max_threads = 4
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
def extern_generator(ins, outs):
"""Manually write the IR for the extern function, add pipeline"""
ib = tvm.tir.ir_builder.create()
with ib.for_range(0, (n + 1) // 2) as i:
ib.emit(
outs[0].vstore(
i * 2, ins[0].vload(i * 2, "float32x2") + tvm.tir.const(1, "float32x2")
)
)
return ib.get()
def extern_generator_gpu(ins, outs):
"""Manually write the IR for the extern function, add pipeline"""
ib = tvm.tir.ir_builder.create()
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", (nn + max_threads - 1) // max_threads)
ib.scope_attr(tx, "thread_extent", max_threads)
idx = bx.var * max_threads + tx.var
with ib.if_scope(ib.likely(idx < n)):
ib.emit(
outs[0].vstore(
idx * 2, ins[0].vload(idx * 2, "float32x2") + tvm.tir.const(1, "float32x2")
)
)
return ib.get()
C_cpu = te.extern(A.shape, [A], extern_generator, name="C")
C_gpu = te.extern(A.shape, [A], extern_generator_gpu, name="C")
s_cpu = te.create_schedule(C_cpu.op)
s_gpu = te.create_schedule(C_gpu.op)
print(tvm.lower(s_cpu, [A, C_cpu], simple_mode=True))
print(tvm.lower(s_gpu, [A, C_gpu], simple_mode=True))
def check_target(target):
if not tvm.testing.device_enabled(target):
return
s = s_gpu if target in ["opencl", "cuda"] else s_cpu
C = C_gpu if target in ["opencl", "cuda"] else C_cpu
# build and invoke the kernel.
f = tvm.build(s, [A, C], target)
dev = tvm.device(target, 0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_target("llvm")
check_target("opencl")
check_target("cuda") | [
9,
238,
1148
] |
def METHOD_NAME(coordinates, patch_get):
response = Irsa.query_region_async(
coordinates, catalog='fp_psc', spatial='Cone',
radius=2 * u.arcmin, get_query_payload=True)
assert response['radius'] == 2
assert response['radunits'] == 'arcmin'
response = Irsa.query_region_async(
coordinates, catalog='fp_psc', spatial='Cone', radius=2 * u.arcmin)
assert response is not None | [
9,
539,
1216,
5568,
958
] |
def METHOD_NAME(net, ppc, ppci, result_pm):
options = net._options
# status if result is from multiple grids
multinetwork = False
sol = result_pm["solution"]
ppci["obj"] = result_pm["objective"]
termination_status = str(result_pm["termination_status"])
ppci["success"] = "LOCALLY_SOLVED" in termination_status or "OPTIMAL" in termination_status
ppci["et"] = result_pm["solve_time"]
ppci["f"] = result_pm["objective"]
if "multinetwork" in sol and sol["multinetwork"]:
multinetwork = True
ppc["obj"] = ppci["obj"]
ppc["success"] = ppci["success"]
ppc["et"] = ppci["et"]
ppc["f"] = ppci["f"]
result = {}
for tp, soli in sol["nw"].items():
pm_results_to_ppc_results_one_time_step(ppci, soli)
result[str(int(tp)-1)] = deepcopy(_copy_results_ppci_to_ppc(ppci, ppc, options["mode"]))
else:
if "bus" not in sol:
ppci["success"] = False # PowerModels failed
else:
pm_results_to_ppc_results_one_time_step(ppci, sol)
result = _copy_results_ppci_to_ppc(ppci, ppc, options["mode"])
return result, multinetwork | [
7100,
51,
24,
5171,
51
] |
def METHOD_NAME(plugin):
plugins = set(config['ckan.plugins'].strip().split())
plugins.update(plugin.strip().split())
config['ckan.plugins'] = ' '.join(plugins) | [
557,
2793
] |
def METHOD_NAME(cmd):
"""
When running command under sudoer, or the current user is not root,
wrap crm cluster join command with '<user>@', and for the -N option, too
"""
sudoer = userdir.get_sudoer()
current_user = userdir.getuser()
if sudoer:
user = sudoer
elif current_user != 'root':
user = current_user
else:
return cmd
if re.search('cluster (:?join|geo_join|geo_init_arbitrator)', cmd) and "@" not in cmd:
cmd = re.sub(r'''((?:-c|-N|--qnetd-hostname|--cluster-node)(?:\s+|=)['"]?)(\S{2,}['"]?)''', f'\\1{user}@\\2', cmd)
elif "cluster init" in cmd and ("-N" in cmd or "--qnetd-hostname" in cmd) and "@" not in cmd:
cmd = re.sub(r'''((?:-c|-N|--qnetd-hostname|--cluster-node)(?:\s+|=)['"]?)(\S{2,}['"]?)''', f'\\1{user}@\\2', cmd)
elif "cluster init" in cmd and "--node" in cmd and "@" not in cmd:
search_patt = r"--node [\'\"](.*)[\'\"]"
res = re.search(search_patt, cmd)
if res:
node_str = ' '.join([f"{user}@{n}" for n in res.group(1).split()])
cmd = re.sub(search_patt, f"--node '{node_str}'", cmd)
return cmd | [
503,
1660,
256,
1563
] |
def METHOD_NAME(name: str) -> str:
if name == 't':
return 'Time'
return f"Values[{name}]" | [
1112,
-1,
156
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag") | [
431
] |
def METHOD_NAME(self):
# Slice src so result collapses to a scalar.
src_cube = self.src_cube[:, 1, :]
# Regrid to a single cell with no overlap with masked src cells.
grid_cube = self.grid_cube[2, 1, 3]
res = regrid(src_cube, grid_cube, mdtol=0.8)
self.assertFalse(ma.isMaskedArray(res.data)) | [
9,
1997,
654,
2820
] |
async def METHOD_NAME(route: str, page_state: str):
printd("doComputeInitialState called")
try:
app_dir = paths["app_dir"]
out = await call_compute(app_dir, route, page_state)
return True, out
except Exception:
print("except", traceback.print_exc())
return False, b'' | [
74,
226,
2471,
551
] |
def METHOD_NAME(pt_op):
"""Test whether the logprob for ```pt.max``` for unsupported axis is correctly rejected"""
x = pt.random.normal(0, 1, size=(3, 3))
x.name = "x"
x_m = pt_op(x, axis=-1)
x_m_value = pt.vector("x_value")
with pytest.raises(RuntimeError, match=re.escape("Logprob method not implemented")):
x_max_logprob = logp(x_m, x_m_value) | [
9,
256,
-1,
2227
] |
def METHOD_NAME(availability_set_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAvailabilitySetResult]:
"""
Implements AvailabilitySet GET method.
:param str availability_set_name: Name of the AvailabilitySet.
:param str resource_group_name: The name of the resource group.
"""
... | [
19,
6477,
0,
146
] |
def METHOD_NAME(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# REQUIRED. Value MUST be set to "refresh_token".
if request.grant_type != 'refresh_token':
raise errors.UnsupportedGrantTypeError(request=request)
for validator in self.custom_validators.pre_token:
validator(request)
if request.refresh_token is None:
raise errors.InvalidRequestError(
description='Missing refresh token parameter.',
request=request)
# Because refresh tokens are typically long-lasting credentials used to
# request additional access tokens, the refresh token is bound to the
# client to which it was issued. If the client type is confidential or
# the client was issued client credentials (or assigned other
# authentication requirements), the client MUST authenticate with the
# authorization server as described in Section 3.2.1.
# https://tools.ietf.org/html/rfc6749#section-3.2.1
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Invalid client (%r), denying access.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
# REQUIRED. The refresh token issued to the client.
log.debug('Validating refresh token %s for client %r.',
request.refresh_token, request.client)
if not self.request_validator.validate_refresh_token(
request.refresh_token, request.client, request):
log.debug('Invalid refresh token, %s, for client %r.',
request.refresh_token, request.client)
raise errors.InvalidGrantError(request=request)
original_scopes = utils.scope_to_list(
self.request_validator.get_original_scopes(
request.refresh_token, request))
if request.scope:
request.scopes = utils.scope_to_list(request.scope)
if (not all((s in original_scopes for s in request.scopes))
and not self.request_validator.is_within_original_scope(
request.scopes, request.refresh_token, request)):
log.debug('Refresh token %s lack requested scopes, %r.',
request.refresh_token, request.scopes)
raise errors.InvalidScopeError(request=request)
else:
request.scopes = original_scopes
for validator in self.custom_validators.post_token:
validator(request) | [
187,
466,
377
] |
def METHOD_NAME(mocker):
import flask
from flask import Blueprint
from app import init_app
from app.authentication.auth import AuthError
from app.v2.errors import BadRequestError, JobIncompleteError, TooManyRequestsError
app = flask.Flask(__name__)
app.config["TESTING"] = True
init_app(app)
from app import statsd_client
statsd_client.init_app(app)
from app.v2.errors import register_errors
blue = Blueprint("v2_under_test", __name__, url_prefix="/v2/under_test")
@blue.route("/raise_auth_error", methods=["GET"])
def raising_auth_error():
raise AuthError("some message", 403)
@blue.route("/raise_bad_request", methods=["GET"])
def raising_bad_request():
raise BadRequestError(message="you forgot the thing")
@blue.route("/raise_too_many_requests", methods=["GET"])
def raising_too_many_requests():
raise TooManyRequestsError(sending_limit="452")
@blue.route("/raise_validation_error", methods=["GET"])
def raising_validation_error():
from app.schema_validation import validate
from app.v2.notifications.notification_schemas import post_sms_request
validate({"template_id": "bad_uuid"}, post_sms_request)
@blue.route("raise_data_error", methods=["GET"])
def raising_data_error():
raise DataError("There was a db problem", "params", "orig")
@blue.route("raise_job_incomplete_error", methods=["GET"])
def raising_job_incomplete_error():
raise JobIncompleteError("Raising job incomplete error")
@blue.route("raise_exception", methods=["GET"])
def raising_exception():
raise AssertionError("Raising any old exception")
register_errors(blue)
app.register_blueprint(blue)
return app | [
991,
43,
9
] |
def METHOD_NAME(self):
"""
check that newspace_dims is equal to the (sorted) concatenation of dim over newspaces with this level and weight
"""
# TIME about 5s
return self.check_crosstable_aggregate('mf_newspaces', 'newspace_dims', ['level', 'weight'], 'dim', sort=['char_orbit_index']) | [
250,
13352,
-1,
2904
] |
def METHOD_NAME(self,
tgt,
memory,
cache):
mask = subsequent_mask(tgt.size(-1)).unsqueeze(0) # (B, T)
x = self.embed(tgt)
mask = self.prepare_mask(mask)
new_cache = []
for c, decoder in zip(cache, self.model.decoders):
x, mask = decoder(x, mask, memory, None, c)
new_cache.append(x)
x = x[:, 1:, :]
if self.model.normalize_before:
y = self.model.after_norm(x[:, -1])
else:
y = x[:, -1]
if self.model.output_layer is not None:
y = torch.log_softmax(self.model.output_layer(y), dim=-1)
return y, new_cache | [
76
] |
def METHOD_NAME(context, shift: Shift, fill_parent=False):
context["shift"] = shift_to_block_object(shift, fill_parent)
return context | [
929,
573
] |
def METHOD_NAME():
client_grpc = weaviate.Client(
"http://localhost:8080",
additional_config=Config(grpc_port_experimental=50051),
)
client_grpc.schema.delete_all()
client_grpc.schema.create_class(CLASS1)
client_grpc.data_object.create({"test": "test"}, "Test", vector=VECTOR)
client_gql = weaviate.Client("http://localhost:8080")
results = []
for client in [client_gql, client_grpc]:
query = client.query.get("Test").with_additional(
weaviate.AdditionalProperties(
uuid=True,
vector=True,
creationTimeUnix=True,
lastUpdateTimeUnix=True,
distance=True,
)
)
result = query.do()
assert "Test" in result["data"]["Get"]
results.append(result)
result_gql = results[0]["data"]["Get"]["Test"][0]["_additional"]
result_grpc = results[1]["data"]["Get"]["Test"][0]["_additional"]
assert sorted(result_gql.keys()) == sorted(result_grpc.keys())
for key in result_gql.keys():
assert result_gql[key] == result_grpc[key] | [
9,
2900
] |
def METHOD_NAME(self):
"""
Ensures that duplicate bookmarks cannot be created but that the client
receives the data they would have if it were. We expect it to return 200
because the request is ultimately OK - but we didn't create anything.
"""
self.client.post("/api/bookmarks/bookmarks/", self.user1_data, format="json")
duplicate_response = self.client.post(
"/api/bookmarks/bookmarks/", self.user1_data, format="json"
)
self.assertEqual(duplicate_response.status_code, HTTP_400_BAD_REQUEST) | [
9,
129,
1119,
11283
] |
def METHOD_NAME(self, addr: _Address | bytes) -> int: ... | [
707,
2258
] |
def METHOD_NAME(app, name, obj, options, bases):
bases[:] = [None.__class__ if x.__name__ == "pybind11_object" else x for x in bases] | [
527,
7346
] |
def METHOD_NAME(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_raw_transaction(self.nodes[0], coinbase_txids[1], node1_address, amount=49.99)
spend_102_raw = create_raw_transaction(self.nodes[0], coinbase_txids[2], node0_address, amount=49.99)
spend_103_raw = create_raw_transaction(self.nodes[0], coinbase_txids[3], node0_address, amount=49.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26, 'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_raw_transaction(self.nodes[0], spend_102_id, node1_address, amount=49.98)
spend_103_1_raw = create_raw_transaction(self.nodes[0], spend_103_id, node1_address, amount=49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set()) | [
22,
9
] |
def METHOD_NAME(self, headers):
server_version = headers.get("server", "")
match = VERSION_REGEX.match(server_version)
if match is None:
self.log.debug("Lighttpd server version is Unknown")
return None, "Unknown"
full_version = match.group(1)
server_version = int(match.group(2))
self.log.debug("Lighttpd server version is %s", server_version)
return full_version, server_version | [
19,
163,
281
] |
def METHOD_NAME(
self, *, key_id: str, key_server: str = "keyserver.ubuntu.com"
) -> None:
"""Install key from specified key server.
:param key_id: Key ID to install.
:param key_server: Key server to query.
:raises: AptGPGKeyInstallError if unable to install key.
"""
env = dict()
env["LANG"] = "C.UTF-8"
cmd = [
"sudo",
"apt-key",
"--keyring",
str(self._gpg_keyring),
"adv",
"--keyserver",
key_server,
"--recv-keys",
key_id,
]
try:
logger.debug(f"Executing: {cmd!r}")
subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
env=env,
)
except subprocess.CalledProcessError as error:
raise errors.AptGPGKeyInstallError(
output=error.output.decode(), key_id=key_id, key_server=key_server
) | [
428,
59,
280,
-1
] |
def METHOD_NAME(__handle: int, __exitCode: int) -> None: ... | [
1602,
356
] |
def METHOD_NAME(ndata: int, header_ints: list[int], size: int):
unused_table_id, nnodes_nbytes, nwords, *other = header_ints
nvalues = ndata // 4
assert nvalues > 0
assert ndata % 4 == 0
try:
# assume nodes
nnodes = nnodes_nbytes
numwide = nvalues // nnodes
assert nvalues % nnodes == 0
if size == 4:
assert numwide in [7, 10], numwide
else:
assert numwide == 14, numwide
except AssertionError:
# calculate the bytes
if size == 4:
numwide = 7
elif numwide == 8:
numwide = 14
nnodes = nvalues // numwide
assert ndata == nnodes * numwide * 4
return nnodes, numwide | [
19,
-1,
-1
] |
def METHOD_NAME(size_n, d, which_prior):
e = np.sqrt( (1-np.exp(-0.8))/2.88)
if which_prior == 1 :
return uniform(0, 1, size=(size_n, d))
elif which_prior == 2 :
return uniform(0, e, size=(size_n, d))
elif which_prior == 3 :
return uniform(e, 1, size=(size_n, d)) | [
734,
7404
] |
def METHOD_NAME(self):
return True | [
1489,
2264
] |
def METHOD_NAME(t: torch.Tensor):
assert len(t.shape) <= len(batch[self.adv_key].shape)
return t.view(
t.shape + ((1,) * (len(batch[self.adv_key].shape) - len(t.shape)))
) | [
238,
4812,
2904
] |
def METHOD_NAME(inp, oup, stride):
return nn.Sequential(
OrderedDict([
("0", Conv2d_tf(inp, oup, 3, stride, padding="SAME", bias=False)),
("0/BatchNorm", BiasAdd(oup)),
("0/ReLU", nn.ReLU6(inplace=True)),
])) | [
1306,
3849
] |
def METHOD_NAME(self, loglevel):
if self.zigbee_communication == "zigpy":
self.log.logging( "zigateCommand", "Debug","zigate_set_loglevel %s not implemennted in zigpy" %loglevel)
return
self.log.logging( "zigateCommand", "Debug","zigate_set_loglevel %s" %loglevel) | [
3766,
0,
12453
] |
def METHOD_NAME(evt: Event, var: GameState):
GUARDED.clear()
LASTGUARDED.clear()
PASSED.clear() | [
69,
656
] |
def METHOD_NAME(next, token):
name = token[1]
token = next()
if token[0] != '(':
raise ValueError("Expected '(' after function name '%s'" % name)
predicate = handle_predicate(next, token)
return name, predicate | [
214,
717
] |
def METHOD_NAME(gateway, account, error):
print('\033[31m {} -> {} 原因: {} \033[0m\n'.format(gateway, account, error))
gateway.set_connectivity(Connectivity.ERR)
if not account:
return
account.set_connectivity(Connectivity.ERR) | [
69,
1806,
168
] |
def METHOD_NAME(s): | [
-1
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Specifies the state of the resource as it is getting provisioned. Value of "Succeeded" means the Manager was successfully created.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
parser.add_argument('--data_path', default='./data/timit', type=str, help='Path to raw TIMIT dataset')
parser.add_argument('--output_path', default='./data/', type=str, help='Path to store output', required=False)
parser.add_argument('--feature_type', default='mel', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
parser.add_argument('--delta', default=True, type=boolean_string, help='Append Delta', required=False)
parser.add_argument('--delta_delta', default=False, type=boolean_string, help='Append Delta Delta', required=False)
parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
parser.add_argument('--n_jobs', default=-1, type=int, help='Number of jobs used for feature extraction', required=False)
parser.add_argument('--name', default='None', type=str, help='Name of the output directory', required=False)
args = parser.parse_args()
return args | [
19,
666,
335
] |
def METHOD_NAME(self):
np.random.seed(seed=42)
num_part = 200
positions = np.random.random((num_part, 3))
dipoles = tests_common.random_dipoles(num_part)
self.system.part.add(pos=positions * self.system.box_l,
dip=dipoles, rotation=num_part * [3 * [True]])
self.minimize() | [
238,
6919,
5682
] |
def METHOD_NAME():
ind1 = {
"created": "2017-01-27T13:49:53.935Z",
"id": "indicator--00000000-0000-4000-8000-000000000001",
"labels": [
"malicious-activity",
],
"modified": "2017-01-27T13:49:53.935Z",
"name": "Malicious site hosting downloader",
"pattern": "[url:value = 'http://x4z9arb.cn/4712']",
"type": "indicator",
"valid_from": "2017-01-27T13:49:53.935382Z",
}
ind2 = {
"created": "2017-01-27T13:49:53.935Z",
"id": "indicator--00000000-0000-4000-8000-000000000001",
"labels": [
"malicious-activity",
],
"modified": "2017-01-27T13:49:53.935Z",
"name": "Malicious site hosting downloader",
"pattern": "[url:value = 'http://x4z9arb.cn/4712']",
"type": "indicator",
"valid_from": "2017-01-27T13:49:53.935382Z",
}
ind3 = {
"created": "2017-01-27T13:49:53.935Z",
"id": "indicator--00000000-0000-4000-8000-000000000001",
"labels": [
"malicious-activity",
],
"modified": "2017-01-27T13:49:53.936Z",
"name": "Malicious site hosting downloader",
"pattern": "[url:value = 'http://x4z9arb.cn/4712']",
"type": "indicator",
"valid_from": "2017-01-27T13:49:53.935382Z",
}
ind4 = {
"created": "2017-01-27T13:49:53.935Z",
"id": "indicator--00000000-0000-4000-8000-000000000002",
"labels": [
"malicious-activity",
],
"modified": "2017-01-27T13:49:53.935Z",
"name": "Malicious site hosting downloader",
"pattern": "[url:value = 'http://x4z9arb.cn/4712']",
"type": "indicator",
"valid_from": "2017-01-27T13:49:53.935382Z",
}
ind5 = {
"created": "2017-01-27T13:49:53.935Z",
"id": "indicator--00000000-0000-4000-8000-000000000002",
"labels": [
"malicious-activity",
],
"modified": "2017-01-27T13:49:53.935Z",
"name": "Malicious site hosting downloader",
"pattern": "[url:value = 'http://x4z9arb.cn/4712']",
"type": "indicator",
"valid_from": "2017-01-27T13:49:53.935382Z",
}
return [ind1, ind2, ind3, ind4, ind5] | [
7460,
15664
] |
def METHOD_NAME(self, url):
response = self.http.get(url)
data = response.json()
status_code = response.status_code
return data, status_code | [
1444,
17
] |
def METHOD_NAME(logged_in_users_only=False):
"""
List information about the sessions.
.. versionadded:: 2016.11.0
:param logged_in_users_only: If True, only return sessions with users logged in.
:return: A list containing dictionaries of session information.
CLI Example:
.. code-block:: bash
salt '*' rdp.list_sessions
"""
ret = list()
server = win32ts.WTS_CURRENT_SERVER_HANDLE
protocols = {
win32ts.WTS_PROTOCOL_TYPE_CONSOLE: "console",
win32ts.WTS_PROTOCOL_TYPE_ICA: "citrix",
win32ts.WTS_PROTOCOL_TYPE_RDP: "rdp",
}
statuses = {
win32ts.WTSActive: "active",
win32ts.WTSConnected: "connected",
win32ts.WTSConnectQuery: "connect_query",
win32ts.WTSShadow: "shadow",
win32ts.WTSDisconnected: "disconnected",
win32ts.WTSIdle: "idle",
win32ts.WTSListen: "listen",
win32ts.WTSReset: "reset",
win32ts.WTSDown: "down",
win32ts.WTSInit: "init",
}
for session in win32ts.WTSEnumerateSessions(server):
user = (
win32ts.WTSQuerySessionInformation(
server, session["SessionId"], win32ts.WTSUserName
)
or None
)
protocol_id = win32ts.WTSQuerySessionInformation(
server, session["SessionId"], win32ts.WTSClientProtocolType
)
status_id = win32ts.WTSQuerySessionInformation(
server, session["SessionId"], win32ts.WTSConnectState
)
protocol = protocols.get(protocol_id, "unknown")
connection_status = statuses.get(status_id, "unknown")
station = session["WinStationName"] or "Disconnected"
connection_info = {
"connection_status": connection_status,
"protocol": protocol,
"session_id": session["SessionId"],
"station": station,
"user": user,
}
if logged_in_users_only:
if user:
ret.append(connection_info)
else:
ret.append(connection_info)
if not ret:
_LOG.warning("No sessions found.")
return sorted(ret, key=lambda k: k["session_id"]) | [
245,
5887
] |
def METHOD_NAME(self, scraper):
raw = scraper.request(self.feed_url).decode('unicode-escape')
data = json.loads(raw)
return list(map(BixiStation.from_json_xml, data)) | [
19,
763,
399,
5739
] |
def METHOD_NAME(self, locator: Union[WebElement, str]):
"""Sets frame identified by ``locator`` as the current frame.
See the `Locating elements` section for details about the locator
syntax.
Works both with frames and iframes. Use `Unselect Frame` to cancel
the frame selection and return to the main frame.
Example:
| `Select Frame` | top-frame | # Select frame with id or name 'top-frame' |
| `Click Link` | example | # Click link 'example' in the selected frame |
| `Unselect Frame` | | # Back to main frame. |
| `Select Frame` | //iframe[@name='xxx'] | # Select frame using xpath |
"""
self.info(f"Selecting frame '{locator}'.")
element = self.find_element(locator)
self.driver.switch_to.frame(element) | [
1472,
896
] |
def METHOD_NAME(self, filter_cls):
if filter_cls.name not in self._filters:
self._filters[filter_cls.name] = filter_cls | [
238
] |
def METHOD_NAME(self) -> PILImage.Image:
"""Return a handle to our PIL image.
Returns:
PIL.Image.Image
PIL image representation of this data model object.
"""
return self._image_data | [
947,
1218
] |
def METHOD_NAME(self) -> int:
return pulumi.get(self, "retained_release_count") | [
12602,
586,
29
] |
def METHOD_NAME(self):
super().METHOD_NAME()
# Patch in a memory-based user service instead of using the persistent version
course_tag_api = MemoryCourseTagAPI()
self.user_service_patcher = patch(
'openedx.core.djangoapps.user_api.partition_schemes.course_tag_api', course_tag_api
)
self.user_service_patcher.start()
self.addCleanup(self.user_service_patcher.stop)
# Create a test user
self.user = UserFactory.create() | [
0,
1
] |
def METHOD_NAME(self, *, mel):
pass | [
76
] |
def METHOD_NAME(self, event):
try:
self.display_size = [int(s) for s in self.tPSFROI.GetValue().split(',')]
#self.view.psfROISize = psfROISize
self.view.Refresh()
except:
pass | [
69,
-1
] |
def METHOD_NAME(d: FILTER_ARGS) -> Optional[str]:
for k in keys:
if k in d: # type: ignore[operator] # We assume FILTER_ARGS is a dict
return None
return f"Filter must contain at least one of: {keys}" | [
2889
] |
def METHOD_NAME(*, build_url, year, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
urls = []
url = build_url
url = url.replace('__year__', str(year))
urls.append(url)
return urls | [
6749,
1305,
-1,
2284,
12633,
741,
1087
] |
def METHOD_NAME(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""This endpoint retrieves the previous and current LDAP password for
the associated account (or rotate if required)
:param name: Specifies the name of the role to request credentials from.
:type name: str | unicode
:param mount_point: Specifies the place where the secrets engine will be accessible (default: ad).
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = utils.format_url("/v1/{}/creds/{}", mount_point, name)
return self._adapter.get(
url=api_path,
) | [
567,
3568
] |
def METHOD_NAME(self):
self.runsafe(self.do_all_no_constraint) | [
9,
75,
654,
1126
] |
def METHOD_NAME(self, data: List[str]) -> List[str]:
current_e = 0.0
to_compensate = 0 # Used when extrusion mode is relative.
is_active = False # Whether retract-continue is in effect.
current_pos = Vector(0.0, 0.0, 0.0)
last_pos = Vector(0.0, 0.0, 0.0)
extra_retraction_speed = self.getSettingValueByKey("extra_retraction_speed")
relative_extrusion = Application.getInstance().getGlobalContainerStack().getProperty(
"relative_extrusion", "value"
)
for layer_number, layer in enumerate(data):
lines = layer.split("\n")
for line_number, line in enumerate(lines):
# Focus on move-type lines.
code_g = self.getValue(line, "G")
if code_g not in [0, 1]:
continue
# Track X,Y,Z location.
last_pos = last_pos.set(current_pos.x, current_pos.y, current_pos.z)
current_pos = current_pos.set(
self.getValue(line, "X", current_pos.x),
self.getValue(line, "Y", current_pos.y),
self.getValue(line, "Z", current_pos.z)
)
# Track extrusion 'axis' position.
last_e = current_e
e_value = self.getValue(line, "E")
if e_value:
current_e = (current_e if relative_extrusion else 0) + e_value
# Handle lines: Detect retractions and compensate relative if G1, potential retract-continue if G0.
if code_g == 1:
if last_e > (current_e + 0.0001): # Account for floating point inaccuracies.
# There is a retraction, each following G0 command needs to continue the retraction.
is_active = True
continue
elif relative_extrusion and is_active:
# If 'relative', the first G1 command after the total retraction will have to compensate more.
travel, f = self._getTravelMove(lines[line_number], current_pos)
lines[line_number] = self._travelMoveString(travel, f, to_compensate + e_value)
to_compensate = 0.0
# There is no retraction (see continue in the retract-clause) and everything else has been handled.
is_active = False
elif code_g == 0:
if not is_active:
continue
# The retract-continue is active, so each G0 until the next extrusion needs to continue retraction.
travel, f = self._getTravelMove(lines[line_number], current_pos)
travel_length = (current_pos - last_pos).length()
extra_retract = travel_length * extra_retraction_speed
new_e = (0 if relative_extrusion else current_e) - extra_retract
to_compensate += extra_retract
current_e -= extra_retract
lines[line_number] = self._travelMoveString(travel, f, new_e)
new_layer = "\n".join(lines)
data[layer_number] = new_layer
return data | [
750
] |
def METHOD_NAME(self, y_true, y_pred):
return ops.vectorized_map(
lambda y_true_and_pred: self.regression_loss(
y_true_and_pred[0], y_true_and_pred[1]
),
(y_true, y_pred),
) | [
128
] |
def METHOD_NAME(op_types):
"""The class decorator used to register all QOperator subclasses."""
def decorator_op(cls):
assert cls.__name__.endswith(
"Operator"
), "The name of subclass of QOperator should end with 'Operator' substring."
if cls.__name__[: -len("Operator")] in QOPERATORS: # pragma: no cover
raise ValueError("Cannot have two operators with the same name.")
for single_op_type in [op_type.strip() for op_type in op_types.split(",")]:
if single_op_type.startswith("QLinear") or single_op_type in [
"QGemm",
"QAttention",
"QEmbedLayerNormalization",
"ArgMax",
"Reshape",
"Transpose",
"Squeeze",
"Unsqueeze",
"Gather",
"MaxPool",
"Pad",
"Resize",
"Split",
]:
QOPERATORS[single_op_type] = cls
return cls
return decorator_op | [
-1,
510
] |
def METHOD_NAME(self) -> str:
"""
Description of the slot type.
"""
return pulumi.get(self, "description") | [
1067
] |
def METHOD_NAME(self) -> str:
"""Returns the description of the job."""
return self._description | [
1067
] |
def METHOD_NAME(test_img, reference_img, metric="MAE"):
"""Compare images at paths test_img and reference_img
Use imagemagick to calculate distortion using the given metric.
You can view the available metrics with 'convert -list metric'.
"""
cmd = [
"convert",
test_img,
reference_img,
"-metric",
metric,
"-compare",
"-format",
"%[distortion]\n",
"info:",
]
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
print("stdout", stdout.decode())
print("stderr", stderr.decode())
print("cmd", cmd)
return float(stdout.decode().strip()) | [
979,
3669
] |
def METHOD_NAME(self, X):
return self.getPipeline().METHOD_NAME(X) | [
2103,
2550
] |
def METHOD_NAME(self) -> bool:
"""
Returns:
bool: True if server is recovered from an unavailable state.
"""
if len(self._state) < 2:
self._recover = False
return False
if self._recover:
self._recover = False
return True
return False | [
137,
17463
] |
def METHOD_NAME(self, len):
return "strlen(name)=%s" %len | [
19,
390,
169
] |
def METHOD_NAME(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1) | [
9,
2148
] |
def METHOD_NAME(nodes: Iterable[T], next_: NextFunc, _count=None) -> Iterator[T]:
if _count is None:
_count = count(1)
for node in nodes:
yield node
yield from METHOD_NAME(next_(node), next_, _count)
if (i := next(_count)) and i > 20000:
raise RecursionError(f'The tree has either more then={20000} nodes '
f'or most likely it is circular') | [
4715,
413,
4716
] |
def METHOD_NAME(en_tokenizer, punct, text):
tokens = en_tokenizer(text + punct)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == punct | [
9,
4334,
1345,
3286,
1462,
8225
] |
def METHOD_NAME(self):
signal.signal(signal.SIGINT, self._handler)
signal.signal(signal.SIGTERM, self._handler)
if os.name != "nt":
signal.signal(signal.SIGHUP, self._handler) | [
176
] |
def METHOD_NAME(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg) | [
638,
1389,
926
] |
def METHOD_NAME() -> None:
class SubclassMiddleware(AbstractMiddleware):
exclude_opt_key = "exclude_route"
async def __call__(self, scope: "Scope", receive: "Receive", send: "Send") -> None:
async def _send(message: "Message") -> None:
if message["type"] == "http.response.start":
headers = MutableScopeHeaders(message)
headers.add("test", str(123))
await send(message)
await self.app(scope, receive, _send)
@get("/", exclude_route=True)
def handler() -> dict:
return {"hello": "world"}
with create_test_client(handler, middleware=[DefineMiddleware(SubclassMiddleware)]) as client:
response = client.get("/")
assert "test" not in response.headers | [
9,
982,
604,
1671,
59
] |
def METHOD_NAME(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
lm_logits, output_metadata = model(**sample["net_input"])
# reshape lm_logits from (N,T,C) to (N*T,C)
lm_logits = lm_logits.view(-1, lm_logits.size(-1))
lm_targets = sample["lm_target"].view(-1)
lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
# compute the number of tokens for which loss is computed. This is used
# to normalize the loss
ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
loss = lm_loss / ntokens
nsentences = sample["nsentences"]
# nsentences = 0
# Compute sentence loss if masked_lm_only is False
sentence_loss = None
if not self.masked_lm_only:
sentence_logits = output_metadata["sentence_logits"]
sentence_targets = sample["sentence_target"].view(-1)
# This needs to be recomputed due to some differences between
# TokenBlock and BlockPair dataset. This can be resolved with a
# refactor of BERTModel which we will do in the future.
# TODO: Remove this after refactor of BERTModel
nsentences = sentence_targets.size(0)
# Check for logits being none which can happen when remove_heads
# is set to true in the BERT model. Ideally we should set
# masked_lm_only to true in this case, but that requires some
# refactor in the BERT model.
if sentence_logits is not None:
sentence_loss = compute_cross_entropy_loss(
sentence_logits, sentence_targets
)
loss += self.nsp_loss_weight * (sentence_loss / nsentences)
# NOTE: as we are summing up per token mlm loss and per sentence nsp loss
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"lm_loss": utils.item(lm_loss.data) if reduce else lm_loss.data,
# sentence loss is not always computed
"sentence_loss": (
(utils.item(sentence_loss.data) if reduce else sentence_loss.data)
if sentence_loss is not None
else 0.0
),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output | [
76
] |
def METHOD_NAME(self):
return self.files | [
19,
171
] |
def METHOD_NAME():
c = Canvas()
m1 = c.addGen( Wire( nm='m1', layer='M1', direction='v',
clg=UncoloredCenterLineGrid( width=400, pitch=720, repeat=2),
spg=EnclosureGrid( pitch=720, stoppoint=360)))
m2 = c.addGen( Wire( nm='m2', layer='M2', direction='h',
clg=UncoloredCenterLineGrid( width=400, pitch=720, repeat=5),
spg=EnclosureGrid( pitch=720, stoppoint=360)))
v1 = c.addGen( Via( nm='v1', layer='via1', h_clg=m2.clg, v_clg=m1.clg))
for i in [0,2,4]:
c.addWire( m1, 'a', i, (0,1), (4,-1))
for i in [1,3,5]:
c.addWire( m1, 'b', i, (0,1), (4,-1))
c.addWireAndViaSet( 'a', m2, v1, 2, [(0,0), (1,0), (2,0)])
c.addWireAndViaSet( 'b', m2, v1, 1, [(0,1), (1,1), (2,1)])
print( c.terminals)
c.computeBbox()
fn = "__json_via_set2"
data = { 'bbox' : c.bbox.toList(),
'globalRoutes' : [],
'globalRouteGrid' : [],
'terminals' : c.removeDuplicates()}
with open( mydir / (fn + "_cand"), "wt") as fp:
fp.write( json.dumps( data, indent=2) + '\n')
with open( mydir / (fn + "_gold"), "rt") as fp:
data2 = json.load( fp)
assert data == data2 | [
9,
1603
] |
def METHOD_NAME(self):
psycopg2.extras.register_default_jsonb(
conn_or_curs=self._cursor, loads=lambda x: x
) | [
9,
372,
119
] |
def METHOD_NAME(parameter, default=missing): | [
19,
772
] |
def METHOD_NAME(self, instance):
host = instance.get('host')
port = int(instance.get('port', 2222)) # 2222 is default
tags = instance.get('tags', [])
if tags is None:
tags = []
service_check_tags = ['host:{}'.format(host), 'port:{}'.format(port)] + tags
service_check_tags = list(set(service_check_tags))
try:
addrs = socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP)
except socket.gaierror as e:
self.log.warning("Unable to retrieve address info for %s:%s - %s", host, port, e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
return None
response = ""
for addr in addrs:
try:
if addr[1] == socket.SOCK_STREAM:
client = socket.socket(*addr[0:3])
client.connect(addr[-1])
self.log.debug("Querying: %s:%s", host, port)
while 1:
data = ensure_unicode(client.recv(1024))
if not data:
break
response = ''.join([response, data])
client.close()
break
except socket.error as e:
self.log.warning("Unable to connect to %s - %s", addr[-1], e)
status = AgentCheck.OK if response else AgentCheck.CRITICAL
self.service_check(self.SERVICE_CHECK_NAME, status, tags=service_check_tags)
return response | [
19,
365
] |
def METHOD_NAME(self):
BasePickler.METHOD_NAME(self)
self._canonical.clear()
self.bamWriter = BamWriter() | [
537,
12260
] |
def METHOD_NAME(self, subcommand, *args, **kwargs):
""" Dispatch sub-commands: create, delete.
"""
if subcommand == "create":
self.handle_create(
kwargs.pop('product_type_name')[0],
kwargs.pop('mask_type_name')[0],
*args, **kwargs
)
elif subcommand == "delete":
self.handle_delete(
kwargs.pop('product_type_name')[0],
kwargs.pop('mask_type_name')[0],
*args, **kwargs
)
elif subcommand == "list":
self.handle_list(
kwargs.pop('product_type_name')[0], *args, **kwargs
) | [
276
] |
def METHOD_NAME(fromFile, toFile):
print("copying contents")
with open(fromFile, "rb") as f2, open(toFile, "wb") as f1:
shutil.copyfileobj(f2, f1) | [
215,
192
] |
def METHOD_NAME(full_path, user):
'''
Sets up filepaths for the rest of the script.
This also checks if a mediaconch xml already exists.
'''
desktop_logs_dir = ififuncs.make_desktop_logs_dir()
log_name_source_ = os.path.basename(full_path) + time.strftime("_%Y_%m_%dT%H_%M_%S")
log_name_source = "%s/%s_mediaconch_validation.log" % (desktop_logs_dir, log_name_source_)
filename = os.path.basename(full_path)
object_dir = os.path.dirname(full_path)
parent_dir = os.path.dirname(object_dir)
sip_root = os.path.dirname(parent_dir)
metadata_dir = os.path.join(parent_dir, 'metadata')
manifest = os.path.join(
sip_root, os.path.basename(parent_dir) + '_manifest.md5'
)
if not os.path.isfile(manifest):
print('manifest does not exist %s' % manifest)
return 'skipping'
if os.path.isdir(metadata_dir):
mediaconch_xmlfile_basename = '%s_mediaconch_validation.xml' % filename
mediaconch_xmlfile = os.path.join(
metadata_dir, mediaconch_xmlfile_basename
)
if os.path.isfile(mediaconch_xmlfile):
print('mediaconch xml already exists')
return 'skipping'
else:
print('no metadata directory found. Exiting.')
return log_name_source, user, mediaconch_xmlfile, manifest, full_path, parent_dir | [
102
] |
def METHOD_NAME(img):
""" This only saves the rgb channels of the image
"""
imgformat = img.format()
if imgformat == "bgra32":
bgr = img.bmp_array[:, :, :3]
rgb = bgr[:, :, ::-1].copy()
pil_img = Image.fromarray(rgb, "RGB")
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'img.bmp')
pil_img.METHOD_NAME(path)
else:
raise NotImplementedError(
"currently only supports writing out bgra32 images"
) | [
73
] |
def METHOD_NAME(self, theta_x, phi_x):
pairwise_weight = paddle.matmul(theta_x, phi_x)
pairwise_weight /= pairwise_weight.shape[-1]
return pairwise_weight | [
1903,
1188
] |
def METHOD_NAME(self) -> Generator[None, None, None]:
if "true" not in self.precision:
yield
return
default_dtype = torch.get_default_dtype()
torch.set_default_dtype(self._desired_dtype)
yield
torch.set_default_dtype(default_dtype) | [
176,
198
] |