text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7)
---|---
async def METHOD_NAME(self, script, *args):
# Always use no storage here to prevent settings from leaking between tests
argv = [script, "--storage", "none"] + list(args)
inputs = "\n".join(self.inputs) + "\n"
with capture_output(argv, inputs) as (out, err):
udns_port = str(self.fake_udns.port)
with patch.dict("os.environ", {"PYATV_UDNS_PORT": udns_port}):
with fake_udns.stub_multicast(self.fake_udns, self.loop):
with faketime("pyatv", 0):
# Stub away port knocking and ignore result (not tested here)
with patch("pyatv.support.knock.knock") as mock_knock:
async def _no_action(*args):
pass
mock_knock.side_effect = _no_action
module = import_module(f"pyatv.scripts.{script}")
self.retcode = await module.appstart(self.loop)
self.stdout = out.getvalue()
self.stderr = err.getvalue() | [
22,
782
] |
def METHOD_NAME():
actuals = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 1]], dtype=tf.float32)
predictions = tf.constant(
[[0.97, 0.56, 0.83, 0.77], [0.34, 0.95, 0.7, 0.89], [0.95, 0.45, 0.23, 0.56]],
dtype=tf.float32,
)
# Initialize
hl_obj = HammingLoss("multilabel", threshold=0.8)
hl_obj.update_state(actuals, predictions)
# Check results
check_results(hl_obj, 0.16666667) | [
9,
4702,
1842,
393
] |
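A quick sanity check on the expected value in the test above: multilabel Hamming loss is the fraction of label slots where the thresholded prediction disagrees with the ground truth. A minimal NumPy sketch (independent of the `HammingLoss` metric class, which the test presumably imports from TensorFlow Addons) reproduces 2/12 ≈ 0.16666667:

```python
import numpy as np

actuals = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 1]], dtype=float)
predictions = np.array(
    [[0.97, 0.56, 0.83, 0.77], [0.34, 0.95, 0.7, 0.89], [0.95, 0.45, 0.23, 0.56]]
)
thresholded = (predictions > 0.8).astype(float)  # threshold=0.8, as in the test
# Only row 3 disagrees (slots 0 and 3): 2 mismatches over 12 slots.
print((thresholded != actuals).mean())  # 0.16666666666666666
```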
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
) | [
69,
1072
] |
async def METHOD_NAME(self, batch_id, job_id, attempt_id, status):
url = self.status_path(batch_id, job_id, attempt_id)
await self.fs.write(url, status.encode('utf-8')) | [
77,
452,
171
] |
def METHOD_NAME(self, raw: bytes) -> None:
try:
line = raw.decode('utf-8').strip()
except UnicodeDecodeError:
self.sendError('command is not valid utf-8')
return
if line.startswith('!help'):
_, _, path = line.partition(' ')
self.help(path)
return
elif line.startswith('!backup'):
self.backup()
return
try:
feedback = self.runner.run(line)
if feedback:
self.sendLine(feedback)
except SysctlEntryNotFound:
path, _, _ = self.runner.get_line_parts(line)
self.sendError(f'{path} not found')
except SysctlReadOnlyEntry:
path, _, _ = self.runner.get_line_parts(line)
self.sendError(f'cannot write to {path}')
except SysctlWriteOnlyEntry:
path, _, _ = self.runner.get_line_parts(line)
self.sendError(f'cannot read from {path}')
except SysctlException as e:
self.sendError(str(e))
except ValidationError as e:
self.sendError(str(e))
except SysctlRunnerException as e:
self.sendError(str(e)) | [
534,
1732
] |
def METHOD_NAME(self):
self.test_project, self.test_user = create_canned_project()
# Arrange
self.test_project = update_project_with_info(self.test_project)
# Act
project_dto = self.test_project.as_dto_for_mapping()
# Assert
self.assertIsInstance(project_dto.area_of_interest, geojson.MultiPolygon)
self.assertIsInstance(project_dto.tasks, geojson.FeatureCollection)
# TODO test for project info
# self.assertEqual(project_dto.project_name, 'Test')
self.assertEqual(project_dto.project_id, self.test_project.id) | [
9,
155,
1046,
673,
4207,
947,
5457
] |
def METHOD_NAME(test_client):
response = test_client.get("/parameter/taxes/income_tax_rate")
parameter = json.loads(response.data)
assert sorted(list(parameter.keys())) == [
"description",
"id",
"metadata",
"source",
"values",
]
assert parameter["id"] == "taxes.income_tax_rate"
assert parameter["description"] == "Income tax rate"
assert parameter["values"] == {
"2015-01-01": 0.15,
"2014-01-01": 0.14,
"2013-01-01": 0.13,
"2012-01-01": 0.16,
}
assert parameter["metadata"] == {"unit": "/1"}
assert re.match(GITHUB_URL_REGEX, parameter["source"])
assert "taxes/income_tax_rate.yaml" in parameter["source"]
# 'documentation' attribute exists only when a value is defined
response = test_client.get("/parameter/benefits/housing_allowance")
parameter = json.loads(response.data)
assert sorted(list(parameter.keys())) == [
"description",
"documentation",
"id",
"metadata",
"source",
"values",
]
assert (
parameter["documentation"]
== "A fraction of the rent.\nFrom the 1st of Dec 2016, the housing allowance no longer exists."
) | [
9,
511,
199
] |
def METHOD_NAME(ddev, repository, helpers):
codecov_yaml = repository.path / '.codecov.yml'
with codecov_yaml.open(encoding='utf-8') as file:
codecov_yaml_info = yaml.safe_load(file)
temp = codecov_yaml_info['coverage']['status']['project']['Active_Directory']
codecov_yaml_info['coverage']['status']['project']['active directory'] = temp
codecov_yaml_info['coverage']['status']['project'].pop('Active_Directory')
output = yaml.safe_dump(codecov_yaml_info, default_flow_style=False, sort_keys=False)
with codecov_yaml.open(mode='w', encoding='utf-8') as file:
file.write(output)
result = ddev("validate", "ci")
assert result.exit_code == 1, result.output
error = "Project `active directory` should be called `Active_Directory`"
assert error in helpers.remove_trailing_spaces(result.output) | [
9,
3534,
155,
156
] |
def METHOD_NAME(data):
return self._node_to_member(data, balancer) | [
717
] |
def METHOD_NAME(contract: DeployedContract) -> DeployedContractInfo:
beamer_commit = get_commit_id()
return DeployedContractInfo(
beamer_commit=beamer_commit,
tx_hash=contract.deployment_txhash,
address=contract.address,
deployment_block=contract.deployment_block,
deployment_args=contract.deployment_args,
) | [
93,
8845,
1522,
100
] |
def METHOD_NAME(fixture_dir: FixtureDirGetter) -> None:
info = PackageInfo.from_metadata(
fixture_dir("inspection") / "demo_only_requires_txt.egg-info"
)
assert info is not None
demo_check_info(info) | [
9,
100,
280,
139,
310
] |
def METHOD_NAME(filepath: pathlib.Path) -> pathlib.Path:
if filepath.is_absolute():
return filepath.relative_to(_repo_root())
else:
return filepath | [
522,
1821,
3775
] |
def METHOD_NAME(self):
code = "var = 1\n"
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.rindex("var")) | [
9,
1646,
9437,
69,
256,
3194
] |
def METHOD_NAME(iargs=None):
# parse
parser = create_parser()
inps = parser.parse_args(args=iargs)
# import
from mintpy.objects import TIMESERIES_KEY_NAMES
from mintpy.utils import readfile, utils as ut
# check
inps.work_dir = os.path.abspath(os.path.dirname(inps.file))
atr = readfile.read_attribute(inps.file)
# default + check: geom_file for file in radar coord
if 'Y_FIRST' not in atr.keys():
geom_ds_list = ['latitude', 'longitude']
# default geom_file
if not inps.geom_file:
inps.geom_file = ut.get_geometry_file(
geom_ds_list,
work_dir=inps.work_dir,
coord='radar')
# check existence
if not inps.geom_file or not os.path.isfile(inps.geom_file):
msg = f'No geometry file with {geom_ds_list} in radar coord found!'
raise FileNotFoundError(msg)
# check: dset option (required for timeseries and ifgramStack files)
ftype = atr['FILE_TYPE']
if not inps.dset and ftype in TIMESERIES_KEY_NAMES + ['ifgramStack']:
raise Exception(f'No date/date12 specified for {ftype} file!')
return inps | [
1660,
534,
214
] |
def METHOD_NAME(self): | [
0,
1
] |
def METHOD_NAME(rel_codes, boxes, weights):
# type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
# perform some unpacking to make it JIT-fusion friendly
#rel_codes=rel_codes[0][None]
wx = weights[1]
wy = weights[0]
ww = weights[3]
wh = weights[2]
boxes_x1 = boxes[:, 1].unsqueeze(1).unsqueeze(0)
boxes_y1 = boxes[:, 0].unsqueeze(1).unsqueeze(0)
boxes_x2 = boxes[:, 3].unsqueeze(1).unsqueeze(0)
boxes_y2 = boxes[:, 2].unsqueeze(1).unsqueeze(0)
dx = rel_codes[:,:, 1].unsqueeze(2)
dy = rel_codes[:,:, 0].unsqueeze(2)
dw = rel_codes[:,:, 3].unsqueeze(2)
dh = rel_codes[:,:, 2].unsqueeze(2)
# implementation starts here
widths = boxes_x2 - boxes_x1
heights = boxes_y2 - boxes_y1
ctr_x = boxes_x1 + 0.5 * widths
ctr_y = boxes_y1 + 0.5 * heights
dx = dx / wx
dy = dy / wy
dw = dw / ww
dh = dh / wh
pred_ctr_x = dx * widths + ctr_x
#import pdb; pdb.set_trace()
pred_ctr_y = dy * heights + ctr_y
pred_w = torch.exp(dw) * widths
pred_h = torch.exp(dh) * heights
pred_boxes = torch.cat(
[
pred_ctr_x - 0.5 * pred_w,
pred_ctr_y - 0.5 * pred_h,
pred_ctr_x + 0.5 * pred_w,
pred_ctr_y + 0.5 * pred_h,
],
dim=2,
)
#import pdb; pdb.set_trace()
return pred_boxes | [
1268,
2877
] |
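The decoder above is the standard center-size box transform used in Faster R-CNN-style detectors, here with (y, x)-ordered coordinates and deltas. A minimal NumPy sketch of the same arithmetic on a single box, with illustrative delta values (the weights are omitted, i.e. all set to 1):

```python
import numpy as np

box = np.array([10.0, 20.0, 50.0, 60.0])  # (y1, x1, y2, x2), as in the snippet
dy, dx, dh, dw = 0.1, -0.2, np.log(1.25), np.log(0.8)  # example deltas

h, w = box[2] - box[0], box[3] - box[1]
cy, cx = box[0] + 0.5 * h, box[1] + 0.5 * w
pred_cy, pred_cx = dy * h + cy, dx * w + cx
pred_h, pred_w = np.exp(dh) * h, np.exp(dw) * w
decoded = np.array([pred_cx - 0.5 * pred_w, pred_cy - 0.5 * pred_h,
                    pred_cx + 0.5 * pred_w, pred_cy + 0.5 * pred_h])
print(decoded)  # (x1, y1, x2, y2), matching the concatenation order above
```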
def METHOD_NAME(self) -> List[Genotype]: ... | [
-1
] |
def METHOD_NAME(self):
"""Method to test whether a user is registrar of an event or not"""
with self.app.test_request_context(headers=self.auth, method="POST"):
uer = UsersEventsRolesSubFactory(
user_id=1, event_id=1, role__name='registrar'
)
save_to_db(uer)
assert has_access('is_registrar', event_id=1) | [
9,
137,
2509
] |
def METHOD_NAME(cc, source, executable, options):
subprocess.check_call([cc, source, "-o", executable] + options)
p = subprocess.Popen(
["./security-check.py", executable],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
universal_newlines=True,
)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout.rstrip()) | [
128,
2326,
250
] |
def METHOD_NAME(bc_builder, **kwargs) -> None:
bc_builder.register_aliases(BuildFileAliases(**kwargs)) | [
372,
2334
] |
def METHOD_NAME(self):
"""
Reload the systemd manager configuration (daemon-reload) so unit changes take effect.
"""
subprocess.check_call(['systemctl', 'daemon-reload'], close_fds=True) | [
1687,
1372
] |
def METHOD_NAME(self, server_info):
self.server_info = {
"Server instance": server_info.get("instanceId"),
"Server version": server_info.get("serverVersion"),
"Server license": server_info.get("instanceLicenseId"),
"Giskard User": server_info.get("user"),
} | [
176,
163,
100
] |
def METHOD_NAME(self, img):
self.print_step("Downloading install iso")
cimg = self._download_with_cache(self.link, sha256sum=self.csum)
img_tmp = img + ".tmp"
iso = img + ".install.iso"
iso_xz = iso + ".xz"
self.print_step("Preparing iso and disk image")
subprocess.check_call(["cp", "-f", cimg, iso_xz])
subprocess.check_call(["xz", "-dvf", iso_xz])
self.exec_qemu_img("create", "-f", "qcow2", img_tmp, self.size)
self.print_step("Booting installer")
self.boot(img_tmp, extra_args = [
"-bios", "pc-bios/bios-256k.bin",
"-machine", "graphics=off",
"-device", "VGA",
"-cdrom", iso
])
self.console_init()
self.console_boot_serial()
self.console_wait_send("Console type", "xterm\n")
# pre-install configuration
self.console_wait_send("Welcome", "\n")
self.console_wait_send("Keymap Selection", "\n")
self.console_wait_send("Set Hostname", "freebsd\n")
self.console_wait_send("Distribution Select", "\n")
self.console_wait_send("Partitioning", "\n")
self.console_wait_send("Partition", "\n")
self.console_wait_send("Scheme", "\n")
self.console_wait_send("Editor", "f")
self.console_wait_send("Confirmation", "c")
self.print_step("Installation started now, this will take a while")
# post-install configuration
self.console_wait("New Password:")
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("Retype New Password:")
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait_send("Network Configuration", "\n")
self.console_wait_send("IPv4", "y")
self.console_wait_send("DHCP", "y")
self.console_wait_send("IPv6", "n")
self.console_wait_send("Resolver", "\n")
self.console_wait_send("Time Zone Selector", "0\n")
self.console_wait_send("Confirmation", "y")
self.console_wait_send("Time & Date", "\n")
self.console_wait_send("Time & Date", "\n")
self.console_wait_send("System Configuration", "\n")
self.console_wait_send("System Hardening", "\n")
# qemu user
self.console_wait_send("Add User Accounts", "y")
self.console_wait("Username")
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait("Full name")
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait_send("Uid", "\n")
self.console_wait_send("Login group", "\n")
self.console_wait_send("Login group", "\n")
self.console_wait_send("Login class", "\n")
self.console_wait_send("Shell", "\n")
self.console_wait_send("Home directory", "\n")
self.console_wait_send("Home directory perm", "\n")
self.console_wait_send("Use password", "\n")
self.console_wait_send("Use an empty password", "\n")
self.console_wait_send("Use a random password", "\n")
self.console_wait("Enter password:")
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait("Enter password again:")
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait_send("Lock out", "\n")
self.console_wait_send("OK", "yes\n")
self.console_wait_send("Add another user", "no\n")
self.console_wait_send("Final Configuration", "\n")
self.console_wait_send("Manual Configuration", "\n")
self.console_wait_send("Complete", "\n")
self.print_step("Installation finished, rebooting")
self.console_boot_serial()
# setup qemu user
prompt = "$"
self.console_ssh_init(prompt, self._config["guest_user"], self._config["guest_pass"])
self.console_wait_send(prompt, "exit\n")
# setup root user
prompt = "root@freebsd:~ #"
self.console_ssh_init(prompt, "root", self._config["root_pass"])
self.console_sshd_config(prompt)
# setup serial console
self.console_wait(prompt)
self.console_send("echo 'console=comconsole' >> /boot/loader.conf\n")
# setup boot delay
self.console_wait(prompt)
self.console_send("echo 'autoboot_delay=1' >> /boot/loader.conf\n")
# setup virtio-blk #1 (tarfile)
self.console_wait(prompt)
self.console_send("echo 'chmod 666 /dev/vtbd1' >> /etc/rc.local\n")
self.print_step("Configuration finished, rebooting")
self.console_wait_send(prompt, "reboot\n")
self.console_wait("login:")
self.wait_ssh()
self.print_step("Installing packages")
self.ssh_root_check("pkg install -y %s\n" % " ".join(self.pkgs))
# shutdown
self.ssh_root(self.poweroff)
self.console_wait("Uptime:")
self.wait()
if os.path.exists(img):
os.remove(img)
os.rename(img_tmp, img)
os.remove(iso)
self.print_step("All done") | [
56,
660
] |
def METHOD_NAME():
stim = moose.RandSpike( '/model/stim', params['numInputs'] )
inhib = moose.LIF( '/model/inhib', params['numInhib'] )
insyn = moose.SimpleSynHandler(inhib.path + '/syns', params['numInhib'])
moose.connect( insyn, 'activationOut', inhib, 'activation', 'OneToOne' )
output = moose.LIF( '/model/output', params['numOutput'] )
outsyn = moose.SimpleSynHandler(output.path+'/syns',params['numOutput'])
moose.connect(outsyn, 'activationOut', output, 'activation', 'OneToOne')
outInhSyn = moose.SimpleSynHandler(output.path+'/inhsyns',params['numOutput'])
moose.connect(outInhSyn, 'activationOut', output, 'activation', 'OneToOne')
iv = moose.vec( insyn.path + '/synapse' )
ov = moose.vec( outsyn.path + '/synapse' )
oiv = moose.vec( outInhSyn.path + '/synapse' )
assert len(iv) == 0
assert len(ov) == 0
assert len(oiv) == 0
temp = moose.connect( stim, 'spikeOut', iv, 'addSpike', 'Sparse' )
inhibMatrix = moose.element( temp )
inhibMatrix.setRandomConnectivity(
params['stimToInhProb'], params['stimToInhSeed'] )
cl = inhibMatrix.connectionList
# This can change when random-number generator changes.
# This was before we used c++11 <random> to generate random numbers. This
# test has changes on Tuesday 31 July 2018 11:12:35 AM IST
# expectedCl = [ 1,4,13,13,26,42,52,56,80,82,95,97,4,9,0,9,4,8,0,6,1,6,6,7]
expectedCl=[0,6,47,50,56,67,98,2,0,3,5,4,8,3]
assert list(cl) == expectedCl, "Expected %s, got %s" % (expectedCl, cl)
temp = moose.connect( stim, 'spikeOut', ov, 'addSpike', 'Sparse' )
excMatrix = moose.element( temp )
excMatrix.setRandomConnectivity(
params['stimToOutProb'], params['stimToOutSeed'] )
temp = moose.connect( inhib, 'spikeOut', oiv, 'addSpike', 'Sparse' )
negFFMatrix = moose.element( temp )
negFFMatrix.setRandomConnectivity(
params['inhToOutProb'], params['inhToOutSeed'] )
# print("ConnMtxEntries: ", inhibMatrix.numEntries, excMatrix.numEntries, negFFMatrix.numEntries)
got = (inhibMatrix.numEntries, excMatrix.numEntries, negFFMatrix.numEntries)
expected = (7, 62, 55)
assert expected == got, "Expected %s, Got %s" % (expected,got)
cl = negFFMatrix.connectionList
numInhSyns = [ ]
niv = 0
nov = 0
noiv = 0
for i in moose.vec( insyn ):
niv += i.synapse.num
numInhSyns.append( i.synapse.num )
if i.synapse.num > 0:
i.synapse.weight = params['wtStimToInh']
# expected = [2,1,0,0,2,0,3,1,1,2]
expected = [1, 0, 1, 2, 1, 1, 0, 0, 1, 0]
assert numInhSyns == expected, "Expected %s, got %s" % (expected,numInhSyns)
for i in moose.vec( outsyn ):
nov += i.synapse.num
if i.synapse.num > 0:
i.synapse.weight = params['wtStimToOut']
for i in moose.vec( outInhSyn ):
noiv += i.synapse.num
#print i.synapse.num
if i.synapse.num > 0:
i.synapse.weight = params['wtInhToOut']
print("SUMS: ", sum( iv.numField ), sum( ov.numField ), sum( oiv.numField ))
assert [1, 64, 25] == [sum( iv.numField ), sum( ov.numField ), sum( oiv.numField )]
print("SUMS2: ", niv, nov, noiv)
assert [7, 62, 55] == [ niv, nov, noiv ]
print("SUMS3: ", sum( insyn.vec.numSynapses ), sum( outsyn.vec.numSynapses ), sum( outInhSyn.vec.numSynapses ))
assert [7,62,55] == [ sum( insyn.vec.numSynapses ), sum( outsyn.vec.numSynapses ), sum( outInhSyn.vec.numSynapses ) ]
# print(oiv.numField)
# print(insyn.vec[1].synapse.num)
# print(insyn.vec.numSynapses)
# print(sum( insyn.vec.numSynapses ))
# niv = iv.numSynapses
# ov = iv.numSynapses
sv = moose.vec( stim )
sv.rate = params['randInputRate']
sv.refractT = params['randRefractTime']
#moose.showfield( sv[7] )
inhib.vec.thresh = params['inhibThresh']
inhib.vec.Rm = params['Rm']
inhib.vec.Cm = params['Cm']
inhib.vec.vReset = params['inhVreset']
inhib.vec.refractoryPeriod = params['inhibRefractTime']
output.vec.thresh = params['outputThresh']
output.vec.Rm = params['Rm']
output.vec.Cm = params['Cm']
output.vec.refractoryPeriod = params['outputRefractTime']
otab = moose.Table( '/model/otab', params['numOutput'] )
moose.connect( otab, 'requestOut', output, 'getVm', 'OneToOne' )
itab = moose.Table( '/model/itab', params['numInhib'] )
moose.connect( itab, 'requestOut', inhib, 'getVm', 'OneToOne' )
return inhib, output | [
93,
285,
3101,
1228
] |
def METHOD_NAME(self, video):
video_id = video.get('id')
if video_id is None:
raise ExtractorError('Video id was not found')
formats = []
for track in variadic(traverse_obj(video, ('media', 'track')) or []):
href = track.get('url')
if href is None:
continue
ext = determine_ext(href, None)
transport = track.get('transport')
if transport == 'DASH' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(href, video_id, mpd_id='dash', fatal=False))
elif transport == 'HLS' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
href, video_id, m3u8_id='hls', entry_protocol='m3u8_native', fatal=False))
elif transport == 'HDS' or ext == 'f4m':
formats.extend(self._extract_f4m_formats(href, video_id, f4m_id='hds', fatal=False))
elif transport == 'SMOOTH':
formats.extend(self._extract_ism_formats(href, video_id, ism_id='smooth', fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(href, video_id, fatal=False))
else:
track_obj = {
'url': href,
'ext': ext,
'format_note': track.get('transport'),
'resolution': traverse_obj(track, ('video', 'resolution')),
'fps': int_or_none(traverse_obj(track, ('video', 'framerate'))),
'vbr': int_or_none(traverse_obj(track, ('video', 'bitrate')), scale=1000),
'vcodec': traverse_obj(track, ('video', 'encoder', 'type')) if track.get('video') else 'none',
'abr': int_or_none(traverse_obj(track, ('audio', 'bitrate')), scale=1000),
'asr': int_or_none(traverse_obj(track, ('audio', 'samplingrate'))),
'acodec': traverse_obj(track, ('audio', 'encoder', 'type')) if track.get('audio') else 'none',
}
if transport == 'RTMP':
m_obj = re.search(r'(?:rtmp://[^/]+/(?P<app>[^/]+))/(?P<ext>.+):(?P<playpath>.+)', href)
if not m_obj:
continue
track_obj.update({
'app': m_obj.group('app'),
'ext': m_obj.group('ext'),
'play_path': m_obj.group('ext') + ':' + m_obj.group('playpath'),
'rtmp_live': True,
'preference': -2,
})
formats.append(track_obj)
return {
'id': video_id,
'formats': formats,
'title': video.get('title'),
'series': video.get('seriestitle'),
'season_id': video.get('series'),
'creator': traverse_obj(video, ('creators', 'creator')),
'timestamp': parse_iso8601(video.get('start')),
'thumbnail': traverse_obj(video, ('attachments', 'attachment', ..., 'url'), get_all=False),
} | [
214,
14694
] |
def METHOD_NAME():
"""Interactive test run."""
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass | [
9
] |
def METHOD_NAME():
db = open("tmp", "n")
assert len(db) == 0
db["foo"] = "bar"
assert db["foo"] == "bar"
db[unicode("ufoo")] = unicode("ubar")
assert db[unicode("ufoo")] == unicode("ubar")
db.sync()
db = open("tmp", "c")
assert len(db) == 2, len(db)
assert db["foo"] == "bar"
db["bar"] = "foo"
assert db["bar"] == "foo"
db[unicode("ubar")] = unicode("ufoo")
assert db[unicode("ubar")] == unicode("ufoo")
db.sync()
db = open("tmp", "r")
assert len(db) == 4, len(db)
assert db["foo"] == "bar"
assert db["bar"] == "foo"
assert db[unicode("ufoo")] == unicode("ubar")
assert db[unicode("ubar")] == unicode("ufoo")
try:
db.sync()
except IOError, e:
assert str(e) == "Read-only database: tmp.dblite"
else:
raise RuntimeError("IOError expected.")
db = open("tmp", "w")
assert len(db) == 4
db["ping"] = "pong"
db.sync()
try:
db[(1,2)] = "tuple"
except TypeError, e:
assert str(e) == "key `(1, 2)' must be a string but is <type 'tuple'>", str(e)
else:
raise RuntimeError("TypeError exception expected")
try:
db["list"] = [1,2]
except TypeError, e:
assert str(e) == "value `[1, 2]' must be a string but is <type 'list'>", str(e)
else:
raise RuntimeError("TypeError exception expected")
db = open("tmp", "r")
assert len(db) == 5
db = open("tmp", "n")
assert len(db) == 0
dblite._open("tmp.dblite", "w")
db = open("tmp", "r")
dblite._open("tmp.dblite", "w").write("x")
try:
db = open("tmp", "r")
except pickle.UnpicklingError:
pass
else:
raise RuntimeError("pickle exception expected.")
global ignore_corrupt_dbfiles
ignore_corrupt_dbfiles = 2
db = open("tmp", "r")
assert len(db) == 0
os.unlink("tmp.dblite")
try:
db = open("tmp", "w")
except IOError, e:
assert str(e) == "[Errno 2] No such file or directory: 'tmp.dblite'", str(e)
else:
raise RuntimeError("IOError expected.")
print "OK" | [
3446
] |
def METHOD_NAME(self, timeslice):
self.__timeslice = timeslice | [
0,
-1
] |
def METHOD_NAME(self):
pass | [
9,
14129,
1818,
1501
] |
def METHOD_NAME(self, json_info):
self.layout = QVBoxLayout()
self.image = ImageLabel()
self.layout.addWidget(self.image)
mini_layout = QHBoxLayout()
self.layout.addLayout(mini_layout)
if not json_info:
self.layout.addWidget(QLabel("An error occurred"))
self.setLayout(self.layout)
return
self.title_label = QLabel(json_info.get("title"))
self.title_label.setWordWrap(True)
mini_layout.addWidget(self.title_label)
mini_layout.addStretch(1)
price = json_info["price"]["totalPrice"]["fmtPrice"]["originalPrice"]
discount_price = json_info["price"]["totalPrice"]["fmtPrice"]["discountPrice"]
price_label = QLabel(price)
if price != discount_price:
font = QFont()
font.setStrikeOut(True)
price_label.setFont(font)
mini_layout.addWidget(
QLabel(discount_price if discount_price != "0" else self.tr("Free"))
)
mini_layout.addWidget(price_label)
else:
if price == "0":
price_label.setText(self.tr("Free"))
mini_layout.addWidget(price_label)
for c in r'<>?":|\/*':
json_info["title"] = json_info["title"].replace(c, "")
self.json_info = json_info
self.slug = json_info["productSlug"]
self.title = json_info["title"]
for img in json_info["keyImages"]:
if img["type"] in [
"DieselStoreFrontWide",
"OfferImageWide",
"VaultClosed",
"ProductLogo",
]:
if img["type"] == "VaultClosed" and self.title != "Mystery Game":
continue
self.image.update_image(
img["url"],
json_info["title"],
(self.width, int(self.width * 9 / 16)),
)
break
else:
logger.info(", ".join([img["type"] for img in json_info["keyImages"]]))
self.setLayout(self.layout)
self.setFixedSize(self.width + 10, self.width * 9 // 16 + 50) | [
176,
882
] |
def METHOD_NAME():
bpy.utils.unregister_class(SvLinearApproxNode) | [
2468
] |
def METHOD_NAME(self):
source = """ | [
9,
-1
] |
def METHOD_NAME(
postgres_dsn: PostgresTestConfig, postgres_engine: sa.engine.Engine
) -> None:
# create a template db, the removal is necessary to allow for the usage of --keep-docker-up
queries = [
# disconnect existing users
f"""
SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity
WHERE pg_stat_activity.datname = '{postgres_dsn["database"]}' AND pid <> pg_backend_pid();
""",
# drop template database
f"ALTER DATABASE {TEMPLATE_DB_TO_RESTORE} is_template false;",
f"DROP DATABASE {TEMPLATE_DB_TO_RESTORE};",
# create template database
"""
CREATE DATABASE {template_db} WITH TEMPLATE {original_db} OWNER {db_user};
""".format(
template_db=TEMPLATE_DB_TO_RESTORE,
original_db=postgres_dsn["database"],
db_user=postgres_dsn["user"],
),
]
execute_queries(postgres_engine, queries, ignore_errors=True) | [
129,
671,
1267
] |
def METHOD_NAME(zero_dm_device):
device_path = "{}{}".format(DMPATH_PREFIX, zero_dm_device)
assert os.path.exists(device_path)
devicemapper.removeMapping(zero_dm_device)
assert not os.path.exists(device_path) | [
9,
188,
445
] |
def METHOD_NAME(car_fingerprint, radar_disabled=False):
no_radar = car_fingerprint in HONDA_BOSCH_RADARLESS
if radar_disabled or no_radar:
# when radar is disabled, steering commands are sent directly to powertrain bus
return get_pt_bus(car_fingerprint)
# normally steering commands are sent to radar, which forwards them to powertrain bus
return 0 | [
19,
2542,
1660,
2583
] |
def METHOD_NAME(mock_docker_app_context, tmp_path, sub_check_output_kw, capsys):
"""A call can request additional mounts."""
assert (
mock_docker_app_context.check_output(
["hello", "world"],
mounts=[
("/path/to/first", "/container/first"),
("/path/to/second", "/container/second"),
],
)
== "goodbye\n"
)
mock_docker_app_context.tools.subprocess._subprocess.check_output.assert_called_once_with(
[
"docker",
"run",
"--rm",
"--volume",
f"{tmp_path / 'bundle'}:/app:z",
"--volume",
f"{tmp_path / 'briefcase'}:/briefcase:z",
"--volume",
"/path/to/first:/container/first:z",
"--volume",
"/path/to/second:/container/second:z",
"briefcase/com.example.myapp:py3.X",
"hello",
"world",
],
**sub_check_output_kw,
)
assert capsys.readouterr().out == "" | [
9,
1967,
5155
] |
def METHOD_NAME(dataset_id, dataset_type, dataset_path):
status, message = get_folder_status(dataset_path, True)
if status is None:
status = DatasetStatus.XEMPTY
if status == DatasetStatus.XCOPYING:
items = message.strip().split()
if len(items) < 2:
percent = 0.0
else:
pid = int(items[0])
if int(items[1]) == 0:
percent = 1.0
else:
copyed_files_num = len(list_files(dataset_path)) - 1
percent = copyed_files_num * 1.0 / int(items[1])
message = {'pid': pid, 'percent': percent}
if status == DatasetStatus.XCOPYDONE or status == DatasetStatus.XSPLITED:
if not osp.exists(osp.join(dataset_path, 'statis.pkl')):
p = import_dataset(dataset_id, dataset_type, dataset_path,
dataset_path)
status = DatasetStatus.XCHECKING
return status, message | [
19,
126,
452
] |
def METHOD_NAME(self, path_prefix=None): | [
324,
157
] |
def METHOD_NAME(self, operator, *args, node):
"""Helper method to lookup a method type given the operator and types of arguments.
TODO: modify this implementation to use mro.
"""
if args:
# First try to do a direct lookup.
if (
isinstance(args[0], ForwardRef)
and operator in self.classes[args[0].__forward_arg__]
):
for func_type, _ in self.classes[args[0].__forward_arg__][operator]:
if len(args) != len(func_type.__args__) - 1:
continue
if self.type_constraints.can_unify(
Callable[list(args), Any], Callable[list(func_type.__args__[:-1]), Any]
):
return func_type
# If that doesn't work, fall back on a brute force search.
func_types_list = self.methods[operator]
for func_type, _ in func_types_list:
if len(args) != len(func_type.__args__) - 1:
continue
if self.type_constraints.can_unify(
Callable[list(args), Any], Callable[list(func_type.__args__[:-1]), Any]
):
return func_type
return TypeFailFunction(tuple(func_types_list), None, node) | [
1906,
103
] |
def METHOD_NAME(key):
if key.size(2) < self.pre_decision_ratio:
return key
else:
k = key[
:,
:,
self.pre_decision_ratio - 1:: self.pre_decision_ratio,
].contiguous()
if key.size(-1) % self.pre_decision_ratio != 0:
k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous()
return k | [
679
] |
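The helper above pools the attention key sequence along its time dimension, keeping every `pre_decision_ratio`-th frame and appending the final frame when the length is not evenly divisible. A small PyTorch sketch of that indexing, with an illustrative ratio:

```python
import torch

ratio = 4  # illustrative stand-in for self.pre_decision_ratio
key = torch.arange(10, dtype=torch.float).view(1, 1, 10)
pooled = key[:, :, ratio - 1::ratio]  # keeps frames 3 and 7
if key.size(-1) % ratio != 0:
    # length 10 is not divisible by 4, so the final frame is appended
    pooled = torch.cat([pooled, key[:, :, -1:]], dim=-1)
print(pooled)  # tensor([[[3., 7., 9.]]])
```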
def METHOD_NAME(self, node: Node) -> int: ... | [
447,
224
] |
def METHOD_NAME(self) -> str:
"""
The ID associated with the cluster.
"""
return pulumi.get(self, "cluster_id") | [
2059,
147
] |
def METHOD_NAME(self, log):
AppConfig.initialize()
log.info.assert_called_with("AXES: blocking by combination of username and IP.") | [
9,
7272,
200,
390,
21,
1213
] |
def METHOD_NAME(self):
return True | [
220,
155,
2546
] |
def METHOD_NAME():
release = RedhatRelease(context_wrap(CENTOS_STREAM))
assert release.major == 8
assert release.minor is None
assert release.product == 'CentOS Stream'
assert release.is_centos
assert not release.is_rhel | [
9,
11913,
919
] |
def METHOD_NAME(self, context):
# Get JobQueue module
modules = context.data["openPypeModules"]
job_queue_module = modules["job_queue"]
jobs_root = job_queue_module.get_jobs_root()
if not jobs_root:
raise ValueError("Job Queue root is not set.")
context.data["jobsRoot"] = jobs_root
context_staging_dir = self._create_context_staging_dir(jobs_root)
workfile_path = self._extract_workfile_path(
context, context_staging_dir
)
context.data["contextStagingDir"] = context_staging_dir
context.data["workfilePath"] = workfile_path
# Prepare tvpaint command
collect_scene_data_command = CollectSceneData()
# Create TVPaint sender commands
commands = SenderTVPaintCommands(workfile_path, job_queue_module)
commands.add_command(collect_scene_data_command)
# Send job and wait for answer
commands.send_job_and_wait()
collected_data = collect_scene_data_command.result()
layers_data = collected_data["layers_data"]
groups_data = collected_data["groups_data"]
scene_data = collected_data["scene_data"]
exposure_frames_by_layer_id = (
collected_data["exposure_frames_by_layer_id"]
)
pre_post_beh_by_layer_id = (
collected_data["pre_post_beh_by_layer_id"]
)
# Store results
# scene data store the same way as TVPaint collector
scene_data = {
"sceneWidth": scene_data["width"],
"sceneHeight": scene_data["height"],
"scenePixelAspect": scene_data["pixel_aspect"],
"sceneFps": scene_data["fps"],
"sceneFieldOrder": scene_data["field_order"],
"sceneMarkIn": scene_data["mark_in"],
# scene_data["mark_in_state"],
"sceneMarkInState": scene_data["mark_in_set"],
"sceneMarkOut": scene_data["mark_out"],
# scene_data["mark_out_state"],
"sceneMarkOutState": scene_data["mark_out_set"],
"sceneStartFrame": scene_data["start_frame"],
"sceneBgColor": scene_data["bg_color"]
}
context.data["sceneData"] = scene_data
# Store only raw data
context.data["groupsData"] = groups_data
context.data["layersData"] = layers_data
context.data["layersExposureFrames"] = exposure_frames_by_layer_id
context.data["layersPrePostBehavior"] = pre_post_beh_by_layer_id
self.log.debug(
(
"Collected data"
"\nScene data: {}"
"\nLayers data: {}"
"\nExposure frames: {}"
"\nPre/Post behavior: {}"
).format(
json.dumps(scene_data, indent=4),
json.dumps(layers_data, indent=4),
json.dumps(exposure_frames_by_layer_id, indent=4),
json.dumps(pre_post_beh_by_layer_id, indent=4)
)
) | [
356
] |
def METHOD_NAME():
bpy.utils.unregister_class(SvCurveFormulaNode) | [
2468
] |
def METHOD_NAME(self, method):
# Clean up: Delete the test file.
os.remove(self.testDataFilePath) | [
1843,
103
] |
def METHOD_NAME(state_tree):
with pytest.helpers.temp_file("Invalid.cer") as cert_file:
with salt.utils.files.fopen(str(cert_file), "wb") as fh:
fh.write(b"Invalid cert data")
yield cert_file | [
532,
1941,
171
] |
def METHOD_NAME(self, rv, t, compute_gain=False, _diffusion=1.0, **kwargs):
raise NotImplementedError("Not available") | [
76,
11035
] |
def METHOD_NAME(self):
self.pre_operations()
yield self.RestorePointCollectionsDelete(ctx=self.ctx)()
self.post_operations() | [
750,
710
] |
async def METHOD_NAME(
db: Connection,
params: Params,
) -> List[Tuple[datetime, int]]:
sqlbase = """ | [
19,
365
] |
def METHOD_NAME(self, fan_num):
return self._get_fan_node_val(fan_num, self.FAN_NODE_DIR_IDX_OF_MAP) | [
19,
1337,
1190
] |
def METHOD_NAME(self):
with self._isolate() as f:
tool_path = os.path.join(PROJECT_TEMPLATES_DIR, "demo", "randomlines.xml")
job_path = os.path.join(TEST_DATA_DIR, "randomlines_job_1.json")
test_cmd = [
"--verbose",
"run",
"--no_dependency_resolution",
"--galaxy_branch",
target_galaxy_branch(),
"--test_data",
TEST_DATA_DIR,
tool_path,
job_path,
]
self._check_exit_code(test_cmd)
assert os.path.exists(os.path.join(f, "tool_test_output.html"))
assert os.path.exists(os.path.join(f, "tool_test_output.json")) | [
9,
22,
-1,
-1
] |
def METHOD_NAME(self, year: int, month: int) -> list[list[tuple[int, int]]]: ... | [
-1
] |
def METHOD_NAME(self, psf):
energy = 1 * u.TeV
theta = 0 * u.deg
containment = [0.68, 0.8, 0.9]
desired = psf.containment_radius(
energy_true=energy, offset=theta, fraction=containment
)
assert_allclose(desired, [0.14775, 0.18675, 0.25075] * u.deg, rtol=1e-3) | [
9,
24,
410,
12031
] |
def METHOD_NAME():
@resource(config_schema={"foo": str})
def resource_reqs_config(context):
assert context.resource_config["foo"] == "bar"
return 5
# Ensure that error is raised when we attempt to invoke with a None context
with pytest.raises(
DagsterInvalidInvocationError,
match="Resource has required config schema, but no context was provided.",
):
resource_reqs_config(None)
# Ensure that error is raised when context does not have the required config.
context = build_init_resource_context()
with pytest.raises(
DagsterInvalidConfigError,
match="Error in config for resource",
):
resource_reqs_config(context)
with pytest.raises(
DagsterInvalidConfigError,
match="Error when applying config mapping for resource",
):
resource_reqs_config.configured({"foobar": "bar"})(None)
# Ensure that if you configure the resource, you can provide a None context.
result = resource_reqs_config.configured({"foo": "bar"})(None)
assert result == 5
result = resource_reqs_config(build_init_resource_context(config={"foo": "bar"}))
assert result == 5 | [
9,
191,
2974,
41,
200
] |
def METHOD_NAME(target, check, target_version, log_cb=None, online=True, force=False):
logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.update_script")
if not online and not check("offline", False):
raise CannotUpdateOffline()
if not can_perform_update(target, check):
raise ConfigurationInvalid(
"checkout_folder and update_folder are missing for update target %s, one is needed"
% target
)
update_script = check["update_script"]
update_branch = check.get("update_branch", "")
force_exact_version = check.get("force_exact_version", False)
folder = check.get(
"update_folder", check.get("checkout_folder")
) # either should be set, tested above
pre_update_script = check.get("pre_update_script", None)
post_update_script = check.get("post_update_script", None)
caller = _get_caller(log_cb=log_cb)
### pre update
if pre_update_script is not None:
logger.debug(f"Target: {target}, running pre-update script: {pre_update_script}")
try:
caller.checked_call(pre_update_script, cwd=folder)
except CommandlineError as e:
logger.exception(
"Target: %s, error while executing pre update script, got returncode %r"
% (target, e.returncode)
)
### update
try:
update_command = update_script.format(
python=sys.executable,
folder=folder,
target=target_version,
branch=update_branch,
force="true" if force_exact_version else "false",
)
logger.debug(f"Target {target}, running update script: {update_command}")
caller.checked_call(update_command, cwd=folder)
except CommandlineError as e:
logger.exception(
"Target: %s, error while executing update script, got returncode %r"
% (target, e.returncode)
)
raise UpdateError(
"Error while executing update script for %s", (e.stdout, e.stderr)
)
### post update
if post_update_script is not None:
logger.debug(
"Target: {}, running post-update script {}...".format(
target, post_update_script
)
)
try:
caller.checked_call(post_update_script, cwd=folder)
except CommandlineError as e:
logger.exception(
"Target: %s, error while executing post update script, got returncode %r"
% (target, e.returncode)
)
return "ok" | [
407,
86
] |
def METHOD_NAME(dbhost, dbport, dbname, dbuser, dbpass):
return ':'.join(str(s) for s in [dbhost, dbport, dbname, dbuser, dbpass]) | [
13206,
17986
] |
def METHOD_NAME(self, clock):
self.clock = clock
self.scheduler = schedule.Scheduler(clock=clock)
self.scheduler.start() | [
129,
1520
] |
def METHOD_NAME(self, key):
"""
Get the index of a given entry, raising a KeyError if it's not
present.
`key` can be an iterable of entries that is not a string, in which case
this returns a list of indices.
"""
if is_iterable(key):
return [self.METHOD_NAME(subkey) for subkey in key]
return self.map[key] | [
724
] |
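Assuming `self.map` is a plain dict from entry to position and `is_iterable` rejects strings, the lookup above supports both single keys and batches of keys. A hypothetical standalone equivalent:

```python
index_map = {"a": 0, "b": 1, "c": 2}  # stand-in for self.map

def lookup(key):
    if not isinstance(key, str) and hasattr(key, "__iter__"):
        return [lookup(k) for k in key]
    return index_map[key]  # a plain dict raises KeyError for missing entries

print(lookup("b"))         # 1
print(lookup(["a", "c"]))  # [0, 2]
```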
def METHOD_NAME(self):
return self.getPosition() == self.min_pos
1541,
1681
] |
def METHOD_NAME(self, ignition_info: str) -> None:
self.api_client.METHOD_NAME(infra_env_id=self.id, ignition_info=ignition_info) | [
1575,
8402,
-1
] |
def METHOD_NAME(data_connector_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCodelessApiPollingDataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getCodelessApiPollingDataConnector', __args__, opts=opts, typ=GetCodelessApiPollingDataConnectorResult).value
return AwaitableGetCodelessApiPollingDataConnectorResult(
connector_ui_config=pulumi.get(__ret__, 'connector_ui_config'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
polling_config=pulumi.get(__ret__, 'polling_config'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type')) | [
19,
12552,
58,
2510,
365,
4059
] |
def METHOD_NAME():
ht = hl.utils.range_matrix_table(1, 1)
ht = ht.annotate_rows(b = hl.nd.array([[0]]))
try:
ht = ht.annotate_rows(c = ht.b[ht.col_idx, None])
except hl.ExpressionException as exc:
assert 'scope violation' in exc.args[0]
assert "'col_idx' (indices ['column'])" in exc.args[0]
else:
assert False | [
9,
1636,
724,
41,
98,
1170
] |
def METHOD_NAME(self, soup: BeautifulSoup) -> str:
# The soup here is the result of `self.get_soup(self.novel_url)`
pass | [
214,
2893
] |
def METHOD_NAME(self):
self.tempdir = TESTFN + "_dir"
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
self.mktemp('.aa', 'G')
self.mktemp('.bb', 'H')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
if can_symlink():
os.symlink(self.norm('broken'), self.norm('sym1'))
os.symlink('broken', self.norm('sym2'))
os.symlink(os.path.join('a', 'bcd'), self.norm('sym3')) | [
0,
1
] |
def METHOD_NAME(
ctx,
profile="",
tags=[], # noqa: B006
targets=[], # noqa: B006
configparams=[], # noqa: B006
verbose=True,
METHOD_NAME="",
skip="",
cache=False,
junit_tar="",
coverage=False,
test_run_name="",
):
"""
Run E2E Tests based on test-infra-definitions infrastructure provisioning.
"""
if shutil.which("pulumi") is None:
raise Exit(
"pulumi CLI not found, Pulumi needs to be installed on the system (see https://github.com/DataDog/test-infra-definitions/blob/main/README.md)",
1,
)
e2e_module = DEFAULT_MODULES["test/new-e2e"]
e2e_module.condition = lambda: True
if targets:
e2e_module.targets = targets
envVars = dict()
if profile:
envVars["E2E_PROFILE"] = profile
parsedParams = dict()
for param in configparams:
parts = param.split("=", 1)
if len(parts) != 2:
raise Exit(message=f"wrong format given for config parameter, expects key=value, actual: {param}", code=1)
parsedParams[parts[0]] = parts[1]
if parsedParams:
envVars["E2E_STACK_PARAMS"] = json.dumps(parsedParams)
gotestsum_format = "standard-verbose" if verbose else "pkgname"
coverage_opt = ""
coverage_path = "coverage.out"
if coverage:
coverage_opt = f"-cover -covermode=count -coverprofile={coverage_path} -coverpkg=./...,github.com/DataDog/test-infra-definitions/..."
test_run_arg = ""
if test_run_name != "":
test_run_arg = f"-run {test_run_name}"
cmd = f'gotestsum --format {gotestsum_format} '
cmd += '{junit_file_flag} --packages="{packages}" -- -ldflags="-X {REPO_PATH}/test/new-e2e/containers.GitCommit={commit}" {verbose} -mod={go_mod} -vet=off -timeout {timeout} -tags {go_build_tags} {nocache} {run} {skip} {coverage_opt} {test_run_arg}'
args = {
"go_mod": "mod",
"timeout": "4h",
"verbose": '-v' if verbose else '',
"nocache": '-count=1' if not cache else '',
"REPO_PATH": REPO_PATH,
"commit": get_git_commit(),
"run": '-test.run ' + METHOD_NAME if METHOD_NAME else '',
"skip": '-test.skip ' + skip if skip else '',
"coverage_opt": coverage_opt,
"test_run_arg": test_run_arg,
}
test_res = test_flavor(
ctx,
flavor=AgentFlavor.base,
build_tags=tags,
modules=[e2e_module],
args=args,
cmd=cmd,
env=envVars,
junit_tar=junit_tar,
save_result_json="",
test_profiler=None,
)
if junit_tar:
junit_files = []
for module_test_res in test_res:
if module_test_res.junit_file_path:
junit_files.append(module_test_res.junit_file_path)
produce_junit_tar(junit_files, junit_tar)
some_test_failed = False
for module_test_res in test_res:
failed, failure_string = module_test_res.get_failure(AgentFlavor.base)
some_test_failed = some_test_failed or failed
if failed:
print(failure_string)
if coverage:
print(f"In folder `test/new-e2e`, run `go tool cover -html={coverage_path}` to generate HTML coverage report")
if some_test_failed:
# Exit if any of the modules failed
raise Exit(code=1) | [
22
] |
def METHOD_NAME(
class_: MockObjClass = _UNDEF,
*,
entity_category: str = _UNDEF,
device_class: str = _UNDEF,
icon: str = _UNDEF,
block_inverted: bool = False,
default_restore_mode: str = "ALWAYS_OFF",
):
schema = _SWITCH_SCHEMA.extend(
{
cv.Optional(CONF_RESTORE_MODE, default=default_restore_mode): cv.enum(
RESTORE_MODES, upper=True, space="_"
),
}
)
if class_ is not _UNDEF:
schema = schema.extend({cv.GenerateID(): cv.declare_id(class_)})
if entity_category is not _UNDEF:
schema = schema.extend(
{
cv.Optional(
CONF_ENTITY_CATEGORY, default=entity_category
): cv.entity_category
}
)
if device_class is not _UNDEF:
schema = schema.extend(
{
cv.Optional(
CONF_DEVICE_CLASS, default=device_class
): validate_device_class
}
)
if icon is not _UNDEF:
schema = schema.extend({cv.Optional(CONF_ICON, default=icon): cv.icon})
if block_inverted:
schema = schema.extend(
{
cv.Optional(CONF_INVERTED): cv.invalid(
"Inverted is not supported for this platform!"
)
}
)
return schema | [
705,
135
] |
def METHOD_NAME(self):
return api.modes.COMMAND.last.widget.METHOD_NAME() | [
1056
] |
def METHOD_NAME(mock_service):
return mock_service(AnnotationDeleteService, name="annotation_delete") | [
2141,
34,
549
] |
def METHOD_NAME():
return 300_000 | [
1030,
1336,
16665
] |
def METHOD_NAME(self, signal: Signal, dry_run: bool = False) -> int:
"""
Send the email to the reporter
"""
try:
context = self.get_context(signal, dry_run)
except URLEncodedCharsFoundInText:
# Log a warning and add a note to the Signal that the email could not be sent
logger.warning(f'URL encoded text found in Signal {signal.id}')
Signal.actions.create_note(
{'text':
'E-mail is niet verzonden omdat er verdachte tekens in de meldtekst staan.'},
signal=signal
)
return 0 # No mail sent, return 0. Same behaviour as send_mail()
subject, message, html_message = self.render_mail_data(context)
return METHOD_NAME(subject=subject, message=message, from_email=self.from_email,
recipient_list=self.get_recipient_list(signal), html_message=html_message) | [
353,
3562
] |
def METHOD_NAME(request):
(tmp_cfg_fd, tmp_cfg_file_path) = tempfile.mkstemp()
os.close(tmp_cfg_fd)
def RemoveTempFile():
print("Removing %r" %(tmp_cfg_file_path))
os.remove(tmp_cfg_file_path)
request.addfinalizer(RemoveTempFile)
return tmp_cfg_file_path | [
963,
171
] |
def METHOD_NAME(elem):
if isinstance(elem, dict):
return ", ".join(k for k, v in elem.items())
if isinstance(elem, list):
return ", ".join(k for k in elem)
return None | [
2831,
245,
1532
] |
def METHOD_NAME(self) -> None:
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5]))
)
answer = [any_inter for (any_inter,) in res]
assert [1] == answer
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9]))
)
answer = [any_inter for (any_inter,) in res]
assert [0] == answer | [
9,
11479,
2147,
2845
] |
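The queries above exercise custom SQL functions registered on the connection. Assuming the test targets coverage.py's numbits helpers (where `nums_to_numbits` and `register_sqlite_functions` live in `coverage.numbits`), a standalone sketch looks like:

```python
import sqlite3
from coverage.numbits import nums_to_numbits, register_sqlite_functions

conn = sqlite3.connect(":memory:")
register_sqlite_functions(conn)
row = conn.execute(
    "select numbits_any_intersection(?, ?)",
    (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5])),
).fetchone()
print(row)  # (1,) -- the two sets share the element 3
```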
def METHOD_NAME(func):
def wrapper(*args, **kwargs):
"""A wrapper that rewrites the registry textfile each time a metric is updated."""
global _registry
func(*args, **kwargs)
prometheus_client.write_to_textfile(path=_registry_path, registry=_registry)
return wrapper | [
77,
510
] |
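Assuming the decorator above is meant to wrap metric-updating functions, usage would look like the following sketch (the decorator name `update_registry`, the registry path, and the metric are all illustrative):

```python
import prometheus_client

_registry = prometheus_client.CollectorRegistry()
_registry_path = "/tmp/metrics.prom"
requests_total = prometheus_client.Counter(
    "requests_total", "Total handled requests", registry=_registry
)

@update_registry  # hypothetical name for METHOD_NAME above
def handle_request():
    requests_total.inc()

handle_request()  # increments the counter and rewrites the textfile on disk
```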
def METHOD_NAME(self) -> int:
"""
Number of recovery points that are stored in a backup vault.
"""
return pulumi.get(self, "recovery_points") | [
1300,
182
] |
def METHOD_NAME():
if (
authentication.is_authenticated(flask.session)
and "developer_token" in flask.session
):
return flask.redirect(
flask.url_for("publisher_snaps.get_account_snaps")
)
# Get a bakery v2 macaroon from the publisher API to be discharged
# and save it in the session
flask.session["publisher-macaroon"] = publisher_api.get_macaroon(
authentication.PERMISSIONS
)
login_url = candid.get_login_url(
macaroon=flask.session["publisher-macaroon"],
callback_url=flask.url_for("login.login_callback", _external=True),
state=generate_csrf(),
)
# Next URL to redirect the user after the login
next_url = flask.request.args.get("next")
if next_url:
if not next_url.startswith("/") and not next_url.startswith(
flask.request.url_root
):
return flask.abort(400)
flask.session["next_url"] = next_url
return flask.redirect(login_url, 302) | [
273,
-1
] |
def METHOD_NAME(data):
value = base64.urlsafe_b64decode(data).decode()
if value.startswith(("[", "{")):
value = json.loads(value)
return value | [
1268,
-1,
718
] |
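Behaviour of the helper above, round-tripped: values whose decoded text starts with '[' or '{' are parsed as JSON, everything else comes back as a plain string. A minimal sketch using `decode_value` as a stand-in name:

```python
import base64
import json

def decode_value(data):  # same logic as METHOD_NAME above
    value = base64.urlsafe_b64decode(data).decode()
    if value.startswith(("[", "{")):
        value = json.loads(value)
    return value

token = base64.urlsafe_b64encode(json.dumps({"a": 1}).encode())
print(decode_value(token))                               # {'a': 1}
print(decode_value(base64.urlsafe_b64encode(b"hello")))  # hello
```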
def METHOD_NAME(self) -> str:
return self._log_directory | [
390,
2851
] |
def METHOD_NAME(self):
self.assertHoliday(f"{year}-01-28" for year in range(2003, 2050))
self.assertNoHoliday(f"{year}-01-28" for year in range(1991, 2003)) | [
9,
17949,
1724
] |
def METHOD_NAME(client, db_dj_model):
response = client.get('/api/db/v0/databases/')
response_data = response.json()
assert response.status_code == 200
assert response_data['count'] == 1
assert len(response_data['results']) == 1
check_database(db_dj_model, response_data['results'][0]) | [
9,
463,
245
] |
def METHOD_NAME(values):
return {
"srcs_version": attr.string(
default = "PY2AND3",
values = values,
),
} | [
129,
16215,
281,
864
] |
def METHOD_NAME(psf_king):
radius = psf_king.containment_radius(
fraction=0.68, energy_true=1 * u.TeV, offset=0.0 * u.deg
)
assert_allclose(radius, 0.14575 * u.deg, rtol=1e-5) | [
9,
12031,
14038,
8356,
3662
] |
def METHOD_NAME(function_scope_coinbasepro):
"""Test that querying balances from coinbasepro works fine"""
cb = function_scope_coinbasepro
cb_mock = create_coinbasepro_query_mock(cb)
with cb_mock:
balances, message = cb.query_balances()
assert message == ''
assert len(balances) == 2
assert balances[A_BAT].amount == FVal('10.5')
assert balances[A_BAT].usd_value == FVal('15.75')
assert balances[A_ETH].amount == FVal('2.5')
assert balances[A_ETH].usd_value == FVal('3.75') | [
9,
539,
2199
] |
def METHOD_NAME(self):
self.add_web_user_phone_number(verified=True)
request = self.request(verification_status='verified')
self.assertEqual(len(list(PhoneNumberReport(request, domain=self.domain_name).rows)), 1) | [
9,
1714,
106
] |
def METHOD_NAME(self):
# type: () -> None
for req in set(self._entries):
self.remove(req)
logger.debug("Removed build tracker: %r", self._root) | [
950
] |
def METHOD_NAME():
boolUseRandomTrace = True
boolUseDefaultConfig = True
trace_file = ""
config_file_list = list()
override_list = list()
for arg in sys.argv:
if arg.find("--tracefile=") != -1:
substrIndex = arg.find("=")+1
trace_file = arg[substrIndex:]
boolUseRandomTrace = False
print("Trace file:", trace_file)
elif arg.find("--configfile=") != -1:
substrIndex = arg.find("=")+1
config_file_list.append(arg[substrIndex:])
boolUseDefaultConfig = False
print("Config file list:", config_file_list)
elif arg != sys.argv[0]:
if arg.find("=") == -1:
print("Malformed config override found!: ", arg)
exit(-1)
override_list.append(arg)
print("Override: ", override_list[-1])
return [boolUseRandomTrace, trace_file, boolUseDefaultConfig, config_file_list, override_list] | [
203,
134
] |
def METHOD_NAME(section):
return list(get_default_config()[section]) | [
19,
75,
200,
342,
604,
1287
] |
def METHOD_NAME(self):
return len(self.actions_undo) > 0 | [
1046,
2796
] |
def METHOD_NAME():
# [START single_vector_search_with_filter]
query = "Top hotels in town"
search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key))
vector = Vector(value=get_embeddings(query), k=3, fields="descriptionVector")
results = search_client.search(
search_text="",
vectors=[vector],
filter="category eq 'Luxury'",
select=["hotelId", "hotelName"],
)
for result in results:
print(result)
# [END single_vector_search_with_filter] | [
97,
798,
1070,
41,
527
] |
def METHOD_NAME(self):
"""
Get field renderables.
Must be overridden in subclasses.
Returns:
list: List of :class:`django_cradmin.uicontainer.container.AbstractContainerRenderable`.
"""
raise NotImplementedError() | [
19,
101,
10137
] |
def METHOD_NAME():
test_recognizer = MockRecognizer(
patterns=[],
entity="ENTITY_1",
deny_list=["phone", "name"],
context=None,
name=None,
)
results = test_recognizer.analyze(
"my phone number is 555-1234, and my name is John", ["ENTITY_1"]
)
assert len(results) == 2
assert_result(results[0], "ENTITY_1", 3, 8, 1.0)
assert_result(results[1], "ENTITY_1", 36, 40, 1.0) | [
9,
1646,
7348,
245,
868,
2537,
622
] |
def METHOD_NAME():
hdr = OpenEXR.Header(100, 100)
for chans in [ set("a"), set(['foo', 'bar']), set("abcdefghijklmnopqstuvwxyz") ]:
hdr['channels'] = dict([(nm, Imath.Channel(Imath.PixelType(Imath.PixelType.FLOAT))) for nm in chans])
x = OpenEXR.OutputFile("out0.exr", hdr)
data = array('f', [0] * (100 * 100)).tobytes()
x.writePixels(dict([(nm, data) for nm in chans]))
x.close()
assert set(OpenEXR.InputFile('out0.exr').header()['channels']) == chans
print("mchannels ok") | [
9,
77,
-1
] |
def METHOD_NAME(value):
return self.read_file(os.path.join(sys_dir, value)).strip() | [
203,
3709,
171
] |
def METHOD_NAME(self, application_instance, db_session):
db_session.flush()
assert application_instance.provisioning is True | [
9,
1994,
1618,
24,
2019
] |
def METHOD_NAME(self, base_dir: str) -> None: ... | [
77,
1159,
100
] |
def METHOD_NAME(self):
producer = self.c.Producer()
consumer = self.c.Consumer([self.q3, self.q4])
producer.publish(
{'hello': 'world'},
declare=consumer.queues,
exchange=self.fanout,
)
assert self.q3(self.c).get().payload == {'hello': 'world'}
assert self.q4(self.c).get().payload == {'hello': 'world'}
assert self.q3(self.c).get() is None
assert self.q4(self.c).get() is None | [
9,
3117,
1049,
349
] |
def METHOD_NAME():
global mol, h4
mol.stdout.close()
del mol, h4 | [
531,
481,
298
] |