text (string, lengths 15–7.82k) | ids (sequence, lengths 1–7) |
---|---|
def METHOD_NAME(file_path: str, loaders):
    loader = _get_loader(file_path, loaders)
    # escaping file_path to avoid fsspec treating the path as a glob pattern
    # fsspec ignores `expand=False` in read mode
    with fsspec.open(escape_file_path(file_path)) as file:
        return loader(file) | [
557
] |
def METHOD_NAME(self):
    self.mc.events.post('show_slide1')
    self.advance_time()
    self.mc.events.post('move_out_top')
    self.advance_time() | [
9,
132,
1737,
1635
] |
def METHOD_NAME():
    from parallel_wavegan.models import HiFiGANGenerator as PWGHiFiGANGenerator
    model_pwg = PWGHiFiGANGenerator(**make_hifigan_generator_args())
    model_espnet2 = HiFiGANGenerator(**make_hifigan_generator_args())
    model_espnet2.load_state_dict(model_pwg.state_dict())
    model_pwg.eval()
    model_espnet2.eval()
    with torch.no_grad():
        c = torch.randn(3, 5)
        out_pwg = model_pwg.inference(c)
        out_espnet2 = model_espnet2.inference(c)
        np.testing.assert_array_equal(
            out_pwg.cpu().numpy(),
            out_espnet2.cpu().numpy(),
        ) | [
9,
1498,
18121,
2956
] |
def METHOD_NAME(s, strip_newlines=True):
    decoded = b64decode(s).decode('utf-8', errors='ignore')
    return decoded.replace('\r\n', '').replace('\n', '') if strip_newlines else decoded | [
1268,
321
] |
def METHOD_NAME(self_p):
    """
    Transform zchunk into a zframe that can be sent in a message.
    Take ownership of the chunk.
    """
    return utils.lib.zchunk_packx(utils.ffi.new("zchunk_t **", self_p._p)) | [
-1
] |
def METHOD_NAME():
    errors = db.engine.execute(BLOCKED_WORKFLOWS_QUERY).fetchall()
    all_blocking_wfs = {}
    for wf_id in errors:
        wf = workflow_object_class.get(wf_id[0])
        msg = wf.extra_data["_error_msg"]
        matches = re.findall(REGEEX_FIND_BLOCKING_WORKFLOWS, msg)
        all_blocking_wfs[wf_id[0]] = [int(match) for match in matches]
    return all_blocking_wfs | [
19,
5999,
9304
] |
def METHOD_NAME():
    yesterday = timezone.now() - datetime.timedelta(days=7)
    Status.objects.filter(date_created__lt=yesterday).delete() | [
34,
1019,
452,
688
] |
def METHOD_NAME(element: QueueTuple) -> ProviderStatus:
    prefix, example, url = element
    status_code: Optional[int]
    exception: Optional[str]
    context: Optional[str]
    try:
        res = requests.head(url, timeout=10, allow_redirects=True)
    except IOError as e:
        status_code = None
        failed = True
        exception = e.__class__.__name__
        context = str(e)
    else:
        status_code = res.status_code
        failed = res.status_code != 200
        exception = None
        context = None
    if failed:
        text = (
            f'[{datetime.datetime.now().strftime("%H:%M:%S")}] '
            + click.style(prefix, fg="green")
            + " at "
            + click.style(url, fg="red")
            + " failed to download"
        )
        if exception:
            text += ": " + click.style(exception, fg="bright_black")
        with tqdm.external_write_mode():
            click.echo(text)
    return ProviderStatus(
        prefix=prefix,
        example=example,
        url=url,
        failed=failed,
        status_code=status_code,
        exception=exception,
        context=context,
    ) | [
356
] |
def METHOD_NAME(self):
    d = openmediavault.collectiontools.DotDict()
    d['a'] = False
    self.assertEqual(d['a'], False) | [
9,
1327
] |
def METHOD_NAME(self):
    # Catching a string is bad.
    self.catch_fails("spam") | [
9,
1057,
144
] |
def METHOD_NAME(component_type: str, version: int, ip_address: str, port: int, id: int, num: Optional[int] = None):
    """ Run the module as a standalone Python script
    """
    log.debug('Start reading flex')
    dev = Device(Flex(configuration=FlexConfiguration(ip_address=ip_address, port=port)))
    if component_type in COMPONENT_TYPE_TO_MODULE:
        component_config = COMPONENT_TYPE_TO_MODULE[component_type].component_descriptor.configuration_factory()
    else:
        raise Exception("illegal component type " + component_type +
                        ". Allowed values: " +
                        ','.join(COMPONENT_TYPE_TO_MODULE.keys()))
    component_config.id = num
    component_config.configuration.version = version
    component_config.configuration.id = id
    dev.add_component(component_config)
    log.debug('openWB flex version: ' + str(version))
    log.debug('openWB flex-Kit IP address: ' + ip_address)
    log.debug('openWB flex-Kit port: ' + str(port))
    log.debug('openWB flex-Kit ID: ' + str(id))
    dev.update() | [
203,
3116
] |
def METHOD_NAME(command):
    assert not match(command) | [
9,
130,
590
] |
def METHOD_NAME(maintainer: str, repo: str) -> Response:
    return problems_generic(
        repo=repo,
        maintainer=maintainer.lower(),
        start=flask.request.args.to_dict().get('start'),
        end=flask.request.args.to_dict().get('end')
    ) | [
5888,
4674
] |
def METHOD_NAME(text):
    "Add borders above/below text block"
    maxwidth = max(len(line) for line in text.split("\n"))
    sep = "|w" + "~" * maxwidth + "|n"
    text = f"{sep}\n{text}\n{sep}"
    return text | [
238,
169,
5543
] |
def METHOD_NAME(record):
    """Get OpenAIRE datasource identifier."""
    return current_app.config['OPENAIRE_ZENODO_IDS'].get(openaire_type(record)) | [
7940,
745,
147
] |
def METHOD_NAME(fmax: float, fint: float, E_c: float,
                t_run: float=1e-6, t_averaging: float=1.0,
                t_gate: float=240e-9, A_flux_noise: float=6e-6):
    '''
    Calculate rms of phase error (in deg) due to flux noise at the
    interaction point of a CZ gate.
    Args:
        fmax (float):
            Sweet spot frequency of the qubit in Hz.
        fint (float):
            Frequency of the qubit at the interaction point of the CZ gate
            in Hz.
        E_c (float):
            Charging energy of the qubit in Hz.
        t_run (float):
            Duration of a single run of the circuit in s.
        t_averaging (float):
            Averaging time of the whole experiment in s.
        t_gate (float):
            Duration of the CZ gate in s.
        A_flux_noise (float):
            1/f noise magnitude.
    Returns:
        phase_rms: rms of the conditional phase error in deg.
    '''
    cosine = ((fint + E_c) / (fmax + E_c))**2
    beta = np.pi / 2 * (fint + E_c) * np.sqrt(1 - cosine**2) / cosine
    return (A_flux_noise * np.sqrt(np.log(t_averaging / t_run)) * t_gate *
            360 * beta) | [
19,
3200,
168,
6984
] |
def METHOD_NAME():
    build_selector = BuildSelector(
        build_config="*", skip_config="", requires_python=SpecifierSet(">=3.6, !=3.7.*")
    )
    assert build_selector("cp36-manylinux_x86_64")
    assert not build_selector("cp37-manylinux_x86_64")
    assert build_selector("cp38-manylinux_x86_64")
    assert build_selector("cp39-manylinux_x86_64") | [
9,
56,
5567,
440,
2351
] |
def METHOD_NAME(text):
    return re.sub(r'([^`[])`[.]?([\w]+)([()]+)?`', replace_with_link, text) | [
9523
] |
def METHOD_NAME(section: IntermediateRepr, key: str):
    to_type(section, key, ConfigList.validate) | [
24,
245
] |
def METHOD_NAME(reverse):
    users = [User.objects.create(username='rando_{}'.format(i)) for i in range(2)]
    for u in users:
        assert u.is_superuser is False
    system_admin = Role.singleton('system_administrator')
    if reverse:
        for u in users:
            u.roles.add(system_admin)
    else:
        system_admin.members.add(*[u.id for u in users])  # like .add(42, 54)
    for u in users:
        u.refresh_from_db()
        assert u.is_superuser is True
    users[0].roles.clear()
    for u in users:
        u.refresh_from_db()
    assert users[0].is_superuser is False
    assert users[1].is_superuser is True
    system_admin.members.clear()
    for u in users:
        u.refresh_from_db()
        assert u.is_superuser is False | [
9,
2619,
47,
137,
5733,
584
] |
def METHOD_NAME():
    """Ensure parent folder parameter is handled correctly."""
    from sasctl.core import RestObj
    FOLDER_NAME = "Spam"
    # Mock response when retrieving parent folder.
    PARENT_FOLDER = RestObj(
        {
            "name": "",
            "id": "123",
            "links": [{"rel": "self", "uri": "/folders/somewhere/spam-eggs-spam-spam"}],
        }
    )
    with mock.patch(
        "sasctl._services.folders.Folders.get_folder", return_value=PARENT_FOLDER
    ):
        with mock.patch("sasctl._services.folders.Folders.post") as post:
            folders.create_folder(FOLDER_NAME, parent="Doesnt Matter")
    # Should have tried to create folder with correct name and parent URI
    assert post.called
    json = post.call_args[1]["json"]
    params = post.call_args[1]["params"]
    assert json["name"] == FOLDER_NAME
    assert json["description"] is None
    assert params["parentFolderUri"] == PARENT_FOLDER["links"][0]["uri"]
    # If parent folder can't be found, error should be raised
    with mock.patch("sasctl._services.folders.Folders.get_folder", return_value=None):
        with mock.patch("sasctl._services.folders.Folders.post"):
            with pytest.raises(ValueError):
                folders.create_folder(FOLDER_NAME, parent="Doesnt Matter") | [
9,
129,
451,
41,
935
] |
def METHOD_NAME(self, scope, *, recurse=True, visited=None):
    #
    # We use a return statement even though this is a generator, simply
    # to avoid the generator overhead of yielding each element.
    #
    return cast("Element", self._plugin).METHOD_NAME(scope, recurse=recurse, visited=visited) | [
2410
] |
def METHOD_NAME(self, file_path: str, task: Any, prefix: str = "") -> None:
    """Checks for possible block usage"""
    if not task or not isinstance(task, dict):
        return
    if ResourceType.BLOCK in task and isinstance(task[ResourceType.BLOCK], list):
        prefix += f"{ResourceType.BLOCK}."  # with each nested level an extra block prefix is added
        self._create_block_vertices(file_path=file_path, block=task, prefix=prefix)
        for block_task in task[ResourceType.BLOCK]:
            self.METHOD_NAME(file_path=file_path, task=block_task, prefix=prefix)
    else:
        self._create_tasks_vertices(file_path=file_path, task=task, prefix=prefix) | [
356,
37
] |
def METHOD_NAME(self, xml, xmlns):
    xmlns = xmlns.encode('utf-8')
    return re.sub(br'http://openrosa\.org/formdesigner/[\w-]{36}', xmlns, xml) | [
369,
1133
] |
def METHOD_NAME():
    """
    Simple function to return if host is Photon OS or not
    """
    (osname, osrelease, oscodename) = (
        x.strip('"').strip("'") for x in linux_distribution()
    )
    return osname == "VMware Photon OS" | [
137,
-1
] |
def METHOD_NAME(self):
    """Return GitHub pull requests that squash bugs in the provided
    date window"""
    if self._pulls:
        return self._pulls
    self.get_issues()
    return self._pulls | [
19,
7965
] |
async def METHOD_NAME():
    """
    Get a list of available roles for users.
    Returns:
        dict[str,str]: A dictionary containing all available roles and their values.
    """
    user_roles = {}
    for role in UserRoleEnum:
        user_roles[role.name] = role.value
    return user_roles | [
19,
21,
2173
] |
def METHOD_NAME(self, attempts: int = 30) -> None:
    """
    Polls the DNS server over TCP until it gets a response, or until
    it runs out of attempts and raises a ValueError.
    The DNS response message must match the txn_id of the DNS query message,
    but otherwise the contents are ignored.
    :param int attempts: The number of attempts to make.
    """
    if not self.process:
        raise ValueError("DNS server has not been started. Please run start() first.")
    for _ in range(attempts):
        if self.process.poll():
            raise ValueError("BIND9 server stopped unexpectedly")
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5.0)
        try:
            sock.connect(BIND_BIND_ADDRESS)
            sock.sendall(BIND_TEST_QUERY)
            buf = sock.recv(1024)
            # We should receive a DNS message with the same tx_id
            if buf and len(buf) > 4 and buf[2:4] == BIND_TEST_QUERY[2:4]:
                return
            # If we got a response but it wasn't the one we wanted, wait a little
            time.sleep(1)
        except:  # pylint: disable=bare-except
            # If there was a network error, wait a little
            time.sleep(1)
        finally:
            sock.close()
    raise ValueError(
        "Gave up waiting for DNS server {} to respond".format(BIND_BIND_ADDRESS)
    ) | [
618,
1238,
1338
] |
def METHOD_NAME(data, name, run=0):
    """
    Converts the data into a numpy array in the format ID, time, value.
    :param ~neo.core.Block data: Data as returned by a getData() call
    :param str name: Name of the data to be extracted.
        Same values as used in getData()
    :param int run: Zero based index of the run to extract data for
    :rtype: ~numpy.ndarray
    """
    if len(data.segments) <= run:
        raise ValueError(
            f"Data only contains {len(data.segments)} so unable to run {run}. "
            "Note run is the zero based index.")
    if name == "all":
        raise ValueError("Unable to convert all data in one go "
                         "as result would be comparing apples and oranges.")
    if name == "spikes":
        return convert_spikes(data, run)
    return convert_analog_signal(
        data.segments[run].filter(name=name)[0]) | [
197,
365
] |
def METHOD_NAME(self):
    TimerHandler.stop_timer()
    self.do_GET_internal()
    TimerHandler.start_timer() | [
74,
19
] |
def METHOD_NAME(builder, authfactor2Type):
    return HelloNewAddAuthfactor2Type(builder, authfactor2Type) | [
238,
5813,
44
] |
def METHOD_NAME(self):
    body = json.dumps(self.PR_EVENT_BODY)
    self.get_response('pull_request', body)
    digest = models.GHIssueDigest.get('test/test', 123)
    self.assertTrue(digest.is_pr)
    self.assertTrue(digest.is_open)
    self.assertEqual(digest.involved, ['rmmh', 'spxtr'])
    self.assertEqual(digest.payload['title'], 'test pr')
    self.assertEqual(digest.payload['needs_rebase'], False) | [
9,
12,
1933,
1452
] |
def METHOD_NAME(
    hostname,
    filename,
    local_filename=None,
    force=False,
    port=22,
    user=None,
    ssh_keyscan=False,
):
    """
    Download files from other servers using ``scp``.
    + hostname: hostname to download from
    + filename: file to download
    + local_filename: where to download the file to (defaults to ``filename``)
    + force: always download the file, even if present locally
    + port: connect to this port
    + user: connect with this user
    + ssh_keyscan: execute ``ssh.keyscan`` before downloading the file
    """
    local_filename = local_filename or filename
    # Get local file info
    local_file_info = host.get_fact(File, path=local_filename)
    # Local file exists but isn't a file?
    if local_file_info is False:
        raise OperationError(
            "Local destination {0} already exists and is not a file".format(
                local_filename,
            ),
        )
    # If the local file exists and we're not forcing a re-download, no-op
    if local_file_info and not force:
        host.noop("file {0} is already downloaded".format(filename))
        return
    # Figure out where we're connecting (host or user@host)
    connection_target = hostname
    if user:
        connection_target = "@".join((user, hostname))
    if ssh_keyscan:
        yield from keyscan(hostname)
    # Download the file with scp
    yield "scp -P {0} {1}:{2} {3}".format(
        port,
        connection_target,
        filename,
        local_filename,
    )
    host.create_fact(
        File,
        kwargs={"path": local_filename},
        data={"mode": None, "group": None, "user": user, "mtime": None},
    ) | [
136
] |
def METHOD_NAME(self, runner):
    if not self.by_epoch:
        return
    # save checkpoint for following cases:
    # 1. every ``self.interval`` epochs
    # 2. reach the last epoch of training
    if self.every_n_epochs(
            runner, self.interval) or (self.save_last
                                       and self.is_last_epoch(runner)):
        runner.logger.info(
            f'Saving checkpoint at {runner.epoch + 1} epochs')
        if self.sync_buffer:
            allreduce_params(runner.model.buffers())
        self._save_checkpoint(runner) | [
1887,
849,
1165
] |
def METHOD_NAME(interpolator):
    model = gempy.create_data(extent=[0, 2500, 0, 1000, 0, 1000],
                              resolution=[50, 20, 20],
                              path_o=input_path2 + "jan_models/fixture_model_orientations.csv",
                              path_i=input_path2 + "jan_models/fixture_model_surfaces.csv")
    # Assigning series to surface as well as their order (timewise)
    gp.map_stack_to_surfaces(model,
                             {"Fault_Series": ('fault'), "Strat_Series1": ('rock3'),
                              "Strat_Series2": ('rock2', 'rock1'),
                              "Basement_Series": ('basement')})
    model.set_is_fault(['Fault_Series'])
    model.set_aesara_function(interpolator)
    return model | [
578,
2587
] |
def METHOD_NAME(ignore_pygments_extensions=True):
    """Get all file types supported by the Editor"""
    # The filter details are not hidden on Windows, so we can't use all
    # Pygments extensions on that platform.
    if os.name == 'nt' and ignore_pygments_extensions:
        supported_exts = []
    else:
        try:
            supported_exts = _get_pygments_extensions()
        except Exception:
            supported_exts = []
    # NOTE: Try to not add too much extensions to this list to not
    # make the filter look too big on Windows
    favorite_exts = ['.py', '.R', '.jl', '.ipynb', '.md', '.pyw', '.pyx',
                     '.c', '.cpp', '.json', '.dat', '.csv', '.tsv', '.txt',
                     '.ini', '.html', '.js', '.h', '.bat']
    other_exts = [ext for ext in supported_exts if ext not in favorite_exts]
    all_exts = tuple(favorite_exts + other_exts)
    text_filetypes = (_("Supported text files"), all_exts)
    return [text_filetypes] + EDIT_FILETYPES | [
19,
2004,
12778
] |
def METHOD_NAME():
    """Test serializer to GeoJSON"""
    input_data = {
        "metadata": {
            "locations": {
                "features": [
                    {
                        "geometry": {"type": "Point", "coordinates": [6.05, 46.23333]},
                        "identifiers": [
                            {"scheme": "geonames", "identifier": "2661235"}
                        ],
                        "place": "CERN",
                        "description": "Invenio birth place.",
                    }
                ]
            }
        },
        "contributors": ["Nielsen, Lars Holm"],
        "types": ["info:eu-repo/semantic/other"],
        "relations": ["doi:10.1234/foo.bar"],
        "descriptions": ["A description \nwith HTML tags", "Bla bla bla"],
        "identifiers": ["1924MNRAS..84..308E", "10.1234/inveniordm.1234"],
        "publishers": ["InvenioRDM"],
        "languages": ["dan", "eng"],
        "formats": ["application/pdf"],
        "titles": ["InvenioRDM"],
        "creators": ["Nielsen, Lars Holm"],
        "subjects": ["custom"],
        "dates": ["2018/2020-09", "info:eu-repo/date/embargoEnd/2131-01-01"],
        "rights": [
            "info:eu-repo/semantics/embargoedAccess",
            "A custom license",
            "https://customlicense.org/licenses/by/4.0/",
            "Creative Commons Attribution 4.0 International",
            "https://creativecommons.org/licenses/by/4.0/legalcode",
        ],
    }
    expected_data = {
        "type": "FeatureCollection",
        "features": [
            {
                "type": "Feature",
                "geometry": {"type": "Point", "coordinates": [6.05, 46.23333]},
                "properties": {
                    "identifiers": [{"scheme": "geonames", "identifier": "2661235"}],
                    "place": "CERN",
                    "description": "Invenio birth place.",
                },
            }
        ],
    }
    serializer = GeoJSONSerializer()
    serialized_record = serializer.dump_obj(input_data)
    assert serialized_record == expected_data | [
9,
4642,
1386,
97,
2530
] |
def METHOD_NAME(fn: TWrappedFunction) -> TWrappedFunction:
    @functools.wraps(fn)
    async def wrapper(*args, **kwargs):
        with self.start_flow(control_point, explicit_labels) as flow:
            if flow.should_run():
                return await run_fn(fn, *args, **kwargs)
            else:
                if on_reject:
                    return on_reject()
                raise RejectedFlowException("Flow was rejected")
    return wrapper | [
972
] |
def METHOD_NAME(path):
    path_directory = os.path.dirname(path)
    if not os.path.exists(path_directory):
        os.makedirs(path_directory)
    return path | [
93,
157,
6031
] |
def METHOD_NAME(self, batch_shape, _instance=None):
    raise NotImplementedError | [
2450
] |
def METHOD_NAME(cls, refs, ref_name, value):
    refs['{}__{}'.format(cls.__action__, ref_name)] = value | [
0,
272
] |
def METHOD_NAME(testcase: TestCase) -> Dict[str, str]:
    """Create the musicians LDAP resolver and remove it after test.
    manage a resolver for a test:
    - create it, if needed
    - yield to the test
    - remove it, if it was created
    Returns a dict containing
        name: visible name
        type: type of resolver
        fullname: type.name
    """
    music_resolver = data.musicians_ldap_resolver
    useridresolver_manager = testcase.useridresolver_manager
    resolver = useridresolver_manager.get_resolver_params_via_api(
        music_resolver["name"]
    )
    existing = resolver and resolver["type"]
    if not existing:
        useridresolver_manager.create_resolver_via_api(
            data.musicians_ldap_resolver
        )
    yield dict(
        name=music_resolver["name"],
        type=music_resolver["type"],
        fullname=music_resolver["type"] + "." + music_resolver["name"],
    )
    if not existing:
        useridresolver_manager.delete_resolver_via_api(music_resolver["name"]) | [
-1,
1836
] |
def METHOD_NAME(self):
    if not self.tempdir:
        self.tempdir = tempfile.mkdtemp() | [
963,
1190
] |
def METHOD_NAME():
    def testInner(nx, nh):
        x = np.random.randint(-30, 30, size=nx) + 1.0
        h = np.random.randint(-20, 20, size=nh) + 1.0
        gold = fftconvolve(x, h, mode="same")
        for size in [2, 3]:
            dirt = ols(x, h, [size])
            assert np.allclose(gold, dirt)
    for nx in [20, 21]:
        for nh in [9, 10, 17, 18]:
            testInner(nx, nh) | [
-1
] |
def METHOD_NAME(num_to_keep: int, **kwargs):
    """Trigger the clean old composes job. This is a periodic task."""
    from .clean_old_composes import main
    log.info("Received a clean old composes order")
    _do_init()
    main(num_to_keep) | [
1356,
2228,
1749,
758
] |
def METHOD_NAME(term):
    sys.stdout = term
    print("This print statement is redirected from stdout to the Panel Terminal")
    sys.stdout = sys.__stdout__
    print("This print statement is again redirected to the server console") | [
38,
24,
1019
] |
def METHOD_NAME(self, command):
    """Execute a cmt command in the cmt user area pointed to by the
    application. Will execute the command "cmt <command>" after the
    proper configuration. Do not include the word "cmt" yourself."""
    subprocess.run(f'cmt {command}',
                   shell=True,
                   check=False,
                   env=self.getenv(False),
                   cwd=self.user_release_area) | [
6986
] |
def METHOD_NAME(self) -> Model:
    self.validate()
    if self._model:
        try:
            dataset = DatasetDAO.update(
                self._model,
                attributes=self._properties,
            )
            return dataset
        except DAOUpdateFailedError as ex:
            logger.exception(ex.exception)
            raise DatasetUpdateFailedError() from ex
    raise DatasetUpdateFailedError() | [
22
] |
def METHOD_NAME(
    data_dtype,
    kernel_dtype,
    out_dtype,
    im_height,
    im_width,
    in_filter,
    out_filter,
    k_h,
    k_w,
    hpad,
    wpad,
    hstride,
    wstride,
):
    """
    Runs the inference and checks the functional correctness between
    compute and schedule outputs
    """
    (data_shape, kernel_shape, o_shape) = get_shape(
        im_height,
        im_width,
        in_filter,
        out_filter,
        k_h,
        k_w,
        hpad,
        wpad,
        hstride,
        wstride,
        out_dtype,
    )
    # Create TVM placeholders
    data = te.placeholder(data_shape, name="data", dtype=data_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
    # Create the numpy arrays to be used for executing conv models
    if data_dtype == "float32":
        data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
        kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
    else:
        data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
        kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
    # c_orig will be used for declaration output
    # c_sch will be used for scheduled computation output
    c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
    c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
    with tvm.target.Target(TARGET_NAME):
        conv = topi.nn.conv2d_NCHWc(
            data,
            kernel,
            stride=hstride,
            padding=hpad,
            dilation=(1, 1),
            layout="NCHWc",
            out_layout="NCHWc",
            out_dtype=out_dtype,
        )
        out = topi.nn.relu(conv)
        sch = te.create_schedule(out.op)
        func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
        func(data_array, kernel_array, c_orig)
        LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
        # Generate and run the optimized schedule
        sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
        func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
        func(data_array, kernel_array, c_sch)
        # Functional check
        if data_dtype == "uint8":
            np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
        else:
            assert np.allclose(c_orig.numpy(), c_sch.numpy())
        evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
        LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
        return evaluator(data_array, kernel_array, c_sch).mean | [
22,
1748
] |
def METHOD_NAME(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
    prepend_bos_src=None,
    lang_format="[{}]",
    input_feeding=True,
):
    def split_exists(split, src, tgt, lang, data_path):
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    src_datasets = []
    tgt_datasets = []
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")
        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )
        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            src_dataset = AppendTokenDataset(
                RandomCropDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)
        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )
        if not combine:
            break
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    elif prepend_bos_src is not None:
        logger.info(f"prepending src bos: {prepend_bos_src}")
        src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
    eos = None
    if append_source_id:
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index(lang_format.format(src))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index(lang_format.format(tgt))
            )
        eos = tgt_dict.index(lang_format.format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguagePairDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
        input_feeding=input_feeding,
    ) | [
557,
-1,
126
] |
def METHOD_NAME():
    """
    JSON request body:
    ```
    {
        "worksheet_uuid": "0xea72f9b6aa754636a6657ff2b5e005b0",
        "command": "cl run :main.py 'python main.py'",
        "autocomplete": false
    }
    ```
    JSON response body:
    ```
    {
        "structured_result": { ... },
        "output": "..."
    }
    ```
    """
    query = request.json
    if 'worksheet_uuid' not in query:
        abort(httplib.BAD_REQUEST, 'Missing `worksheet_uuid`')
    if 'command' not in query:
        abort(httplib.BAD_REQUEST, 'Missing `command`')
    # If 'autocomplete' field is set, return a list of completions instead
    if query.get('autocomplete', False):
        return {'completions': complete_command(query['worksheet_uuid'], query['command'])}
    return general_command(query['worksheet_uuid'], query['command']) | [
72,
13037,
462
] |
def METHOD_NAME() -> RequestWants:
    """Get request_wants from the request."""
    # Flask 2.0 deprecated use of _request_ctx_stack.top and recommends using `g`.
    # However, `g` is not suitable for us as we must cache results for a request only.
    # Therefore we stick it in the request object itself.
    if has_request_context():
        # pylint: disable=protected-access
        wants = getattr(request, '_request_wants', None)
        if wants is None:
            wants = RequestWants()
            request._request_wants = wants  # type: ignore[attr-defined]
        return wants
    # Return an empty handler
    return RequestWants() | [
19,
377,
14935
] |
def METHOD_NAME(self):
    """Test setting cutoff tolerances for the tapered operator works."""
    qubit_op = SparsePauliOp.from_list(
        [
            ("II", -1.0537076071291125),
            ("IZ", 0.393983679438514),
            ("ZI", -0.39398367943851387),
            ("ZZ", -0.01123658523318205),
            ("XX", 0.1812888082114961),
        ]
    )
    z2_symmetries = Z2Symmetries.find_z2_symmetries(qubit_op)
    z2_symmetries.tol = 0.2  # removes the X part of the tapered op which is < 0.2
    tapered_op = z2_symmetries.taper(qubit_op)[1]
    primitive = SparsePauliOp.from_list(
        [
            ("I", -1.0424710218959303),
            ("Z", -0.7879673588770277),
        ]
    )
    expected_op = primitive
    self.assertEqual(tapered_op, expected_op) | [
9,
5419,
16954,
441
] |
def METHOD_NAME(self, signature, payload):
    k = self.key
    if isinstance(self.key, rsa.RSAPrivateKey):
        k = self.key.public_key()
    return k.METHOD_NAME(signature=signature, data=payload,
                         padding=PSS(mgf=MGF1(SHA256()), salt_length=32),
                         algorithm=SHA256()) | [
1162
] |
def METHOD_NAME(roi, bundle_name="ROI"):
    """
    After being non-linearly transformed, ROIs tend to have holes in them.
    We perform a couple of computational geometry operations on the ROI to
    fix that up.
    Parameters
    ----------
    roi : 3D binary array
        The ROI after it has been transformed.
    bundle_name : str, optional
        Name of bundle, which may be useful for error messages.
        Default: None
    Returns
    -------
    ROI after dilation and hole-filling
    """
    hole_filled = ndim.binary_fill_holes(roi > 0)
    if not np.any(hole_filled):
        raise ValueError((
            f"{bundle_name} found to be empty after "
            "applying the mapping."))
    return hole_filled | [
1575,
1,
65
] |
def METHOD_NAME(self):
    b = b'SPA'
    with self.assertRaises(EOFError):
        sunau.open(io.BytesIO(b))
    b = b'SPAM'
    with self.assertRaisesRegex(sunau.Error, 'bad magic number'):
        sunau.open(io.BytesIO(b)) | [
9,
203,
1068,
5605,
106
] |
def METHOD_NAME(self, error_msg, search_in, feature_name, feature_dict):
    if feature_dict.get("disabled") is not None:
        return
    group = feature_dict.get("group", "")
    if not group:
        return
    if isinstance(group, str):
        group = group.split()
    for f in group:
        impl_dict = search_in.get(f)
        if not impl_dict or "disable" in impl_dict:
            continue
        raise AssertionError(error_msg +
                             "in option 'group', '%s' already exists as a feature name" % f
                             ) | [
9,
846
] |
def METHOD_NAME(funcs, tgt_type_key="tgt_type"):
    """
    Asserts that ``mine_get`` gives the expected results when request
    is a list.
    Actually this only tests that:
    - the correct check minions method is called
    - the correct cache key is subsequently used
    """
    funcs.cache.store(
        "minions/webserver",
        "mine",
        dict(ip_addr="2001:db8::1:3", ip4_addr="127.0.0.1"),
    )
    with patch(
        "salt.utils.minions.CkMinions._check_compound_minions",
        MagicMock(return_value=(dict(minions=["webserver"], missing=[]))),
    ):
        ret = funcs._mine_get(
            {
                "id": "requester_minion",
                "tgt": "G@roles:web",
                "fun": ["ip_addr", "ip4_addr"],
                tgt_type_key: "compound",
            }
        )
    assert ret == dict(
        ip_addr=dict(webserver="2001:db8::1:3"),
        ip4_addr=dict(webserver="127.0.0.1"),
    ) | [
9,
2292,
19,
553,
245
] |
def METHOD_NAME():
    print('-'*88)
    print("Welcome to the demo of version 2 of the Amazon Kinesis Data Analytics API.")
    print('-'*88)
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    kinesis_client = boto3.client('kinesis')
    analytics_client = boto3.client('kinesisanalyticsv2')
    iam_resource = boto3.resource('iam')
    application = KinesisAnalyticsApplicationV2(analytics_client)
    app_running_waiter = ApplicationRunningWaiter(analytics_client)
    input_stream_name = 'doc-example-stream-input'
    input_prefix = 'SOURCE_SQL_STREAM'
    output_stream_name = 'doc-example-stream-output'
    app_name = 'doc-example-app'
    role_name = 'doc-example-kinesis-read-write'
    print(f"Creating input stream {input_stream_name} and output stream "
          f"{output_stream_name}.")
    input_stream = KinesisStream(kinesis_client)
    input_stream.create(input_stream_name)
    output_stream = KinesisStream(kinesis_client)
    output_stream.create(output_stream_name)
    print("Starting data generator (on a separate thread) to put data into the "
          "input stream.")
    stream_thread = threading.Thread(
        target=generate, args=(input_stream.name, kinesis_client, False), daemon=True)
    stream_thread.start()
    print(f"Creating role {role_name} to let Kinesis Analytics read from the input "
          f"stream and write to the output stream.")
    role = application.create_read_write_role(
        role_name, input_stream.arn(), output_stream.arn(), iam_resource)
    print("Waiting for role to be ready.")
    time.sleep(10)
    print(f"Creating application {app_name}.")
    # Sometimes the role is still not ready and InvalidArgumentException is raised, so
    # continue to retry if this happens.
    app_data = exponential_retry('InvalidArgumentException')(
        application.create)(app_name, role.arn)
    pprint(app_data)
    print(f"Discovering schema of input stream {input_stream.name}.")
    input_schema = application.discover_input_schema(input_stream.arn(), role.arn)
    pprint(input_schema)
    print("Adding input stream to the application.")
    input_details = application.add_input(
        input_prefix, input_stream.arn(), input_schema)
    print("Input details:")
    pprint(input_details)
    print("Uploading SQL code to the application to process the input stream.")
    with open('analyticsv2/example.sql') as code_file:
        code = code_file.read()
    application.update_code(code)
    print("Adding output stream to the application.")
    application.add_output('DESTINATION_SQL_STREAM', output_stream.arn())
    print("Starting the application.")
    application.start(input_details['InputDescriptions'][0]['InputId'])
    print("Waiting for the application to start (this may take a minute or two).")
    app_running_waiter.wait(application.name)
    print("Application started. Getting records from the output stream.")
    for records in output_stream.get_records(50):
        if len(records) > 0:
            print(*[rec['Data'].decode() for rec in records], sep='\n')
    print("Cleaning up...")
    application.delete()
    input_stream.delete()
    output_stream.delete()
    print("Deleting read/write role.")
    for policy in role.attached_policies.all():
        role.detach_policy(PolicyArn=policy.arn)
        policy.delete()
    role.delete()
    print("Thanks for watching!")
    print('-'*88) | [
558,
2660
] |
def METHOD_NAME(error):
    return error.response["Error"]["Code"] | [
452,
544,
280,
442
] |
def METHOD_NAME(filename):
    """Recognize sound headers."""
    with open(filename, 'rb') as f:
        h = f.read(512)
        for tf in tests:
            res = tf(h, f)
            if res:
                return SndHeaders(*res)
    return None | [
9676
] |
def METHOD_NAME(labels_or_label_file, template=None):
    if isinstance(labels_or_label_file, str):
        labels = mmengine.list_from_file(labels_or_label_file)
    elif isinstance(labels_or_label_file, list):
        labels = labels_or_label_file
    else:
        raise ValueError(f'`labels_or_label_file` must be `list` or `str`, '
                         f'but got {type(labels_or_label_file)}')
    if template is None:
        template = [
            'a photo of action {}', 'a picture of action {}',
            'Human action of {}', '{}, an action', '{} this is an action',
            '{}, a video of action', 'Playing action of {}', '{}',
            'Playing a kind of action, {}', 'Doing a kind of action, {}',
            'Look, the human is {}', 'Can you recognize the action of {}?',
            'Video classification of {}', 'A video of {}', 'The man is {}',
            'The woman is {}'
        ]
    elif isinstance(template, str):
        template = [template]
    elif not mmengine.is_seq_of(template, str):
        raise ValueError(f'`template` must be list of `str`, `str` or `None`, '
                         f'but got {type(template)}')
    num_prompt = len(template)
    prompt = torch.cat(
        [clip.tokenize(t.format(c)) for t in template for c in labels])
    return prompt, num_prompt | [
526,
2995
] |
def METHOD_NAME(filename):
    """
    Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
    """
    ftype = IT_Invalid
    err_msg = None
    if not os.path.exists(filename):
        err_msg = "'%s' does not exist" % filename
    elif not os.path.isfile(filename):
        err_msg = "'%s' does not name a file" % filename
    elif is_executable_file(filename):
        ftype = IT_Executable
    elif is_json_file(filename):
        ftype = IT_JSON
    else:
        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
    return ftype, err_msg | [
6144,
362,
171
] |
def METHOD_NAME():
    instance = {
        'proxy': {'http': 'http://1.2.3.4:567', 'https': 'https://1.2.3.4:567', 'no_proxy': 'unused,google.com'}
    }
    init_config = {}
    http = RequestsWrapper(instance, init_config)
    response = http.get('http://www.google.com')
    response.raise_for_status()
    response = http.get('https://www.google.com')
    response.raise_for_status() | [
9,
127,
1068,
654,
127,
345,
1434
] |
def METHOD_NAME(common_setup_teardown, intfs_for_test, enum_frontend_asic_index):
    duthost, ptfhost, router_mac = common_setup_teardown
    intf1, intf2, intf1_indice, intf2_indice = intfs_for_test
    asichost = duthost.asic_instance(enum_frontend_asic_index)
    params = {
        'acs_mac': router_mac,
        'port': intf1_indice
    }
    # Test Gratuitous ARP behavior, no Gratuitous ARP installed when arp was not resolved before
    clear_dut_arp_cache(duthost, asichost.cli_ns_option)
    log_file = "/tmp/arptest.GarpNoUpdate.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
    ptf_runner(ptfhost, 'ptftests', "arptest.GarpNoUpdate", '/root/ptftests',
               params=params, log_file=log_file, is_python3=True)
    switch_arptable = asichost.switch_arptable()['ansible_facts']
    for ip in list(switch_arptable['arptable']['v4'].keys()):
        pytest_assert(ip != '10.10.1.7')
    # Test Gratuitous ARP update case, when received garp, no arp reply, update arp table if it was solved before
    log_file = "/tmp/arptest.ExpectReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
    ptf_runner(ptfhost, 'ptftests', "arptest.ExpectReply", '/root/ptftests',
               params=params, log_file=log_file, is_python3=True)
    switch_arptable = asichost.switch_arptable()['ansible_facts']
    pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['macaddress'] == '00:06:07:08:09:0a')
    pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['interface'] == intf1)
    time.sleep(2)
    log_file = "/tmp/arptest.GarpUpdate.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
    ptf_runner(ptfhost, 'ptftests', "arptest.GarpUpdate", '/root/ptftests',
               params=params, log_file=log_file, is_python3=True)
    switch_arptable = asichost.switch_arptable()['ansible_facts']
    pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['macaddress'] == '00:00:07:08:09:0a')
    pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['interface'] == intf1) | [
9,
4321,
-1,
654,
86
] |
def METHOD_NAME(tmpdir_factory, nwbfile):
    out_path = os.path.join(
        str(tmpdir_factory.mktemp("test_serialize")),
        "out.nwb"
    )
    sink = nwb2_sink.Nwb2Sink(None)
    sink._data = io.BytesIO()
    sink._h5_file = h5py.File(sink._data, "w")
    sink._nwb_io = pynwb.NWBHDF5IO(
        path=sink._h5_file.filename,
        mode="w",
        file=sink._h5_file
    )
    sink.nwbfile = nwbfile
    sink.serialize({"output_path": out_path})
    with pynwb.NWBHDF5IO(out_path, "r", load_namespaces=True) as reader:
        obt = reader.read()
        assert obt.identifier == "test session" | [
9,
183
] |
def METHOD_NAME(self):
    class Object(object):
        def __init__(self):
            self.executor = futures.thread.ThreadPoolExecutor(1)
        @run_on_executor()
        def f(self):
            return 42
    o = Object()
    async def f():
        answer = await o.f()
        return answer
    result = yield f()
    self.assertEqual(result, 42) | [
9,
958,
4096
] |
def METHOD_NAME():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    add_to_lockfile = subparsers.add_parser("add-to-lockfile")
    add_to_lockfile.add_argument("lockfile")
    add_to_lockfile.add_argument("packages", nargs="+")
    add_to_lockfile.set_defaults(func=cli_add_to_lockfile)
    update_mirror = subparsers.add_parser("update-mirror")
    update_mirror.add_argument("lockfile")
    update_mirror.add_argument("--awscli-profile", default="default")
    update_mirror.set_defaults(func=cli_update_mirror)
    install = subparsers.add_parser("install")
    install.add_argument("lockfile")
    install.add_argument("dest")
    install.set_defaults(func=cli_install)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        print("error: a subcommand is required (see --help)")
        exit(1)
    args.func(args) | [
615
] |
def METHOD_NAME(self):
    dt = (
        # New Year's Day
        "2017-01-02",
        "2022-01-03",
        "2023-01-02",
        # Workers' Day
        "2016-05-02",
        "2021-05-03",
        "2022-05-04",
        # Democracy Day
        "2016-05-30",
        "2021-06-14",
        "2022-06-13",
        # Independence Day
        "2016-10-03",
        "2017-10-02",
        "2022-10-03",
        "2023-10-02",
        # Christmas Day
        "2016-12-27",
        "2021-12-27",
        "2022-12-27",
        # Boxing Day
        "2020-12-28",
        "2021-12-28",
        # Id el Fitr
        "2018-06-18",
        "2020-05-26",
        "2023-04-24",
        # Id el Kabir
        "2019-08-13",
        "2020-08-03",
        "2022-07-11",
        "2022-07-12",
        # Id el Maulud
        "2019-11-11",
        "2022-10-10",
    )
    self.assertHoliday(dt)
    self.assertNoNonObservedHoliday(dt) | [
9,
2679
] |
def METHOD_NAME(self): | [
555
] |
def METHOD_NAME(self, orm):
    """
    This migration has been customized to support upgrades from Cloudera
    Enterprise 3.5, as well as Hue 1.2
    """
    if 'userman_ldapgroup' in connection.introspection.table_names():
        db.rename_table('userman_ldapgroup', 'useradmin_ldapgroup')
        db.delete_column('useradmin_ldapgroup', 'hidden')
    else:
        # Adding model 'LdapGroup'
        db.create_table('useradmin_ldapgroup', (
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='group', to=orm['auth.Group'])),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('useradmin', ['LdapGroup'])
    # Adding field 'UserProfile.creation_method'
    try:
        db.add_column('useradmin_userprofile', 'creation_method', self.gf('django.db.models.fields.CharField')(max_length=64, default=UserProfile.CreationMethod.HUE.name), keep_default=False)
    except Exception:
        # It's possible that we could run into an error here, because this
        # table may have been migrated from Cloudera Enterprise, in which case
        # this column would already exist.
        pass
    for user in User.objects.all():
        try:
            orm.UserProfile.objects.get(user=user)
        except orm.UserProfile.DoesNotExist:
            create_profile_for_user(user) | [
2368
] |
def METHOD_NAME(self):
    return 'http://localhost:%d' % self.port | [
414,
274
] |
def METHOD_NAME(
    self,
) -> Callable[
    [locations_pb2.GetLocationRequest],
    Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], | [
19,
708
] |
def METHOD_NAME(train_dataset):
    """Get transformers applied to datasets."""
    transformers = []
    # transformers = [
    #     deepchem.trans.LogTransformer(transform_X=True),
    #     deepchem.trans.NormalizationTransformer(transform_y=True,
    #                                             dataset=train_dataset)]
    return transformers | [
19,
7137
] |
def METHOD_NAME():
    assert md("<table><tr><td>td1</td><td>td2</td></tr></table>") == "td1\ttd2\t\n\n" | [
9,
410
] |
def METHOD_NAME(self):
    satXml = parse('/etc/tuxbox/satellites.xml').getroot()
    if satXml is not None:
        for sat in satXml.findall('sat'):
            name = sat.get('name') or None
            position = sat.get('position') or None
            if name is not None and position is not None:
                position = '%s.%s' % (position[:-1], position[-1:])
                if position.startswith('-'):
                    position = '%sW' % position[1:]
                else:
                    position = '%sE' % position
                if position.startswith('.'):
                    position = '0%s' % position
                self.satNames[position] = name.encode('utf-8') | [
203,
10270,
399
] |
def METHOD_NAME(self): | [
9,
2768,
724,
69,
2384
] |
def METHOD_NAME(names, name):
    def fun(self, value):
        for field in names:
            self.__dict__[field] = (name == field) and value
    return fun | [
503,
0
] |
def METHOD_NAME(request):
    if FirmwareFile.objects.all().count() > 0:
        form = DeleteFirmwareForm(initial={'firmware': FirmwareFile.objects.latest('upload_date').id})
        return render(request, 'uploader/manage.html', {'delete_form': form})
    form = DeleteFirmwareForm()
    return render(request, 'uploader/manage.html', {'delete_form': form}) | [
1232,
171
] |
def METHOD_NAME():
    """
    Build two points on a line around the origin at a random orientation.
    """
    vector = np.random.rand(3)
    vector /= np.linalg.norm(vector)
    points = np.stack([np.zeros((3, )), vector])
    points -= vector/2
    return points | [
10545,
637
] |
def METHOD_NAME(self, query):
    query = query.lower().replace(" ", "+")
    soup = self.get_soup(search_url % query)
    results = []
    for tab in soup.select(".c-tabs-item__content"):
        a = tab.select_one(".post-title h3 a")
        if not isinstance(a, Tag):
            continue
        latest = tab.select_one(".latest-chap .chapter a")
        if isinstance(latest, Tag):
            latest = latest.text.strip()
        status = tab.select_one(".mg_release .summary-content a")
        if isinstance(status, Tag):
            status = "Status: " + status.text.strip()
        results.append(
            {
                "title": a.text.strip(),
                "url": self.absolute_url(a["href"]),
                "info": " | ".join(filter(None, [latest, status])),
            }
        )
    return results | [
1070,
4734
] |
def METHOD_NAME(nocache=False, passed_jid=None):  # pylint: disable=unused-argument
    """
    Do any work necessary to prepare a JID, including sending a custom id
    """
    return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) | [
48,
1215
] |
def METHOD_NAME(self, locs):
    """Compute displacement field at locations.
    """
    (npts, dim) = locs.shape
    disp = numpy.zeros((numSteps, npts, self.SPACE_DIM), dtype=numpy.float64)
    disp[:, :, 2] = numpy.dot(ezz.reshape(numSteps, 1), (locs[:, 2] + 8000.0).reshape(1, npts))
    return disp | [
6619
] |
def METHOD_NAME(data: bytes) -> Toif:
    return from_struct(ToifStruct.parse(data)) | [
280,
321
] |
def METHOD_NAME(text, width):
    if width < 5:
        width = 5
    diff = get_console_length(text) - width
    if diff > 0:
        text = _lose_width(text, diff+3) + '...'
    return _pad_width(text, width) | [
2459,
2516,
799
] |
def METHOD_NAME(self, _, **attrs):
    raise NotImplementedError(
        'upload_to_gcs should not be called for CDCVaccinationNational') | [
172,
24,
4191
] |
def METHOD_NAME(self) -> Dict[str, Any]:
    return {
        "features_col": None,
    } | [
19,
5165,
1852,
434
] |
def METHOD_NAME(self, unpack_item):
    n = self.unpack_uint()
    return self.unpack_farray(n, unpack_item) | [
789,
877
] |
def METHOD_NAME(cls) -> str:
    return 'C sharp' | [
19,
52,
2938
] |
def METHOD_NAME(self):
    for e in self.valid_emails:
        assert self.form_class.allow_register(e) is True
    for e in self.invalid_emails:
        assert self.form_class.allow_register(e) is False | [
9,
2569,
372
] |
def METHOD_NAME(self, pred3d, pred2d, inputs):
    """
    mpjpe: mpjpe loss between 3d joints
    keypoint_2d_loss: 2d joints loss compute by criterion_2dpose
    """
    gt_3d_joints = inputs['joints_3d']
    gt_2d_joints = inputs['joints_2d']
    has_3d_joints = inputs['has_3d_joints']
    has_2d_joints = inputs['has_2d_joints']
    loss_3d = mpjpe_focal(pred3d, gt_3d_joints, has_3d_joints)
    loss = self.weight_3d * loss_3d
    epoch = inputs['epoch_id']
    if self.weight_2d > 0:
        weight = self.weight_2d * pow(0.1, (epoch // 8))
        if epoch > 8:
            weight = 0
        loss_2d = keypoint_2d_loss(self.criterion_2dpose, pred2d,
                                   gt_2d_joints, has_2d_joints)
        loss += weight * loss_2d
    return loss | [
76
] |
def METHOD_NAME():
    scaling_type = np.float32
    rng = np.random.RandomState(20111121)
    N = 10000
    sd_10s = range(-20, 51, 5)
    iuint_types = np.sctypes['int'] + np.sctypes['uint']
    # Remove types which cannot be set into nifti header datatype
    nifti_supported = supported_np_types(Nifti1Header())
    iuint_types = [t for t in iuint_types if t in nifti_supported]
    f_types = [np.float32, np.float64]
    # Expanding standard deviations
    for i, sd_10 in enumerate(sd_10s):
        sd = 10.0**sd_10
        V_in = rng.normal(0, sd, size=(N, 1))
        for j, in_type in enumerate(f_types):
            for k, out_type in enumerate(iuint_types):
                check_arr(sd_10, V_in, in_type, out_type, scaling_type)
    # Spread integers across range
    for i, sd in enumerate(np.linspace(0.05, 0.5, 5)):
        for j, in_type in enumerate(iuint_types):
            info = np.iinfo(in_type)
            mn, mx = info.min, info.max
            type_range = mx - mn
            center = type_range / 2.0 + mn
            # float(sd) because type_range can be type 'long'
            width = type_range * float(sd)
            V_in = rng.normal(center, width, size=(N, 1))
            for k, out_type in enumerate(iuint_types):
                check_arr(sd, V_in, in_type, out_type, scaling_type) | [
9,
3834,
3835
] |
def METHOD_NAME(*args, **kwargs):
    synced = cache.get('synced_registered_tasks', False)
    if synced:
        return
    cache.set('synced_registered_tasks', True, 60)
    with transaction.atomic():
        try:
            db_tasks = CeleryTask.objects.all()
            celery_task_names = [key for key in app.tasks]
            db_task_names = db_tasks.values_list('name', flat=True)
            db_tasks.exclude(name__in=celery_task_names).delete()
            not_in_db_tasks = set(celery_task_names) - set(db_task_names)
            tasks_to_create = [CeleryTask(name=name) for name in not_in_db_tasks]
            CeleryTask.objects.bulk_create(tasks_to_create)
        except ProgrammingError:
            pass | [
164,
3024,
620
] |
def METHOD_NAME(self):
    for key, value in self.stat_struct.items():
        modvalue = getattr(stat, key)
        self.assertEqual(value, modvalue, key)
    for key, value in self.permission_bits.items():
        modvalue = getattr(stat, key)
        self.assertEqual(value, modvalue, key)
    for key in self.file_flags:
        modvalue = getattr(stat, key)
        self.assertIsInstance(modvalue, int)
    for key in self.formats:
        modvalue = getattr(stat, key)
        self.assertIsInstance(modvalue, int)
    for key in self.format_funcs:
        func = getattr(stat, key)
        self.assertTrue(callable(func))
        self.assertEqual(func(0), 0) | [
9,
298,
177
] |
def METHOD_NAME(parfile):
    if parfile not in _cached_toas:
        model = get_model_cached(parfile)
        if hasattr(model, "T0") and model.T0.value is not None:
            start = model.T0.value
        elif hasattr(model, "TASC") and model.TASC.value is not None:
            start = model.TASC.value
        elif hasattr(model, "PEPOCH") and model.PEPOCH.value is not None:
            start = model.PEPOCH.value
        else:
            start = 57000
        with quiet():
            toas1 = pint.simulation.make_fake_toas_uniform(
                model=model,
                startMJD=start,
                endMJD=start + 100,
                ntoas=5,
                freq=1400 * u.MHz,
                obs="gbt",
            )
            toas2 = pint.simulation.make_fake_toas_uniform(
                model=model,
                startMJD=start + 1,
                endMJD=start + 102,
                ntoas=5,
                freq=2000 * u.MHz,
                obs="ao",
            )
            toas = pint.toa.merge_TOAs([toas1, toas2])
            phase = model.phase(toas, abs_phase=False)
        _cached_toas[parfile] = model, toas, phase
    return copy.deepcopy(_cached_toas[parfile]) | [
19,
578,
61,
12655
] |
def METHOD_NAME(self):
    """
    Retrieves whether thermal module is replaceable
    Returns:
        A boolean value, True if replaceable, False if not
    """
    return False | [
137,
5941
] |
def METHOD_NAME(self):
    code = """ | [
9,
69,
2551,
69,
2551,
194,
972
] |
def METHOD_NAME(obj):
    """Ensures that all strings are valid UTF-8 encoded, which is
    required by MongoDB to be able to store the JSON documents.
    @param obj: analysis results dictionary.
    """
    if not obj:
        return
    items = []
    if isinstance(obj, dict):
        items = obj.items()
    elif isinstance(obj, list):
        items = enumerate(obj)
    for k, v in items:
        # This type check is intentionally not done using isinstance(),
        # because bson.binary.Binary *is* a subclass of bytes/str, and
        # we do not want to convert that.
        if isinstance(v, str):
            try:
                v.encode()
            except UnicodeEncodeError:
                obj[k] = "".join(str(ord(_)) for _ in v).encode()
        else:
            METHOD_NAME(v) | [
602,
1205,
3690
] |
def METHOD_NAME(_instance):
    assert _instance.enabled | [
9,
1111,
1042
] |
def METHOD_NAME(self, *args, **kargs):
    log_interactive.warning("run() method deprecated. The instance is now callable")  # noqa: E501
    self(*args, **kargs) | [
22
] |