text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
def METHOD_NAME() -> None:
assert binb._server_url("foo.com:8888", 10) == "http://foo.com:10/"
assert binb._server_url("http://foo.com:8888", 10) == "http://foo.com:10/"
assert binb._server_url("https://foo.com:8888", 10) == "https://foo.com:10/" | [
9,
163,
274
] |
def METHOD_NAME(self):
emr_client = mock.MagicMock
emr_client.clusters = {}
with mock.patch(
"prowler.providers.aws.services.emr.emr_service.EMR",
new=emr_client,
):
# Test Check
from prowler.providers.aws.services.emr.emr_cluster_master_nodes_no_public_ip.emr_cluster_master_nodes_no_public_ip import (
emr_cluster_master_nodes_no_public_ip,
)
check = emr_cluster_master_nodes_no_public_ip()
result = check.execute()
assert len(result) == 0 | [
9,
654,
1827
] |
def METHOD_NAME(
self, collection_modules: Dict[str, List[str]]
) -> None:
"""
Setup repository modules and streams
Implementation in specialized package manager class
:param dict collection_modules: unused
"""
raise NotImplementedError | [
102,
1230,
468
] |
def METHOD_NAME(
self, start: float = 0.0, end: float = 1000.0, n: int = 10
) -> t.List[Decimal]:
"""Generate a decimal number as :py:class:`decimal.Decimal` objects.
:param start: Start range.
:param end: End range.
:param n: Length of the list.
:return: A list of random decimal numbers.
"""
return [self.decimal_number(start, end) for _ in range(n)] | [
4936
] |
def METHOD_NAME(self):
return self.get("query_parameters") | [
539,
386
] |
def METHOD_NAME(entity_type, term):
"""Proxy to search entities on remote server.
Currently, we only search on MEF remote servers. If multiple remote sources
can be searched, a request must be sent to each remote API and
all results must be unified into a common response.
:param entity_type: The type of entities to search.
:param term: the searched term.
"""
try:
return MEFProxyFactory.create_proxy(entity_type).search(term)
except ValueError as err:
abort(400, str(err)) | [
2437,
1070,
127
] |
def METHOD_NAME(self):
self.fw_one.list_of_all_included_files.append('foo')
self.fw_two.list_of_all_included_files.append('foo')
result = self.c_plugin.compare_function([self.fw_one, self.fw_two])
assert len(result.keys()) == 5 # noqa: PLR2004 | [
9,
22,
979,
2793
] |
def METHOD_NAME(self, event: QGraphicsSceneDragDropEvent):
event.accept() | [
2572,
132,
417
] |
def METHOD_NAME(self):
a = "A" * 0x10000
self.assertRaises(MemoryError, strop.replace, a, "A", a) | [
9,
-1,
1482
] |
def METHOD_NAME(self):
return problem_data_storage.exists('%s/init.yml' % self.problem.code) | [
220,
5024
] |
def METHOD_NAME(self):
schema = self._get_schema()
schema.validate({"name": "Eggs", "slaves": "eth0"}) | [
9,
187,
652
] |
def METHOD_NAME(ch: channel.Channel) -> None:
ch.sendline("echo Hello; echo World", read_back=True)
out_s = ch.readline()
assert out_s == "Hello\n"
out_s = ch.readline()
assert out_s == "World\n" | [
9,
53,
2532
] |
def METHOD_NAME(**kwargs):
test_passed = True
for k, v in kwargs.items():
# check for incorrect types
if (
(k == "X")
or (k == "X_test")
or (k == "y")
or (k == "y_test")
or (k == "y_true")
or (k == "y_probas")
):
# FIXME: do this individually
if not isinstance(
v,
(
Sequence,
Iterable,
np.ndarray,
np.generic,
pd.DataFrame,
pd.Series,
list,
),
):
wandb.termerror("%s is not an array. Please try again." % (k))
test_passed = False
# check for classifier types
if k == "model":
if (not sklearn.base.is_classifier(v)) and (
not sklearn.base.is_regressor(v)
):
wandb.termerror(
"%s is not a classifier or regressor. Please try again." % (k)
)
test_passed = False
elif k == "clf" or k == "binary_clf":
if not (sklearn.base.is_classifier(v)):
wandb.termerror("%s is not a classifier. Please try again." % (k))
test_passed = False
elif k == "regressor":
if not sklearn.base.is_regressor(v):
wandb.termerror("%s is not a regressor. Please try again." % (k))
test_passed = False
elif k == "clusterer":
if not (getattr(v, "_estimator_type", None) == "clusterer"):
wandb.termerror("%s is not a clusterer. Please try again." % (k))
test_passed = False
return test_passed | [
9,
119
] |
def METHOD_NAME(self):
submission = dict(self.MOCK_TEAM_SUBMISSION)
submission['submission_uuids'] = []
with self.assertRaises(AssessmentWorkflowInternalError):
with self.mock_submissions_api_get(submission):
TeamAssessmentWorkflow.start_workflow(self.team_submission_uuid) | [
9,
447,
3855,
654,
2150,
1469
] |
def METHOD_NAME():
# Nintendo
with Image.open("Tests/images/sugarshack.mpo") as im:
exif = im.getexif()
assert exif.get_ifd(0x927C)[0x1101]["Parallax"] == -44.798187255859375
# Fujifilm
with Image.open("Tests/images/fujifilm.mpo") as im:
im.seek(1)
exif = im.getexif()
assert exif.get_ifd(0x927C)[0xB211] == -3.125 | [
9,
17537
] |
def METHOD_NAME(monkeypatch):
monkeypatch.setattr('processor.template_processor.base.base_template_processor.get_collection_size', mock_get_collection_size)
monkeypatch.setattr('processor.template_processor.base.base_template_processor.create_indexes', mock_create_indexes)
monkeypatch.setattr('processor.template_processor.base.base_template_processor.insert_one_document', mock_insert_one_document)
from processor.template_processor.google_template_processor import GoogleTemplateProcessor
node_data = template_processor_kwargs["snapshot"]["nodes"][0]
template_processor = GoogleTemplateProcessor(node_data, **template_processor_kwargs)
snapshot_data = template_processor.populate_template_snapshot()
assert snapshot_data == {
'SNAPSHOT_1': True
}
assert template_processor.processed_template is not None
assert template_processor.processed_template["resources"][0]["properties"]["steps"][0]["args"][1] == "deployments" | [
9,
3914,
671,
394,
2019
] |
def METHOD_NAME(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_proposals_thread, **kwargs):
"""Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_proposals_thread
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
for tid in range(pgm_proposals_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_proposals_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmengine.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, video_name + '.csv')
np.savetxt(
proposal_path,
proposals,
header=header,
delimiter=',',
comments='')
prog_bar.update() | [
567,
4528
] |
def METHOD_NAME(self):
try:
import Metal
except ImportError:
logging.error("Failure to import Metal, please install pyobjc-framework-Metal")
raise
import objc
dtype_out = vaex.dtype(self.return_dtype).numpy
if dtype_out.name == "float64":
dtype_out = np.dtype("float32")
warnings.warn("Casting output from float64 to float32 since Metal does not support float64")
ast_node = expresso.parse_expression(self.expression)
cppcode = node_to_cpp(ast_node)
typemap = {'float32': 'float',
'float64': 'float'} # we downcast!
for name in vaex.array_types._type_names_int:
typemap[name] = f'{name}_t'
typenames = [typemap[dtype.name] for dtype in self.argument_dtypes]
metal_args = [f'const device {typename} *{name}_array [[buffer({i})]]' for i, (typename, name) in
enumerate(zip(typenames, self.arguments))]
code_get_scalar = [f' {typename} {name} = {name}_array[id];\n' for typename, name, in zip(typenames, self.arguments)]
sourcecode = ''' | [
296
] |
def METHOD_NAME(self):
"""Test the init method of the documentation filter form"""
# get user
test_user = User.objects.get(username='testuser_filter_forms')
# get user config
user_config = UserConfigModel.objects.get(
user_config_username=test_user, filter_view='documentation'
)
# get notestatus
notestatus = Notestatus.objects.get(notestatus_name='test_filter_forms_status')
# get object before assignment
form_wo_notestatus = DocumentationFilterForm(
data={'user_config_id': user_config.user_config_id}, instance=user_config
)
# assign notestatus
user_config.filter_list_status = notestatus
user_config.save()
# get object
form_with_notestatus = DocumentationFilterForm(
data={'user_config_id': user_config.user_config_id}, instance=user_config
)
# compare
self.assertFalse('filter_list_status' in form_wo_notestatus.initial)
self.assertEqual(
form_with_notestatus.initial['filter_list_status'], notestatus.notestatus_id
) | [
9,
1200,
527,
1029,
176
] |
def METHOD_NAME(self):
path1 = os.path.join(settings.FILEBROWSER_ROOT, '1/a/path')
path2 = os.path.join(settings.FILEBROWSER_ROOT, 'not_int/a/path')
path3 = 'not_int/a/path'
path4 = '1/a/path'
self.assertEqual(displayed_path(path1), 'home/a/path')
self.assertEqual(displayed_path(path2), 'not_int/a/path')
self.assertEqual(displayed_path(path3), 'not_int/a/path')
self.assertEqual(displayed_path(path4), 'home/a/path') | [
9,
4898,
157
] |
def METHOD_NAME(lst: list):
"""Argmax function.
Parameters
----------
lst
"""
return max(range(len(lst)), key=lst.__getitem__) | [
2098
] |
def METHOD_NAME(self): | [
9,
599,
3272
] |
def METHOD_NAME(self):
# empty
g = GroupCoordinates([])
# same dims, unstacked
c1 = Coordinates([[0, 1], [0, 1]], dims=["lat", "lon"])
c2 = Coordinates([[10, 11], [10, 11]], dims=["lat", "lon"])
g = GroupCoordinates([c1, c2])
# same dims, stacked
c1 = Coordinates([[[0, 1], [0, 1]]], dims=["lat_lon"])
c2 = Coordinates([[[10, 11], [10, 11]]], dims=["lat_lon"])
g = GroupCoordinates([c1, c2])
# different order
c1 = Coordinates([[0, 1], [0, 1]], dims=["lat", "lon"])
c2 = Coordinates([[10, 11], [10, 11]], dims=["lon", "lat"])
g = GroupCoordinates([c1, c2])
# different stacking
c1 = Coordinates([[0, 1], [0, 1]], dims=["lat", "lon"])
c2 = Coordinates([[[10, 11], [10, 11]]], dims=["lat_lon"])
g = GroupCoordinates([c1, c2]) | [
9,
176
] |
def METHOD_NAME(
settings, client, test_user
):
# Given
settings.MAX_PASSWORD_RESET_EMAILS = 2
settings.PASSWORD_RESET_EMAIL_COOLDOWN = 60 * 60 * 24
url = reverse("api-v1:custom_auth:ffadminuser-reset-password")
data = {"email": test_user.email}
# First, let's hit the limit of emails we can send
for _ in range(5):
response = client.post(
url, data=json.dumps(data), content_type="application/json"
)
assert response.status_code == status.HTTP_204_NO_CONTENT
# Then - we should only have two emails
assert len(mail.outbox) == 2
mail.outbox.clear()
# Next, let's reset the password
reset_password_data = {
"new_password": "new_password",
"re_new_password": "new_password",
"uid": utils.encode_uid(test_user.pk),
"token": default_token_generator.make_token(test_user),
}
reset_password_confirm_url = reverse(
"api-v1:custom_auth:ffadminuser-reset-password-confirm"
)
response = client.post(
reset_password_confirm_url,
data=json.dumps(reset_password_data),
content_type="application/json",
)
assert response.status_code == status.HTTP_204_NO_CONTENT
# Finally, let's try to send another email
client.post(url, data=json.dumps(data), content_type="application/json")
# Then - we should receive another email
assert len(mail.outbox) == 1 | [
9,
353,
656,
2897,
5814,
1585,
1467
] |
def METHOD_NAME(self) -> Generator[CursorWrapper, None, None]: ... | [
5944,
550
] |
def METHOD_NAME(module, inputs, outputs):
nonlocal header
nonlocal fp
if trainer.training:
values = []
headers = []
for n, i in enumerate(inputs):
if isinstance(i, torch.Tensor) and (
i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
):
if not header:
headers.append('input')
input_norm = i.data.norm()
values.append(f'{input_norm}')
logger(f'debug_info_forward/{name}_rank{rank}_input{n}', input_norm)
if isinstance(outputs, tuple):
for n, i in enumerate(outputs):
if isinstance(i, torch.Tensor) and (
i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
):
if not header:
headers.append('output')
output_norm = i.data.norm()
values.append(f'{output_norm}')
logger(f'debug_info_forward/{name}_rank{rank}_output{n}', output_norm)
else:
headers.append('output')
values.append(f'{outputs.data.norm()}')
values.append(f'{trainer.global_step}')
if not header:
headers.append('step')
fp.write(','.join(headers) + '\n')
header = True
fp.write(','.join(values) + '\n')
fp.flush() | [
76,
1021
] |
def METHOD_NAME(file_name: str, graphs: Dict[str, pynini.FstLike]):
"""
Exports graph as OpenFst finite state archive (FAR) file with given file name and rule name.
Args:
file_name: exported file name
graphs: Mapping of a rule name and Pynini WFST graph to be exported
"""
exporter = export.Exporter(file_name)
for rule, graph in graphs.items():
exporter[rule] = graph.optimize()
exporter.close()
print(f"Created {file_name}") | [
1443,
57
] |
def METHOD_NAME(self):
"""Verify that old password is correct"""
old_password = self.cleaned_data['old_password']
is_valid_password = self.account.check_password(old_password)
if not is_valid_password:
self.clear_passwords(self.cleaned_data)
raise forms.ValidationError('Password is incorrect')
return | [
1356,
2228,
2897
] |
def METHOD_NAME(self):
"""Test Initialization of AddresFilter.Range."""
assert AddressFilter.Range("*").get_range() == (0, 65535)
assert AddressFilter.Range("5").get_range() == (5, 5)
assert AddressFilter.Range("0").get_range() == (0, 0)
assert AddressFilter.Range("3-5").get_range() == (3, 5)
assert AddressFilter.Range("5-3").get_range() == (3, 5)
assert AddressFilter.Range("-5").get_range() == (0, 5)
assert AddressFilter.Range("5-").get_range() == (5, 65535)
assert AddressFilter.Range("70-100").get_range() == (70, 100) | [
9,
661,
7319
] |
def METHOD_NAME(tp, value, tb=None):
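# Re-raise an exception, attaching the given traceback when it differs from the one already on the exception (six.reraise-style helper).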
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value | [
4922
] |
def METHOD_NAME():
#ns=Pyro.naming.NameServerLocator().getNS()
ns = hybrid_ns.getNS()
#tq = Pyro.core.getProxyForURI("PYRONAME://" + taskQueueName)
procName = compName + ' - PID:%d' % os.getpid()
import logging
logging.basicConfig(filename='taskWorkerZC_%d.log' % os.getpid(), level=logging.INFO)
logger = logging.getLogger(__file__)
serverFails = {}
#loop forever asking for tasks
while 1:
queueNames = ns.list('TaskQueues')
#print queueNames
tasks = []
#loop over all queues, looking for tasks to process
while len(tasks) == 0 and len(queueNames) > 0:
#try queue on current machine first
#print queueNames
if compName in queueNames:
qName = compName
queueNames.remove(qName)
else: #pick a queue at random
qName = queueNames.pop(random.randint(0, len(queueNames)-1))
try:
#print qName
tq = Pyro.core.getProxyForURI(ns.resolve(qName))
tq._setTimeout(10)
tq._setOneway(['returnCompletedTask'])
#print qName
#ask the queue for tasks
logging.debug('Getting tasks from server')
tasks = tq.getTasks(procName, PYME.version.version)
logging.debug('Got %d tasks' % len(tasks))
#we successfully contacted the server, so reset its fail count
serverFails[qName] = 0
except Pyro.core.ProtocolError as e:
logging.exception('Pyro error: %s' %e.message)
if e.message == 'connection failed':
#remember that the server failed - and put it 'on notice'
nFails = 1
if qName in serverFails.keys():
nFails += serverFails[qName]
serverFails[qName] = nFails
if False:#nFails >= 4:
#server is dead in the water - put it out of its misery
print(('Killing:', qName))
try:
ns.unregister('TaskQueues.%s' % qName)
except Pyro.errors.NamingError:
pass
except Exception:
import traceback
logger.exception(traceback.format_exc())
#pass
if len(tasks) == 0: #no queues had tasks
logger.debug('No tasks available, waiting')
time.sleep(1) #put ourselves to sleep to avoid constant polling
#else:
# print qName, len(tasks)
#results = []
#loop over tasks - we pop each task and then delete it after processing
#to keep memory usage down
while len(tasks) > 0:
#get the next task (a task is a function, or more generally, a class with
#a __call__ method)
task = tasks.pop(0)
try:
#execute the task,
t1 = time.time()
logger.debug('running task')
res = task(taskQueue=tq)
t2 = time.time()
if task.resultsURI is not None:
# new style way of returning results to reduce load on server
from PYME.IO import clusterResults
clusterResults.fileResults(task.resultsURI, res)
logging.debug('Returning task for frame %d' % res.index)
tq.returnCompletedTask(res, procName, t2-t1)
except:
import traceback
logger.exception('Error returning results')
traceback.print_exc()
del task
#tq.returnCompletedTasks(results, name)
del tasks
#del results | [
57
] |
def METHOD_NAME(loader_factory):
loader = loader_factory(
dataset=get_dataset(),
prediction_interval_length=0.001,
context_interval_length=0.0001,
is_train=True,
override_args={"batch_size": 5},
)
batches = list(loader)
batch = batches[0]
assert (
batch["past_target"].shape[1] == batch["future_target"].shape[1] == 1
)
assert (
batch["past_target"].shape[0] == batch["future_target"].shape[0] == 5
) | [
9,
849,
467,
1707,
9623
] |
def METHOD_NAME(self):
with self.assertRaises(RuntimeError) as cm:
edit_metadata('pippo', in_place=True, exception_type=RuntimeError, metadata_path=TEST_NEW_META_YML)
self.assertEqual('Input path must point to ZARR dataset directory.', f'{cm.exception}')
with self.assertRaises(RuntimeError) as cm:
edit_metadata(TEST_CUBE_ZARR, exception_type=RuntimeError, metadata_path=TEST_NEW_META_YML)
self.assertEqual('Output path must be given.', f'{cm.exception}')
with self.assertRaises(RuntimeError) as cm:
edit_metadata(TEST_CUBE_ZARR, output_path=TEST_CUBE_ZARR, exception_type=RuntimeError,
metadata_path=TEST_NEW_META_YML)
self.assertEqual('Output path already exists.', f'{cm.exception}')
with self.assertRaises(RuntimeError) as cm:
edit_metadata(TEST_CUBE_ZARR, output_path='./' + TEST_CUBE_ZARR, exception_type=RuntimeError,
metadata_path=TEST_NEW_META_YML)
self.assertEqual('Output path already exists.', f'{cm.exception}') | [
9,
4538
] |
def METHOD_NAME(self, trashed_item: Any) -> str:
"""
Should return the name of this particular trashed item to display in the trash
modal.
:param trashed_item: The item to be named.
:return The name of the trashed_item
"""
pass | [
19,
156
] |
def METHOD_NAME(self):
problem = {
"num_vars": 2,
"bounds": [[0, 1], [0, 1]],
"names": ["var1", "var2"],
"groups": ["group1", "group1"],
}
actual = sample(problem, 10, seed=42)
expected = np.array(
[
[0.8601115, 0.8601115],
[0.27319939, 0.27319939],
[0.03745401, 0.03745401],
[0.60580836, 0.60580836],
[0.78661761, 0.78661761],
[0.97080726, 0.97080726],
[0.35986585, 0.35986585],
[0.19507143, 0.19507143],
[0.41560186, 0.41560186],
[0.51559945, 0.51559945],
]
)
np.testing.assert_allclose(actual, expected) | [
9,
8302,
734,
206,
846
] |
def METHOD_NAME(self): | [
9,
19,
1735,
61,
773,
3286,
668
] |
def METHOD_NAME(*args, **kwargs):
if is_admin():
return f(*args, **kwargs)
else:
if request.content_type == "application/json":
abort(403)
else:
return redirect(url_for("auth.login", next=request.full_path)) | [
13531,
246,
291
] |
def METHOD_NAME(self, reconstruction_settings: dict):
"""
Replaces the currently stored reconstruction model settings with the given dictionary
:param reconstruction_settings: a dictionary containing the reconstruction model settings
"""
self[Tags.RECONSTRUCTION_MODEL_SETTINGS] = Settings(reconstruction_settings) | [
0,
7942,
817
] |
def METHOD_NAME():
# Field mapping. And which fields should be turned into integers.
# See https://en.wikipedia.org/wiki/Template:CongLinks for what's possibly available.
fieldmap = {
"congbio": "bioguide",
#"fec": "fec", # handled specially...
"govtrack": "govtrack", # for sanity checking since we definitely have this already (I caught some Wikipedia errors)
"opensecrets": "opensecrets",
"votesmart": "votesmart",
"cspan": "cspan",
}
int_fields = ("govtrack", "votesmart", "cspan")
# default to not caching
cache = utils.flags().get('cache', False)
# Load legislator files and map bioguide IDs.
y1 = utils.load_data("legislators-current.yaml")
y2 = utils.load_data("legislators-historical.yaml")
bioguides = { }
for y in y1+y2:
bioguides[y["id"]["bioguide"]] = y
# Okay now the Wikipedia stuff...
def get_matching_pages():
# Does a Wikipedia API search for pages containing either of the
# two templates. Returns the pages.
page_titles = set()
for template in ("CongLinks", "CongBio"):
eicontinue = ""
while True:
# construct query URL, using the "eicontinue" of the last query to get the next batch
url = 'http://en.wikipedia.org/w/api.php?action=query&list=embeddedin&eititle=Template:%s&eilimit=500&format=xml' % template
if eicontinue: url += "&eicontinue=" + eicontinue
# load the XML
print("Getting %s pages (%d...)" % (template, len(page_titles)))
dom = lxml.etree.fromstring(utils.download(url, None, True)) # can't cache eicontinue probably
for pgname in dom.xpath("query/embeddedin/ei/@title"):
page_titles.add(pgname)
# get the next eicontinue value and loop
eicontinue = dom.xpath("string(query-continue/embeddedin/@eicontinue)")
if not eicontinue: break
return page_titles
# Get the list of Wikipedia pages that use any of the templates we care about.
page_list_cache_file = os.path.join(utils.cache_dir(), "legislators/wikipedia/page_titles")
if cache and os.path.exists(page_list_cache_file):
# Load from cache.
matching_pages = open(page_list_cache_file).read().split("\n")
else:
# Query Wikipedia API and save to cache.
matching_pages = get_matching_pages()
utils.write(("\n".join(matching_pages)), page_list_cache_file)
# Filter out things that aren't actually pages (User:, Talk:, etcetera, anything with a colon).
matching_pages = [p for p in matching_pages if ":" not in p]
# Load each page's content and parse the template.
for p in sorted(matching_pages):
if " campaign" in p: continue
if " (surname)" in p: continue
if "career of " in p: continue
if "for Congress" in p: continue
if p.startswith("List of "): continue
if p in ("New York in the American Civil War", "Upper Marlboro, Maryland"): continue
# Query the Wikipedia API to get the raw page content in XML,
# and then use XPath to get the raw page text.
url = "http://en.wikipedia.org/w/api.php?action=query&titles=" + urllib.parse.quote(p.encode("utf8")) + "&export&exportnowrap"
cache_path = "legislators/wikipedia/pages/" + p
dom = lxml.etree.fromstring(utils.download(url, cache_path, not cache))
page_content = dom.xpath("string(mw:page/mw:revision/mw:text)", namespaces={ "mw": "http://www.mediawiki.org/xml/export-0.8/" })
# Build a dict for the IDs that we want to insert into our files.
new_ids = {
"wikipedia": p # Wikipedia page name, with spaces for spaces (not underscores)
}
if "CongLinks" in page_content:
# Parse the key/val pairs in the template.
m = re.search(r"\{\{\s*CongLinks\s+([^}]*\S)\s*\}\}", page_content)
if not m: continue # no template?
for arg in m.group(1).split("|"):
if "=" not in arg: continue
key, val = arg.split("=", 1)
key = key.strip()
val = val.strip()
if val and key in fieldmap:
try:
if fieldmap[key] in int_fields: val = int(val)
except ValueError:
print("invalid value", key, val)
continue
if key == "opensecrets": val = val.replace("&newMem=Y", "").replace("&newmem=Y", "").replace("&cycle=2004", "").upper()
new_ids[fieldmap[key]] = val
if "bioguide" not in new_ids: continue
new_ids["bioguide"] = new_ids["bioguide"].upper() # hmm
bioguide = new_ids["bioguide"]
else:
m = re.search(r"\{\{\s*CongBio\s*\|\s*(\w+)\s*\}\}", page_content)
if not m: continue # no template?
bioguide = m.group(1).upper()
if not bioguide in bioguides:
print("Member not found: " + bioguide, p, "(Might have been a delegate to the Constitutional Convention.)")
continue
# handle FEC ids specially because they are stored in an array...
fec_id = new_ids.get("fec")
if fec_id: del new_ids["fec"]
member = bioguides[bioguide]
member["id"].update(new_ids)
# ...finish the FEC id.
if fec_id:
if fec_id not in bioguides[bioguide]["id"].get("fec", []):
bioguides[bioguide]["id"].setdefault("fec", []).append(fec_id)
#print p.encode("utf8"), new_ids
utils.save_data(y1, "legislators-current.yaml")
utils.save_data(y2, "legislators-historical.yaml") | [
22
] |
def METHOD_NAME(param=None):
werkzeug_logger_to_error()
configure_root_logger()
dci_app = DciControlServer()
dci_app.url_map.converters["uuid"] = utils.UUIDConverter
logger.info("dci control server startup")
def handle_api_exception(api_exception):
response = flask.jsonify(api_exception.to_dict())
response.status_code = api_exception.status_code
logger.exception(api_exception)
return response
def handle_dbapi_exception(dbapi_exception):
db_exception = exceptions.DCIException(str(dbapi_exception)).to_dict()
response = flask.jsonify(db_exception)
response.status_code = 400
logger.exception(dbapi_exception)
return response
@dci_app.before_request
def before_request():
flask.g.team_admin_id = dci_app.team_admin_id
flask.g.team_redhat_id = dci_app.team_redhat_id
flask.g.team_epm_id = dci_app.team_epm_id
for i in range(5):
try:
flask.g.engine = dci_app.engine
flask.g.db_conn = dci_app.engine.connect()
flask.g.session = sessionmaker(bind=dci_app.engine)()
break
except Exception:
logging.warning(
"failed to connect to the database, " "will retry in 1 second..."
)
time.sleep(1)
pass
flask.g.store = dci_app.store
flask.g.sender = dci_app.sender
@dci_app.teardown_request
def teardown_request(_):
try:
flask.g.session.close()
except Exception:
logging.warning(
"There's been an arror while calling session.close() in teardown_request."
)
try:
flask.g.db_conn.close()
except Exception:
logging.warning(
"There's been an error while calling db_conn.close() in teardown_request."
)
# Registering REST error handler
dci_app.register_error_handler(exceptions.DCIException, handle_api_exception)
dci_app.register_error_handler(sa_exc.DBAPIError, handle_dbapi_exception)
# Registering REST API v1
dci_app.register_blueprint(api_v1.api, url_prefix="/api/v1")
# Registering custom encoder
dci_app.json_encoder = utils.JSONEncoder
return dci_app | [
129,
991
] |
def METHOD_NAME(x):
"""Scale appropriately to between +/- 1."""
return 0.95 * x / np.max(np.abs(x)) | [
930,
4463
] |
def METHOD_NAME(sentence):
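# Replace non-alphanumeric characters with spaces, split into tokens, keep tokens longer than 2 characters, and return them as a set.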
filtered_sentence = re.sub("[^A-Za-z0-9]+", " ", sentence).split()
filtered_sentence = [token for token in filtered_sentence if len(token) > 2]
return set(filtered_sentence) | [
4022
] |
def METHOD_NAME(monkeypatch, caplog, value, is_valid, expected):
versions = version.WebEngineVersions.from_pyqt('5.15.3')
monkeypatch.setenv('QUTE_DARKMODE_VARIANT', value)
with caplog.at_level(logging.WARNING):
assert darkmode._variant(versions) == expected
log_msg = 'Ignoring invalid QUTE_DARKMODE_VARIANT=invalid_value'
assert (log_msg in caplog.messages) != is_valid | [
9,
933,
345
] |
def METHOD_NAME(self): | [
9,
1471,
69,
374,
584
] |
def METHOD_NAME(s):
s = s.group(1)
if s[0] == '#':
# numeric charref
if s[1] in 'xX':
num = int(s[2:].rstrip(';'), 16)
else:
num = int(s[1:].rstrip(';'))
if num in _invalid_charrefs:
return _invalid_charrefs[num]
if 0xD800 <= num <= 0xDFFF or num > 0x10FFFF:
return '\uFFFD'
if num in _invalid_codepoints:
return ''
return chr(num)
else:
# named charref
if s in _html5:
return _html5[s]
# find the longest matching name (as defined by the standard)
for x in range(len(s)-1, 1, -1):
if s[:x] in _html5:
return _html5[s[:x]] + s[x:]
else:
return '&' + s | [
369,
13872
] |
def METHOD_NAME(cls):
super().METHOD_NAME()
cls.collection = api.create_collection(title="Collection")
cls.bundle = api.create_bundle(cls.collection.uuid, title="Test Bundle", slug="test")
cls.draft = api.get_or_create_bundle_draft(cls.bundle.uuid, draft_name="test-draft") | [
0,
1,
2
] |
def METHOD_NAME(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/openapi_post_endpoint_test_recursive_arg.yaml', transport) | [
9,
841,
41,
2203,
718
] |
def METHOD_NAME(
cache_set_many_mock, default_projectkey, project_config_get_mock, globalconfig_get_mock_config
):
build_project_config(
public_key=default_projectkey.public_key,
update_reason="test",
)
assert cache_set_many_mock.call_count == 1
# Using a tuple because that's the format `.args` uses
assert cache_set_many_mock.call_args.args == (
{default_projectkey.public_key: {"is_mock_config": True}},
) | [
9,
758,
5033,
200,
409,
596
] |
def METHOD_NAME():
usage = '''
Encodes or decodes JSON Web Tokens based on input.
%(prog)s [options] <command> [options] input
Decoding examples:
%(prog)s --key=secret decode json.web.token
%(prog)s decode --no-verify json.web.token
Encoding requires the key option and takes space separated key/value pairs
separated by equals (=) as input. Examples:
%(prog)s --key=secret encode iss=me exp=1302049071
%(prog)s --key=secret encode foo=bar exp=+10
The exp key is special and can take an offset to current Unix time.
'''
arg_parser = argparse.ArgumentParser(
prog='pyjwt',
usage=usage
)
arg_parser.add_argument(
'-v', '--version',
action='version',
version='%(prog)s ' + __version__
)
arg_parser.add_argument(
'--key',
dest='key',
metavar='KEY',
default=None,
help='set the secret key to sign with'
)
arg_parser.add_argument(
'--alg',
dest='algorithm',
metavar='ALG',
default='HS256',
help='set crypto algorithm to sign with. default=HS256'
)
subparsers = arg_parser.add_subparsers(
title='PyJWT subcommands',
description='valid subcommands',
help='additional help'
)
# Encode subcommand
encode_parser = subparsers.add_parser('encode', help='use to encode a supplied payload')
payload_help = """Payload to encode. Must be a space separated list of key/value
pairs separated by equals (=) sign."""
encode_parser.add_argument('payload', nargs='+', help=payload_help)
encode_parser.set_defaults(func=encode_payload)
# Decode subcommand
decode_parser = subparsers.add_parser('decode', help='use to decode a supplied JSON web token')
decode_parser.add_argument(
'token',
help='JSON web token to decode.',
nargs='?')
decode_parser.add_argument(
'-n', '--no-verify',
action='store_false',
dest='verify',
default=True,
help='ignore signature and claims verification on decode'
)
decode_parser.set_defaults(func=decode_payload)
return arg_parser | [
56,
2345
] |
def METHOD_NAME(basename, pattern=None):
"""
Return a list of tuples containing 'active' filenames and modified times.
Inputs:
basename[str]: The base filename (e.g., file_out.e)
pattern[str]: (Optional) Additional files to consider via glob pattern (e.g., file_out.e-s*)
"""
def file_number(fname):
idx = fname.find('.e-s')
return int(fname[idx+4:]) if idx > 0 else 0
# List of all matching filenames
filenames = [basename]
if pattern:
filenames += glob.glob(pattern)
filenames.sort(key=file_number)
# Minimum filename modified time
modified = os.path.getmtime(filenames[0]) if os.path.exists(filenames[0]) else 0
# Populate a list of tuples: (filename, modified time)
output = []
for filename in filenames:
current_modified = os.path.getmtime(filename) if os.path.exists(filename) else 0
if current_modified >= modified:
output.append((filename, current_modified))
return output | [
19,
923,
3932
] |
def METHOD_NAME():
env: cy.CylonEnv = cy.CylonEnv(config=cy.MPIConfig(), distributed=True)
print("CylonEnv Initialized: My rank: ", env.rank)
inputFile1 = "data/input/cities_a_" + str(env.rank) + ".csv"
inputFile2 = "data/input/cities_b_" + str(env.rank) + ".csv"
unionFile = "data/output/union_cities_" + str(env.rank) + ".csv"
df1 = gcy.DataFrame.from_cudf(cudf.read_csv(inputFile1))
df2 = gcy.DataFrame.from_cudf(cudf.read_csv(inputFile2))
print("df1: \n", df1)
print("df2: \n", df2)
unionDf = df1.set_union(other=df2, env=env)
unionDf.to_cudf().to_csv(unionFile, index=False)
print(env.rank, " written unionFile to the file: ", unionFile)
env.finalize() | [
370,
3006,
1537
] |
def METHOD_NAME(sort_options):
"""Get defualt sort option for Invenio-Search-JS."""
return sorted_options(sort_options)[0]['value'] | [
235,
-1
] |
def METHOD_NAME(cls, dmd, name):
"""Return global catalog by name."""
catalog = getattr(
dmd.Devices,
"{}_{}Search".format(cls.zenpack_name.replace('.', '_'), name),
None)
if catalog:
return catalog
else:
return cls.create_global_catalog(dmd, name) | [
19,
285,
2824
] |
def METHOD_NAME(self):
return self.native.get_sensitive() | [
19,
1111
] |
def METHOD_NAME(self):
super(CachedFixtureTestCase, self).METHOD_NAME() | [
72,
1843
] |
def METHOD_NAME(self, method):
self.v1 = TinyVector(list(range(1, 11)))
self.v2 = TinyVector(list(range(11, 21)))
self.a1 = numpy.array(self.v1)
self.a2 = numpy.array(self.v2)
self.l1 = list(self.v1)
self.l2 = list(self.v2)
self.scalar = 3 | [
102,
103
] |
def METHOD_NAME(a, b):
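# Multiply two elements via log/antilog lookup tables with mod-255 arithmetic (the standard GF(2**8) log-table multiplication); zero has no logarithm, so it is handled separately.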
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255] | [
1998
] |
def METHOD_NAME(arg):
logger.info('fail')
logger.info(arg) | [
69,
353,
168
] |
def METHOD_NAME() -> LevelUpMove:
return LevelUpMove(
u16(random.randint(1, 65_535)), u16(random.randint(1, 65_535))
) | [
5456,
33,
1,
132
] |
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("OperationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem) | [
297,
365
] |
def METHOD_NAME(self):
config = {
'framework': 'g-api',
'model': 'model.xml',
'weights': 'weights.bin',
'device': 'CPU',
'backend': 'not_backend',
'adapter': 'classification',
'inputs': [{'name': 'data', 'type': 'INPUT'}]
#'outputs': ['out']
}
with pytest.raises(ConfigError):
create_launcher(config) | [
9,
3469,
141,
623,
129,
3470,
3471
] |
def METHOD_NAME(params):
# (rad/s)/V, inverse of Kt
return {
'Kv': 1.0 / params['Kt']
} | [
86,
4135
] |
def METHOD_NAME(self):
self.user = self.create_user(email="[email protected]", is_superuser=True)
self.superuser = self.create_user(email="[email protected]", is_superuser=True)
self.draft_doc = self.create_doc_integration(
name="spiderman", is_draft=True, has_avatar=True
)
self.published_doc = self.create_doc_integration(
name="spiderwoman", is_draft=False, has_avatar=True
)
self.avatar_payload = {
"avatar_photo": b64encode(self.load_fixture("rookout-color.png")),
"avatar_type": "upload",
} | [
0,
1
] |
def METHOD_NAME():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
def extern_generator(ins, outs):
"""Manually write the IR for the extern function, add pipeline."""
return tvm.tir.call_packed("my_extern_array_func1", ins[0], outs[0])
C = te.extern(A.shape, [A], extern_generator, name="C")
s = te.create_schedule(C.op)
@tvm.register_func
def my_extern_array_func1(aa, bb):
aa.copyto(bb)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
# build and invoke the kernel.
f = tvm.build(s, [A, C], target)
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy())
check_target("stackvm")
check_target("llvm") | [
9,
1699,
2376,
53
] |
def METHOD_NAME(self, text):
"""Set the selected item to the first one having the given text."""
ind = self.findText(text)
if ind == -1:
raise ValueError(text)
#self.value = value
self.setCurrentIndex(ind) | [
0,
526
] |
def METHOD_NAME(self, exeception_to_raise, mock_invoke_context):
context_mock = Mock()
mock_invoke_context.return_value.__enter__.return_value = context_mock
context_mock.run.side_effect = exeception_to_raise
with self.assertRaises(UserException):
do_cli(
stack_name=None,
resource_id="mock-resource-id",
event="event",
event_file=None,
parameter={},
output=RemoteInvokeOutputFormat.TEXT,
region=self.region,
profile=self.profile,
config_file=self.config_file,
config_env=self.config_env,
) | [
9,
241,
21,
442,
4311,
130,
12153
] |
def METHOD_NAME(self, context, layout):
layout.prop(self, 'mode', expand=True)
layout.prop(self, 'rescale', toggle=True)
layout.prop(self, 'join', toggle=True) | [
1100,
1409
] |
def METHOD_NAME(name, path_eval=False):
"""Returns value of specified configuration setting."""
default_value = DEFAULTS[name]
result = os.getenv(name, default_value)
# split on ':' and find the first suitable path
if (
path_eval
and isinstance(result, str)
and isinstance(default_value, str)
and not isinstance(default_value, bool)
):
locations = result.split(":")
result = None
for loc in locations:
if os.path.isfile(loc):
result = loc
break
if result is None:
result = locations[0]
# Check for setting that expects a boolean result.
if isinstance(default_value, bool):
return _cast_bool(result)
# Special default for FAUCET_EVENT_SOCK.
if name == "FAUCET_EVENT_SOCK":
if result == "0":
return ""
if _cast_bool(result):
return _PREFIX + "/var/run/faucet/faucet.sock"
if name == "FAUCET_EVENT_SOCK_HEARTBEAT":
if result == "0":
return 0
return result | [
19,
1333
] |
def METHOD_NAME(*plain_text_and_format_entities: tuple[str, list[TypeMessageEntity]]) \
-> tuple[str, list[TypeMessageEntity]]:
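# Concatenate (plain_text, entities) pairs, shifting each entity's offset by the UTF-16 surrogate length of the text already appended, then merge contiguous entities.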
plain_text = ''
format_entities = []
surrogate_len_sum = 0
for text, entities in plain_text_and_format_entities:
plain_text += text
new_entities = []
for entity in entities:
new_entity = copy_entity(entity)
new_entity.offset += surrogate_len_sum
new_entities.append(new_entity)
surrogate_len_sum += surrogate_len(text)
format_entities.extend(new_entities)
format_entities = merge_contiguous_entities(format_entities)
return plain_text, format_entities | [
526,
61,
275,
5399,
2008
] |
def METHOD_NAME(self, args: NominatimArgs) -> int:
return asyncio.METHOD_NAME(export(args)) | [
22
] |
def METHOD_NAME(step0_data):
"""
"""
src, dest, step, cli_params = step0_data
src = src[0]
dest = dest[0]
warp_forward_out, warp_inverse_out = register_step_label(src=src, dest=dest, step=step, verbose=cli_params.verbose) | [
9,
372,
367,
636
] |
def METHOD_NAME(self):
"""
Similar to `Camera.update()`, but for OPSI.
"""
# self.device.screenshot()
self._view_init()
try:
self.view.load(self.device.image)
except (MapDetectionError, AttributeError) as e:
logger.warning(e)
logger.warning('Assuming camera is focused on grid center')
def empty(*args, **kwargs):
pass
backup, self.view.backend.load = self.view.backend.load, empty
self.view.backend.homo_loca = (53, 60)
self.view.backend.left_edge = False
self.view.backend.right_edge = False
self.view.backend.lower_edge = False
self.view.backend.upper_edge = False
self.view.load(self.device.image)
self.view.backend.load = backup | [
86,
350
] |
def METHOD_NAME(self, request, project_id, cluster_id):
"""设置节点调度状态
通过传递状态, 设置节点的调度状态
"""
params = self.params_validate(slz.NodeStatusSLZ)
# NOTE: if the status is REMOVABLE, the expected state is "unschedulable", so that further operations can follow
unschedulable = True if params["status"] == node_status.REMOVABLE else False
client = Node(request.ctx_cluster)
client.set_nodes_schedule_status(unschedulable, params["node_name_list"])
request.audit_ctx.update_fields(
resource=get_nodes_repr(params["node_name_list"]),
extra=params,
description=_("Stop node scheduling") if unschedulable else _("Allow node scheduling"),
)
return Response() | [
0,
507,
452
] |
def METHOD_NAME(vertex, center, axis, angle):
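# Rotate each vertex around its paired center by the given angle (in degrees) about the given axis.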
vertex,center,axis,angle = match_long_repeat([vertex, center, axis, angle])
rotated = []
for ve,ce,ax,an in zip(vertex, center, axis, angle):
mat = Matrix.Rotation(radians(an), 4, ax)
c = Vector(ce)
rotated.append((c + mat @ ( Vector(ve) - c))[:])
return rotated | [
2227,
2271
] |
def METHOD_NAME(self, client, access_token, user, cleanup_create_test):
url = "/workers/some_worker_create_test/check-in"
payload = {
"username": user["username"],
"cpu": 4,
"memory": 2048,
"disk": 4096,
"offliners": ["mwoffliner", "phet"],
}
response = client.put(
url, json=payload, headers={"Authorization": access_token}
)
assert response.status_code == 204 | [
9,
4147,
80,
1794
] |
def METHOD_NAME(input_array, expected_output, expand_distance):
expanded = expand_labels(input_array, expand_distance)
assert_array_equal(expanded, expected_output) | [
9,
2450,
415
] |
def METHOD_NAME(indices):
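# Pad variable-length index sequences into a single batch tensor and return it together with the original lengths.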
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths | [
24,
768
] |
def METHOD_NAME(example_serialized):
"""Parses an Example proto containing a training example of an image.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.io.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/filename': tf.io.FixedLenFeature([], dtype=tf.string,
default_value="")
}
sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
filename = tf.cast(features['image/filename'], dtype=tf.string)
return features['image/encoded'], label, filename | [
214,
1441,
2640
] |
def METHOD_NAME(self):
actual = ping._get_ping_command_args_list(
ping.PingCommandParams(
host_or_ip='google.com',
num_packets=4,
timeout_secs=5,
),
)
self.assertEqual(
['ping', 'google.com', '-c', '4', '-w', '5'],
actual,
)
actual = ping._get_ping_command_args_list(
ping.PingCommandParams(
host_or_ip='google.com',
num_packets=None,
timeout_secs=None,
),
)
self.assertEqual(
['ping', 'google.com', '-c', '4', '-w', '20'],
actual,
) | [
9,
559
] |
def METHOD_NAME(self, shared_state):
aiplatform.init(
project=e2e_base._PROJECT,
location=e2e_base._LOCATION,
)
vertex_dataset_name = f"projects/{e2e_base._PROJECT}/locations/{e2e_base._LOCATION}/datasets/dataset"
artifact = google_artifact_schema.VertexDataset(
vertex_dataset_name=vertex_dataset_name,
display_name=self.artifact_display_name,
metadata=self.artifact_metadata,
description=self.artifact_description,
).create()
shared_state["resources"].append(artifact)
expected_metadata = self.artifact_metadata.copy()
expected_metadata["resourceName"] = vertex_dataset_name
assert artifact.display_name == self.artifact_display_name
assert json.dumps(artifact.metadata, sort_keys=True) == json.dumps(
expected_metadata, sort_keys=True
)
assert artifact.schema_title == "google.VertexDataset"
assert artifact.description == self.artifact_description
assert "/metadataStores/default/artifacts/" in artifact.resource_name
assert (
artifact.uri
== f"https://{e2e_base._LOCATION}-aiplatform.googleapis.com/v1/{vertex_dataset_name}"
) | [
9,
3399,
126,
1831,
129
] |
def METHOD_NAME(self): | [
947,
2962,
497,
912
] |
def METHOD_NAME(self):
result1 = _locale._getdefaultlocale()
result2 = _locale._getdefaultlocale()
self.assertEqual(result1,result2) | [
9,
-1
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Password hash used for publishing.
"""
return pulumi.get(self, "publishing_password_hash") | [
14085,
2897,
1161
] |
def METHOD_NAME(self, id):
vmmoid = self.get_vmmoid(id)
if vmmoid:
return s.get(
f"https://{self.host}:{self.port}/api/vms/{vmmoid}",
auth=(self.username, self.password),
)
log.info("There was a problem getting settings for vm %s", id) | [
19,
944,
817
] |
def METHOD_NAME(entity_type, uuid):
if entity_type not in entity_types:
abort(404)
body = request.get_json()
workspace_name = body.get('workspace_name')
client = get_client()
entity = client.get_entity(uuid)
vitessce_conf = client.get_vitessce_conf_cells_and_lifted_uuid(entity).vitessce_conf
if (vitessce_conf is None
or vitessce_conf.conf is None
or vitessce_conf.cells is None):
abort(404)
hubmap_id = entity['hubmap_id']
dataset_url = request.base_url.replace('.ipynb', '')
cells = [
new_markdown_cell(
f"Visualization for [{hubmap_id}]({dataset_url}); "
"If this notebook is running in a HuBMAP workspace, the dataset is symlinked:"),
new_code_cell(f'!ls datasets/{uuid}'),
new_markdown_cell('Visualization requires extra code to be installed:'),
new_code_cell(
'!pip uninstall community flask albumentations -y '
'# Preinstalled on Colab; Causes version conflicts.\n'
f'!pip install vitessce[all]=={vitessce.__version__}'),
*vitessce_conf.cells
]
return _nb_response_from_objs(hubmap_id, cells, workspace_name=workspace_name, uuids=[uuid]) | [
2419,
3549
] |
def METHOD_NAME(self):
# This method may be called multiple times on the same batch
# i.e., on retries
# we need to make sure we only close it out once
# otherwise compressed messages may be double-compressed
# see Issue 718
if not self._closed:
self._bytes_written = self._builder.size()
self._buffer = bytes(self._builder.build())
self._builder = None
self._closed = True | [
1462
] |
def METHOD_NAME(self, path):
for locale in PUNCTUATION_CHECK:
if path.endswith(locale + '.ts'):
return True
return False | [
250,
6751
] |
def METHOD_NAME(self, charge_point_vendor: str, charge_point_model: str, **kwargs):
self._profile = {
'Vendor': charge_point_vendor,
'Model': charge_point_model
}
self.name = self._uplink_converter.get_device_name(self._profile)
self.type = self._uplink_converter.get_device_type(self._profile)
self._callback((self._uplink_converter,
{'deviceName': self.name, 'deviceType': self.type, 'messageType': Action.MeterValues,
'profile': self._profile},
{'Vendor': charge_point_vendor, 'Model': charge_point_model, **kwargs}))
return call_result.BootNotificationPayload(
current_time=datetime.utcnow().isoformat(),
interval=10,
status=RegistrationStatus.accepted
) | [
69,
1642,
857
] |
def METHOD_NAME() -> Callable[[types.FunctionType], values.OnnxClosure]:
"""A parametric decorator used to annotate nested-functions that are used
as graph-attributes.
Returns:
A decorator that returns its input function, but attaches a graph_proto
attribute representing the input function. The translation is not
done at this time, but previously when the outer-level function
was translated to an OnnxFunction. The decorator just looks up
and retrieves the GraphProto representation previously generated.
Example:
::
@script()
def cumulative_sum(X: INT64['N']):
# Translation of cumulative_sum by @script will also translate Sum
# into a GraphProto, which will be stored in the OnnxFunction generated
# for cumulative_sum. At run-time (in eager-mode), the @graph decorator
# retrieves the pre-computed GraphProto and attaches it to the Sum function.
@graph()
def Sum(sum_in, next):
sum_out = sum_in + next
scan_out = op.Identity(sum_out)
return sum_out, scan_out
zero = op.Constant(value_int=0)
# The call to higher-order operator Scan below uses the above function
# Sum as a graph-attribute.
all_sum, result = op.Scan (zero, X, body=Sum, num_scan_inputs=1)
return result
"""
# This is a bit fragile. We want to get the ONNXFunction object representing
# the outer-scope ONNXScript function from the execution stack. The caller of
# @graph is the original script function (cumulative_sum in the above example),
# and the caller of that function is the wrapper function/method in the
# corresponding OnnxFunction object.
# Currently, there is no support for eager-mode execution of nested functions,
# so we don't need to handle doubly nested functions (e.g., a function defined
# inside Sum in the above example).
function_frame = sys._getframe(1) # pylint: disable=protected-access
wrapper_frame = sys._getframe(3) # pylint: disable=protected-access
onnx_function = wrapper_frame.f_locals["self"]
nested_functions = onnx_function.function_ir.nested_functions
def transform(f: types.FunctionType) -> values.OnnxClosure:
return values.OnnxClosure(nested_functions[f.__name__], function_frame, f)
return transform | [
303
] |
def METHOD_NAME(shots_and_their_points) -> None:
exact_found = 0
for pose, bearings, points in shots_and_their_points:
result = pygeometry.absolute_pose_three_points(bearings, points)
expected = pose.get_world_to_cam()[:3]
for Rt in result:
exact_found += np.linalg.norm(expected - Rt, ord="fro") < 1e-6
exacts = len(shots_and_their_points) - 2
assert exact_found >= exacts | [
9,
4653,
3954,
2756,
182
] |
def METHOD_NAME(self, name):
return posixpath.join(self.COPRO_TAR_DIR, name) | [
19,
171,
156
] |
def METHOD_NAME(dataset, mode="train", batch_size=1, batchify_fn=None, trans_fn=None):
if trans_fn:
dataset = dataset.map(trans_fn)
shuffle = True if mode == "train" else False
if mode == "train":
batch_sampler = paddle.io.DistributedBatchSampler(dataset, batch_size=batch_size, shuffle=shuffle)
else:
batch_sampler = paddle.io.BatchSampler(dataset, batch_size=batch_size, shuffle=shuffle)
return paddle.io.DataLoader(dataset=dataset, batch_sampler=batch_sampler, collate_fn=batchify_fn, return_list=True) | [
129,
568
] |
def METHOD_NAME(
formatting,
cats,
quantity,
total_quantity,
allocation,
total_allocation,
kappa,
kappasim=None,
):
if formatting == "plain":
if total_quantity is None:
print(_("No data found in current region"))
return
for i, c in enumerate(cats):
print(
_("Quantity disagreement for class {c}: {q:.2f} %").format(
c=c, q=quantity[i] * 100
)
)
print(
_("Total quantity disagreement: {q:.2f} %").format(q=total_quantity * 100)
)
for i, c in enumerate(cats):
print(
_("Allocation disagreement for class {c}: {q:.2f} %").format(
c=c, q=allocation[i] * 100
)
)
print(
_("Total allocation disagreement: {q:.2f} %").format(
q=total_allocation * 100
)
)
if kappa is not None:
print(_("Kappa: {kappa:.4f}").format(kappa=kappa))
if kappasim is not None:
print(_("Kappa simulation: {kappasim:.4f}").format(kappasim=kappasim))
elif formatting == "shell":
def format_value(val):
return f"{val:.4f}" if val is not None else ""
for i, c in enumerate(cats):
print(f"quantity_class_{c}={quantity[i]:.4f}")
print(f"total_quantity={format_value(total_quantity)}")
for i, c in enumerate(cats):
print(f"allocation_class_{c}={allocation[i]:.4f}")
print(f"total_allocation={format_value(total_allocation)}")
print(f"kappa={format_value(kappa)}")
if kappasim is not None:
print(f"kappasimulation={kappasim:.4f}")
elif formatting == "json":
# export everything even when None
# for automated processing
out = {}
for i, c in enumerate(cats):
out[f"quantity_class_{c}"] = quantity[i]
for i, c in enumerate(cats):
out[f"allocation_class_{c}"] = allocation[i]
out["total_quantity"] = total_quantity
out["total_allocation"] = total_allocation
out["kappa"] = kappa
out["kappasimulation"] = kappasim
print(
json.dumps(
json.loads(json.dumps(out), parse_float=lambda x: round(float(x), 4))
)
) | [
38,
51
] |
async def METHOD_NAME(self):
if await self.is_available(lambda x: x):
async with aiofiles.open(self.variants_file_path, 'r') as f:
self.initial_values = json.loads(await f.read())
for item in self.initial_values:
for i, v in enumerate(item):
item[i] = tuple(v)
else:
self.initial_values = [] | [
15,
114
] |
def METHOD_NAME(self, word: str) -> str:
if word.endswith("e"):
return word[:-1]
if word.endswith("y") and word[-2] not in self.VOWELS:
return word[:-1]
return word | [
2805
] |
async def METHOD_NAME(spawn_client, fake2, snapshot, static_time, pg):
client = await spawn_client(
authenticated=True, administrator=True, flags=[FlagName.SPACES]
)
user_1 = await fake2.users.create()
async with AsyncSession(pg) as session:
session.add(
SQLSpace(
id=0,
name="Space 0",
description="",
created_at=static_time.datetime,
updated_at=static_time.datetime,
)
)
await session.commit()
await get_authorization_client_from_app(client.app).add(
SpaceMembership(user_1.id, 0, SpaceRole.OWNER),
UserRoleAssignment(user_1.id, 0, SpaceProjectRole.EDITOR),
)
resp = await client.patch(
f"/spaces/0/members/{user_1.id}",
{"role": "member", "label": SpaceLabelRole.MANAGER},
)
assert resp.status == 200
assert await resp.json() == snapshot | [
9,
86,
1823,
2173
] |
def METHOD_NAME(diff_lines):
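# Read the new ("b") side blob of what appears to be a GitPython diff entry and return its decoded contents as a list of lines.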
return diff_lines.b_blob.data_stream.read().decode("utf-8").split("\n") | [
385,
2443,
513,
801
] |
def METHOD_NAME(self):
instance = create_instance(Identifier())
instance.attr = "æøå"
assert instance.attr == "æøå" | [
9,
774,
41,
2503,
2474
] |
def METHOD_NAME() -> Dict[Text, Any]:
"""The component's default config (see parent class for full docstring)."""
return {"case_sensitive": True} | [
19,
235,
200
] |
def METHOD_NAME(deprecated_v1):
deprecated_v1.constant("2.0", "3.0", "SOME_CONSTANT", 42)
module = sys.modules[__name__]
# alerting user to pending deprecation
with pytest.deprecated_call(match="pending deprecation"):
module.SOME_CONSTANT | [
9,
928,
1359
] |