text (string, lengths 15 to 7.82k) | ids (sequence, lengths 1 to 7) |
---|---|
def METHOD_NAME(self, name, predicate):
def lazy_format(operation):
return lambda: format_operation(operation) or self.diagram.gettext(
"unnamed"
)
return Box(
Text(
text=name,
style={
"padding": (0, 0, 4, 0),
"font-size": "x-small",
"font-style": FontStyle.ITALIC,
},
),
*(
Text(text=lazy_format(operation), style={"text-align": TextAlign.LEFT})
for operation in self.subject.ownedOperation
),
style={
"padding": (4, 4, 4, 4),
"min-height": 8,
"justify-content": JustifyContent.START,
},
draw=draw_top_separator,
) | [
710,
3162
] |
def METHOD_NAME(self) -> str:
"""
The EventBridge bus.
"""
return pulumi.get(self, "eventbridge_bus") | [
4351,
2583
] |
def METHOD_NAME(self):
for arg in self.requests["region"]:
with argopy.set_options(api_timeout=ERDDAP_TIMEOUT):
fetcher = ArgoIndexFetcher(src=self.src).region(arg).fetcher
df = fetcher.to_dataframe()
assert isinstance(df, pd.core.frame.DataFrame) | [
9,
11180,
1216
] |
def METHOD_NAME() -> None:
round_trip_for(AwsCloudFormationStack) | [
9,
4054,
12715,
1980
] |
def METHOD_NAME(self, style: style_t, selector: int): ... | [
238,
641
] |
def METHOD_NAME(self): | [
9,
1448
] |
def METHOD_NAME(self):
"""Test that r.random.walk runs with overlaps are expected output"""
# assertModule is used to call module which we test
# we expect module to finish successfully
self.assertModule(
"r.random.walk", output=self.random_walk, steps=1000, seed=0, flags="s"
) | [
9,
236,
4716,
41,
2820,
1842,
1190
] |
def METHOD_NAME(self):
process_quit_count = 0
process_num = len(self._procs)
while process_quit_count < process_num:
result = self._output.get()
if result is _SENTINEL:
process_quit_count += 1 | [
537,
146,
651
] |
def METHOD_NAME(context: Context,
data_dict: DataDict) -> AuthResult:
'''Checks if a user is allowed to remove collaborators from a dataset
See :py:func:`~ckan.authz.can_manage_collaborators` for details
'''
user = context['user']
model = context['model']
pkg = model.Package.get(data_dict['id'])
user_obj = model.User.get(user)
assert pkg and user_obj
if not authz.can_manage_collaborators(pkg.id, user_obj.id):
return {
'success': False,
'msg': _('User %s not authorized to remove'
' collaborators from this dataset') % user}
return {'success': True} | [
360,
5324,
34
] |
def METHOD_NAME(project, text):
if not settings.MDRENDER_CACHE_ENABLE:
return func(project, text)
# Avoid cache of too short texts
if len(text) <= settings.MDRENDER_CACHE_MIN_SIZE:
return func(project, text)
sha1_hash = hashlib.sha1(force_bytes(text)).hexdigest()
key = "mdrender/{}-{}".format(sha1_hash, project.id)
# Try to get it from the cache
cached = cache.get(key)
if cached is not None:
return cached
returned_value = func(project, text)
cache.set(key, returned_value, timeout=settings.MDRENDER_CACHE_TIMEOUT)
return returned_value | [
972
] |
def METHOD_NAME():
"""
Test linkage management command for records that have both a fain and uri
"""
models_to_mock = [
{"model": AwardSearch, "award_id": 999, "fain": "RANDOM_FAIN_999", "uri": "RANDOM_URI_999"},
{"model": AwardSearch, "award_id": 1999, "fain": "RANDOM_FAIN_1999", "uri": "RANDOM_URI_1999"},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": 777,
"fain": "RANDOM_FAIN_999",
"uri": "RANDOM_URI_DNE",
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": 1777,
"fain": "RANDOM_FAIN_DNE",
"uri": "RANDOM_URI_1999",
},
]
for entry in models_to_mock:
baker.make(entry.pop("model"), **entry)
# use the award_search table because the award_search_temp table is not present in testing
call_command("update_file_c_linkages", "--recalculate-linkages", "--file-d-table=award_search")
expected_results = 999
file_c_award_fain = FinancialAccountsByAwards.objects.filter(financial_accounts_by_awards_id=777).first()
assert file_c_award_fain is not None
assert expected_results == file_c_award_fain.award_id
expected_results = 1999
file_c_award_uri = FinancialAccountsByAwards.objects.filter(financial_accounts_by_awards_id=1777).first()
assert file_c_award_uri is not None
assert expected_results == file_c_award_uri.award_id | [
9,
86,
4361,
4362,
-1,
61,
354
] |
def METHOD_NAME(session, client, jwt): # pylint:disable=unused-argument
"""Assert get connection endpoint returns not found when there is no active connection."""
headers = create_header(jwt, [BASIC_USER])
identifier = 'FM1234567'
business = factory_business(identifier)
create_dc_connection(business)
rv = client.get(f'/api/v2/businesses/{identifier}/digitalCredentials/connection',
headers=headers, content_type=content_type)
assert rv.status_code == HTTPStatus.NOT_FOUND
assert rv.json.get('message') == 'No active connection found.' | [
9,
19,
550,
130,
622
] |
def METHOD_NAME(cls):
return (
# do not edit anything below since these entries are generated from
# scripts/webcomicfactory.py
# START AUTOUPDATE
cls('AsTheMayoTurns',
'http://www.thewebcomicfactory.com/comic/as-the-mayo-turns/'),
cls('ComicBookMafia',
'http://www.thewebcomicfactory.com/comic/comic-book-mafia/'),
cls('Dealers',
'http://www.thewebcomicfactory.com/comic/dealers-1-1998-was-the-year/'),
cls('DigitalHobo',
'http://www.thewebcomicfactory.com/comic/digital-hobo-1-its-a-living-kinda/'),
cls('ECoastVsWCoast',
'http://www.thewebcomicfactory.com/comic/east-coast-vs-west-coast-greetings-from-the-coasts/'),
cls('GunCulture',
'http://www.thewebcomicfactory.com/comic/gun-culture/'),
cls('IHateMyKids',
'http://www.thewebcomicfactory.com/comic/i-hate-my-kids/'),
cls('InARelationship',
'http://www.thewebcomicfactory.com/comic/in-a-relationship-3/'),
cls('IntergalacticMedicalDoctor',
'http://www.thewebcomicfactory.com/comic/intergalactic-medical-doctor/'),
cls('JSchoolgirlsInLove',
'http://www.thewebcomicfactory.com/comic/japanese-schoolgirls-in-love-1/'),
cls('KingdomOfTheDwarves',
'http://www.thewebcomicfactory.com/comic/kingdom-of-the-dwarves/'),
cls('LesterCrenshawIsDead',
'http://www.thewebcomicfactory.com/comic/lester-crenshaw-is-dead/'),
cls('Millennials',
'http://www.thewebcomicfactory.com/comic/millennials/'),
cls('MiserableComedians',
'http://www.thewebcomicfactory.com/comic/miserable-comedians-1-funny-because-its-sad/'),
cls('OldeTymeGamer',
'http://www.thewebcomicfactory.com/comic/olde-tyme-gamer-playing-injured/'),
cls('PinJunkies',
'http://www.thewebcomicfactory.com/comic/pin-junkies/'),
cls('PostApocalypticNick',
'http://www.thewebcomicfactory.com/comic/post-apocalyptic-nick/'),
cls('RealTalk',
'http://www.thewebcomicfactory.com/comic/real-talk-people-who-cut-in-line/'),
cls('SoManyNightmares',
'http://www.thewebcomicfactory.com/comic/so-many-nightmares-freedom-nightmare/'),
cls('SportsGuys',
'http://www.thewebcomicfactory.com/comic/sports-guys/'),
cls('TalesOfPizza',
'http://www.thewebcomicfactory.com/comic/tales-of-pizza-bad-tipper/'),
cls('TAndA',
'http://www.thewebcomicfactory.com/comic/the-webcomic-factory-premiere-t-and-a/'),
cls('TheAntiwarComic',
'http://www.thewebcomicfactory.com/comic/the-antiwar-comic-the-party/'),
cls('TheGentlemensClub',
'http://www.thewebcomicfactory.com/comic/the-gentlemens-club/'),
cls('TheHorrorOfColony6',
'http://www.thewebcomicfactory.com/comic/the-horror-of-colony-6-page-1/'),
cls('TheKingsOfViralVideo',
'http://www.thewebcomicfactory.com/comic/the-kings-of-viral-video-premiere/'),
cls('TheSharonAndTonyExperiment',
'http://www.thewebcomicfactory.com/comic/the-sharon-and-tony-experiment/'),
cls('TonyDestructo',
'http://www.thewebcomicfactory.com/comic/tony-destructo/'),
cls('WeirdBikerTales',
'http://www.thewebcomicfactory.com/comic/weird-biker-tales-the-last-outlaw/'),
cls('WillysSpaceDive',
'http://www.thewebcomicfactory.com/comic/willys-space-dive/'),
# END AUTOUPDATE
) | [
-1
] |
def METHOD_NAME(self):
self.ssh_up_thread = threading.Thread(target=self.ssh_ping_thread, name='SSHPingThread', daemon=True)
self.ssh_up_thread.start() | [
447,
1264,
1,
600
] |
def METHOD_NAME(self, node: Import) -> None:
node.is_top_level = self.is_global_scope
super().METHOD_NAME(node) | [
716,
512
] |
def METHOD_NAME(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1) | [
76
] |
def METHOD_NAME():
"""
Test function to check that we sensibly extrapolate when handed a point outside of
the interpolation bounds
"""
interpolation_points = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 1.0, (1, 1): 1.0}
interpolator = UnstructuredInterpolator(interpolation_points)
interpolated_point = interpolator([[0, 2], [1, 2], [2, 2]])
assert np.all(interpolated_point == [0.0, 1.0, 2.0]) | [
9,
1737,
47,
634
] |
def METHOD_NAME(angle, t):
"""Compute weights of start and end for spherical linear interpolation.
Parameters
----------
angle : float
Rotation angle.
t : float or array, shape (n_steps,)
Position between start and end
Returns
-------
w1 : float or array, shape (n_steps,)
Weights for quaternion 1
w2 : float or array, shape (n_steps,)
Weights for quaternion 2
"""
if angle == 0.0:
return np.ones_like(t), np.zeros_like(t)
return (np.sin((1.0 - t) * angle) / np.sin(angle),
np.sin(t * angle) / np.sin(angle)) | [
5102,
733
] |
def METHOD_NAME(self, dtype):
if cuda.runtime.deviceCanAccessPeer(0, 1) != 1:
pytest.skip('peer access is unavailable')
with pytest.warns(cupy._util.PerformanceWarning):
self.check_copy(dtype, 0, 1) | [
9,
215,
8533,
502
] |
def METHOD_NAME():
return mock.patch('sys.stdout', new_callable=io.StringIO) | [
1575,
3709,
720
] |
def METHOD_NAME(parameter, factory):
def decorator(f):
return pytest.mark.parametrize(
parameter, [lambda: factory(), lambda: make_aiter(factory())]
)(f)
return decorator | [
1743,
-1
] |
def METHOD_NAME(self, offset: int, whence: int = 0) -> int:
"""Change stream position and return the new absolute position.
Seek to offset relative position indicated by whence:
* 0: Start of stream (the default). pos should be >= 0;
* 1: Current position - pos may be negative;
* 2: End of stream - pos usually negative.
"""
return self._file.METHOD_NAME(offset, whence) | [
336
] |
def METHOD_NAME(self, msg_params):
sender_id = msg_params.get(MyMessage.MSG_ARG_KEY_SENDER)
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
local_sample_number = msg_params.get(MyMessage.MSG_ARG_KEY_NUM_SAMPLES)
self.aggregator.add_local_trained_result(
sender_id - 1, model_params, local_sample_number
)
b_all_received = self.aggregator.check_whether_all_receive()
logging.info("b_all_received = " + str(b_all_received))
if b_all_received:
global_model_params = self.aggregator.aggregate()
self.aggregator.test_on_server_for_all_clients(self.args.round_idx)
# start the next round
self.args.round_idx += 1
if self.args.round_idx == self.round_num:
# post_complete_message_to_sweep_process(self.args)
self.finish()
return
if self.is_preprocessed:
if self.preprocessed_client_lists is None:
# sampling has already been done in data preprocessor
client_indexes = [self.args.round_idx] * self.args.client_num_per_round
else:
client_indexes = self.preprocessed_client_lists[self.args.round_idx]
else:
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.args.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
print("indexes of clients: " + str(client_indexes))
print("size = %d" % self.size)
for receiver_id in range(1, self.size):
self.send_message_sync_model_to_client(
receiver_id, global_model_params, client_indexes[receiver_id - 1]
) | [
276,
277,
375,
578,
280,
340
] |
def METHOD_NAME(self):
return "source_subfolder" | [
1458,
3525
] |
def METHOD_NAME(b_mock, new_validator, node_key):
from bigchaindb.common.exceptions import InvalidPublicKey
for iv in ['ed25519-base32', 'ed25519-base64']:
new_validator['public_key']['type'] = iv
voters = ValidatorElection.recipients(b_mock)
with pytest.raises(InvalidPublicKey):
ValidatorElection.generate([node_key.public_key],
voters,
new_validator, None).sign([node_key.private_key]) | [
9,
5592,
2889,
532,
6315,
1609,
59
] |
def METHOD_NAME(self):
"""Test copied raw fields"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
for attribute in enrich_backend.RAW_FIELDS_COPY:
if attribute in item:
self.assertEqual(item[attribute], eitem[attribute])
else:
self.assertIsNone(eitem[attribute]) | [
9,
215,
772,
342
] |
def METHOD_NAME():
assert rules.perm_exists(perm_name) | [
9,
2878,
954
] |
def METHOD_NAME(predictions, references, rouge_types=None, use_stemmer=True):
predictions = [" ".join(prediction) for prediction in predictions]
references = [[" ".join(reference)] for reference in references]
bleu = evaluate.load("bleu")
assert len(predictions) == len(references)
bleu1_results = bleu.compute(predictions=predictions, references=references, max_order=1)
bleu2_results = bleu.compute(predictions=predictions, references=references, max_order=2)
bleu3_results = bleu.compute(predictions=predictions, references=references, max_order=3)
bleu4_results = bleu.compute(predictions=predictions, references=references, max_order=4)
result = {
"BLEU-1": bleu1_results["bleu"] * 100,
"BLEU-2": bleu2_results["bleu"] * 100,
"BLEU-3": bleu3_results["bleu"] * 100,
"BLEU-4": bleu4_results["bleu"] * 100,
}
return result | [
226,
8612,
5191
] |
def METHOD_NAME(self):
self.assertEqual(reader_cls_factory[GIT], GitReader)
self.assertEqual(reader_cls_factory[S3], S3Reader)
self.assertNotIn(FILE_SYSTEM, reader_cls_factory) | [
9,
3847,
1155
] |
def METHOD_NAME(actual: tp.Any, desired: tp.Any, err_msg: str = "") -> None:
try:
np.testing.assert_equal(actual, desired, err_msg=err_msg)
except AssertionError as e:
print("\n" + "# " * 12 + "DEBUG MESSAGE " + "# " * 12)
print(f"Expected: {desired}\nbut got: {actual}")
raise e | [
4872,
638,
926
] |
def METHOD_NAME(self, parent=None):
if parent is not None:
self["dialog"].set_transient_for(parent)
self["dialog"].show_all() | [
697
] |
def METHOD_NAME(settings):
return settings.USE_REGAL | [
2002
] |
def METHOD_NAME(self):
self.config = self.mock_config.return_value
self.config.TestSite = AttributeDict(
auth_url="https://test.nova.client.local",
username="TestUser",
password="test123",
project_name="TestProject",
user_domain_name="TestDomain",
project_domain_name="TestProjectDomain",
MachineTypes=["test2large"],
MachineTypeConfiguration=AttributeDict(
test2large=AttributeDict(
imageRef="bc613271-6a54-48ca-9222-47e009dc0c29"
)
),
MachineMetaData=AttributeDict(
test2large=AttributeDict(Cores=128, Memory=256, Disk=1000)
),
)
openstack_api = self.mock_openstack_api.return_value
openstack_api.init_api.return_value = async_return(return_value=None)
self.create_return_value = AttributeDict(
server=AttributeDict(name="testsite-089123")
)
openstack_api.servers.create.return_value = async_return(
return_value=self.create_return_value
)
self.get_return_value = AttributeDict(
server=AttributeDict(
name="testsite-089123", id="029312-1231-123123", status="ACTIVE"
)
)
openstack_api.servers.get.return_value = async_return(
return_value=self.get_return_value
)
openstack_api.servers.run_action.return_value = async_return(return_value=None)
openstack_api.servers.force_delete.return_value = async_return(
return_value=None
)
self.mock_openstack_api.return_value.init_api.return_value = async_return(
return_value=True
)
self.openstack_adapter = OpenStackAdapter(
machine_type="test2large", site_name="TestSite"
) | [
0,
1
] |
def METHOD_NAME(args):
place = "gpu"
place = paddle.set_device(place)
tokenizer = GPTTokenizer.from_pretrained(args.model_name_or_path)
model = OPTForCausalLM.from_pretrained(args.model_name_or_path)
# Set evaluate mode
model.eval()
bos_id = tokenizer.convert_tokens_to_ids("<|endoftext|>")
eos_id = tokenizer.convert_tokens_to_ids("<|endoftext|>")
input_ids_np = np.array([[bos_id] for i in range(args.batch_size)]).astype("int64").reshape([args.batch_size, 1])
input_ids = paddle.to_tensor(input_ids_np)
# Define model
num_loop = 100
with paddle.no_grad():
for i in range(num_loop):
# For warmup.
if 50 == i:
# PaddlePaddle >= 2.2
paddle.device.cuda.synchronize(place)
start = time.perf_counter()
model.generate(
input_ids=input_ids,
max_length=args.max_length,
decode_strategy=args.decode_strategy,
top_k=args.top_k,
top_p=args.top_p,
bos_token_id=bos_id,
eos_token_id=eos_id,
use_fast=True,
use_fp16_decoding=args.use_fp16_decoding,
)
paddle.device.cuda.synchronize(place)
fast_cost = (time.perf_counter() - start) / 50 * 1000
if args.use_fp16_decoding:
pprint(args)
print("Fast FP16 cost:", fast_cost)
return
with paddle.no_grad():
for i in range(num_loop):
# For warmup.
if 50 == i:
# PaddlePaddle >= 2.2
paddle.device.cuda.synchronize(place)
start = time.perf_counter()
model.generate(
input_ids=input_ids,
max_length=args.max_length,
decode_strategy=args.decode_strategy,
top_k=args.top_k,
top_p=args.top_p,
bos_token_id=bos_id,
eos_token_id=eos_id,
)
paddle.device.cuda.synchronize(place)
pd_cost = (time.perf_counter() - start) / 50 * 1000
device = torch.device("cuda:0")
hf_model = hf_opt_model.from_pretrained(args.model_name_or_path)
hf_model.to(device)
hf_model.eval()
hf_input_ids = torch.tensor(input_ids_np)
hf_input_ids = hf_input_ids.to(device)
if args.decode_strategy == "sampling":
do_sample = True
else:
do_sample = False
with torch.no_grad():
for i in range(num_loop):
# For warmup.
if 50 == i:
torch.cuda.synchronize()
start = time.perf_counter()
hf_model.generate(
hf_input_ids,
do_sample=do_sample,
max_length=args.max_length + 1,
bos_token_id=bos_id,
eos_token_id=eos_id,
pad_token_id=0,
top_k=args.top_k,
top_p=args.top_p,
)
torch.cuda.synchronize()
hf_cost = (time.perf_counter() - start) / 50 * 1000
pprint(args)
print("Fast FP32 cost:", fast_cost)
print("PD cost:", pd_cost)
print("HF cost:", hf_cost)
print("Speed up Fast FP32/PD:", pd_cost / fast_cost)
print("Speed up Fast FP32/HF:", hf_cost / fast_cost) | [
74,
2103
] |
def METHOD_NAME(self, parent):
self.font = self.value
super().METHOD_NAME(parent) | [
176
] |
def METHOD_NAME():
"""Returns true if the system uses floating-point 64bits for it's
long double type.
.. note:: Comparing `numpy.longdouble` and `numpy.float64` on Windows is not
possible (or at least not will all the numpy version)
"""
return _biggest_float == numpy.float64 | [
137,
14089,
15881
] |
def METHOD_NAME(command_line, env=None):
logger = logging.getLogger(__name__)
logger.debug("Running subprocess [%s] with output.", command_line)
command_line_args = shlex.split(command_line)
with subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) as command_line_process:
has_output = True
lines = []
while has_output:
line = command_line_process.stdout.readline()
if line:
lines.append(line.decode("UTF-8").strip())
else:
has_output = False
return lines | [
22,
2929,
41,
146
] |
def METHOD_NAME():
for name, attr in obj['extattrs'].items():
# skip inherited attributes
if attr.get('inheritance_source'):
continue
yield name, attr['value'] | [
474
] |
def METHOD_NAME(code):
self.called = True | [
1076
] |
def METHOD_NAME(gcs_bucket: str, filename: str):
"""Returns the contents of filename as a bytes object. Meant to be used to
patch gcs_utils.download_blob_as_bytes."""
return test_data | [
19,
9,
365
] |
def METHOD_NAME(rad):
"""Very simple method to convert Radians to degrees
Args:
rad (float): radians unit
Returns:
float: degrees
"""
return (rad * 180) / math.pi | [
5632,
24,
7759
] |
def METHOD_NAME(self):
return self.submitted()._by_group(REVIEWER_GROUP_NAME) | [
604,
7471
] |
def METHOD_NAME(self):
self._data_type = np.float32
# Very large tensor will always fail for gRPC because the Protobuf has a
# hard limit on 2GBs for the size of input tensors. All backends except
# plan backend should be able to handle payloads larger than 2GBs using
# HTTP.
very_large_tensor_shape = (
math.trunc(3 * (1024 * 1024 * 1024) / np.dtype(self._data_type).itemsize),
)
self._very_large_in0 = np.random.random(very_large_tensor_shape).astype(
self._data_type
)
# 1.9 GBs allows us to test gRPC with moderate sizes too.
large_tensor_shape = (
math.trunc(
1.9 * (1024 * 1024 * 1024) // np.dtype(self._data_type).itemsize
),
)
self._large_in0 = np.random.random(large_tensor_shape).astype(self._data_type)
small_tensor_shape = (1,)
self._small_in0 = np.random.random(small_tensor_shape).astype(self._data_type)
self._clients = (
(httpclient, httpclient.InferenceServerClient("localhost:8000")),
(grpcclient, grpcclient.InferenceServerClient("localhost:8001")),
) | [
0,
1
] |
def METHOD_NAME():
with DimensionalAnalysis():
# Arrange
si = constants_defaults.si
formulae = Formulae()
sut = formulae.hygroscopicity.r_cr
kp = 0.5
rd = 0.1 * si.micrometre
T = 300 * si.kelvins
sgm = constants_defaults.sgm_w
# Act
r_cr = sut(kp, rd**3, T, sgm)
# Assert
assert r_cr.to_base_units().units == si.metres | [
9,
3264,
3630
] |
def METHOD_NAME(cls): pass # pragma: nocover | [
8616
] |
def METHOD_NAME(val: Any, output_format: str = "standard", errors: str = "coarse") -> Any:
"""
Reformat a number string with proper separators and whitespace.
Parameters
----------
val
The value of number string.
output_format
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
Note: in the case of UNP, the compact format is the same as the standard one.
"""
val = str(val)
if val in NULL_VALUES:
return [np.nan]
if not validate_by_unp(val):
if errors == "raise":
raise ValueError(f"Unable to parse value {val}")
error_result = val if errors == "ignore" else np.nan
return [error_result]
if output_format in {"compact", "standard"}:
result = [unp.compact(val)]
return result | [
275
] |
def METHOD_NAME():
remote = {'url': 'http://openneuro.org.s3.amazonaws.com/',
'uuid': '57894849-d0c8-4c62-8418-3627be18a196'}
url = parse_rmet_line(
remote, """1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""")
assert url == 'http://openneuro.org.s3.amazonaws.com/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y' | [
9,
214,
7673,
534
] |
def METHOD_NAME(self):
assert self._plugin_pid, 'plugin_pid must be set before sending rpcs'
return self._plugin_pid | [
2793,
2243
] |
def METHOD_NAME(self) -> str:
"""Repository URI."""
if not self._repo:
self._repo = self.image.attrs["RepoTags"][0].rsplit(":", 1)[0]
return cast(str, self._repo) | [
522
] |
def METHOD_NAME(self, variables: Dict[str, Any]) -> Dict:
namespace = dict(variables or {})
namespace.setdefault("context", {})
if not namespace.get("util"):
namespace["util"] = VelocityUtil()
return namespace | [
123,
1194
] |
def METHOD_NAME(self, video_infos: dict,
sampled_inds: List[int]) -> Dict[str, List]:
"""Prepare data for the subsequent pipeline.
Args:
video_infos (dict): The whole video information.
sampled_inds (list[int]): The sampled frame indices.
Returns:
dict: The processed data information.
"""
frames_anns = video_infos['images']
final_data_info = defaultdict(list)
# for data in frames_anns:
for index in sampled_inds:
data = frames_anns[index]
# copy the info in video-level into img-level
for key in self.collect_video_keys:
if key == 'video_length':
data['ori_video_length'] = video_infos[key]
data['video_length'] = len(sampled_inds)
else:
data[key] = video_infos[key]
# Collate data_list (list of dict to dict of list)
for key, value in data.items():
final_data_info[key].append(value)
return final_data_info | [
123,
365
] |
def METHOD_NAME(self):
self.connection.service_type = "compute"
self.connection._ex_force_microversion = "2.67"
headers = self.connection.add_default_headers({})
self.assertEqual(headers["OpenStack-API-Version"], "compute 2.67")
self.connection.service_type = "compute"
self.connection._ex_force_microversion = "volume 2.67"
headers = self.connection.add_default_headers({})
self.assertNotIn("OpenStack-API-Version", headers)
self.connection.service_type = "volume"
self.connection._ex_force_microversion = "volume 2.67"
headers = self.connection.add_default_headers({})
self.assertEqual(headers["OpenStack-API-Version"], "volume 2.67") | [
9,
0,
-1
] |
def METHOD_NAME(request):
return HttpResponse(status=200) | [
1179,
473,
604,
1609,
246
] |
def METHOD_NAME(self,maxlength=12288): # usually 1024 is OK; require 12288 for ID19 | [
203,
572
] |
def METHOD_NAME(self):
url = "/_static/sentry/app/thisfiledoesnotexistlol.js"
response = self.client.get(url)
assert response.status_code == 404, response | [
9,
2121
] |
def METHOD_NAME(s1, lst=None, **kargs):
"""Same than voip_play, backward compatibility
"""
return voip_play(s1, lst, **kargs) | [
15932,
-1
] |
def METHOD_NAME(self, action_class):
obj = action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
action_class.objects.delete(obj.id)
with pytest.raises(CourseActionStateItemNotFoundError):
action_class.objects.find_first(course_key=self.course_key) | [
9,
34
] |
def METHOD_NAME(final): | [
129
] |
def METHOD_NAME(self):
"""Summary
Returns:
TYPE: Description
"""
return os.path.join(self.tmpdir, "das_cache", self.process + ".txt")
#return os.path.join(self.tmpdir, "das_cache", self.process + ".txt", self.escape_name() + ".txt") | [
19,
3931,
596,
1147
] |
def METHOD_NAME(xarr,
Temperature=25,
logColumn=13,
logDensity=4,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
gridbundle = None,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
Parameters
----------
grid_vwidth : float
the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
density : float
Density!
"""
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
Tex303,Tex322,Tex321,tau303,tau322,tau321 = gridbundle
# if this gets too far different from 1, we are gonna have a Bad Time.
scalefac = grid_vwidth/width
tex = (Tex303(logColumn,logDensity,Temperature),
Tex322(logColumn,logDensity,Temperature),
Tex321(logColumn,logDensity,Temperature))
tau = (tau303(logColumn,logDensity,Temperature)*scalefac,
tau322(logColumn,logDensity,Temperature)*scalefac,
tau321(logColumn,logDensity,Temperature)*scalefac)
if np.any(np.isnan(tex)) or np.any(np.isnan(tau)):
raise ValueError("Invalid column/density")
if verbose:
for ta,tk in zip(tau,tex):
print("density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g" % (logDensity, Temperature, logColumn, ta, tk))
if debug:
import pdb; pdb.set_trace() | [
-1,
3074,
-1
] |
def METHOD_NAME(self) -> Optional[Sequence[str]]:
"""
Gets the workflow trigger callback URL relative path parameters.
"""
return pulumi.get(self, "relative_path_parameters") | [
1821,
157,
386
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The workload classifier label.
"""
return pulumi.get(self, "label") | [
636
] |
def METHOD_NAME(self):
query = self.factory.create_query()
user = self.factory.create_user()
data = {"access_type": ACCESS_TYPE_MODIFY, "user_id": user.id}
rv = self.make_request("delete", "/api/queries/{}/acl".format(query.id), user=user, data=data)
self.assertEqual(rv.status_code, 403) | [
9,
610,
611,
43,
256,
2013
] |
def METHOD_NAME(self, host_ips):
resp = self.api_instance.list_pod_for_all_namespaces(_preload_content=False)
data = json.loads(resp.data)
pod_list = []
for info in data.get('items') or []:
pod_name = getitems(info, ['metadata', 'name'], '')
pod_namespace = getitems(info, ['metadata', 'namespace'], '')
item = self.render_resource(self.resource_kind, info, pod_name, pod_namespace)
if host_ips:
pod_host_ip = getitems(info, ['status', 'hostIP'], '')
if pod_host_ip in host_ips:
pod_list.append(item)
else:
pod_list.append(item)
return pod_list | [
19,
75,
6635
] |
def METHOD_NAME(self) -> None: ... | [
537
] |
def METHOD_NAME(self):
self.pre_operations()
self.VirtualMachineExtensionImagesGet(ctx=self.ctx)()
self.post_operations() | [
750,
710
] |
def METHOD_NAME(self):
"""test that a scalar select as a column is returned as such
and that type conversion works OK.
(this is half a SQLAlchemy Core test and half to catch database
backends that may have unusual behavior with scalar selects.)
"""
datetable = self.tables.has_dates
s = select([datetable.alias('x').c.today]).as_scalar()
s2 = select([datetable.c.id, s.label('somelabel')])
row = config.db.execute(s2).first()
eq_(row['somelabel'], datetime.datetime(2006, 5, 12, 12, 0, 0)) | [
9,
843,
2284,
1997,
1472
] |
def METHOD_NAME(self, balancer):
response = self.connection.request(
"/{}/load_balancers/{}".format(API_VERSION, balancer.id), method="DELETE"
)
return response.status == httplib.ACCEPTED | [
2656,
3737
] |
def METHOD_NAME(three_family_branch_with_trials):
"""Test that trials of parent experiment are not deleted"""
assert len(setup_storage().fetch_experiments({})) == 3
assert (
len(setup_storage()._fetch_trials({"experiment": hsh("test_double_exp", 1)}))
> 0
)
assert (
len(
setup_storage()._fetch_trials(
{"experiment": hsh("test_double_exp_child", 1)}
)
)
> 0
)
assert (
len(
setup_storage()._fetch_trials(
{"experiment": hsh("test_double_exp_grand_child", 1)}
)
)
> 0
)
execute("db rm -f test_double_exp_child --status *")
# Make sure no experiments were deleted
assert len(setup_storage().fetch_experiments({})) == 3
# Make sure only trials of given experiment were deleted
assert (
len(setup_storage()._fetch_trials({"experiment": hsh("test_double_exp", 1)}))
> 0
)
assert (
len(
setup_storage()._fetch_trials(
{"experiment": hsh("test_double_exp_child", 1)}
)
)
== 0
)
assert (
len(
setup_storage()._fetch_trials(
{"experiment": hsh("test_double_exp_grand_child", 1)}
)
)
== 0
) | [
9,
5528,
4231,
623,
10323
] |
def METHOD_NAME(self):
if not hasattr(self, "_novel_list"):
data = self.get_json(novel_list_url)
self._novel_list = {x["ID"]: x for x in data}
return self._novel_list | [
4734,
245
] |
def METHOD_NAME(self): | [
9,
16376,
77
] |
def METHOD_NAME(self, packet, frame_is_equal_to_last: bool):
timestamp = time.time()
if frame_is_equal_to_last:
half_of_timeout = (
((self._config["timeout"] * self._config["refresh_rate"]) - 1)
// 2
) / self._config["refresh_rate"]
if timestamp > self.last_frame_sent_time + half_of_timeout:
if self._destination is not None:
self._sock.sendto(
bytes(packet), (self.destination, self._config["port"])
)
self.last_frame_sent_time = timestamp
else:
if self._destination is not None:
self._sock.sendto(
bytes(packet), (self.destination, self._config["port"])
)
self.last_frame_sent_time = timestamp | [
5022,
5788
] |
def METHOD_NAME():
def testing():
x = pyro.to_funsor(torch.tensor([0.0, 1.0]), funsor.Real, dim_to_name={-1: "x"})
assert set(x.inputs) == {"x"}
px = pyro.to_funsor(
torch.ones(2, 3), funsor.Real, dim_to_name={-2: "x", -1: "y"}
)
assert px.inputs["x"].dtype == 2 and px.inputs["y"].dtype == 3
with pyro_backend("contrib.funsor"), NamedMessenger():
testing() | [
9,
4824,
1461,
24,
-1
] |
def METHOD_NAME(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir) | [
1260,
276
] |
def METHOD_NAME(args: list) -> bool:
"""Check if all inputs in teh list are time-like."""
return all([is_time_like(x) if x is not None else True for x in args]) | [
75,
1461,
472,
104,
2307
] |
def METHOD_NAME(
cls, obj: KubernetesObject, rkey: Optional[str] = None
) -> NormalizedResource:
if obj.namespace is None:
raise ValueError(
f"Cannot construct resource from Kubernetes object {obj.key} without namespace"
)
labels = dict(obj.labels)
if not rkey: # rkey is only set for annotations
# Default rkey for native Kubernetes resources
rkey = f"{obj.name}.{obj.namespace}"
# Some other code uses the 'ambassador_crd' label to know which resource to update
# .status for with the apiserver. Which is (IMO) a horrible hack, but I'm not up for
# changing it at the moment.
labels["ambassador_crd"] = rkey
else:
# Don't let it think that an annotation can have its status updated.
labels.pop("ambassador_crd", None)
# When creating an Ambassador object from a Kubernetes object, we have to make
# sure that we pay attention to 'errors', which will be set IFF watt's validation
# finds errors.
return cls.from_data(
obj.kind,
obj.name,
errors=obj.get("errors"),
namespace=obj.namespace,
generation=obj.generation,
version=obj.gvk.version,
api_group=obj.gvk.api_group,
labels=labels,
spec=obj.spec,
rkey=rkey,
) | [
280,
6254,
279
] |
def METHOD_NAME(self):
self.data.tl.find_marker_genes(cluster_res_key='leiden', n_genes=23) | [
9,
416,
1464,
7590,
293,
7590,
181
] |
def METHOD_NAME():
"""
Transfer data between two ports.
"""
params['file_transfer_serial_port'] = serials[0]
transfer_data(params, vm, sender='host')
params['file_transfer_serial_port'] = serials[1]
transfer_data(params, vm, sender='guest') | [
22,
4364,
365,
1286
] |
def METHOD_NAME(self) -> bool:
try:
return len(self.socket.LAST_ENDPOINT) > 0
except AttributeError:
return False | [
1083,
137,
4432
] |
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Authorization name.",
required=True,
id_part="child_name_1",
)
_args_schema.circuit_name = AAZStrArg(
options=["--circuit-name"],
help="ExpressRoute circuit name.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema | [
56,
134,
135
] |
def METHOD_NAME(fast_microvm, record_property, metrics):
"""
Check boot time of microVM without a network device.
"""
vm = fast_microvm
vm.jailer.extra_args.update({"boot-timer": None})
_configure_and_run_vm(vm)
boottime_us = _get_microvm_boottime(vm)
print(f"Boot time with no network is: {boottime_us} us")
record_property("boottime_no_network", f"{boottime_us} us < {MAX_BOOT_TIME_US} us")
metrics.set_dimensions(DIMENSIONS)
metrics.put_metric("boot_time", boottime_us, unit="Microseconds")
assert (
boottime_us < MAX_BOOT_TIME_US
), f"boot time {boottime_us} cannot be greater than: {MAX_BOOT_TIME_US} us" | [
9,
18197,
654,
1228
] |
def METHOD_NAME(self):
min_value = self.chan_min_value.getValue()
max_value = self.chan_max_value.getValue()
return [min_value, max_value] | [
19,
4355
] |
def METHOD_NAME(self):
polygon_wkt_df = self.spark.read.format("csv").\
option("delimiter", "\t").\
option("header", "false").\
load(mixed_wkt_geometry_input_location)
polygon_wkt_df.createOrReplaceTempView("polygontable")
polygon_wkt_df.show()
polygon_df = self.spark.sql("select ST_GeomFromText(polygontable._c0) as countyshape from polygontable")
polygon_df.show(10)
assert polygon_df.count() == 100 | [
9,
1780,
6438,
280,
526
] |
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"clusterName", self.ctx.args.cluster_name,
required=True,
),
**self.serialize_url_param(
"metricsConfigurationName", self.ctx.args.metrics_configuration_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters | [
274,
386
] |
def METHOD_NAME(method_callers):
def signature_concat(method_caller):
return f'{method_caller.get("className").replace("/", ".")}.{method_caller.get("methodName")}'
method_caller_set = set()
for method_caller in method_callers:
if isinstance(method_caller, list):
for node in method_caller:
method_caller_set.add(signature_concat(node))
elif isinstance(method_caller, dict):
method_caller_set.add(signature_concat(method_caller))
return method_caller_set | [
197,
24,
0
] |
async def METHOD_NAME(
self, test_run_id: str, body: Union[JSON, IO], *, old_test_run_id: Optional[str] = None, **kwargs: Any
) -> AsyncLROPoller[JSON]:
"""Create and start a new test run with the given name.
Create and start a new test run with the given name.
:param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic,
numeric, underscore or hyphen characters. Required.
:type test_run_id: str
:param body: Load test run model. Is either a model type or a IO type. Required.
:type body: JSON or IO
:keyword old_test_run_id: Existing test run identifier that should be rerun, if this is
provided, the test will run with the JMX file, configuration and app components from the
existing test run. You can override the configuration values for new test run in the request
body. Default value is None.
:paramtype old_test_run_id: str
:keyword content_type: Body Parameter content-type. Known values are:
'application/merge-patch+json'. Default value is None.
:paramtype content_type: str
:rtype: ~azure.developer.loadtesting._polling.LoadTestingLROPoller
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 5
create_or_update_test_run_operation = await super()._test_run_initial(
test_run_id, body, old_test_run_id=old_test_run_id, **kwargs
)
command = partial(self.get_test_run, test_run_id=test_run_id)
create_test_run_polling = AsyncTestRunStatusPoller(interval=polling_interval)
return AsyncLROPoller(command, create_or_update_test_run_operation, lambda *_: None, create_test_run_polling) | [
3287,
9,
22
] |
def METHOD_NAME():
dataset = Dataset(short_name='tas')
msg = "'diagnostic' facet missing from .*"
with pytest.raises(RecipeError, match=msg):
datasets_to_recipe([dataset]) | [
9,
4146,
24,
3912,
654,
10111
] |
def METHOD_NAME(job):
"""Return the parameters of the job in a format that is valid for using as ``**kwargs``"""
parameters = job["parameters"] or {}
py_parameters = {k.replace("-", "_"): v for k, v in parameters.items()}
return py_parameters | [
203,
386
] |
def METHOD_NAME(age, description=None, tokeninfo=None):
"""
Find unused tokens and mark them either by setting the description or by
setting a tokeninfo.
Tokeninfo parameter needs to be provided like key=value.
"""
tlist = _get_tokenlist(age)
for token_obj in tlist:
if description:
print("Setting description for token {0!s}: {1!s}".format(
token_obj.token.serial, description))
token_obj.set_description(description)
token_obj.save()
if tokeninfo:
key, value = tokeninfo.split("=")
print("Setting tokeninfo for token {0!s}: {1!s}={2!s}".format(
token_obj.token.serial, key, value))
token_obj.add_tokeninfo(key, value)
token_obj.save() | [
1743
] |
def METHOD_NAME(self) -> None:
"""Switches focus to the parent context. If the current context is the
top level browsing context, the context remains unchanged.
:Usage:
::
driver.switch_to.parent_frame()
"""
self._driver.execute(Command.SWITCH_TO_PARENT_FRAME) | [
935,
896
] |
def METHOD_NAME(bot: Text, **kwargs):
kwargs['bot'] = bot
for settings in MessageBroadcastSettings.objects(**kwargs):
settings = settings.to_mongo().to_dict()
settings['_id'] = settings['_id'].__str__()
yield settings | [
245,
817
] |
def METHOD_NAME(alpha: dc.float64, beta: dc.float64, C: dc.float64[NI, NJ], A: dc.float64[NI, NK], B: dc.float64[NK,
NJ]):
lA = np.empty((lNI, lNKa), dtype=A.dtype)
lB = np.empty((lNKb, lNJ), dtype=B.dtype)
lC = np.empty((lNI, lNJ), dtype=A.dtype)
Av = np.reshape(A, (Px, lNI, Py, lNKa))
A2 = np.transpose(Av, axes=(0, 2, 1, 3))
Bv = np.reshape(B, (Px, lNKb, Py, lNJ))
B2 = np.transpose(Bv, axes=(0, 2, 1, 3))
Cv = np.reshape(C, (Px, lNI, Py, lNJ))
C2 = np.transpose(Cv, axes=(0, 2, 1, 3))
dc.comm.Scatter(A2, lA)
dc.comm.Scatter(B2, lB)
dc.comm.Scatter(C2, lC)
tmp = distr.MatMult(lA, lB, (NI, NJ, NK))
lC[:] = alpha * tmp + beta * lC
dc.comm.Gather(lC, C2)
C[:] = np.transpose(C2, (0, 2, 1, 3)) | [
10616,
7508
] |
def METHOD_NAME(self): # noqa: PLR0912 # FIXME
while not self.queue.empty():
task: BatchTask = self.queue.get()
if isinstance(task.data, str):
file_path = task.data
if os.path.splitext(task.data)[1] in LoadROIImage.get_extensions():
project_tuple = LoadROIImage.load([task.data])
else:
project_tuple = LoadStackImage.load([task.data])
elif isinstance(task.data, MaskProjectTuple):
project_tuple: MaskProjectTuple = task.data
file_path = project_tuple.image.file_path
else:
continue
try:
name = os.path.basename(file_path)
blank = get_mask(project_tuple.roi_info.roi, project_tuple.mask, project_tuple.selected_components)
algorithm = cast(StackAlgorithm, MaskAlgorithmSelection[task.parameters.algorithm]())
algorithm.set_image(project_tuple.image)
algorithm.set_mask(blank)
algorithm.set_parameters(task.parameters.values)
if isinstance(task.save_prefix, tuple):
self.range_signal.emit(0, algorithm.get_steps_num() + 1)
else:
self.range_signal.emit(0, algorithm.get_steps_num())
# noinspection PyTypeChecker
segmentation = algorithm.calculation_run(partial(self.progress_info, name))
state2 = StackSettings.transform_state(
project_tuple,
segmentation.roi_info,
{i: segmentation.parameters for i in segmentation.roi_info.bound_info}
if segmentation.roi_info is not None
else {},
[],
)
if isinstance(task.save_prefix, tuple):
self.progress_info(name, "saving", algorithm.get_steps_num())
name = f"{os.path.splitext(os.path.basename(file_path))[0]}.seg"
re_end = re.compile(r"(.*_version)(\d+)\.seg$")
os.makedirs(task.save_prefix[0], exist_ok=True)
while os.path.exists(os.path.join(task.save_prefix[0], name)):
if match := re_end.match(name):
num = int(match[2]) + 1
name = match[1] + str(num) + ".seg"
else:
name = f"{os.path.splitext(os.path.basename(file_path))[0]}_version1.seg"
SaveROI.save(os.path.join(task.save_prefix[0], name), state2, parameters=task.save_prefix[1])
else:
self.multiple_result.emit(state2)
except Exception as e: # pylint: disable=broad-except
self.error_signal.emit(f"Exception occurred during proceed {file_path}. Exception info {e}")
self.index += 1
self.index = 0
self.execution_done.emit() | [
22,
7111
] |
def METHOD_NAME(self, node: mparser.CodeBlockNode) -> None:
self.visit_default_func(node)
for i in node.lines:
i.accept(self) | [
716,
544,
573,
1716
] |
def METHOD_NAME(model, adapter_type) -> NoReturn:
raise MissingMaterializationError(
materialization=model.config.materialized, adapter_type=adapter_type
) | [
1038,
9230
] |
def METHOD_NAME(fake_connection):
ticket = create_ticket(uuid="uuid")
data = json.dumps(ticket).encode("utf8")
fake_connection.response = FakeResponse(data=data)
response = imagetickets.get_ticket("uuid")
assert response == ticket | [
9,
377,
41,
17
] |
def METHOD_NAME(self, *args, **kwargs):
txt = super(JuliaBundle, self).METHOD_NAME(*args, **kwargs)
txt += self.module_generator.prepend_paths('EBJULIA_ADMIN_DEPOT_PATH', self.extensions_depot)
txt += self.module_generator.prepend_paths('EBJULIA_ADMIN_LOAD_PATH', self.admin_load_path)
return txt | [
93,
298,
1967
] |
def METHOD_NAME(tmp_path, md_saved):
cbr_path = datadir / "fake_cbr.cbr"
shutil.copy(cbr_path, tmp_path)
tmp_comic = comicapi.comicarchive.ComicArchive(tmp_path / cbr_path.name)
assert tmp_comic.seems_to_be_a_comic_archive()
assert tmp_comic.write_cbi(comicapi.genericmetadata.md_test)
md = tmp_comic.read_cbi()
assert md.replace(pages=[]) == md_saved.replace(
pages=[],
day=None,
alternate_series=None,
alternate_number=None,
alternate_count=None,
imprint=None,
notes=None,
web_link=None,
format=None,
manga=None,
page_count=None,
maturity_rating=None,
story_arcs=[],
series_groups=[],
scan_info=None,
characters=[],
teams=[],
locations=[],
) | [
9,
73,
-1,
647
] |
def METHOD_NAME(result, obj, indent):
s = '%.3f' % obj
while '.' in s and s.endswith('0'):
s = s[:-1]
result.append(s) | [
-1
] |
def METHOD_NAME(self, original_node, updated_node):
if (
isinstance(updated_node.func, cst.Attribute) and
updated_node.func.attr.value == "wire" and
eval(updated_node.func.value.value, {}, self.env) is magma_module
):
# m.wire(value, target)
return self._gen_assign(updated_node.args[1].value,
updated_node.args[0].value)
return updated_node | [
3457,
128
] |