text (string, lengths 15 to 7.82k) | ids (list, lengths 1 to 7) |
---|---|
def METHOD_NAME(args, string):
if args.verbose:
print(string) | [
15428,
38
] |
def METHOD_NAME(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_dataset(dataset, expected_features) | [
9,
126,
280,
526,
2947,
623,
1645
] |
def METHOD_NAME(ctx: Context, actor: Actor) -> Optional[str]:
book_plain = get_slot("cur_book_plain")(ctx, actor)
if not book_plain:
return None
return get_published_year(book_plain) | [
19,
4884,
842
] |
def METHOD_NAME(self, v):
return struct.pack("<I", v) | [
77,
1715,
2236
] |
def METHOD_NAME(
make_organization,
make_user,
make_alert_receive_channel,
make_alert_group,
):
organization = make_organization()
user_1 = make_user(
organization=organization, role=LegacyAccessControlRole.VIEWER, _verified_phone_number="1234567890"
)
alert_receive_channel = make_alert_receive_channel(organization=organization)
alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
notify_user_task(user_1.pk, alert_group.pk)
error_log_record = UserNotificationPolicyLogRecord.objects.last()
assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
assert error_log_record.reason == NOTIFICATION_UNAUTHORIZED_MSG
assert error_log_record.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_FORBIDDEN | [
9,
959,
21,
168,
217,
1574
] |
def METHOD_NAME(band: tuple):
ctx = get_context()
assert band == ctx.band
return np.random.rand(3, 3) | [
3938
] |
def METHOD_NAME(deploy_cfn_template, aws_client):
secret_name = f"secret-{short_uid()}"
stack = deploy_cfn_template(template=TEST_TEMPLATE_11, parameters={"SecretName": secret_name})
rs = aws_client.secretsmanager.describe_secret(SecretId=secret_name)
assert rs["Name"] == secret_name
assert "DeletedDate" not in rs
aws_client.cloudformation.delete_stack(StackName=stack.stack_name)
assert wait_until(
lambda: aws_client.cloudformation.describe_stacks(StackName=stack.stack_id)["Stacks"][0][
"StackStatus"
]
== "DELETE_COMPLETE"
)
rs = aws_client.secretsmanager.describe_secret(SecretId=secret_name)
assert "DeletedDate" in rs | [
9,
6397,
276,
11255,
444
] |
def METHOD_NAME(integration_check, pg_instance):
"""
If no relation metrics are being collected, autodiscovery should not run.
"""
pg_instance["database_autodiscovery"] = DISCOVERY_CONFIG
pg_instance['relations'] = []
del pg_instance['dbname']
check = integration_check(pg_instance)
run_one_check(check, pg_instance)
assert check.autodiscovery is None | [
9,
15055,
1015,
1295
] |
def METHOD_NAME(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
for width in range(T.shape[0]):
for i in range(T.shape[0] - width):
include = np.asarray(range(i, i + width + 1))
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, include)
comp_P, comp_I = maamped(dask_client, T, m, include)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I) | [
9,
11758,
1872
] |
def METHOD_NAME(sa):
df = pd.DataFrame({"col": [True, False]})
validator = build_sa_validator_with_data(
df=df,
sa_engine_name="sqlite",
table_name="expect_column_values_to_be_in_type_list_dialect_pyathena_boolean_1",
)
# Monkey-patch dialect for testing purposes.
validator.execution_engine.dialect_module = aws.sqlalchemy_athena
result = validator.expect_column_values_to_be_in_type_list(
"col", type_list=["string", "boolean"]
)
assert result == ExpectationValidationResult(
success=True,
expectation_config={
"expectation_type": "expect_column_values_to_be_in_type_list",
"kwargs": {
"column": "col",
"type_list": ["string", "boolean"],
},
"meta": {},
},
result={
"element_count": 2,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0,
},
exception_info={
"raised_exception": False,
"exception_traceback": None,
"exception_message": None,
},
meta={},
) | [
9,
1297,
105,
199,
24,
673,
623
] |
def METHOD_NAME(self, thumbnails_dir):
current_time = time.time()
for root, _, filenames in os.walk(thumbnails_dir):
for filename in filenames:
path = os.path.join(root, filename)
modification_time = os.path.getmtime(path)
if current_time - modification_time > self._days_alive_secs:
os.remove(path) | [
1587,
950
] |
def METHOD_NAME(cur):
cmd = "select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';"
log.debug("SQL Query: %s", cmd)
cur.execute(cmd)
result = cur.fetchall()
return [x[0] for x in result] | [
245,
2253
] |
def METHOD_NAME(self):
"""
Test loading separate module configs
"""
self.render_config_template(
reload_path=self.working_dir + "/configs/*.yml",
reload_type="modules",
inputs=False,
)
os.mkdir(self.working_dir + "/logs/")
os.mkdir(self.working_dir + "/configs/")
logfile1 = self.working_dir + "/logs/test1.log"
logfile2 = self.working_dir + "/logs/test2.log"
with open(self.working_dir + "/configs/module1.yml", 'w') as f:
f.write(moduleConfigTemplate.format(
self.working_dir + "/logs/test1.log"))
with open(self.working_dir + "/configs/module2.yml", 'w') as f:
f.write(moduleConfigTemplate.format(
self.working_dir + "/logs/test2.log"))
proc = self.start_beat()
with open(logfile1, 'w') as f:
f.write("Hello 1\n")
self.wait_until(lambda: self.output_lines() == 1)
with open(logfile2, 'w') as f:
f.write("Hello 2\n")
self.wait_until(lambda: self.output_lines() == 2)
output = self.read_output()
# Reloading stopped.
self.wait_until(
lambda: self.log_contains("Loading of config files completed."),
max_timeout=15)
# Make sure the correct lines were picked up
assert self.output_lines() == 2
assert output[0]["message"] == "Hello 1"
assert output[1]["message"] == "Hello 2"
proc.check_kill_and_wait() | [
9,
557,
736
] |
def METHOD_NAME():
# Compatibility alias.
return os.path.abspath(common.GetDrakePath()) | [
19,
6415,
157
] |
def METHOD_NAME(self):
optlist, args = getopt.getopt(sys.argv[1:], 'shl:')
for key, val in optlist:
if key == '-s':
self.silent_mode = True
elif key == '-l':
self.target_logfile = val
elif key == '-h':
self.help()
if self.target_logfile is None:
self.help() | [
250,
134
] |
def METHOD_NAME(cls, course):
"""
Returns the grading policy hash for the given course.
"""
ordered_policy = json.dumps(
course.grading_policy,
separators=(',', ':'), # Remove spaces from separators for more compact representation
sort_keys=True,
)
return b64encode(sha1(ordered_policy.encode('utf-8')).digest()).decode('utf-8') | [
5840,
54,
1161
] |
def METHOD_NAME(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item) | [
188
] |
def METHOD_NAME(result_part: list,
tmpdir: Optional[str] = None
) -> Optional[list]:
"""Collect results on cpu mode.
Saves the results on different gpus to 'tmpdir' and collects them by the
rank 0 worker.
Args:
result_part (list): The part of prediction results.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode. If is None, use `tempfile.mkdtemp()`
to make a temporary path. Defaults to None.
Returns:
list or None: The collected results.
"""
rank, world_size = get_dist_info()
if world_size == 1:
return result_part
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ), 32, dtype=torch.uint8)
if rank == 0:
mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8)
dir_tensor[:len(tmpdir)] = tmpdir
broadcast(dir_tensor, 0)
tmpdir = dir_tensor.numpy().tobytes().decode().rstrip()
else:
mkdir_or_exist(tmpdir)
# dump the part result to the dir
with open(osp.join(tmpdir, f'part_{rank}.pkl'), 'wb') as f: # type: ignore
pickle.dump(result_part, f, protocol=2)
barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
path = osp.join(tmpdir, f'part_{i}.pkl') # type: ignore
with open(path, 'rb') as f:
part_list.extend(pickle.load(f))
shutil.rmtree(tmpdir)
return part_list | [
1444,
1151,
51,
2265
] |
def METHOD_NAME(self): | [
0,
1
] |
def METHOD_NAME(x, axis):
return op("squeeze", [x, axis]).as_tensor() | [
3822
] |
def METHOD_NAME(request, record_schema) -> ConfiguredAirbyteCatalog:
record_schema = request.param if hasattr(request, "param") else record_schema
stream = ConfiguredAirbyteStream(
stream=AirbyteStream(name="my_stream", json_schema=record_schema, supported_sync_modes=[SyncMode.full_refresh]),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.append,
)
return ConfiguredAirbyteCatalog(streams=[stream]) | [
2824,
1964
] |
def METHOD_NAME(gradle_file_with_dependencies):
gradle_file, expected_regular_dependencies, expected_test_dependencies = gradle_file_with_dependencies
regular_dependencies, test_dependencies = utils.parse_dependencies(gradle_file)
assert len(regular_dependencies) == len(expected_regular_dependencies)
assert all([regular_dependency in expected_regular_dependencies for regular_dependency in regular_dependencies])
assert len(test_dependencies) == len(expected_test_dependencies)
assert all([test_dependency in expected_test_dependencies for test_dependency in test_dependencies]) | [
9,
214,
2410
] |
def METHOD_NAME(self):
fileNames, _ = QFileDialog.getOpenFileNames(self, "Open File(s)")
if fileNames:
self.tabManager.setCurrentWidget(self.replayWindow)
for fileName in fileNames:
self.replayWindow.openFile(fileName) | [
69,
1452,
171
] |
def METHOD_NAME(self,
neighborsToCut = "const FieldList<%(Dimension)s, std::vector<std::vector<int>>>&"):
"""Remove connectivity between neighbors. | [
188,
1939
] |
def METHOD_NAME(now):
docs = [{"lu": now, "k": k, "v": "old", "state": "failed"} for k in range(3)]
docs.extend([{"lu": now, "k": k, "v": "old", "state": "failed"} for k in range(18, 20)])
return docs | [
3368,
1423,
2228,
672
] |
async def METHOD_NAME(self) -> None:
if not OPENCV_AVAILABLE:
logger.error(
"Can't use red eye removal filter if OpenCV and NumPy are not available."
)
return
faces = [
face
for face in self.context.request.focal_points
if face.origin == "Face Detection"
]
if not faces:
return
mode, data = self.engine.image_data_as_rgb()
mode = mode.lower()
size = self.engine.size
image = np.ndarray(
shape=(size[1], size[0], 4 if mode == "rgba" else 3),
dtype="|u1",
buffer=data,
).copy()
for face in faces:
face_x = int(face.x - face.width / 2)
face_y = int(face.y - face.height / 2)
face_image = image[
face_y : face_y + face.height, face_x : face_x + face.width
]
eye_rects = self.cascade.detectMultiScale(
face_image,
scaleFactor=HAAR_SCALE,
minNeighbors=MIN_NEIGHBORS,
minSize=MIN_SIZE,
)
for pos_x, pos_y, width, height in eye_rects:
# Crop the eye region
eye_image = face_image[
pos_y : pos_y + height, pos_x : pos_x + width
]
# split the images into 3 channels
red, green, blue = cv2.split(eye_image)
# Add blue and green channels
blue_green = cv2.add(blue, green)
mean = blue_green // 2
# threshold the mask based on red color and combination of blue and green color
mask = ((red > RED_THRESHOLD * mean) & (red > 60)).astype(
np.uint8
) * 255
# Some extra region may also get detected , we find the largest region
# find all contours
contours_return = cv2.findContours(
mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
) # It return contours and Hierarchy
if len(contours_return) == 2:
contours, _ = contours_return
else:
_, contours, _ = contours_return
# find contour with max area
max_area = 0
max_cont = None
for cont in contours:
area = cv2.contourArea(cont)
if area > max_area:
max_area = area
max_cont = cont
if max_cont is None:
continue
mask = mask * 0 # Reset the mask image to complete black image
# draw the biggest contour on mask
cv2.drawContours(mask, [max_cont], 0, (255), -1)
# Close the holes to make a smooth region
mask = cv2.morphologyEx(
mask,
cv2.MORPH_CLOSE,
cv2.getStructuringElement(cv2.MORPH_DILATE, (5, 5)),
)
mask = cv2.dilate(mask, (3, 3), iterations=3)
# The information of only red color is lost,
# So we fill the mean of blue and green color in all
# three channels(BGR) to maintain the texture
# Fill this black mean value to masked image
mean = cv2.bitwise_and(mean, mask) # mask the mean image
mean = cv2.cvtColor(
mean, cv2.COLOR_GRAY2RGB
) # convert mean to 3 channel
mask = cv2.cvtColor(
mask, cv2.COLOR_GRAY2RGB
) # convert mask to 3 channel
eye = (
cv2.bitwise_and(~mask, eye_image) + mean
) # Copy the mean color to masked region to color image
face_image[pos_y : pos_y + height, pos_x : pos_x + width] = eye
self.engine.set_image_data(image.tobytes()) | [
4420,
4421
] |
def METHOD_NAME(self):
return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake") | [
298,
171,
2071,
157
] |
def METHOD_NAME():
data = {"key_1": "v1", "key_2": "v2", "key_3": "v3"}
form = ".{}:{}"
keys, serialized_data = serialize_dict(data, form)
assert Counter(keys) == Counter(data.keys())
data_parts = serialized_data.split(".")[1:]
for index, part in enumerate(data_parts):
key, value = part.split(":")
assert key == keys[index]
assert value == data[key] | [
9,
183
] |
def METHOD_NAME(self):
context = command_context.JobRunContext(self.filler)
todays_date = datetime.date.today().strftime("%Y-%m-%d")
assert_equal(context["shortdate"], todays_date) | [
9,
5991,
41,
202,
22,
5181
] |
def METHOD_NAME() -> List[str]:
return ["namesilo.com"] | [
19,
5356
] |
def METHOD_NAME(self, *names: str, user=None, shared=None) -> int: | [
954
] |
def METHOD_NAME(url_root):
"""
Show documentation about officeRetrieve
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'office_id',
'value': 'integer', # boolean, integer, long, string
'description': 'The unique internal identifier for this office '
'(either office_id OR office_we_vote_id required -- not both. '
'If it exists, office_id is used instead of office_we_vote_id)',
},
{
'name': 'office_we_vote_id',
'value': 'string', # boolean, integer, long, string
'description': 'The unique identifier for this office across all networks '
'(either office_id OR office_we_vote_id required -- not both.) NOTE: In the future we '
'might support other identifiers used in the industry.',
},
]
optional_query_parameter_list = [
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
]
try_now_link_variables_dict = {
'office_we_vote_id': 'wv01off922',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "ballot_item_display_name": string,\n' \
' "ballotpedia_id": string,\n' \
' "ballotpedia_office_id": integer,\n' \
' "ballotpedia_office_name": string,\n' \
' "ballotpedia_office_url": string,\n' \
' "ballotpedia_race_id": integer,\n' \
' "ballotpedia_race_office_level": string,\n' \
' "district_name": string,\n' \
' "google_civic_election_id": integer,\n' \
' "id": integer,\n' \
' "kind_of_ballot_item": string (CANDIDATE, MEASURE),\n' \
' "last_updated": string (time in this format %Y-%m-%d %H:%M:%S),\n' \
' "maplight_id": string,\n' \
' "number_voting_for": integer,\n' \
' "number_elected": integer,\n' \
' "ocd_division_id": string,\n' \
' "primary_party": string,\n' \
' "race_office_level": string,\n' \
' "state_code": string,\n' \
' "we_vote_id": string,\n' \
' "wikipedia_id": string,\n' \
'}'
template_values = {
'api_name': 'officeRetrieve',
'api_slug': 'officeRetrieve',
'api_introduction':
"Retrieve detailed information about one office.",
'try_now_link': 'apis_v1:officeRetrieveView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values | [
1902,
404,
366,
671,
199
] |
def METHOD_NAME(self, mock_glob):
"""
Ensure that if we don't have any go sources the build system is not
suitable
"""
mock_glob.return_value = []
self.assertEqual(self.buildsystem.probe("src"), 0) | [
9,
2570,
654,
1515,
505
] |
def METHOD_NAME(response):
return HelperClient.is_response_contain(response, text="Эта страница доступна только участникам Клуба") | [
137,
1089,
4496
] |
def METHOD_NAME(s: BinaryIO, tag: Tag, isConstruct: bool) -> int:
"""
Unpack contextual tag and return the tag length.
:param s: stream
:param tag: BER tag
:param isConstruct: True if a construct is expected
"""
byte = Uint8.unpack(s.read(1))
if byte != ((Class.BER_CLASS_CTXT | berPC(isConstruct)) | (Tag.BER_TAG_MASK & tag)):
raise ValueError("Unexpected contextual tag")
return readLength(s) | [
203,
17191,
82
] |
def METHOD_NAME(ansible_module):
"""
:Title: Run tps-activity-find help
:Description: Run tps-activity-find help
:Requirement: TPS Server CLI Tests
:Setup:
Use subsystems setup via ansible playbooks
:Steps:
Setup Dogtagpki using ansible playbooks
:Expectedresults:
Dogtagpki should be setup via ansible playbooks
:Automated: Yes
:CaseComponent: \-
"""
activity_help_output = ansible_module.command('pki tps-activity-find --help')
for result in activity_help_output.values():
assert "--help Show help options" in result['stdout']
assert "--size <size> Page size" in result['stdout']
assert "--start <start> Page start" in result['stdout'] | [
9,
14825,
416,
40
] |
async def METHOD_NAME(self, store_id: bytes32) -> Dict[str, Any]:
response = await self.fetch("get_local_root", {"id": store_id.hex()})
return response | [
19,
125,
1563
] |
def METHOD_NAME():
"""
JavaScript specific to gizmo to be placed in the
{% block scripts %} block
"""
return ("tethys_gizmos/js/select_input.js",) | [
19,
16825,
3382
] |
def METHOD_NAME(self, timestamp: Timestamp) -> bool:
now = timestamp.total_wct.seconds
if now - self.last_check >= self.window_size:
self.last_check = now
return True
return False | [
250
] |
def METHOD_NAME(self, comm, dtype):
"""
Setup mesh and tacs assembler for problem we will be testing.
"""
# Overwrite default check values
if dtype == complex:
self.rtol = 5e-8
self.dh = 1e-50
else:
self.rtol = 1e-1
self.dh = 1e-7
# Only check for relative tolerance
self.atol = 1e99
# Create the stiffness object
props = constitutive.MaterialProperties(rho=2570.0, E=70e9, nu=0.3, ys=350e6)
stiff = constitutive.IsoShellConstitutive(props, t=0.1, tNum=0)
# Set up the element transform
ref_axis = np.array([0.0, 1.0, 1.0], dtype=dtype)
transform = elements.ShellRefAxisTransform(ref_axis)
elem = elements.Tri3Shell(transform, stiff)
# Allocate the TACSCreator object
vars_per_node = elem.getVarsPerNode()
creator = TACS.Creator(comm, vars_per_node)
if comm.rank == 0:
num_elems = 2 * nx * ny
num_nodes = (nx + 1) * (ny + 1)
# discretize plate
x = np.linspace(0, Lx, nx + 1, dtype)
y = np.linspace(0, Ly, ny + 1, dtype)
xyz = np.zeros([nx + 1, ny + 1, 3], dtype)
xyz[:, :, 0], xyz[:, :, 1] = np.meshgrid(x, y, indexing="ij")
node_ids = np.arange(num_nodes).reshape(nx + 1, ny + 1)
# Set connectivity for each element
conn = []
for i in range(nx):
for j in range(ny):
conn.append(
[node_ids[i, j], node_ids[i + 1, j], node_ids[i + 1, j + 1]]
)
conn.append(
[node_ids[i + 1, j + 1], node_ids[i, j + 1], node_ids[i, j]]
)
conn = np.array(conn, dtype=np.intc).flatten()
ptr = np.arange(0, 3 * num_elems + 1, 3, dtype=np.intc)
comp_ids = np.zeros(num_elems, dtype=np.intc)
creator.setGlobalConnectivity(num_nodes, ptr, conn, comp_ids)
# Set up the boundary conditions (fixed at left hand edge)
bcnodes = np.array(node_ids[0, :], dtype=np.intc)
creator.setBoundaryConditions(bcnodes)
# Set the node locations
creator.setNodes(xyz.flatten())
# Set the elements for each (only one) component
element_list = [elem]
creator.setElements(element_list)
# Create the tacs assembler object
assembler = creator.createTACS()
# Get number of elements on this processor
local_num_elems = assembler.getNumElements()
# Create object to hold pressures
aux_elems = TACS.AuxElements()
# Add gravity load to all elements
grav = elem.createElementInertialForce(g)
for elem_id in range(local_num_elems):
aux_elems.addElement(elem_id, grav)
# Set tractions in assembler
assembler.setAuxElements(aux_elems)
return assembler | [
102,
9096
] |
def METHOD_NAME(self, service_name, username):
return self.items[service_name][username] | [
19,
2897
] |
def METHOD_NAME(self, o, expr, multiindex):
o = self.reuse_if_untouched(o, expr, multiindex)
self.nodetype[o] = self.nodetype[expr]
return o | [
3460
] |
def METHOD_NAME(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code] | [
188,
2916
] |
def METHOD_NAME(self, system_frame):
m = system_frame
add_costing(m)
m.fs.costing.initialize()
results = solve(m)
assert_optimal_termination(results)
# check costing
assert value(m.fs.costing.LCOT) == pytest.approx(3.224725, rel=1e-3)
assert value(m.fs.costing.LCOW) == pytest.approx(3.224725, rel=1e-3) | [
9,
1431
] |
def METHOD_NAME(self):
spec, subprocesses = self.load_collaboration('collaboration.bpmn', 'my_collaboration')
# Only executable processes should be started
self.assertIn('process_buddy', subprocesses)
self.assertNotIn('random_person_process', subprocesses)
self.workflow = BpmnWorkflow(spec, subprocesses)
start = self.workflow.get_tasks_from_spec_name('Start')[0]
# Set up some data to be evaluated so that the workflow can proceed
start.data['lover_name'] = 'Peggy'
self.workflow.do_engine_steps()
# Call activities should be created for executable processes and be reachable
buddy = self.workflow.get_tasks_from_spec_name('process_buddy')[0]
self.assertIsInstance(buddy.task_spec, CallActivity)
self.assertEqual(buddy.task_spec.spec, 'process_buddy')
self.assertEqual(buddy.state, TaskState.WAITING) | [
9,
11284
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
async def METHOD_NAME(self, ctx):
"""Manage mee6 conversions.""" | [
-1
] |
def METHOD_NAME(self):
bootstrap = Bootstrap(ztps_default_config=True)
config = random_string()
url = "http://{}/{}".format(bootstrap.server, config)
bootstrap.ztps.set_definition_response(
actions=[{"action": "test_action", "attributes": {"url": url}}]
)
bootstrap.ztps.set_action_response("test_action", get_action("replace_config"))
contents = random_string()
bootstrap.ztps.set_file_response(config, contents)
bootstrap.start_test()
try:
self.assertTrue(os.path.isfile(bootstrap.startup_config))
self.assertTrue(contents.split() == file_log(bootstrap.startup_config))
self.assertTrue(bootstrap.success())
except AssertionError as assertion:
print("Output: {}".format(bootstrap.output))
print("Error: {}".format(bootstrap.error))
raise_exception(assertion)
finally:
bootstrap.end_test() | [
9,
274,
1434
] |
def METHOD_NAME(self):
"""Take the parsed stats from the sargasso report and add them to the
basic stats table at the top of the report"""
headers = OrderedDict()
headers["sargasso_percent_assigned"] = {
"title": "% Assigned",
"description": "Sargasso % Assigned reads",
"max": 100,
"min": 0,
"suffix": "%",
"scale": "RdYlGn",
}
headers["Assigned-Reads"] = {
"title": "{} Assigned".format(config.read_count_prefix),
"description": "Sargasso Assigned reads ({})".format(config.read_count_desc),
"min": 0,
"scale": "PuBu",
"modify": lambda x: float(x) * config.read_count_multiplier,
"shared_key": "read_count",
}
self.general_stats_addcols(self.sargasso_data, headers) | [
11511,
577,
410
] |
def METHOD_NAME(db, collection):
del collection['meta']
collection = Collection().save(collection)
assert 'meta' not in collection
yield collection | [
2228,
1098
] |
def METHOD_NAME(self):
input_dominoes = [(1, 2), (2, 3), (3, 1), (4, 5), (5, 6), (6, 4)]
output_chain = can_chain(input_dominoes)
self.refute_correct_chain(input_dominoes, output_chain) | [
9,
2018,
2756,
-1,
5359
] |
def METHOD_NAME(s3_seekable_obj, s3_client_stub):
s3_client_stub.add_response("head_object", {"ContentLength": 1000})
assert s3_seekable_obj.seek(0, io.SEEK_END) == 1000
assert s3_seekable_obj.tell() == 1000 | [
9,
336,
1798
] |
def METHOD_NAME(self) -> float:
if not self._pending_claims:
return 0.0
now = time.time()
return min([0, *[ready_at - now for ready_at in self._pending_timeouts.values()]]) | [
3223,
24,
243,
1359,
2556,
250
] |
def METHOD_NAME(sock, timeout=None):
"""Waits for writing to be available on a given socket.
Returns True if the socket is readable, or False if the timeout expired.
"""
return wait_for_socket(sock, write=True, timeout=timeout) | [
618,
43,
77
] |
def METHOD_NAME(self, command: Command) -> None:
"""Sets the id for the pipette"""
self._pipette_id.update(self._mount_strings(command)) | [
0,
5302,
147
] |
def METHOD_NAME(param_kwargs):
"""Remove 'data' and 'cmd' keys, if present."""
arguments = param_kwargs
arguments.pop("data", None)
arguments.pop("cmd", None)
return arguments | [
1702,
1475
] |
def METHOD_NAME(self) -> _TypeCode: ... | [
-1
] |
def METHOD_NAME(self, feat):
"""Encode feature in `beam_search` (optional).
Args:
x (numpy.ndarray): input feature (T, D)
Returns:
paddle.Tensor: encoded feature (T, D)
"""
raise NotImplementedError("encode method is not implemented") | [
421
] |
def METHOD_NAME(self):
self.pre_operations()
self.AvailableEndpointServicesList(ctx=self.ctx)()
self.post_operations() | [
750,
710
] |
def METHOD_NAME(items):
ret = {}
for item in items:
ret.update(eval(item))
return ret | [
411,
553
] |
def METHOD_NAME(self):
fields_string = api.portal.get_registry_record(
'plone.request_access_form_accepted_fields',
default='',
)
fields = [
field.strip()
for field in fields_string.splitlines()
if len(field.strip()) > 0
]
if len(fields) == 0:
return self.default_fields
return fields | [
3242,
342
] |
def METHOD_NAME(signum: int, frame: typing.Optional[types.FrameType]) -> None:
# The loop may or may not be running, depending on the state of the application when this occurs.
# Signals on POSIX only occur on the main thread usually, too, so we need to ensure this is
# threadsafe.
# We log native thread IDs purely for debugging purposes.
if _LOGGER.isEnabledFor(ux.TRACE):
_LOGGER.log(
ux.TRACE,
"interrupt %s occurred on thread %s, process on thread %s will be notified shortly\n"
"Stacktrace for developer sanity:\n%s",
signum,
threading.get_native_id(),
loop_thread_id,
"".join(traceback.format_stack(frame)),
)
loop.call_soon_threadsafe(_raise_interrupt, signum) | [
1519
] |
def METHOD_NAME(index, character):
if not raise_on_unexpected: # not raising, so we dump the buffer into output and append this character
output.extend(multibyte_buffer)
multibyte_buffer.clear()
output.append(character)
nonlocal is_in_multibyte
is_in_multibyte = False
nonlocal multibytes_expected
multibytes_expected = 0
else:
raise ValueError(f"Expected multibyte continuation at index: {index}") | [
276,
1068,
365
] |
def METHOD_NAME(self):
all_scales = []
for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \
zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales):
all_scales.append(self.merge_layer_scales([qkv_scale, dense_scale, mh4h_scale, m4hh_scale]))
return torch.cat(all_scales) | [
411,
9005
] |
def METHOD_NAME(self):
status, out = self._run_command([self.command, 'fetch'])
if status != 0:
raise RepositoryException("cannot fetch {}: {}".format(self, out)) | [
1047
] |
def METHOD_NAME(self, env_var_value, flag_value, expect_seg_fault): | [
22,
61,
1162
] |
def METHOD_NAME(self):
super().METHOD_NAME()
self.data_source = get_sample_data_source()
self.data_source.domain = self.domain
self.data_source.save()
self.report = get_sample_report_config()
self.report.config_id = self.data_source.get_id
self.report.domain = self.domain
self.report.save() | [
0,
1
] |
def METHOD_NAME(self, uf2block, expected_chip_id):
flags = mkuf2.UF2Writer.UF2_FLAG_FAMILYID_PRESENT | mkuf2.UF2Writer.UF2_FLAG_MD5_PRESENT
parsed_binaries = []
block_list = [] # collect block numbers here
total_blocks = set() # collect total block numbers here
for block in UF2BlockReader(uf2block).get():
if block.blockNo == 0:
# new file has been detected
base_addr = block.targetAddr
current_addr = base_addr
binary_writer = BinaryWriter(self.generate_binary(0))
self.assertEqual(len(block), mkuf2.UF2Writer.UF2_BLOCK_SIZE)
self.assertEqual(block.magicStart0, mkuf2.UF2Writer.UF2_FIRST_MAGIC)
self.assertEqual(block.magicStart1, mkuf2.UF2Writer.UF2_SECOND_MAGIC)
self.assertEqual(block.flags & flags, flags)
self.assertEqual(len(block.data), mkuf2.UF2Writer.UF2_DATA_SIZE)
payload = block.data[:block.payloadSize]
md5_obj = hashlib.md5(payload)
md5_part = block.data[block.payloadSize:block.payloadSize + mkuf2.UF2Writer.UF2_MD5_PART_SIZE]
address, length = struct.unpack('<II', md5_part[:-md5_obj.digest_size])
md5sum = md5_part[-md5_obj.digest_size:]
self.assertEqual(address, block.targetAddr)
self.assertEqual(length, block.payloadSize)
self.assertEqual(md5sum, md5_obj.digest())
self.assertEqual(block.familyID, expected_chip_id)
self.assertEqual(block.magicEnd, mkuf2.UF2Writer.UF2_FINAL_MAGIC)
self.assertEqual(current_addr, block.targetAddr)
binary_writer.append(payload)
block_list.append(block.blockNo)
total_blocks.add(block.numBlocks)
if block.blockNo == block.numBlocks - 1:
self.assertEqual(block_list, list(range(block.numBlocks)))
# we have found all blocks and in the right order
self.assertEqual(total_blocks, {block.numBlocks}) # numBlocks are the same in all the blocks
del block_list[:]
total_blocks.clear()
parsed_binaries += [(base_addr, binary_writer.f_name)]
current_addr += block.payloadSize
return parsed_binaries | [
356,
37
] |
def METHOD_NAME(self, mock_request, mock_session):
session = ncclient.transport.SSHSession(self.device_handler)
obj = Unlock(session, self.device_handler, raise_mode=RaiseMode.ALL)
obj.request(target="running")
node = new_ele("unlock")
sub_ele(sub_ele(node, "target"), "running")
xml = ElementTree.tostring(node)
call = mock_request.call_args_list[0][0][0]
call = ElementTree.tostring(call)
self.assertEqual(call, xml) | [
9,
2671
] |
def METHOD_NAME(self, func):
arr = self.run_in_threads(func, n_threads=4)
distinct = set(arr)
self.assertEqual(len(distinct), 1, distinct) | [
250,
4955,
10426
] |
def METHOD_NAME(self):
raise NotImplementedError() | [
19,
734,
797
] |
def METHOD_NAME(self):
self.login_as(user=self.user)
response = self.client.post(self.url, format="json")
assert response.status_code == 201
assert ApiToken.objects.get(token=response.data["token"]) | [
9,
129,
466
] |
def METHOD_NAME(v, f, all_tris, epsilon, safe_check):
for vertices, polygons in zip(*C([v, f])):
yield bvh_tree_from_polygons(vertices, polygons, all_triangles=all_tris, epsilon=epsilon, safe_check=safe_check) | [
-1,
24,
-1,
50
] |
def METHOD_NAME(cfg, box_coder):
matcher = Matcher(
cfg.MODEL.RPN.FG_IOU_THRESHOLD,
cfg.MODEL.RPN.BG_IOU_THRESHOLD,
allow_low_quality_matches=True,
)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION
)
loss_evaluator = RPNLossComputation(
matcher,
fg_bg_sampler,
box_coder,
generate_rpn_labels
)
return loss_evaluator | [
93,
4464,
1572,
8539
] |
def METHOD_NAME(f):
"""Return decorator swallowing the contest name if in multi contest mode.
"""
@wraps(f)
def wrapped_f(self, *args):
if self.is_multi_contest():
# Swallow the first argument (the contest name).
f(self, *(args[1:]))
else:
# Otherwise, just forward all arguments.
f(self, *args)
return wrapped_f | [
457,
8807
] |
def METHOD_NAME(self):
"""Test that we can create a fileset aggregation from a folder that contains one file and delete the
aggregation through the api"""
self.create_composite_resource()
new_folder = 'fileset_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the the txt file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.generic_file, upload_folder=new_folder)
# there should be one resource file
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# file has a folder
self.assertEqual(res_file.file_folder, new_folder)
# check that the resource file is not part of an aggregation
self.assertEqual(res_file.has_logical_file, False)
self.assertEqual(FileSetLogicalFile.objects.count(), 0)
# set folder to fileset logical file type (aggregation)
set_type_url = reverse('set_file_type_public', kwargs={"pk": self.composite_resource.short_id,
"file_path": "",
"hs_file_type": "FileSet"})
self.client.post(set_type_url, data={"folder_path": new_folder})
res_file = self.composite_resource.files.first()
# file has the same folder
self.assertEqual(res_file.file_folder, new_folder)
self.assertEqual(res_file.logical_file_type_name, self.logical_file_type_name)
self.assertEqual(FileSetLogicalFile.objects.count(), 1)
# aggregation dataset name should be same as the folder name
self.assertEqual(res_file.logical_file.dataset_name, new_folder)
delete_agg_url = reverse('delete_aggregation_public', kwargs={"resource_id": self.composite_resource.short_id,
"file_path": new_folder,
"hs_file_type": "FileSetLogicalFile"})
self.client.delete(delete_agg_url)
self.assertEqual(FileSetLogicalFile.objects.count(), 0)
self.assertEqual(self.composite_resource.files.all().count(), 0)
self.composite_resource.delete() | [
9,
5081,
129,
34
] |
def METHOD_NAME(self, captchaType, url, siteKey, captchaParams):
taskID = None
if not captchaParams.get('clientKey'):
raise CaptchaParameter(
"CapMonster: Missing clientKey parameter."
)
self.clientKey = captchaParams.get('clientKey')
if captchaParams.get('proxy') and not captchaParams.get('no_proxy'):
hostParsed = urlparse(captchaParams.get('proxy', {}).get('https'))
if not hostParsed.scheme:
raise CaptchaParameter('Cannot parse proxy correctly, bad scheme')
if not hostParsed.netloc:
raise CaptchaParameter('Cannot parse proxy correctly, bad netloc')
ports = {
'http': 80,
'https': 443
}
self.proxy = {
'proxyType': hostParsed.scheme,
'proxyAddress': hostParsed.hostname,
'proxyPort': hostParsed.port if hostParsed.port else ports[self.proxy['proxyType']],
'proxyLogin': hostParsed.username,
'proxyPassword': hostParsed.password,
}
else:
self.proxy = None
try:
taskID = self.requestSolve(captchaType, url, siteKey)
return self.requestJob(taskID)
except polling2.TimeoutException:
try:
if taskID:
self.reportJob(taskID)
except polling2.TimeoutException:
raise CaptchaTimeout(
"CapMonster: Captcha solve took to long and also failed "
f"reporting the task with task id {taskID}."
)
raise CaptchaTimeout(
"CapMonster: Captcha solve took to long to execute "
f"task id {taskID}, aborting."
) | [
19,
2244,
3485
] |
def METHOD_NAME(self, name):
if hasattr(self, normalize(name)):
raise AssertionError("Attribute '%s' should not be set" % name) | [
250,
309,
130,
0
] |
def METHOD_NAME(batch: Dict[str, Any], batch_idx: int) -> int:
values = batch["values"]
batches = batch["batches"]
num_values = len(values)
num_batch_indexes = len(batches)
num_timestamps = len(batch["timestamps"])
if num_values != num_batch_indexes or num_batch_indexes != num_timestamps:
pytest.fail(
f"mismatched slices: not ({num_values} == {num_batch_indexes} == {num_timestamps})"
)
if not any(values):
pytest.fail(f"received bad batch, something went wrong: {batch}")
if batches[0] != batch_idx:
pytest.fail(
f"batch did not start at correct batch, {batches[0]} != {batch_idx}: {batch}"
)
# Check batches are monotonic with no gaps.
if not all(x + 1 == y for x, y in zip(batches, batches[1:])):
pytest.fail(f"skips in batches sampled: {batch}")
# 10 is just a threshold at which it would be really strange for a batch to be monotonic.
if accumulated and len(values) > 10 and all(x < y for x, y in zip(values, values[1:])):
pytest.fail(
f"per batch accumulated metric was monotonic, which is really fishy: {batch}"
)
return int(batches[-1]) + 1 | [
187,
845,
2277
] |
def METHOD_NAME(self):
if self._token is None and self.user and self._password:
self._token = self.__oidc.token(username=self.user, password=self._password)
self._token['time'] = time.time()
elif not self._token or not ('expires_in' in self._token and 'time' in self._token):
# cannot refresh
return
elif self._token['expires_in'] < int(time.time()) - self._token['time'] + 10:
try:
self._token = self.__oidc.refresh_token(self._token['refresh_token'])
self._token['time'] = time.time()
except Exception:
self._token = self.__oidc.token(username=self.user, password=self._password)
self._token['time'] = time.time() | [
19,
1089,
466,
280,
14710
] |
def METHOD_NAME(self, params, headers):
"""
Update URL parameters with values from self.gke_params.
@inherits: :class:`GoogleBaseConnection.pre_connect_hook`
"""
params, headers = super().METHOD_NAME(params, headers)
if self.gke_params:
params.update(self.gke_params)
return params, headers | [
709,
707,
1021
] |
def METHOD_NAME(verbose=None):
from test import test_cmd
test_support.run_doctest(test_cmd, verbose)
test_support.run_unittest(TestAlternateInput) | [
9,
57
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind") | [
1253
] |
def METHOD_NAME(features, out_features, out_bp, indice_pairs,
indice_pair_num):
if features.dtype == torch.float32 or features.dtype == torch.half:
return ext_module.METHOD_NAME(features, out_features,
out_bp, indice_pairs,
indice_pair_num)
else:
raise NotImplementedError | [
15336,
7371,
2955
] |
def METHOD_NAME(num_dense_features, example):
"""Parser function for pre-processed Criteo TSV records."""
label_defaults = [[0.0]]
int_defaults = [[0.0] for _ in range(num_dense_features)]
categorical_defaults = [['00000000'] for _ in range(len(_VOCAB_SIZES))]
record_defaults = label_defaults + int_defaults + categorical_defaults
fields = tf.io.decode_csv(
example, record_defaults, field_delim='\t', na_value='-1')
num_labels = 1
features = {}
features['targets'] = tf.reshape(fields[0], (-1,))
int_features = []
for idx in range(num_dense_features):
positive_val = tf.nn.relu(fields[idx + num_labels])
int_features.append(tf.math.log(positive_val + 1))
int_features = tf.stack(int_features, axis=1)
cat_features = []
for idx in range(len(_VOCAB_SIZES)):
field = fields[idx + num_dense_features + num_labels]
# We append the column index to the string to make the same id in different
# columns unique.
cat_features.append(
tf.strings.to_hash_bucket_fast(field + str(idx), _VOCAB_SIZES[idx]))
cat_features = tf.cast(
tf.stack(cat_features, axis=1), dtype=int_features.dtype)
features['inputs'] = tf.concat([int_features, cat_features], axis=1)
return features | [
214,
1441,
667
] |
def METHOD_NAME(self): | [
9,
129,
280,
171,
7909,
41,
1205
] |
def METHOD_NAME(self,inputstr,typefunc): | [
19,
15605,
44
] |
def METHOD_NAME(self, job):
res = {"arch_seq": str(job.config["arch_seq"])}
hp_names = self._problem._hp_space._space.get_hyperparameter_names()
for hp_name in hp_names:
if hp_name == "loss":
res["loss"] = job.config["loss"]
else:
res[hp_name] = job.config["hyperparameters"][hp_name]
return res | [
2973,
219
] |
def METHOD_NAME(self):
return str(date.today().year) | [
19,
235,
339,
1201,
842
] |
def METHOD_NAME(self, path):
if os.path.isdir(path):
tools.rmdir(path)
else:
os.remove(path) | [
188
] |
def METHOD_NAME(self, container, representation):
self.update(container, representation) | [
705
] |
def METHOD_NAME(self, serial_port_to_save):
# Can be optionally overridden to save a serial port derived from a udev serial number to a cache
self.serial_port = serial_port_to_save
# upstream_config.serial_port = serial_port_to_save
# upstream_config.save() | [
73,
4364,
237
] |
def METHOD_NAME(self):
backfills = [TEST_BACKFILL_2, TEST_BACKFILL_1]
validate_entries_are_sorted(backfills) | [
9,
109,
1389
] |
def METHOD_NAME(cls) -> bool:
"""Check if anything is selected at all."""
return bool(cls.__selection) | [
220,
3115
] |
def METHOD_NAME( placement_verilog_d, concrete_name, nets_d, *, skip_globals=False):
hpwl_top_down = calculate_HPWL_from_placement_verilog_d_top_down( placement_verilog_d, concrete_name, nets_d, skip_globals=skip_globals)
hpwl_bottom_up = calculate_HPWL_from_placement_verilog_d_bottom_up( placement_verilog_d, concrete_name, skip_globals=skip_globals)
if hpwl_top_down != hpwl_bottom_up:
logger.warning( f'HPWL calculated in different ways differ: top_down: {hpwl_top_down} bottom_up: {hpwl_bottom_up}')
return hpwl_top_down | [
1593,
13674,
741,
280,
4764,
7149,
227
] |
def METHOD_NAME(self, mock_import_module):
self.sat = Sat()
self.solver = MagicMock()
self.transaction = Mock()
self.transaction.newpackages = Mock(
return_value=[Mock()]
)
self.selection = Mock()
self.solver.transaction = Mock(
return_value=self.transaction
)
self.sat.pool.Solver = Mock(
return_value=self.solver
)
self.sat.pool.select = Mock(
return_value=self.selection
)
mock_import_module.assert_called_once_with('solv')
self.solv = mock_import_module.return_value
self.sat.pool.setarch.assert_called_once_with()
self.sat.pool.setarch.reset_mock() | [
102
] |
def METHOD_NAME(address: str) -> Optional[BTCAddress]:
"""
Converts a legacy BCH address to CashAddr format.
Code is taken from:
https://github.com/oskyk/cashaddress/blob/master/cashaddress/convert.py#L46
Returns None if an error occured during conversion.
"""
try:
is_valid = is_valid_bitcoin_cash_address(address)
if not is_valid:
return None
if address.startswith(_PREFIX) is False:
address = _PREFIX + ':' + address
_, base32string = address.split(':')
decoded_string = _b32decode(base32string)
converted_bits = bech32.convertbits(decoded_string, 5, 8)
if converted_bits is None:
return None
version = _address_type('cash', converted_bits[0])[0]
legacy_version = _address_type('legacy', version)[1]
payload = converted_bits[1:-6]
return BTCAddress(b58encode_check(
_code_list_to_string([legacy_version] + payload),
).decode())
except ValueError:
return None | [
3513,
24,
3116,
85
] |
def METHOD_NAME(self):
"""The default options for Multiverse USD extraction."""
return {
"writeAll": False,
"writeTransforms": False,
"writeVisibility": False,
"writeAttributes": True,
"writeMaterials": True,
"writeVariants": False,
"writeVariantsDefinition": False,
"writeActiveState": False,
"writeNamespaces": True,
"numTimeSamples": 1,
"timeSamplesSpan": 0.0
} | [
235,
1881
] |
def METHOD_NAME(self, smoothing_coefficient, expected_actions): | [
9,
3647,
1116
] |
def METHOD_NAME():
package1 = Requirement('test-package[ext1]>=1.1.1; extra == "group1"')
assert format_requirement_string(package1) == 'Missing test-package[ext1]>=1.1.1; extra == "group1"'
package2 = Requirement('test-package>=1.1.1; extra == "group1" or extra == "group2" or extra == "group3"')
assert format_requirement_string(
package2) == 'Missing test-package>=1.1.1; extra == "group1" or "group2" or "group3"'
package3 = Requirement('test-package>=1.1.1')
assert format_requirement_string(package3) == 'Missing test-package>=1.1.1' | [
9,
275,
6577,
144
] |