text (stringlengths 15 to 7.82k) | ids (sequencelengths 1 to 7) |
---|---|
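A minimal sketch of how a two-column dump like this (a code-snippet string plus a short id sequence per row) might be loaded with the Hugging Face datasets library; the file name rows.jsonl is a placeholder, not part of the original dump, while the "text" and "ids" field names mirror the columns above:

from datasets import load_dataset

# Hypothetical JSON Lines export with one {"text": ..., "ids": [...]} object per row.
ds = load_dataset("json", data_files="rows.jsonl", split="train")
for row in ds.select(range(3)):
    # Each record pairs a snippet with its id sequence, as in the rows below.
    print(len(row["text"]), row["ids"])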
def METHOD_NAME(self):
node = Dataset(source=self.source, time_key="day", data_key=["data", "other"])
out = node.eval(node.coordinates.transpose("time", "lat", "lon"))
assert out.dims == ("time", "lat", "lon", "output")
np.testing.assert_array_equal(out["output"], ["data", "other"])
np.testing.assert_array_equal(out.sel(output="data"), self.data)
np.testing.assert_array_equal(out.sel(output="other"), self.other)
node.close_dataset()
# single
node = Dataset(source=self.source, time_key="day", data_key=["other"])
out = node.eval(node.coordinates.transpose("time", "lat", "lon"))
assert out.dims == ("time", "lat", "lon", "output")
np.testing.assert_array_equal(out["output"], ["other"])
np.testing.assert_array_equal(out.sel(output="other"), self.other)
node.close_dataset()
# alternate output names
node = Dataset(source=self.source, time_key="day", data_key=["data", "other"], outputs=["a", "b"])
out = node.eval(node.coordinates.transpose("time", "lat", "lon"))
assert out.dims == ("time", "lat", "lon", "output")
np.testing.assert_array_equal(out["output"], ["a", "b"])
np.testing.assert_array_equal(out.sel(output="a"), self.data)
np.testing.assert_array_equal(out.sel(output="b"), self.other)
node.close_dataset()
# default
node = Dataset(source=self.source, time_key="day")
out = node.eval(node.coordinates.transpose("time", "lat", "lon"))
assert out.dims == ("time", "lat", "lon", "output")
np.testing.assert_array_equal(out["output"], ["data", "other"])
np.testing.assert_array_equal(out.sel(output="data"), self.data)
np.testing.assert_array_equal(out.sel(output="other"), self.other)
node.close_dataset() | [
9,
19,
365,
107
] |
def METHOD_NAME(self) -> timedelta:
tot = self.tot
travel_time = self.travel_time_between_waypoints(
self.layout.ingress, self.layout.drop_off
)
return tot - travel_time | [
10366,
104
] |
def METHOD_NAME(deployment: "Deployment") -> Dict[str, str]:
labels = {
"prefect.io/deployment-name": deployment.name,
}
if deployment.updated is not None:
labels["prefect.io/deployment-updated"] = deployment.updated.in_timezone(
"utc"
).to_iso8601_string()
return labels | [
414,
1503,
415
] |
def METHOD_NAME(self) -> dict:
"""Accessor for the JSON record value generated."""
return {
**{
prop: getattr(self, f"_{prop}").ser
for prop in (
"queries_msg",
"disclosures",
)
if getattr(self, prop) is not None
},
} | [
148,
99
] |
def METHOD_NAME(self, index: Optional[int]) -> None:
r"""
Args:
index: Value of default index. This index will be returned when OOV token is queried.
"""
self.vocab.METHOD_NAME(index) | [
0,
235,
724
] |
def METHOD_NAME(self, params):
for key, value in params.items():
if isinstance(value, list) or isinstance(value, tuple):
for i, subval in enumerate(value):
if isinstance(subval, dict):
subdict = self._encode_nested_dict("%s[%d]" % (key, i), subval)
yield from self.METHOD_NAME(subdict)
else:
yield ("%s[%d]" % (key, i), subval)
elif isinstance(value, dict):
subdict = self._encode_nested_dict(key, value)
yield from self.METHOD_NAME(subdict)
elif isinstance(value, datetime.datetime):
yield (key, self._encode_datetime(value))
else:
yield (key, value) | [
58,
421
] |
def METHOD_NAME(log_file_name):
# open log file
if os.path.exists(log_file_name):
os.remove(log_file_name)
with open(log_file_name, 'w') as logfile:
result = 0  # default to success; the error paths below overwrite this
machine_name = "None"
if "-machine" in sys.argv:
machine_name = sys.argv[sys.argv.index("-machine") + 1]
else:
logfile.write("\n**** Error, machine name argument required (-machine my_machine_name)\n")
result = 1
# remove old output files, if any
files_to_remove = glob.glob(base_name+".e*")
files_to_remove.append('*.out')
files_to_remove.append('*.nem')
files_to_remove.append('*.pex')
for file in os.listdir(os.getcwd()):
if file in files_to_remove:
os.remove(file)
# run Peridigm
command = ["mpiexec", "-np", "4", "../../../src/Peridigm", base_name+".yaml"]
p = Popen(command, stdout=PIPE)
return_code = p.wait()
if return_code != 0:
result = return_code
out, err = p.communicate()
if out is not None:
out = out.decode()
logfile.write(str(out))
if err is not None:
err = err.decode()
logfile.write(str(err))
logfile.flush()
# concatenate output files
command = ["../../../scripts/epu", "-p", "4", base_name]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
# compare performance statistics against gold statistics
# performance data for current run
stdout_vals = out.split()
wallclock_time_string = ""
for i in range(len(stdout_vals)):
if stdout_vals[i] == "Total":
wallclock_time_string = stdout_vals[i+2]
wallclock_time = 0.0
try:
wallclock_time = float(wallclock_time_string)
except ValueError:
print("Error converting wallclock time string to a floating-point value")
# gold standard performance data for this machine
perf_gold_file = open(base_name+".perf")
buff = read_line(perf_gold_file)
gold_perf_data = []
while buff is not None:
vals = buff.split()
if machine_name in vals:
gold_perf_data = vals
buff = read_line(perf_gold_file)
if gold_perf_data == []:
logfile.write("\n**** Error, reference (gold) performance data not found for machine " + machine_name + "\n")
sys.exit(1)
gold_num_proc = int(gold_perf_data[1])
gold_wallclock_time = float(gold_perf_data[2])
gold_wallclock_time_tolerance = float(gold_perf_data[3])
if(wallclock_time > gold_wallclock_time + gold_wallclock_time_tolerance):
result = 1
logfile.write("\n**** PERFORMANCE TEST FAILED: wallclock time exceeded benchmark value plus tolerance.")
elif(wallclock_time < gold_wallclock_time - gold_wallclock_time_tolerance):
result = 1
logfile.write("\n**** PERFORMANCE TEST FAILED: wallclock time was LESS than benchmark value minus tolerance (code is running TOO FAST!).")
else:
logfile.write("\n**** PERFORMANCE TEST PASSED: wallclock time was within tolerance.")
logfile.write("\n**** wallclock time = " + str(wallclock_time))
logfile.write("\n**** benchmark value = " + str(gold_wallclock_time))
logfile.write("\n**** tolerance = " + str(gold_wallclock_time_tolerance) +"\n")
logfile.flush()
# compare output against gold file only if the gold file is present
gold_file_name = base_name + "_gold.e"
if os.path.exists(gold_file_name):
command = ["../../../scripts/exodiff", \
"-stat", \
"-f", \
base_name+".comp", \
base_name+".e", \
base_name+"_gold.e"]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
else:
logfile.write("\n**** Gold file " + gold_file_name + " not found, skipping exodiff.\n\n")
return result | [
407,
9
] |
def METHOD_NAME(self) -> int | None: ... | [
395,
-1
] |
def METHOD_NAME(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev") | [
19,
828,
2794
] |
def METHOD_NAME(self, path, loader):
"""
Return the source files from which to load data,
if the path is a directory - lookup vars file inside
"""
global_vars_paths = self.get_option("paths")
extensions = self.get_option("_valid_extensions")
found_files = []
for g_path in global_vars_paths:
b_opath = os.path.realpath(to_bytes(os.path.join(path, g_path)))
opath = to_text(b_opath)
try:
if not os.path.exists(b_opath):
# file does not exist, skip it
self._display.vvv(f"Path: {opath} does not exist - skipping")
continue
self._display.vvv(f"Adding Path: {opath} to global variables")
if os.path.isdir(b_opath):
self._display.debug(f"\tProcessing dir {opath}")
res = loader._get_dir_vars_files(opath, extensions)
self._display.debug(f"Found variable files {str(res)}")
found_files.extend(res)
else:
found_files.append(b_opath)
except Exception as e:
raise AnsibleParserError(to_native(e)) from e
return found_files | [
416,
1210,
1458
] |
def METHOD_NAME(test_fn):
with pytest.raises(unittest.SkipTest):
test_fn() | [
9,
256,
4081,
450,
241,
79
] |
def METHOD_NAME():
return JSONDeserializer() | [
763,
3815
] |
async def METHOD_NAME(payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
return await prov.embeddings(payload) | [
2465
] |
def METHOD_NAME(self: EasyOCRRunnable, *args: t.Any, **kwargs: t.Any) -> t.Any:
return self.predict_fns[method_name](*args, **kwargs) | [
22
] |
def METHOD_NAME(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player_sprite.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player_sprite.change_x = 0 | [
69,
59,
586
] |
def METHOD_NAME(field, data):
return val * data[ptype, "particle_ones"] | [
18176
] |
def METHOD_NAME(self, t):
return self.t_error(t) | [
791,
10282,
168
] |
def METHOD_NAME():
client.load('mirror')
assert client.get()['status'] == 200, 'init'
body = '0123456789' * 500
(resp, sock) = client.post(
headers={
'Host': 'localhost',
'Connection': 'keep-alive',
},
start=True,
body=body,
read_timeout=1,
)
assert resp['body'] == body, 'keep-alive 1'
body = '0123456789'
resp = client.post(sock=sock, body=body)
assert resp['body'] == body, 'keep-alive 2' | [
9,
1515,
8753,
2829
] |
def METHOD_NAME():
controller = realestate_controller.RealEstateController(queue=None)
controller.print_help() | [
9,
38,
40
] |
async def METHOD_NAME(client, args):
azure_monitor_metric = AzureMonitorMetric("Async ServiceBus Sender")
process_monitor = ProcessMonitor("monitor_sender_stress_async.log", "sender_stress_async")
stress_test = StressTestRunnerAsync(
senders=[client.get_queue_sender(QUEUE_NAME)],
receivers=[],
message_size=args.message_size,
send_batch_size=args.send_batch_size,
duration=timedelta(seconds=args.duration),
azure_monitor_metric=azure_monitor_metric,
process_monitor=process_monitor,
fail_on_exception=False
)
await stress_test.run_async() | [
958,
353
] |
def METHOD_NAME(self):
earthquake = wntr.scenario.Earthquake((40, 55), 5, 10000.0)
data = pd.DataFrame(
columns=["Diameter", "Material", "Topography", "Liquifaction"],
index=["P1", "P2", "P3"],
)
data.loc["P1", :] = ["Small", "ACP", "Narrow valley", "Total"]
data.loc["P2", :] = ["Medium", "PV", "Terrace", "Partial"]
data.loc["P3", :] = ["Large", "SP", "Stiff alluvial", "None"]
C = earthquake.correction_factor(data)
expected = pd.Series(
[1 * 1.2 * 3.2 * 2.4, 0.8 * 1 * 1.5 * 2, 0.5 * 0.3 * 0.4 * 1],
index=["P1", "P2", "P3"],
)
assert_series_equal(C, expected) | [
9,
2451,
397
] |
def METHOD_NAME(self) -> None: ... | [
83,
16548
] |
def METHOD_NAME(pdf_path, password):
pdf_path = RESOURCE_ROOT / pdf_path
reader = PdfReader(pdf_path)
writer = PdfWriter()
if password:
reader.decrypt(password)
page = reader.pages[0]
writer.add_page(page)
op = Transformation().rotate(90).scale(1.2)
page.add_transformation(op)
page.merge_page(page)
op = Transformation().scale(1).translate(tx=1, ty=1)
page.add_transformation(op)
page.merge_page(page)
op = Transformation().rotate(90).scale(1).translate(tx=1, ty=1)
page.add_transformation(op)
page.merge_page(page)
page.add_transformation((1, 0, 0, 0, 0, 0))
page.scale(2, 2)
page.scale_by(0.5)
page.scale_to(100, 100)
page = writer.pages[0]
page.compress_content_streams()
page.extract_text() | [
1174,
829
] |
def METHOD_NAME(self, target):
"""Returns all the location types supported for the given target.
This is a protected method expected to be implemented by a subclass.
Parameters
----------
target : any
The UI target for which supported location types are queried.
Returns
-------
locators_classes : set
Supported locator types for the given target type.
""" | [
19,
1081
] |
def METHOD_NAME(self):
super().METHOD_NAME()
# check handling of epoch vs t0
a = self.create(epoch=10)
b = self.create(t0=10)
utils.assert_quantity_sub_equal(a, b) | [
9,
80
] |
def METHOD_NAME(df_factory):
# Create test data
x = np.array([None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.nan, np.nan, np.nan])
df = df_factory(x=x)
uniques = df.x.unique(dropnan=True)
assert set(uniques) == set(['', 'A', 'B', -1, 0, 2, None]) | [
9,
2768,
1038
] |
def METHOD_NAME(
account_id: str,
region_name: str,
result: dict,
logical_resource_id: str,
resource: dict,
):
resource["PhysicalResourceId"] = result["QueueUrl"] | [
276,
1571
] |
def METHOD_NAME(self):
return bool(self.__scope == FREE) | [
137,
3712
] |
def METHOD_NAME(self, inputs: Dict[str, torch.Tensor]):
encoder_outputs = {}
for input_feature_name, input_values in inputs.items():
encoder = self.input_features.get(input_feature_name)
encoder_output = encoder(input_values)
encoder_outputs[input_feature_name] = encoder_output[ENCODER_OUTPUT]
return encoder_outputs | [
76
] |
def METHOD_NAME(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number, maintaining
the aspect ratio, i.e., if height > width, then the image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError("img should be PIL Image. Got {}".format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError("Got inappropriate size arg: {}".format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.METHOD_NAME((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.METHOD_NAME((ow, oh), interpolation)
else:
return img.METHOD_NAME(size[::-1], interpolation) | [
1128
] |
def METHOD_NAME(self):
AccessLog.objects.all().delete()
response = self.login(is_valid_username=True, is_valid_password=True)
response = self.client.get(reverse("admin:logout"))
self.assertEqual(AccessLog.objects.all().count(), 0)
self.assertContains(response, "Logged out", html=True) | [
9,
1205,
2431,
529,
1434,
390
] |
async def METHOD_NAME(
cls: Type["Transport"],
stream: Stream,
upgrade_request: H11Request | None = None,
) -> "Transport":
ws = WSConnection(ConnectionType.SERVER)
if upgrade_request:
# TODO: Typing issue between h11 and wsproto headers
# (see: https://github.com/python-hyper/wsproto/issues/173)
ws.initiate_upgrade_connection(
headers=upgrade_request.headers, # type: ignore[arg-type]
path=upgrade_request.target,
)
transport = cls(stream, ws)
# Wait for client to init WebSocket handshake
event: Union[str, Event] = "Websocket handshake timeout"
with trio.move_on_after(WEBSOCKET_HANDSHAKE_TIMEOUT):
event = await transport._next_ws_event()
if isinstance(event, Request):
transport.logger.debug("Accepting WebSocket upgrade")
await transport._net_send(AcceptConnection())
return transport
transport.logger.warning("Unexpected event during WebSocket handshake", ws_event=event)
raise TransportError(f"Unexpected event during WebSocket handshake: {event}") | [
176,
43,
163
] |
def METHOD_NAME(self):
stdout, stderr, retc = lib.base.coe(lib.shell.shell_exec(self.check + ' --test=stdout/EXAMPLE-b2.ok'))
self.assertRegex(stdout, r'Everything is ok\.')
self.assertEqual(stderr, '')
self.assertEqual(retc, STATE_OK) | [
9,
217,
250,
420,
4248,
1178,
13527
] |
def METHOD_NAME(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key, is_list=False):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(data_utils.collate_tokens(
[s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,
))
return res
else:
return data_utils.collate_tokens(
[s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
)
src_tokens = merge('source')
if samples[0]['target'] is not None:
is_target_list = isinstance(samples[0]['target'], list)
target = merge('target', is_target_list)
else:
target = src_tokens
return {
'id': torch.LongTensor([s['id'] for s in samples]),
'nsentences': len(samples),
'ntokens': sum(len(s['source']) for s in samples),
'net_input': {
'src_tokens': src_tokens,
'src_lengths': torch.LongTensor([
s['source'].numel() for s in samples
]),
},
'target': target,
} | [
3677
] |
async def METHOD_NAME(volume: aiodocker.docker.DockerVolume):
inspected_volume = await volume.show()
async for attempt in AsyncRetrying(reraise=True, wait=wait_fixed(1)):
with attempt:
print(f"<-- deleting volume '{inspected_volume['Name']}'...")
await volume.delete()
print(f"<-- volume '{inspected_volume['Name']}' deleted") | [
618,
43,
2276,
3099
] |
def METHOD_NAME(self):
result = []
for app in cache.get_apps():
model_list = cache.get_models(app)
for model in model_list:
file_fields = [
field.name
for field in model._meta.fields
if field.get_internal_type() == 'FileField'
]
if len(file_fields) > 0:
files = model.objects.all().values_list(*file_fields)
result.extend(
[
split_name(file)[0]
for file in itertools.chain.from_iterable(files)
if file
]
)
return result | [
19,
2002,
1537
] |
def METHOD_NAME(tdtxt, expected):
"""py.test for cell2txt"""
soup = BeautifulSoup(tdtxt, "html.parser")
td = soup.td
result = readhtml.cell2txt(td)
assert result == expected | [
9,
-1
] |
def METHOD_NAME(skills_items: List[str], active_mods):
if ModNames.luck_skill in active_mods:
skills_items.append("Luck Level")
if ModNames.socializing_skill in active_mods:
skills_items.append("Socializing Level")
if ModNames.magic in active_mods:
skills_items.append("Magic Level")
if ModNames.archaeology in active_mods:
skills_items.append("Archaeology Level")
if ModNames.binning_skill in active_mods:
skills_items.append("Binning Level")
if ModNames.cooking_skill in active_mods:
skills_items.append("Cooking Level") | [
1459,
692,
7260,
33
] |
def METHOD_NAME(self) -> Optional[int]:
return self.result_set.METHOD_NAME if self.result_set else None | [
11956
] |
def METHOD_NAME(node: Union[nodes.For, nodes.Comprehension]) -> bool:
"""Return whether the index variable in the for loop/comprehension is ONLY used to index the iterable.
True if unnecessary usage, False otherwise or if index variable not used at all.
"""
index_nodes = []
for assign_name_node in node.target.nodes_of_class((nodes.AssignName, nodes.Name)):
index_nodes.extend(_index_name_nodes(assign_name_node.name, node))
return all(_is_redundant(index_node, node) for index_node in index_nodes) and index_nodes | [
137,
2783,
4379
] |
def METHOD_NAME(self):
if self._machine_log:
STATUS.connect('machine-log-changed',lambda w: self.loadLog())
elif self._integrator_log:
STATUS.connect('periodic', self._periodicCheck) | [
12078,
176
] |
def METHOD_NAME(self, handle, msg, timeout) -> CanalError:
with error_check("Blocking send error"):
return CanalError(self.__m_dllBasic.CanalBlockingSend(handle, msg, timeout)) | [
5999,
353
] |
def METHOD_NAME(self):
"""A boolean indicating that this is a parameter object."""
#
# The semantics of ParamData and parameter are different.
# By default, ParamData are immutable. Hence, we treat the
# parameter objects as non-parameter data ... for now.
#
return False | [
137,
511,
44
] |
async def METHOD_NAME(url: str, session: aiohttp.ClientSession) -> bool:
image_extensions = ("jpg", "png", "jpeg", "gif", "webp")
if not any(url.endswith(ext) for ext in image_extensions):
return False
try:
async with session.get(url=url) as response:
return response.status == 200
except (aiohttp.InvalidURL, aiohttp.ClientConnectorError, asyncio.TimeoutError):
return False
except Exception as e: # skipcq: PYL-W0703
sentry_sdk.capture_exception(e)
return False | [
187,
660,
274
] |
def METHOD_NAME(self, job_path):
with open(job_path, 'r') as job_file:
job = job_file.read()
job_id = self._submit(job)
return job_id | [
579
] |
def METHOD_NAME(self):
data = self.cleaned_data["file"]
try:
zipfile = ZipFile(data)
interesting_files = {
x
for x in zipfile.namelist()
if x.lower()[-4:] in (".shp", ".shx", ".dbf")
}
extensions = sorted([x.lower()[-4:] for x in interesting_files])
if extensions != [".dbf", ".shp", ".shx"]:
raise BadZipFile()
except BadZipFile:
raise forms.ValidationError(
"This is not a zip file, or it doesn't contain exactly one .shp, .shx "
"and .dbf file."
)
return data | [
1356,
171
] |
def METHOD_NAME(self, x):
"""find the lowest empty row for column x"""
# start from the bottom and work up
for y in range(self.height - 1, -1, -1):
if self[x, y] == 0:
return y
raise ValueError("that column is full") | [
320
] |
def METHOD_NAME(*_, **__):
checkpoint() | [
1771
] |
def METHOD_NAME(self) -> None:
self._client.METHOD_NAME() | [
1462
] |
def METHOD_NAME(
self,
early_termination_name: Optional[EarlyTerminationPolicyType],
sampling_algorithm_name: SamplingAlgorithmType,
) -> None:
expected_nlp_sweep_settings_obj = self._get_entity_obj(early_termination_name, sampling_algorithm_name)
rest_sweep_settings_obj = expected_nlp_sweep_settings_obj._to_rest_object()
round_trip_nlp_sweep_settings_obj = NlpSweepSettings._from_rest_object(rest_sweep_settings_obj)
assert (
round_trip_nlp_sweep_settings_obj == expected_nlp_sweep_settings_obj
), f"expected: {expected_nlp_sweep_settings_obj}, actual: {round_trip_nlp_sweep_settings_obj}" | [
9,
7138,
981,
817,
3834,
3835
] |
def METHOD_NAME():
dev = square_virtual_device(control_r=1.0, num_qubits=2)
with pytest.raises(ValueError, match="Qubit not part of the device."):
dev.distance(TwoDQubit(0, 0), TwoDQubit(2, 2))
assert np.isclose(dev.distance(TwoDQubit(0, 0), TwoDQubit(1, 0)), 1.0)
dev = PasqalVirtualDevice(control_radius=1.0, qubits=[cirq.LineQubit(0), cirq.LineQubit(1)])
assert np.isclose(dev.distance(cirq.LineQubit(0), cirq.LineQubit(1)), 1.0)
dev = PasqalVirtualDevice(
control_radius=1.0, qubits=[cirq.GridQubit(0, 0), cirq.GridQubit(1, 0)]
)
assert np.isclose(dev.distance(cirq.GridQubit(0, 0), cirq.GridQubit(1, 0)), 1.0) | [
9,
1886
] |
def METHOD_NAME(v: str):
assert v == "shh", f'expected args.bar.tags["b"] to equal "shh" but got: "{v}"' | [
187,
1484
] |
def METHOD_NAME(self):
match = (self.video.vendor_id, self.video.model_id)
if match == (0x046d, 0x082d):
return ("mid", [
("low", "video/x-h264,width=640,height=360,framerate=5/1"),
("mid", "video/x-h264,width=1280,height=720,framerate=15/2"),
("high", "video/x-h264,width=1920,height=1080,framerate=10/1"),
])
if match == (0x046d, 0x0892):
return ("mid", [
("low", "image/jpeg,width=640,height=360,framerate=5/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=15/2"),
("high", "image/jpeg,width=1920,height=1080,framerate=10/1"),
])
if match == (0x046d, 0x08e5): # Logitech HD Pro Webcam C920
return ("mid", [
("low", "image/jpeg,width=640,height=360,framerate=5/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=15/2"),
("high", "image/jpeg,width=1920,height=1080,framerate=10/1"),
])
if match == (0x1224, 0x2825): # LogiLink UA0371
return ("mid", [
("low", "image/jpeg,width=640,height=480,framerate=30/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=30/1"),
("high", "image/jpeg,width=1920,height=1080,framerate=30/1"),
])
if match == (0x05a3, 0x9331): # WansView Webcam 102
return ("mid", [
("low","video/x-h264,width=640,height=360,framerate=30/1"),
("mid","video/x-h264,width=1280,height=720,framerate=30/1"),
("high","video/x-h264,width=1920,height=1080,framerate=30/1"),
])
if match == (0x534d, 0x2109): # MacroSilicon
return ("mid", [
("low", "image/jpeg,width=720,height=480,framerate=10/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=10/1"),
("high", "image/jpeg,width=1920,height=1080,framerate=10/1"),
])
if match == (0x1d6c, 0x0103): # HD 2MP WEBCAM
return ("mid", [
("low", "video/x-h264,width=640,height=480,framerate=25/1"),
("mid", "video/x-h264,width=1280,height=720,framerate=25/1"),
("high", "video/x-h264,width=1920,height=1080,framerate=25/1"),
])
if match == (0x0c45, 0x636d): # AUKEY PC-LM1E
return ("mid", [
("low", "image/jpeg,width=640,height=480,pixel-aspect-ratio=1/1,framerate=30/1"),
("mid", "image/jpeg,width=864,height=480,pixel-aspect-ratio=1/1,framerate=30/1"),
("high", "image/jpeg,width=1280,height=1024,pixel-aspect-ratio=1/1,framerate=30/1"),
])
self.logger.warning(
"Unkown USB video device {:04x}:{:04x}, using fallback pipeline."
.format(*match))
return ("mid", [
("low", "image/jpeg,width=640,height=480,framerate=30/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=30/1"),
("high", "image/jpeg,width=1920,height=1080,framerate=30/1"),
]) | [
19,
2163
] |
def METHOD_NAME(worker_id):
np.random.seed(worker_id)
imgaug.seed(worker_id) | [
235,
1794,
176,
667
] |
def METHOD_NAME(values: List[str], char: str = '"') -> List[str]:
result: List[str] = []
for item in values:
result.append(item.strip(char))
return result | [
1360,
3874,
280,
245
] |
def METHOD_NAME(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self) -> Iterator[AtlasEntity]:
yield self._create_atlas_user_entity()
yield self._create_atlas_reader_entity() | [
129,
243,
4043,
2419
] |
def METHOD_NAME(self, request):
return Response({"ok": True}) | [
72
] |
def METHOD_NAME(paginator, page):
"""
Renders the links needed to paginate a long list
"""
# For very long lists (e.g. the English ingredient with more than 8000 items)
# we muck around here to remove the pages not immediately 'around' the current
# one, otherwise we end up with a useless block with 300 pages.
if paginator.num_pages > PAGINATION_MAX_TOTAL_PAGES:
start_page = page.number - PAGINATION_PAGES_AROUND_CURRENT
for i in range(page.number - PAGINATION_PAGES_AROUND_CURRENT, page.number + 1):
if i > 0:
start_page = i
break
end_page = page.number + PAGINATION_PAGES_AROUND_CURRENT
for i in range(page.number, page.number + PAGINATION_PAGES_AROUND_CURRENT):
if i > paginator.num_pages:
end_page = i
break
page_range = range(start_page, end_page)
else:
page_range = paginator.page_range
# Set the template variables
return {'page': page, 'page_range': page_range} | [
2855
] |
def METHOD_NAME(char):
# Translators: The Control (Ctrl) key
return "" # return ' (%s+%s)' % (_('Ctrl'), char) | [
1947
] |
def METHOD_NAME(self):
"""Verify extra args are ignored
Maybe we should be throwing an error, but since `.format()`
doesn't throw an error, we would have to add our own argument
checking, which I think is more trouble than it's worth
"""
loc = translate_locator("test", "foo.message:testing,one,two,three")
assert loc == "testing one" | [
9,
1967,
335
] |
def METHOD_NAME(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json; charset=utf-8':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response | [
19,
17
] |
async def METHOD_NAME(self) -> None:
await self.create_connection(with_db=False)
await self.execute_script(f'CREATE USER "{self.database}" IDENTIFIED BY "{self.password}"')
await self.execute_script(f'GRANT ALL PRIVILEGES TO "{self.database}"')
await self.close() | [
1267,
129
] |
def METHOD_NAME(
self,
locale: str = None,
from_record: int = None,
record_count: int = None,
item_date_range: dict = time_range(),
include_item: str = None,
wallet: str = None,
session: requests.Session = None,
lightweight: bool = None, | [
19,
598,
925
] |
def METHOD_NAME(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
m.clear_link_info()
return fn(*args, **kwargs)
return wrapped | [
503,
41,
537,
548,
100
] |
def METHOD_NAME(
app: Application, options: Options, handlers: _RouteHandlerSpecs
) -> None:
prefixed_handlers: list[Any] = [
(urljoin(options.url_prefix, route_pattern), *tuple(handler_info))
for route_pattern, *handler_info in handlers
]
app.add_handlers(r".*", prefixed_handlers) | [
238,
1519
] |
async def METHOD_NAME(self):
while True:
peak_hours = await self.retrieve_data(energie_gv_at.URL_PEAK_HOURS)
await self.handle_peak_hours(peak_hours)
elec_now = await self.retrieve_data(energie_gv_at.URL_ELECTRICITY_NOW)
await self.handle_electricity_now(elec_now)
renewable_now = await self.retrieve_data(energie_gv_at.URL_RENEWABLE_NOW)
await self.handle_renewable_now(renewable_now)
delta = datetime.timedelta(hours=1)
now = datetime.datetime.now()
next_hour = (now + delta).replace(microsecond=0, second=0, minute=0)
wait_seconds = (next_hour - now).seconds
await asyncio.sleep(wait_seconds + 10) | [
86,
1751
] |
def METHOD_NAME(self, test, capture=False, **args):
if not self.USE_XML:
return
e = self.__e
self.__e = None
if e is None:
return
ET = self.__ET
e.set('name', args.pop('name', self.__getId(test)))
e.set('status', args.pop('status', 'run'))
e.set('result', args.pop('result', 'completed'))
if self.__start_time:
e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
if capture:
if self._stdout_buffer is not None:
stdout = self._stdout_buffer.getvalue().rstrip()
ET.SubElement(e, 'system-out').text = stdout
if self._stderr_buffer is not None:
stderr = self._stderr_buffer.getvalue().rstrip()
ET.SubElement(e, 'system-err').text = stderr
for k, v in args.items():
if not k or not v:
continue
e2 = ET.SubElement(e, k)
if hasattr(v, 'items'):
for k2, v2 in v.items():
if k2:
e2.set(k2, str(v2))
else:
e2.text = str(v2)
else:
e2.text = str(v) | [
238,
1571
] |
def METHOD_NAME(self,test):
self.testLog['result'] = 'passed' | [
238,
1434
] |
def METHOD_NAME(data):
maya.cmds.fireRender(of="RPR_MAYA_TRACE_PATH") | [
1452,
2576,
451
] |
def METHOD_NAME(self):
tb = FilesystemBackend(app=self.app, url=self.url)
yesterday_task_ids = [uuid() for i in range(10)]
today_task_ids = [uuid() for i in range(10)]
for tid in yesterday_task_ids:
tb.mark_as_done(tid, 42)
day_length = 0.2
time.sleep(day_length) # let FS mark some difference in mtimes
for tid in today_task_ids:
tb.mark_as_done(tid, 42)
with patch.object(tb, 'expires', 0):
tb.cleanup()
# test that zero expiration time prevents any cleanup
filenames = set(os.listdir(tb.path))
assert all(
tb.get_key_for_task(tid) in filenames
for tid in yesterday_task_ids + today_task_ids
)
# test that non-zero expiration time enables cleanup by file mtime
with patch.object(tb, 'expires', day_length):
tb.cleanup()
filenames = set(os.listdir(tb.path))
assert not any(
tb.get_key_for_task(tid) in filenames
for tid in yesterday_task_ids
)
assert all(
tb.get_key_for_task(tid) in filenames
for tid in today_task_ids
) | [
9,
950
] |
def METHOD_NAME(self, ofile):
try:
tmpdir = tempfile.mkdtemp()
with zipfile.ZipFile(ofile, 'w') as nnpzip:
self._export_files(tmpdir)
for f in glob.glob('{}/*'.format(tmpdir)):
nnpzip.write(f, os.path.basename(f))
finally:
shutil.rmtree(tmpdir) | [
294,
15541
] |
def METHOD_NAME(self):
ref_variable, ref_optimizer = _create_variable_and_slots([1, 2, 3, 4, 5, 6, 7])
new_variable, new_optimizer = _create_variable_and_slots([0, 0, 0, 0, 0, 0])
mapping = [0, -1, -1, 4, -1, 2]
expected = [1, 0, 0, 5, 0, 3]
vocab_lib.update_variable_and_slots(
ref_variable, new_variable, ref_optimizer, new_optimizer, mapping
)
variables = [new_variable] + [
new_optimizer.get_slot(new_variable, slot) for slot in ("m", "v")
]
for variable in self.evaluate(variables):
self.assertAllEqual(variable, expected) | [
9,
3259,
1210,
86
] |
async def METHOD_NAME(self):
pass | [
958,
531,
481
] |
def METHOD_NAME(module, name):
raise FakeError(module, name) | [
1278,
416,
2
] |
def METHOD_NAME(inps):
print('\n-------------------READ INPUTS -------------------')
print(f'Read metadata from file: {inps.file}')
atr = readfile.read_attribute(inps.file)
inps.pix_box, inps.geo_box = subset.subset_input_dict2box(vars(inps), atr)
inps.outfile = inps.outfile if inps.outfile else atr['PROJECT_NAME']
# date1/2 and dset
ftype = atr['FILE_TYPE']
if ftype in ['timeseries', 'HDFEOS']:
date1, date2 = inps.dset.split('_')
inps.dset = date2
elif ftype == 'ifgramStack':
date1, date2 = inps.dset.split('-')[1].split('_')
else:
# velocity, unw
date1, date2 = ptime.yyyymmdd(atr['DATE12'].replace('_','-').split('-'))
if inps.dset.startswith('step'):
date1 = date2 = inps.dset.split('step')[-1]
print(f'InSAR start / end date: {date1} / {date2}')
## read data
print(f'Read {inps.dset} from file: {inps.file}')
dis, atr = readfile.read(inps.file, datasetName=inps.dset, box=inps.pix_box)
if ftype == 'timeseries':
print(f'Read {date1} from file: {inps.file}')
dis -= readfile.read(inps.file, datasetName=date1, box=inps.pix_box)[0]
# convert radians to meters
if atr['UNIT'] == 'radian':
phase2range = float(atr['WAVELENGTH']) / (-4 * np.pi)
dis *= phase2range
# mask
if inps.mask_file is not None:
mask = readfile.read(inps.mask_file, box=inps.pix_box)[0]
print(f'Set data to NaN for pixels with zero value in file: {inps.mask_file}')
dis[mask==0] = np.nan
# read geometry incidence / azimuth angle
print(f'\nread incidence / azimuth angle from file: {inps.geom_file}')
inc_angle = readfile.read(inps.geom_file, datasetName='incidenceAngle', box=inps.pix_box)[0]
az_angle = readfile.read(inps.geom_file, datasetName='azimuthAngle', box=inps.pix_box)[0]
print(f'Mean LOS incidence angle: {np.nanmean(inc_angle):.2f}°')
print(f'Mean LOS azimuth angle: {np.nanmean(az_angle):.2f}°')
# update attributes
if inps.subset_lat is not None or inps.subset_x is not None:
atr = attr.update_attribute4subset(atr, inps.pix_box)
## create kite container
create_kite_container(
dis,
atr,
date1,
date2,
inc_angle,
az_angle,
out_file=inps.outfile,
)
return | [
73,
-1
] |
def METHOD_NAME(self):
data = {
"QoS": self.QoS,
"noLocal": self.noLocal,
"retainAsPublished": self.retainAsPublished,
"retainHandling": self.retainHandling,
}
return data | [
763
] |
def METHOD_NAME(self, level):
self.assert_highlighted_chr(
"if answer != 246 and answer == 'yes'",
"KK TTTTTT KK NNN KKK TTTTTT KK SSSSS",
level=level, lang="en") | [
9,
217,
-1,
61
] |
def METHOD_NAME(self, signal_name, callback):
"""
Connect a listener to a specific signal.
Args:
signal_name (str): The name of the signal to listen to
callback (callable): The callable that is called when the signal is triggered
"""
signal_listeners = self.listeners.setdefault(signal_name, [])
if callback not in signal_listeners:
signal_listeners.append(callback) | [
238,
4130
] |
def METHOD_NAME(self) -> 'outputs.MigrateAgentModelResponseSystemData':
return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(self):
if not os.path.exists(self.datapath):
self._download_data()
else:
dataset = load_dataset(self.datapath, self.format) | [
123,
365
] |
def METHOD_NAME(self, o):
return ((o, False),) | [
1545
] |
def METHOD_NAME(self):
self.guake.METHOD_NAME() | [
-1
] |
def METHOD_NAME(self):
return self.sslobj.METHOD_NAME() | [
449,
-1,
234
] |
def METHOD_NAME(self):
"""Gets the actual value corresponding to what the user typed."""
try:
value = self.control.text()
except AttributeError:
value = self.control.toPlainText()
value = str(value)
try:
value = self.evaluate(value)
except:
pass
try:
ret = self.factory.mapping.get(value, value)
except TypeError:
# The value is probably not hashable.
ret = value
return ret | [
19,
21,
99
] |
def METHOD_NAME(mapping, fn_args, fn_kwargs):
# remap any positional argument given to the function that looks like a
# File
remap_list_of_files(mapping, fn_args)
# remap any keyword argument in the same way, but we need to treat
# "inputs" and "outputs" specially because they are lists, and
# "stdout" and "stderr", because they are not File's.
for kwarg, maybe_file in fn_kwargs.items():
if kwarg in ["inputs", "outputs"]:
remap_list_of_files(mapping, maybe_file)
if kwarg in ["stdout", "stderr"]:
if maybe_file:
(fname, mode) = get_std_fname_mode(kwarg, maybe_file)
if fname in mapping:
fn_kwargs[kwarg] = (mapping[fname], mode)
else:
# Treat anything else as a possible File to be remapped.
remap_location(mapping, maybe_file) | [
6436,
75,
1537
] |
def METHOD_NAME(self, info, dir, value): | [
2276,
24,
21
] |
async def METHOD_NAME() -> None:
"""Tests creating a valid JWT."""
reg = Registrant(subject="s u b j e c t !", agent="bond", agent_id="007")
key = str(uuid4())
now = datetime.now(tz=timezone.utc)
# Nice long duration to ensure the token doesn't expire during our test.
duration = timedelta(days=10, seconds=100)
audience = "com.opentrons.fake.audience.for.test"
# First, generate a token that doesn't use a JTI
token = create_jwt(
signing_key=key,
duration=duration,
registrant=reg,
audience=audience,
now=now,
)
decoded = jwt.decode(jwt=token, key=key, algorithms=["HS512"], audience=audience)
assert decoded["iat"] == int(now.timestamp())
assert decoded["exp"] == int((now + duration).timestamp())
assert decoded["sub"] == reg.subject
assert decoded["ot_agent"] == reg.agent
assert decoded["ot_aid"] == reg.agent_id
assert decoded["aud"] == audience
assert "jti" not in decoded.keys()
# Generate the same token, but with a jti
jti = "unique value that is unique"
token = create_jwt(
signing_key=key,
duration=duration,
registrant=reg,
audience=audience,
now=now,
id=jti,
)
decoded = jwt.decode(jwt=token, key=key, algorithms=["HS512"], audience=audience)
assert decoded["iat"] == int(now.timestamp())
assert decoded["exp"] == int((now + duration).timestamp())
assert decoded["sub"] == reg.subject
assert decoded["ot_agent"] == reg.agent
assert decoded["ot_aid"] == reg.agent_id
assert decoded["aud"] == audience
assert decoded["jti"] == jti | [
9,
129,
3026,
5009
] |
def METHOD_NAME(self):
counts = [
models.Candidate.query.count(),
models.CandidateDetail.query.count(),
models.CandidateHistory.query.distinct(
models.CandidateHistory.candidate_id
).count(),
models.CandidateSearch.query.count(),
]
assert len(set(counts)) == 1 | [
9,
7899,
2496
] |
def METHOD_NAME(self):
"""
:type: integer
"""
self._completeIfNotSet(self._ahead_by)
return self._ahead_by.value | [
10660,
604
] |
def METHOD_NAME(track_map):
color_map = np.zeros_like(track_map).astype(np.uint8)
color_map = color_map[..., None].repeat(3, axis=-1)
for id_cur in np.unique(track_map):
if id_cur == 0:
continue
color_map[track_map == id_cur] = id2rgb(sha256num(id_cur))
return color_map | [
-1
] |
def METHOD_NAME(repo):
fileRenameMap = dict()
renamePattern = re.compile(r"(.*){(.*) => (.*)}(.*)")
cnt = 0
for commit in repo.iter_commits():
cnt += 1
curr = 0
for commit in repo.iter_commits():
curr += 1
print("Checking commit {}/{} -> {}".format(curr, cnt, commit.hexsha))
for objpath, stats in commit.stats.files.items():
match = renamePattern.match(objpath)
if match:
# the file was renamed, store the rename to follow up later
oldFile = (match.group(1) + match.group(2) + match.group(4)).replace("//", "/")
newFile = (match.group(1) + match.group(3) + match.group(4)).replace("//", "/")
while newFile in fileRenameMap:
newFile = fileRenameMap[newFile]
if oldFile != newFile:
fileRenameMap[oldFile] = newFile
else:
newFile = fileRenameMap[objpath] if objpath in fileRenameMap else objpath
if stats['insertions'] > 0:
if not newFile in fileAuthorStats:
fileAuthorStats[newFile] = dict()
authorName = unicode(commit.author.name)
if authorName in assumeSameAuthor:
authorName = assumeSameAuthor[authorName]
if not authorName in fileAuthorStats[newFile]:
fileAuthorStats[newFile][authorName] = {
'years': dict(),
'first_commit': commit.committed_datetime
}
elif commit.committed_datetime < fileAuthorStats[newFile][authorName]['first_commit']:
fileAuthorStats[newFile][authorName]['first_commit'] = commit.committed_datetime
if not commit.committed_datetime.year in fileAuthorStats[newFile][authorName]['years']:
fileAuthorStats[newFile][authorName]['years'][commit.committed_datetime.year] = 0
fileAuthorStats[newFile][authorName]['years'][commit.committed_datetime.year] += stats['insertions'] | [
56,
171,
577
] |
def METHOD_NAME(env_async, items):
tmpl = env_async.from_string(
"""
{%- for grouper, list in items()|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(items=items) == "a:1:2|b:1|" | [
9,
2834,
1815,
724
] |
def METHOD_NAME(self, inputs, outputs):
"""Run analysis."""
if inputs.species != inputs.gaf.output.species:
self.warning(
"Selected genes Species must be the same as the Species field of the GAF file."
)
self.error(
f"Selected genes are from {inputs.species}, "
f"while GAF file has defined {inputs.gaf.output.species} under Species field."
)
org_features = self.feature.filter(
source=inputs.source, species=inputs.species, feature_id__in=inputs.genes
)
if len(org_features) == 0:
self.error("No genes were fetched from the knowledge base.")
if inputs.source == inputs.gaf.output.source:
target_ids = inputs.genes
else:
mapping_res = self.mapping.filter(
source_db=inputs.source,
source_species=inputs.species,
target_db=inputs.gaf.output.source,
target_species=inputs.gaf.output.species,
source_id__in=inputs.genes,
)
if len(mapping_res) == 0:
self.error("Failed to map features.")
ids = defaultdict(list)
target_ids = []
for m in mapping_res:
if m.source_id in inputs.genes:
target_ids.append(m.target_id)
if m.source_id in ids:
self.warning(f"Mapping {m.source_id} returned multiple times.")
ids[m.source_id].append(m.target_id)
if len(inputs.genes) > len(ids):
self.warning("Not all features could be mapped.")
if len(target_ids) > 0:
with open("mapped_ids.txt", "w") as f:
writer = csv.writer(f, delimiter="\t", lineterminator="\n")
writer.writerow([inputs.source, inputs.gaf.output.source])
for key, value in ids.items():
for v in value:
writer.writerow([key, v])
outputs.ids = "mapped_ids.txt"
with tempfile.NamedTemporaryFile() as input_genes:
input_genes.write(" ".join(target_ids).encode("UTF-8"))
input_genes.flush()
args = [
str(inputs.pval_threshold),
str(inputs.min_genes),
inputs.ontology.output.obo_obj.path,
inputs.gaf.output.gaf_obj.path,
input_genes.name,
]
(Cmd["processor"][args] > "terms.json")()
outputs.source = inputs.gaf.output.source
outputs.species = inputs.gaf.output.species
outputs.terms = "terms.json" | [
22
] |
def METHOD_NAME():
m = type("Models", (object,), {})
m.user = f.UserFactory.create()
m.project = f.ProjectFactory(is_private=False, owner=m.user)
m.role1 = f.RoleFactory(project=m.project)
m.role2 = f.RoleFactory(project=m.project)
m.null_points = f.PointsFactory(project=m.project, value=None)
m.default_points = f.PointsFactory(project=m.project, value=0)
m.points1 = f.PointsFactory(project=m.project, value=1)
m.points2 = f.PointsFactory(project=m.project, value=2)
m.points3 = f.PointsFactory(project=m.project, value=4)
m.points4 = f.PointsFactory(project=m.project, value=8)
m.points5 = f.PointsFactory(project=m.project, value=16)
m.points6 = f.PointsFactory(project=m.project, value=32)
m.open_status = f.UserStoryStatusFactory(is_closed=False)
m.closed_status = f.UserStoryStatusFactory(is_closed=True)
m.project.default_points = m.default_points
m.project.save()
m.user_story1 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story1.role_points.filter(role=m.role1).update(points=m.points1)
m.user_story2 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story2.role_points.filter(role=m.role1).update(points=m.points2)
m.user_story3 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story3.role_points.filter(role=m.role1).update(points=m.points3)
m.user_story4 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story4.role_points.filter(role=m.role1).update(points=m.points4)
# 5 and 6 are in closed milestones
m.user_story5 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story5.role_points.filter(role=m.role1).update(points=m.points5)
m.user_story6 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story6.role_points.filter(role=m.role1).update(points=m.points6)
return m | [
365
] |
def METHOD_NAME(config: RouteConfig):
prov = get_provider(config.model.provider)(config)
async def _embeddings(payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
return await prov.embeddings(payload)
return _embeddings | [
129,
2465,
841
] |
def METHOD_NAME(self):
# test with different alpha
self.linescatterplot.unselected_alpha = 0.4
self.linescatterplot.index.metadata["selections"] = [
(arange(10) % 2 == 0),
]
gc = PlotGraphicsContext(self.size)
gc.render_component(self.linescatterplot)
actual = gc.bmp_array[:, :, :]
self.assertFalse(alltrue(actual == 255)) | [
9,
14731,
2481,
3115,
1139
] |
def METHOD_NAME(session_backend_config_memory: "ServerSideSessionConfig") -> None:
session_backend_config_memory.exclude = ["north", "south"]
@get("/north")
def north_handler(request: Request) -> Dict[str, bool]:
return get_session_installed(request)
@get("/south")
def south_handler(request: Request) -> Dict[str, bool]:
return get_session_installed(request)
@get("/west")
def west_handler(request: Request) -> Dict[str, bool]:
return get_session_installed(request)
with create_test_client(
route_handlers=[north_handler, south_handler, west_handler],
middleware=[session_backend_config_memory.middleware],
) as client:
response = client.get("/north")
assert response.json() == {"has_session": False}
response = client.get("/south")
assert response.json() == {"has_session": False}
response = client.get("/west")
assert response.json() == {"has_session": True} | [
9,
3174,
982,
652
] |
def METHOD_NAME(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""
This method is executed automatically when convert.py is started.
All arguments are taken automatically from the command line arguments or the config file in the configure method.
Returns:
annotations: list of annotation representation objects.
meta: dictionary with additional dataset level metadata (if provided)
"""
dataset_directory = get_path(self.data_dir, is_directory=True)
# create dataset metadata
metadata = self.get_meta()
# read and convert annotation
images_dir = dataset_directory / 'test'
annotations = self._convert_annotations(images_dir, metadata, progress_callback, progress_interval)
return ConverterReturn(annotations, metadata, None) | [
197
] |
async def METHOD_NAME(
self, continuation, client_call_details, request_iterator
):
if self.tracing_skipped(client_call_details):
return await continuation(client_call_details, request_iterator)
with self._start_interceptor_span(
client_call_details.method,
) as span:
new_details = self.propagate_trace_in_details(client_call_details)
continuation_with_args = functools.partial(
continuation, new_details, request_iterator
)
return await self._wrap_unary_response(
continuation_with_args, span
) | [
5846,
919,
521
] |