text stringlengths 15–7.82k | ids sequencelengths 1–7 |
---|---|
def METHOD_NAME(project, from_tag, to_tag, color):
to_tag = to_tag.lower()
sql = """
UPDATE userstories_userstory
SET tags = array_distinct(array_replace(tags, %(from_tag)s, %(to_tag)s))
WHERE project_id = %(project_id)s AND %(from_tag)s = ANY(tags);
UPDATE tasks_task
SET tags = array_distinct(array_replace(tags, %(from_tag)s, %(to_tag)s))
WHERE project_id = %(project_id)s AND %(from_tag)s = ANY(tags);
UPDATE issues_issue
SET tags = array_distinct(array_replace(tags, %(from_tag)s, %(to_tag)s))
WHERE project_id = %(project_id)s AND %(from_tag)s = ANY(tags);
UPDATE epics_epic
SET tags = array_distinct(array_replace(tags, %(from_tag)s, %(to_tag)s))
WHERE project_id = %(project_id)s AND %(from_tag)s = ANY(tags);
"""
cursor = connection.cursor()
cursor.execute(sql, params={"from_tag": from_tag, "to_tag": to_tag, "project_id": project.id})
tags_colors = dict(project.tags_colors)
tags_colors.pop(from_tag)
tags_colors[to_tag] = color
project.tags_colors = list(tags_colors.items())
project.save(update_fields=["tags_colors"]) | [
2004,
82
] |
def METHOD_NAME(resource_group_name: Optional[pulumi.Input[str]] = None,
skip_token: Optional[pulumi.Input[Optional[str]]] = None,
virtual_network_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListNetworkManagerEffectiveConnectivityConfigurationsResult]:
"""
List all effective connectivity configurations applied on a virtual network.
:param str resource_group_name: The name of the resource group.
:param str skip_token: When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data.
:param str virtual_network_name: The name of the virtual network.
"""
... | [
245,
1228,
722,
3071,
1939,
4880,
146
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The primary access key.
"""
return pulumi.get(self, "primary_key") | [
1379,
59
] |
def METHOD_NAME(
label_model: Union[EthereumLabel, PolygonLabel]
) -> NftApprovalEvent:
assert (
label_model.label_data["type"] == "event"
), "Expected label to be of type 'event'"
assert (
label_model.label_data["name"] == "Approval"
), "Expected label to be of type 'Approval'"
if isinstance(label_model, EthereumLabel):
blockchain_type = "ethereum"
else:
blockchain_type = "polygon"
return NftApprovalEvent(
blockchain_type=blockchain_type,
token_address=label_model.address,
owner=label_model.label_data["args"]["owner"],
approved=label_model.label_data["args"]["approved"],
token_id=label_model.label_data["args"]["tokenId"],
log_index=label_model.log_index,
transaction_hash=label_model.transaction_hash,
) | [
214,
9250,
417
] |
def METHOD_NAME(self):
node = CoordData(coord_name="time")
coords = Coordinates([[1, 2, 3, 4, 5]], ["time"])
node_p = Parallel(source=node, number_of_workers=2, chunks={"time": 2})
o = node.eval(coords)
o_p = o.copy()
o_p[:] = np.nan
node_p.eval(coords, output=o_p)
np.testing.assert_array_equal(o, o_p) | [
9,
1498,
457,
600,
226,
1917,
-1
] |
def METHOD_NAME():
return _INSTRUMENTATIONS_BIT_MASK | [
19,
-1
] |
def METHOD_NAME(markdown, page, **kwargs):
path = page.file.src_uri
if path in exclude_list_content:
logger.debug(f"Fichier ignoré car dans la liste d'exclusion : {path}")
return
for match in re.findall(regex_pattern, markdown):
img_url = match[1].strip()
if img_url in exclude_list_url:
logger.debug(f"Image ignorée car dans la liste d'exclusion : {img_url}")
return
if img_url.startswith("http"):
img_length = get_remote_image_length(image_url=img_url)
if img_length and img_url.endswith(".gif") and img_length > max_size_gif:
logger.error(
f"G002 || Poids des images GIF maximum : {convert_octets(max_size_gif)}. || "
f"'{path}' contient une image {img_url} "
f"de {convert_octets(img_length)}."
)
elif img_length and img_length > max_size:
logger.error(
f"G002 || Poids des images (hors GIF) maximum : {convert_octets(max_size)}. || "
f"'{path}' contient une image {img_url} "
f"de {convert_octets(img_length)}."
)
elif img_length is None:
logger.info(
f"L'image {img_url}, présente dans {path} est inaccessible."
)
else:
logger.debug(
"Impossible d'accéder à l'image {img_url} présente dans le fichier {path}"
)
else:
logger.debug("Image locale ignorée") | [
69,
1174,
108
] |
def METHOD_NAME(char):
return {'type': 'SpecialChar', 'content': char} | [
341,
3874
] |
def METHOD_NAME(seas, yr):
if seas == "spring":
return f"{yr}-06-01"
if seas == "summer":
return f"{yr}-09-01"
if seas == "autumn":
return f"{yr}-12-01"
if seas == "winter":
return f"{yr}-03-01"
if seas == "all":
return f"{yr}-01-01"
raise ValueError("Invalid input for season (seas):", seas) | [
1798,
2396
] |
def METHOD_NAME(self):
a = np.repeat(np.arange(5), 5)
b = np.repeat(np.arange(5), 5)
box = BoxWhisker((a, b, np.arange(25)), ['a', 'b'], 'd').opts(box_line_width='b')
plot = bokeh_renderer.get_plot(box)
source = plot.handles['vbar_1_source']
glyph = plot.handles['vbar_1_glyph']
self.assertEqual(source.data['box_line_width'], np.arange(5))
self.assertEqual(property_to_dict(glyph.line_width), {'field': 'box_line_width'}) | [
9,
3521,
11391,
534,
2327,
441
] |
def METHOD_NAME(self):
if self.key_info["is_root"]:
return 0xFFFFFFFF
try:
csk_id = int(self.key_info["csk_id"])
except ValueError:
csk_id = int(self.key_info["csk_id"], 16)
return csk_id | [
19,
147
] |
def METHOD_NAME(xyz: np.ndarray) -> np.ndarray:
r"""
xyz to R-theta-z transform::
y R
| /
| /
| / theta
*------------x
.. math:: x = R \cos(\theta)
.. math:: y = R \sin(\theta)
Returns
-------
xyz : (3,) float ndarray
the point in the local coordinate system
"""
xyz = np.atleast_2d(xyz)
assert len(xyz.shape) == 2, xyz.shape
x = xyz[:, 0]
y = xyz[:, 1]
theta = np.degrees(np.arctan2(y, x))
R = np.sqrt(x * x + y * y)
return np.array([R, theta, xyz[:, 2]], dtype=xyz.dtype).T | [
2846,
24,
-1,
877
] |
def METHOD_NAME(self): | [
9,
-1,
-1
] |
def METHOD_NAME(db):
defc = baker.make("references.DisasterEmergencyFundCode", code="M", group_name="covid_19")
award_id_too_old = 988
award_id_too_new = 989
awards = [
baker.make("search.AwardSearch", award_id=award_id_too_old, action_date="2020-01-01"),
baker.make("search.AwardSearch", award_id=award_id_too_new, action_date="2020-01-01"),
*baker.make(
"search.AwardSearch", award_id=cycle([1, 2, 3, 4, 5, 6, 7, 8, 9]), _quantity=9, action_date="2020-01-01"
),
]
for index, award in enumerate(awards):
if index % 2 == 0:
baker.make("awards.FinancialAccountsByAwards", submission_id=10, award=award, disaster_emergency_fund=defc)
else:
baker.make("awards.FinancialAccountsByAwards", submission_id=12, award=award, disaster_emergency_fund=defc)
AwardSearch.objects.filter(award_id=award.award_id).update(
update_date=OLD_DATE
) # convoluted line to sidestep auto_now()
yield award_id_too_old, award_id_too_new | [
2585,
365,
2228,
61,
80
] |
def METHOD_NAME(name: str, func: tvm.tir.PrimFunc) -> bool:
"""
Determine whether a tir.block with `name` exists in `func`
"""
def _hb(op):
if isinstance(op, tvm.tir.Block):
_found_blocks.append(op.name_hint)
_found_blocks = []
tvm.tir.stmt_functor.post_order_visit(func.body, _hb)
return name in _found_blocks | [
220,
573
] |
def METHOD_NAME(image):
"""Determine nonstandard properties of an input image.
Parameters
----------
image : array
The input image.
Returns
-------
ip : ImageProperties named tuple
The properties of the image:
- signed: whether the image has negative values.
- out_of_range_float: if the image has floating point data
outside of [-1, 1].
- low_data_range: if the image is in the standard image
range (e.g. [0, 1] for a floating point image) but its
data range would be too small to display with standard
image ranges.
- unsupported_dtype: if the image data type is not a
standard skimage type, e.g. ``numpy.uint64``.
"""
immin, immax = np.min(image), np.max(image)
imtype = image.dtype.type
try:
lo, hi = dtypes.dtype_range[imtype]
except KeyError:
lo, hi = immin, immax
signed = immin < 0
out_of_range_float = (np.issubdtype(image.dtype, np.floating) and
(immin < lo or immax > hi))
low_data_range = (immin != immax and
is_low_contrast(image))
unsupported_dtype = image.dtype not in dtypes._supported_types
return ImageProperties(signed, out_of_range_float,
low_data_range, unsupported_dtype) | [
19,
660,
748
] |
def METHOD_NAME(x):
"Return the string in title case cleaning spaces."
import re
y = re.sub(r'\s+', ' ', x.strip())
return y.title() | [
24,
2893,
331,
12883
] |
async def METHOD_NAME(self) -> None:
await self._client.METHOD_NAME() | [
1462
] |
def METHOD_NAME(self):
self.assertEqual(signal.NSIG, 23)
self.assertEqual(signal.SIGABRT, 22)
self.assertEqual(signal.SIGBREAK, 21)
self.assertEqual(signal.SIGFPE, 8)
self.assertEqual(signal.SIGILL, 4)
self.assertEqual(signal.SIGINT, 2)
self.assertEqual(signal.SIGSEGV, 11)
self.assertEqual(signal.SIGTERM, 15)
self.assertEqual(signal.SIG_DFL, 0)
self.assertEqual(signal.SIG_IGN, 1) | [
9,
298,
891
] |
def METHOD_NAME(self, tag: Tag) -> bool:
# INFO - BL - 2019/4/8 - returns True if tag is blacklisted or
# contains a blacklisted class or id
if tag.name.lower() in self.config.tag_blacklist:
return True
if "class" in tag.attrs:
for elem in self.config.class_blacklist:
if elem in tag.attrs["class"]:
return True
if "id" in tag.attrs:
for elem in self.config.id_blacklist:
if elem in tag.attrs["id"]:
return True
return False | [
82,
24,
297
] |
def METHOD_NAME(self):
"""
Get the device's brightness
"""
self.logger.debug("DBus call get_scroll_brightness")
return self.zone["scroll"]["brightness"] | [
19,
3476,
1271
] |
def METHOD_NAME(self):
output = self.get_output(['-c', '2', '--no-header-row', 'examples/no_header_row.csv'])
self.assertNotIn('1. "a"', output)
self.assertIn('2. "b"', output) | [
9,
654,
572,
843
] |
def METHOD_NAME(private_thread, user):
return reply_thread(private_thread, poster=user, posted_on=timezone.now()) | [
547,
600,
21,
1922
] |
def METHOD_NAME(self, prefix: Tuple[str, ...]):
return KrausChannel(
kraus_ops=self._kraus_ops, key=protocols.with_key_path_prefix(self._key, prefix)
) | [
41,
59,
157,
426
] |
def METHOD_NAME(each_version):
m = parse(u'lambda a, b: a i', version=each_version)
assert m.children[0].type == 'error_node' | [
9,
3550,
1778,
7535
] |
def METHOD_NAME():
"""Test zooming with a fixed aspect ratio set"""
vb = pg.ViewBox(lockAspect=1)
# Give the viewbox a size of the proper aspect ratio to keep things easy
vb.setFixedHeight(10)
vb.setFixedWidth(10)
# request a range with a good ratio
testRange = pg.QtCore.QRect(0, 0, 10, 10)
vb.setRange(testRange, padding=0)
expected = [[testRange.left(), testRange.right()],
[testRange.top(), testRange.bottom()]]
viewRange = vb.getState()['viewRange']
viewWidth = viewRange[0][1] - viewRange[0][0]
viewHeight = viewRange[1][1] - viewRange[1][0]
# Assert that the width and height are equal, since we locked the aspect ratio at 1
assert viewWidth == viewHeight
# and for good measure, that it is the same as the test range
assert viewRange == expected
# Now try to set to something with a different aspect ratio
testRange = pg.QtCore.QRect(0, 0, 10, 20)
vb.setRange(testRange, padding=0)
viewRange = vb.getState()['viewRange']
viewWidth = viewRange[0][1] - viewRange[0][0]
viewHeight = viewRange[1][1] - viewRange[1][0]
# Don't really care what we got here, as long as the width and height are the same
assert viewWidth == viewHeight | [
9,
2093,
4517
] |
def METHOD_NAME(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
device_obj_by_id = self.device_collection[DEVICE1_ID]
device_obj_by_serial = self.device_collection[DEVICE1_SERIAL]
self.assertIs(device_obj_by_id, device_obj_by_serial) | [
9,
19
] |
def METHOD_NAME(self, what): | [
1180
] |
async def METHOD_NAME(self, key, default=None):
if default is None:
default = {}
out = await self._db.hgetall(key)
data = {key.decode('utf-8'): value for key, value in out.items()}
if data is None:
return default
return data | [
19,
2605,
553
] |
def METHOD_NAME():
resolver_return = Mock(name="Malbec")
add_typename_to_possible_return(resolver_return, "Product")
assert getattr(resolver_return, "_Mock__typename") == "Product" | [
9,
238,
5541,
24,
279
] |
def METHOD_NAME(self, currentInp): | [
362,
24,
2026
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
self._execute_operations()
return self._output() | [
1519
] |
def METHOD_NAME(self):
"""
Return a list of the current category and its ancestors
"""
return self.category.get_descendants_and_self() | [
19,
2065
] |
def METHOD_NAME(self, node):
self.visit_admonition(node) | [
716,
2581,
1716
] |
def METHOD_NAME(self):
return "MgmtErrorFormat" | [
168,
275
] |
def METHOD_NAME():
# repo
assert open_issues_count(10, 25430).iloc[0]['open_count'] > 0
# repo_group
assert open_issues_count(10).iloc[0]['open_count'] > 0 | [
9,
1452,
4818,
29
] |
def METHOD_NAME(name, **kwargs):
_lexyacc_toolchain(name = name, **kwargs)
native.toolchain(
name = name + "_toolchain",
toolchain = ":" + name,
toolchain_type = "@io_kythe//tools/build_rules/lexyacc:toolchain_type",
) | [
15107,
11709
] |
def METHOD_NAME(self, node, extras):
"""
Default recognition processing.
This is the method which should be overridden in most cases
to provide derived classes with custom recognition
processing functionality.
This default processing method does nothing.
- *node* -- The root node of the recognition parse tree.
- *extras* -- A dictionary of all elements from the
extras list contained within this recognition.
Maps element name -> element value.
""" | [
356,
5732
] |
def METHOD_NAME(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) | [
56,
8193
] |
def METHOD_NAME(self, mathfunc):
@dace.program
def func(arg1, arg2):
return mathfunc(arg1, arg2)
res = func(0.7, 0.5)
assert np.allclose(mathfunc(0.7, 0.5), res) | [
9,
7640,
1997
] |
def METHOD_NAME(cls):
cls.execution_path = os.path.dirname(os.path.abspath(__file__))
os.remove(os.path.join(cls.execution_path, "chgcar.cube"))
os.remove(os.path.join(cls.execution_path, "random_CHGCAR")) | [
531,
481,
2
] |
def METHOD_NAME(self):
self.install()
# change_data_dir runs 'mysql -u root' which assumes there is no mysql password,
# and changing that is too ugly to be worth it:
#self.set_root_password()
self.change_data_dir() | [
57
] |
def METHOD_NAME(dt: datetime.datetime) -> datetime.datetime:
"""Convert naive (timezone unaware) datetime.datetime
to a timezone-aware datetime.datetime in the local timezone
Args:
dt: datetime.datetime without timezone
Returns:
datetime.datetime with local timezone
Raises:
TypeError if dt is not a datetime.datetime object
ValueError if dt is not a naive/timezone unaware object
"""
if type(dt) != datetime.datetime:
raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}")
if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
# has timezone info
raise ValueError(
"dt must be naive/timezone unaware: "
f"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}"
)
return dt.replace(tzinfo=get_local_tz(dt)) | [
884,
4806,
24,
125
] |
def METHOD_NAME(Lmax,m,s,ds=None,dm=None):
n = Lmax + 1 - max(abs(m),abs(s))
a, b = abs(m+s), abs(m-s)
if ds == dm == None:
return n,a,b
if ds == None: ds = 0
if dm == None: dm = 0
m += dm
s += ds
dn = Lmax + 1 - max(abs(m),abs(s)) - n
da,db = abs(m+s) - a, abs(m-s) - b
return n,a,b,dn,da,db | [
-1,
10069
] |
def METHOD_NAME(self):
with pytest.raises(IndexError):
_ = self._d['a']['x', 5.0 * sc.units.dimensionless] | [
9,
1737,
47,
661,
5147
] |
def METHOD_NAME():
return list | [
13007
] |
def METHOD_NAME(self, value):
self.position_attr.set_value(value) | [
0,
99
] |
def METHOD_NAME(self):
"""Check subclass on multiple inclusions before transform"""
for class_inclusion_list in self.coco_class_inclusion_lists:
dataset = COCODetectionDataset(
class_inclusion_list=class_inclusion_list, max_num_samples=self.max_samples_per_plot * self.n_plot, **self.dataset_coco_base_config
)
dataset.plot(max_samples_per_plot=self.max_samples_per_plot, n_plots=self.n_plot, plot_transformed_data=False) | [
9,
107,
777,
126,
9260,
1553,
9652
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(self):
openmediavault.setenv("OMV_CONFIG_FILE", "xyz.conf")
self.assertIsNone(self.command_helper.create_backup())
self.assertFalse(self.command_helper._backup_path) | [
9,
129,
1001,
180
] |
def METHOD_NAME(self, evt, station, det,
power_threshold=6.5,
coinc_window=110 * units.ns,
number_concidences=3,
triggered_channels=None,
power_mean=None,
power_std=None,
trigger_name='default_integrated_power'):
"""
simulate ARA trigger logic
Parameters
----------
evt: Event
The event on which to run the module
station: Station
The station on which to run the module
det: Detector or GenericDetector
The detector description
power_threshold: float
The factor of sigma that the signal needs to exceed the noise
coinc_window: float
time window in which number_concidences channels need to trigger
number_concidences: int
number of channels that are required in coincidence to trigger a station
triggered_channels: array of ints
channels ids that are triggered on
power_mean : float
Parameter extracted in ARA from noise.
If not given, it is calculated from generic noise
power_std : float
Parameter extracted in ARA from noise.
If not given, it is calculated from generic noise
trigger_name: string
a unique name of this particular trigger
"""
# if the run method specifies power mean and rms we use these values,
# if the parameters are None, the power mean and rms gets calculated for
# some standard assumptions on the noise RMS and it needs to be done only once
if triggered_channels is None:
triggered_channels = [0, 1, 2, 3, 4, 5, 6, 7]
if(power_mean is not None and power_std is not None):
self._power_mean = power_mean
self._power_std = power_std
else:
error_msg = 'The power_mean and power_std parameters are not defined. '
error_msg += 'Please define them. You can use the calculate_noise_parameters '
error_msg += 'function in utilities.diodeSimulator to do so.'
raise ValueError(error_msg)
self.power_threshold = power_threshold
# No coincidence requirement yet
trigger = {}
trigger_times = []
times_min = []
times_max = []
sampling_rates = []
number_triggered_channels = 0
for channel in station.iter_channels():
channel_id = channel.get_id()
if channel_id not in triggered_channels:
continue
trigger[channel_id] = self.has_triggered(channel)
if trigger[channel_id]:
number_triggered_channels += 1
times = channel.get_times()
trace_after_diode = self._diode.tunnel_diode(channel)
arg_trigger = np.argmin(trace_after_diode)
trigger_times.append(times[arg_trigger])
times_min.append(np.min(times))
times_max.append(np.max(times))
sampling_rates.append(channel.get_sampling_rate())
has_triggered = False
trigger_time = None
if (number_triggered_channels >= number_concidences):
trace_times = np.arange(np.min(times_min), np.max(times_max),
1 / np.min(sampling_rates))
trigger_times = np.array(trigger_times)
slice_left = int(coinc_window / 2 / (trace_times[1] - trace_times[0]))
slice_right = len(trace_times) - slice_left
for trace_time in trace_times[slice_left:slice_right]:
if (np.sum(np.abs(trace_time - trigger_times) <= coinc_window / 2) >= number_concidences):
has_triggered = True
trigger_time = np.min(trigger_times)
break
trigger = IntegratedPowerTrigger(trigger_name, power_threshold,
coinc_window, channels=triggered_channels,
number_of_coincidences=number_concidences,
power_mean=self._power_mean, power_std=self._power_std)
if not has_triggered:
trigger.set_triggered(False)
logger.info("Station has NOT passed trigger")
trigger_time = 0
trigger.set_trigger_time(trigger_time)
else:
trigger.set_triggered(True)
trigger.set_trigger_time(trigger_time)
logger.info("Station has passed trigger, trigger time is {:.1f} ns (sample {})".format(
trigger.get_trigger_time() / units.ns, trigger_time))
station.set_trigger(trigger) | [
22
] |
def METHOD_NAME(self) -> Optional[BatchSpec]:
"""Getter for active batch's batch_spec"""
if not self.active_batch:
return None
return self.active_batch.batch_spec | [
923,
2277,
1457
] |
def METHOD_NAME():
"""queue_reader"""
queue = multiprocessing.Queue(queue_size)
for reader in readers:
p = multiprocessing.Process(
target=_read_into_queue, args=(reader, queue))
p.start()
reader_num = len(readers)
finish_num = 0
while finish_num < reader_num:
sample = deserialize_data(queue.get())
if sample is None:
finish_num += 1
else:
yield sample | [
651,
781
] |
def METHOD_NAME(self, chunk_size, decode_content=False):
assert chunk_size == block_size
left = total_response_size
while left > 0:
if left <= block_size:
raise requests.exceptions.ConnectionError()
data = b"X" * min(chunk_size, left)
left -= len(data)
yield data | [
919
] |
def METHOD_NAME(self, field, allow_edit=False):
"""Render Analyses Services Listing Table
"""
instance = getattr(self, "instance", field.aq_parent)
table = api.get_view("table_analyses_services",
context=instance,
request=self.REQUEST)
# Call listing hooks
table.update()
table.before_render()
if allow_edit is False:
return table.contents_table_view()
return table.ajax_contents_table() | [
3186
] |
def METHOD_NAME(epoch):
"""
Convert a string in the form of "YYYYMMDD" into a datetime object
Parameters
----------
epoch : string
String to convert to a datetime.date object.
The string must have the format of YYYYMMDD.
Returns
-------
datetime_epoch : datetime
"""
return datetime.date(int(epoch[0:4]), int(epoch[4:6]), int(epoch[6:8])) | [
1165,
144,
24,
153
] |
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data") | [
112,
365
] |
def METHOD_NAME(migrate_project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
solution_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSolutionResult]:
"""
Solution REST Resource.
:param str migrate_project_name: Name of the Azure Migrate project.
:param str resource_group_name: Name of the Azure Resource Group that migrate project is part of.
:param str solution_name: Unique name of a migration solution within a migrate project.
"""
... | [
19,
725,
146
] |
def METHOD_NAME(self):
with log_errors():
workers = list(self.scheduler.workers.values())
rmm_total = []
rmm_used = []
external_used = []
gpu_index = []
y = []
worker = []
external_used_x = []
memory_max = 0
gpu_total = []
rmm_memory_text = []
for idx, ws in enumerate(workers):
try:
rmm_metrics = ws.metrics["rmm"]
gpu_metrics = ws.metrics["gpu"]
gpu_info = ws.extra["gpu"]
except KeyError:
continue
rmm_total_worker = rmm_metrics["rmm-total"] # RMM memory only
rmm_used_worker = rmm_metrics["rmm-used"]
gpu_total_worker = gpu_info["memory-total"] # All GPU memory
gpu_used_worker = gpu_metrics["memory-used"]
external_used_worker = gpu_used_worker - rmm_total_worker
rmm_total.append(rmm_total_worker)
rmm_used.append(rmm_used_worker)
gpu_total.append(gpu_total_worker)
external_used.append(external_used_worker)
external_used_x.append(rmm_total_worker + external_used_worker / 2)
worker.append(ws.address)
gpu_index.append(idx)
y.append(idx)
memory_max = max(memory_max, gpu_total_worker)
rmm_memory_text.append(
"RMM memory used: {}/{}\nTotal GPU memory used: {}/{}".format(
format_bytes(rmm_used_worker),
format_bytes(rmm_total_worker),
format_bytes(gpu_used_worker),
format_bytes(gpu_total_worker),
)
)
self.memory_figure.title.text = dedent(
"""\
RMM Utilization: {} / {}
GPU Memory: {} / {}
""".format(
format_bytes(sum(rmm_used)),
format_bytes(sum(rmm_total)),
format_bytes(sum([*rmm_total, *external_used])),
format_bytes(sum(gpu_total)),
)
)
result = {
"rmm-total": rmm_total,
"rmm-used": rmm_used,
"external-used": external_used,
"rmm-total-half": [m // 2 for m in rmm_total],
"rmm-used-half": [m // 2 for m in rmm_used],
"external-used-x": external_used_x,
"worker": worker,
"gpu-index": gpu_index,
"y": y,
"escaped_worker": [escape.url_escape(w) for w in worker],
"rmm_memory_text": rmm_memory_text,
}
self.memory_figure.x_range.end = memory_max
METHOD_NAME(self.source, result) | [
86
] |
def METHOD_NAME(testpath):
"""Import Points via Pandas dataframe, deprecated behaviour."""
dfr = pd.read_csv(testpath / CSV1, skiprows=3)
mypoints = xtgeo.Points()
attr = {"IX": "I", "JY": "J", "KZ": "K"}
with pytest.warns(DeprecationWarning):
mypoints.from_dataframe(dfr, east="X", north="Y", tvdmsl="Z", attributes=attr)
assert mypoints.dataframe.X_UTME.mean() == dfr.X.mean() | [
9,
512,
280,
1616,
2497
] |
def METHOD_NAME(submission_id):
"""Non-blocking attempt to notify a running ES of the submission"""
rs = RemoteServiceClient(ServiceCoord("EvaluationService", 0))
rs.connect()
rs.new_submission(submission_id=submission_id)
rs.disconnect() | [
2946,
353,
857
] |
def METHOD_NAME(printout, stack):
"""
Pretty print the stack trace and environment information
for debugging those hard to reproduce user problems. :)
"""
printout.write("======== Salt Debug Stack Trace =========\n")
traceback.print_stack(stack, file=printout)
printout.write("=========================================\n") | [
-1
] |
def METHOD_NAME(self, level):
"""Get tile info for a particular level.
Override the tiles.py method.
level the level to get tile info for
Returns (num_tiles_x, num_tiles_y, ppd_x, ppd_y) or None if 'levels'
doesn't exist.
"""
# is required level available?
if level not in self.levels:
return None
# see if we can open the tile info file.
info_file = os.path.join(self.tiles_dir, '%d' % level, TileInfoFilename)
try:
with open(info_file, 'rb') as fd:
info = pickle.load(fd)
except IOError:
info = None
return info | [
19,
100
] |
def METHOD_NAME(
instance_type: str) -> Optional[Dict[str, int]]:
return common.get_accelerators_from_instance_type_impl(_df, instance_type) | [
19,
6094,
280,
89,
44
] |
def METHOD_NAME(values, prefix):
"""
This function takes a list as 'values' parameter and filters out all list values which contain prefix.
The prefix is defined in parameter 'prefix'
"""
if values is None:
raise errors.AnsibleFilterError('Values is not provided')
if prefix is None:
raise errors.AnsibleFilterError('Prefix is not provided')
if not isinstance(values, list):
raise errors.AnsibleFilterError('Wrong type for values')
if not isinstance(prefix, str):
raise errors.AnsibleFilterError('Wrong type for the prefix')
return filter(lambda x: x.startswith(prefix), values) | [
527,
604,
426
] |
def METHOD_NAME(self):
self.execute_command.return_value = 0, "451:321", "any-stderr"
self.assertEqual("451", VCSRevision().get_svn_revision_count()) | [
9,
427,
1413,
71,
529,
1474,
1646
] |
def METHOD_NAME(self):
result = self._get('pools/default/nodeServices')
return result | [
19,
3186
] |
def METHOD_NAME(self, processed_data, _):
'''EOS MLAG data massaging'''
for entry in processed_data:
# There's no MLAG without systemID
if not entry['systemId']:
return []
entry['mlagDualPortsList'] = []
entry['mlagSinglePortsList'] = []
entry['mlagErrorPortsList'] = []
for port_info in zip(entry.get('_localInterfaceList', []),
entry.get('_linkStateList', [])):
if port_info[1] == 'active-full':
entry['mlagDualPortsList'].append(port_info[0])
elif port_info[1] == 'active-partial':
entry['mlagSinglePortsList'].append(port_info[0])
elif port_info[1] in ['disabled', 'inactive']:
entry['mlagErrorPortsList'].append(port_info[0])
return processed_data | [
1356,
7743,
365
] |
def METHOD_NAME(packer, button_val, car_fingerprint):
values = {
'CRUISE_BUTTONS': button_val,
'CRUISE_SETTING': 0,
}
# send buttons to camera on radarless cars
bus = 2 if car_fingerprint in HONDA_BOSCH_RADARLESS else get_pt_bus(car_fingerprint)
return packer.make_can_msg("SCM_BUTTONS", bus, values) | [
5340,
1409,
462
] |
def METHOD_NAME(result, set_ref, scheduler="credit"):
"""
Get the value of set_ref.
:param result: CmdResult struct
:param set_ref: the parameter has been set
:param scheduler: the scheduler of xen(default is credit)
"""
output = result.stdout.strip()
if not re.search("Scheduler", output):
test.fail("Output is not standard:\n%s" % output)
result_lines = output.splitlines()
set_value = None
for line in result_lines:
key_value = line.split(":")
key = key_value[0].strip()
value = key_value[1].strip()
if key == "Scheduler":
if value != scheduler:
test.cancel("This test do not support"
" %s scheduler." % scheduler)
elif key == set_ref:
set_value = value
break
return set_value | [
-1,
146,
6399
] |
def METHOD_NAME(schemas):
hash = hashlib.sha1()
for key in sorted(schemas.keys()):
schema = json.dumps(schemas[key]).encode("utf-8")
hash.update(schema)
return hash.hexdigest() | [
135,
1799
] |
def METHOD_NAME(self) -> None:
"""
L{LogPublisher.removeObserver} removes an observer.
"""
o1 = cast(ILogObserver, lambda e: None)
o2 = cast(ILogObserver, lambda e: None)
o3 = cast(ILogObserver, lambda e: None)
publisher = LogPublisher(o1, o2, o3)
publisher.removeObserver(o2)
self.assertEqual({o1, o3}, set(publisher._observers)) | [
9,
188,
6090
] |
def METHOD_NAME(self) -> int:
self._file_url = self._get_file_url()
if isinstance(self._fs, fsspec.AbstractFileSystem):
with DatasetGeoTiffFsDataAccessor.create_env_session(self._fs):
overviews = self._get_overview_count()
else:
assert_true(self._fs is None, message="invalid type for fs")
return len(overviews) + 1 | [
19,
181,
1043,
17082
] |
def METHOD_NAME(arg):
return _make(arg, _slsdet.MacAddr) | [
93,
989
] |
def METHOD_NAME(self, geojson, all_touched=False):
json_to_file(geojson, self.uri)
config = RasterizedSourceConfig(
vector_source=GeoJSONVectorSourceConfig(uris=self.uri),
rasterizer_config=RasterizerConfig(
background_class_id=self.background_class_id,
all_touched=all_touched))
config.update()
source = config.build(self.class_config, self.crs_transformer,
self.extent)
return source | [
56,
1458
] |
def METHOD_NAME(dataset: tf.data.Dataset) -> tf.data.Dataset:
def _parse_example_bytes(serialized_proto_tensor):
field_dict = {'snippets': tf.io.FixedLenFeature(shape=(), dtype=tf.string)}
parsed_fields = tf.io.parse_example(serialized_proto_tensor, field_dict)
return collections.OrderedDict(snippets=parsed_fields['snippets'])
return dataset.map(_parse_example_bytes, num_parallel_calls=tf.data.AUTOTUNE) | [
238,
4078
] |
def METHOD_NAME(self) -> bytes: ... | [
15675
] |
def METHOD_NAME(code: str, expected: bool) -> None:
assert is_retryable_exception(ClientError({"Error": {"Code": code, "Message": "eff"}}, "foo")) is expected | [
250,
544
] |
def METHOD_NAME(module_name):
try:
module = importlib.import_module(module_name)
fp = pathlib.Path(module.__path__[0], "py.typed")
if not fp.exists():
fp.touch()
except ModuleNotFoundError:
print(f"Can't enforce PEP 561 for {module_name} because it is not installed.")
return | [
5265,
5266
] |
def METHOD_NAME(self):
tcl = self.interp
self.assertRaises(TclError,tcl.unsetvar,'a') | [
9,
4132,
486,
442
] |
def METHOD_NAME(self, tmp_path, caplog):
# default section isn't required as long as gentoo repo exists
(path := tmp_path / "file").write_text(
textwrap.dedent(
"""\
[foo]
location = /var/gentoo/repos/foo
[gentoo]
location = /var/gentoo/repos/gentoo"""
)
)
defaults, repos = load_repos_conf(path)
assert defaults["main-repo"] == "gentoo"
assert list(repos.keys()) == ["foo", "gentoo"]
assert not caplog.text | [
9,
665,
235,
1287
] |
def METHOD_NAME():
""" Try to guess the platform string according to the operating system, current environment and python interpreter.
Ganga provides precompiled external packages on a limited set of _default platforms_ as explained in:
https://twiki.cern.ch/twiki/bin/view/ArdaGrid/GangaSupportedPlatforms
This function is set only to detect the well-known platform strings as defined by the LCG SPI project and is not meant to be a
generic platform detection utility. If the platform cannot be guessed a default one is returned. This may or may not work on
other systems. In this case you should resolve the external binary dependencies yourself.
Comments about current implementations:
SLC5 platform is detected using platform module.
If it's not SLC5 then:
We assume that 64 bit python implies the slc4, amd64 system.
We assume that 32 bit python implies the slc4, ia32 system.
We ignore IA64 architecture (Opteron) as not frequently used.
"""
# assume INTEL processors (i386, i686,x64), ignore IA64 architecture
platf4 = {32: 'slc4_ia32_gcc34', 64: 'slc4_amd64_gcc34'}
platf5 = {32: 'i686-slc5-gcc43-opt', 64: 'x86_64-slc5-gcc43-opt'}
# for older python versions use some tricks
import sys
bits = sys.maxsize >> 32
if bits:
arch = 64
else:
arch = 32
platfstring = platf4
import platform
import re
c = re.compile(r'\S+-redhat-(?P<ver>\S+)-\S+')
r = c.match(platform.platform())
if r and r.group('ver').split('.')[0] == '5':
platfstring = platf5
return platfstring[arch] | [
2991,
2773
] |
def METHOD_NAME(mock_packages, capfd):
with capfd.disabled():
out = split(maintainers("--all", "--by-user"))
assert out == [
"adamjstewart:",
"py-extension1,",
"py-extension2",
"user0:",
"maintainers-3",
"user1:",
"maintainers-1,",
"maintainers-3,",
"py-extension1",
"user2:",
"maintainers-1,",
"maintainers-2,",
"maintainers-3,",
"py-extension1",
"user3:",
"maintainers-2,",
"maintainers-3",
]
with capfd.disabled():
out = split(maintainers("--all", "--by-user", "user1", "user2"))
assert out == [
"user1:",
"maintainers-1,",
"maintainers-3,",
"py-extension1",
"user2:",
"maintainers-1,",
"maintainers-2,",
"maintainers-3,",
"py-extension1",
] | [
9,
75,
604,
21
] |
def METHOD_NAME(
inp_shape, kernel_size, padding=(0, 0), dilation=(1, 1), stride=(1, 1) | [
226,
1306,
146
] |
def METHOD_NAME(self, fallback: NullTranslations) -> None: ... | [
238,
1008
] |
def METHOD_NAME(self):
# DONE: load the dictionary if there is any
if os.path.exists(self.three_hop_dict_label_path):
loaded_dict = json.loads(open(self.three_hop_dict_label_path).read())
else:
loaded_dict = {}
# DONE: update the loaded dict with current data
loaded_dict.update(self.three_hop_dict_label)
# DONE: write the updated dict to the file
with open(self.three_hop_dict_label_path, 'w') as f:
f.write(json.dumps(loaded_dict))
f.close()
self.three_hop_dict_label = {} | [
86,
553
] |
def METHOD_NAME(self, category, endpoint):
# wraps mandatory and optional params in one js Object
query_string_params = False
mandatory_params = self.get_mandatory_query_params(endpoint)
if self.has_params_object(endpoint):
if len(mandatory_params) > 0:
query_string_params = f'{{{", ".join(mandatory_params) if len(mandatory_params) > 1 else mandatory_params[0]}, ...params}}'
else:
query_string_params = f'params'
else:
if len(mandatory_params) > 0:
query_string_params = f'{"{{" + ", ".join(mandatory_params) + "}}" if len(mandatory_params) > 1 else mandatory_params[0]}'
endpoint_method_args = ", ".join(s for s in [
f'"{self.get_endpoint_category()}"',
self.get_endpoint_id1() if self.get_endpoint_id1() else "null",
f'"{self.get_endpoint_subcategory()}"' if self.subcategory else "null",
self.get_endpoint_id2() if self.get_endpoint_id2() else "null",
f'"{self.get_endpoint_action()}"' if self.get_endpoint_action() else "null",
"data" if self.has_body() else False,
query_string_params
] if s)
return (f' {self.get_method_doc(endpoint)}'
f' {self.camelCase(self.get_method_name(endpoint, category))}({self.get_method_args(endpoint)}) {{\n'
f' return this._{self.get_endpoint_method(endpoint).lower()}({endpoint_method_args});\n'
f' }}\n') | [
19,
103,
1208
] |
def METHOD_NAME(non_recursive):
if isinstance(non_recursive, ir.Recur) and non_recursive.name == loop_name:
return True
return any([METHOD_NAME(c) for c in non_recursive.children]) | [
1992,
2203,
128
] |
def METHOD_NAME(self):
max_lte_signal = constants.MAX_CELL_SIGNAL[Radio.lte]
self.compare("signalStrength", max_lte_signal, None, "gsm")
self.compare("signalStrength", max_lte_signal, max_lte_signal, "lte") | [
9,
900
] |
def METHOD_NAME():
args = parse_args()
root_path = args.root_path
with mmengine.Timer(print_tmpl='It takes {}s to convert BID annotation'):
files = collect_files(
osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'))
image_infos = collect_annotations(files, nproc=args.nproc)
if args.val_ratio:
image_infos = split_train_val_list(image_infos, args.val_ratio)
splits = ['training', 'val']
else:
image_infos = [image_infos]
splits = ['training']
for i, split in enumerate(splits):
dump_ocr_data(image_infos[i],
osp.join(root_path, 'instances_' + split + '.json'),
'textdet') | [
57
] |
def METHOD_NAME(self):
prog = MathematicalProgram()
b = prog.NewBinaryVariables(4)
prog.AddLinearConstraint(b[0] <= 1 - 0.5 * b[1])
prog.AddLinearConstraint(b[1] <= 1 - 0.5 * b[0])
prog.AddLinearCost(-b[0] - b[1])
prog.SetSolverOption(GurobiSolver.id(), "Presolve", 0)
prog.SetSolverOption(GurobiSolver.id(), "Heuristics", 0.)
prog.SetSolverOption(GurobiSolver.id(), "Cuts", 0)
prog.SetSolverOption(GurobiSolver.id(), "NodeMethod", 2)
b_init = np.array([0, 0., 0., 0.])
prog.SetInitialGuess(b, b_init)
solver = GurobiSolver()
explored_node_count = 0
def node_callback(prog, solver_status_info, x, x_vals):
nonlocal explored_node_count
explored_node_count = solver_status_info.explored_node_count
solver.AddMipNodeCallback(
callback=lambda prog, solver_status_info, x, x_vals: node_callback(
prog, solver_status_info, x, x_vals))
best_objectives = []
def sol_callback(prog, callback_info, objectives):
print(f"explored nodes {callback_info.explored_node_count}")
objectives.append(callback_info.best_objective)
solver.AddMipSolCallback(
callback=lambda prog, callback_info: sol_callback(
prog, callback_info, best_objectives))
result = solver.Solve(prog)
self.assertTrue(result.is_success())
self.assertGreater(explored_node_count, 0)
self.assertGreater(len(best_objectives), 0) | [
9,
1076
] |
def METHOD_NAME(self): | [
9,
8649
] |
def METHOD_NAME(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>', )
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
error_list = []
while True:
bigram = min(
pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except Exception as err:
error_list.append(err)
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[
i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word | [
3138
] |
def METHOD_NAME(pr_number: str, labels_to_add: List) -> requests.Response:
url = AIRBYTE_ISSUES_ENDPOINT + f"/{pr_number}/labels"
response = requests.post(url, headers=GITHUB_API_COMMON_HEADERS, json={"labels": labels_to_add})
response.raise_for_status()
logger.info(f"Labels {labels_to_add} added to PR {pr_number}")
return response | [
238,
415,
24,
1933
] |
def METHOD_NAME(r: redis.Redis):
size = 30
all_keys_dict = key_val_dict(size=size)
assert all(r.set(k, v) for k, v in all_keys_dict.items())
assert len(r.keys()) == size
cursor, keys = r.scan()
key_to_remove = next(x for x in all_keys_dict if x not in keys)
assert r.delete(key_to_remove) == 1
assert r.get(key_to_remove) is None
while cursor != 0:
cursor, data = r.scan(cursor=cursor)
keys.extend(data)
assert len(set(keys)) == len(keys)
assert len(keys) == size - 1
assert key_to_remove not in keys | [
9,
793,
34,
794,
59,
795,
796
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self, url):
self.url = url
return self | [
19
] |
def METHOD_NAME(self):
self.assertIn(self.s1, self.playlist.list()) | [
9,
245
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(self, arguments):
"""
Setup the environment variables for the process.
"""
utils.lib.zproc_set_env(self._p, utils.ffi.new("zhash_t **", arguments._p)) | [
0,
485
] |