code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars) |
---|---|---|
def get_stories(self, userids: Optional[List[int]] = None) -> Iterator[Story]:
"""Get available stories from followees or all stories of users whose ID are given.
Does not mark stories as seen.
To use this, one needs to be logged in
:param userids: List of user IDs to be processed in terms of downloading their stories, or None.
"""
if not userids:
data = self.context.graphql_query("d15efd8c0c5b23f0ef71f18bf363c704",
{"only_stories": True})["data"]["user"]
if data is None:
raise BadResponseException('Bad stories reel JSON.')
userids = list(edge["node"]["id"] for edge in data["feed_reels_tray"]["edge_reels_tray_to_reel"]["edges"])
def _userid_chunks():
userids_per_query = 100
for i in range(0, len(userids), userids_per_query):
yield userids[i:i + userids_per_query]
for userid_chunk in _userid_chunks():
stories = self.context.graphql_query("bf41e22b1c4ba4c9f31b844ebb7d9056",
{"reel_ids": userid_chunk, "precomposed_overlay": False})["data"]
yield from (Story(self.context, media) for media in stories['reels_media']) | Get available stories from followees or all stories of users whose IDs are given.
Does not mark stories as seen.
To use this, one needs to be logged in
:param userids: List of user IDs to be processed in terms of downloading their stories, or None. | Below is the instruction that describes the task:
### Input:
Get available stories from followees or all stories of users whose IDs are given.
Does not mark stories as seen.
To use this, one needs to be logged in
:param userids: List of user IDs to be processed in terms of downloading their stories, or None.
### Response:
def get_stories(self, userids: Optional[List[int]] = None) -> Iterator[Story]:
"""Get available stories from followees or all stories of users whose ID are given.
Does not mark stories as seen.
To use this, one needs to be logged in
:param userids: List of user IDs to be processed in terms of downloading their stories, or None.
"""
if not userids:
data = self.context.graphql_query("d15efd8c0c5b23f0ef71f18bf363c704",
{"only_stories": True})["data"]["user"]
if data is None:
raise BadResponseException('Bad stories reel JSON.')
userids = list(edge["node"]["id"] for edge in data["feed_reels_tray"]["edge_reels_tray_to_reel"]["edges"])
def _userid_chunks():
userids_per_query = 100
for i in range(0, len(userids), userids_per_query):
yield userids[i:i + userids_per_query]
for userid_chunk in _userid_chunks():
stories = self.context.graphql_query("bf41e22b1c4ba4c9f31b844ebb7d9056",
{"reel_ids": userid_chunk, "precomposed_overlay": False})["data"]
yield from (Story(self.context, media) for media in stories['reels_media']) |
def raise_exception_if_baseline_file_is_unstaged(filename):
"""We want to make sure that if there are changes to the baseline
file, they will be included in the commit. This way, we can keep
our baselines up-to-date.
:raises: ValueError
"""
try:
files_changed_but_not_staged = subprocess.check_output(
[
'git',
'diff',
'--name-only',
],
).split()
except subprocess.CalledProcessError:
# Since we don't pipe stderr, we get free logging through git.
raise ValueError
if filename.encode() in files_changed_but_not_staged:
log.error((
'Your baseline file ({}) is unstaged.\n'
'`git add {}` to fix this.'
).format(
filename,
filename,
))
raise ValueError | We want to make sure that if there are changes to the baseline
file, they will be included in the commit. This way, we can keep
our baselines up-to-date.
:raises: ValueError | Below is the instruction that describes the task:
### Input:
We want to make sure that if there are changes to the baseline
file, they will be included in the commit. This way, we can keep
our baselines up-to-date.
:raises: ValueError
### Response:
def raise_exception_if_baseline_file_is_unstaged(filename):
"""We want to make sure that if there are changes to the baseline
file, they will be included in the commit. This way, we can keep
our baselines up-to-date.
:raises: ValueError
"""
try:
files_changed_but_not_staged = subprocess.check_output(
[
'git',
'diff',
'--name-only',
],
).split()
except subprocess.CalledProcessError:
# Since we don't pipe stderr, we get free logging through git.
raise ValueError
if filename.encode() in files_changed_but_not_staged:
log.error((
'Your baseline file ({}) is unstaged.\n'
'`git add {}` to fix this.'
).format(
filename,
filename,
))
raise ValueError |
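For context, a minimal usage sketch of the staging check above (hedged: the filename is hypothetical, and the function plus its `log` object are assumed importable from the surrounding module):

```python
import sys

try:
    # '.secrets.baseline' is a made-up baseline path used only for illustration.
    raise_exception_if_baseline_file_is_unstaged('.secrets.baseline')
except ValueError:
    # The function has already logged why it failed; abort the hook with a non-zero exit code.
    sys.exit(1)
```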
async def display_columns_and_rows(
self,
database,
table,
description,
rows,
link_column=False,
truncate_cells=0,
):
"Returns columns, rows for specified table - including fancy foreign key treatment"
table_metadata = self.ds.table_metadata(database, table)
sortable_columns = await self.sortable_columns_for_table(database, table, True)
columns = [
{"name": r[0], "sortable": r[0] in sortable_columns} for r in description
]
pks = await self.ds.execute_against_connection_in_thread(
database, lambda conn: detect_primary_keys(conn, table)
)
column_to_foreign_key_table = {
fk["column"]: fk["other_table"]
for fk in await self.ds.foreign_keys_for_table(database, table)
}
cell_rows = []
for row in rows:
cells = []
# Unless we are a view, the first column is a link - either to the rowid
# or to the simple or compound primary key
if link_column:
cells.append(
{
"column": pks[0] if len(pks) == 1 else "Link",
"value": jinja2.Markup(
'<a href="/{database}/{table}/{flat_pks_quoted}">{flat_pks}</a>'.format(
database=database,
table=urllib.parse.quote_plus(table),
flat_pks=str(
jinja2.escape(
path_from_row_pks(row, pks, not pks, False)
)
),
flat_pks_quoted=path_from_row_pks(row, pks, not pks),
)
),
}
)
for value, column_dict in zip(row, columns):
column = column_dict["name"]
if link_column and len(pks) == 1 and column == pks[0]:
# If there's a simple primary key, don't repeat the value as it's
# already shown in the link column.
continue
# First let the plugins have a go
# pylint: disable=no-member
plugin_display_value = pm.hook.render_cell(
value=value,
column=column,
table=table,
database=database,
datasette=self.ds,
)
if plugin_display_value is not None:
display_value = plugin_display_value
elif isinstance(value, dict):
# It's an expanded foreign key - display link to other row
label = value["label"]
value = value["value"]
# The table we link to depends on the column
other_table = column_to_foreign_key_table[column]
link_template = (
LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
)
display_value = jinja2.Markup(link_template.format(
database=database,
table=urllib.parse.quote_plus(other_table),
link_id=urllib.parse.quote_plus(str(value)),
id=str(jinja2.escape(value)),
label=str(jinja2.escape(label)),
))
elif value in ("", None):
display_value = jinja2.Markup(" ")
elif is_url(str(value).strip()):
display_value = jinja2.Markup(
'<a href="{url}">{url}</a>'.format(
url=jinja2.escape(value.strip())
)
)
elif column in table_metadata.get("units", {}) and value != "":
# Interpret units using pint
value = value * ureg(table_metadata["units"][column])
# Pint uses floating point which sometimes introduces errors in the compact
# representation, which we have to round off to avoid ugliness. In the vast
# majority of cases this rounding will be inconsequential. I hope.
value = round(value.to_compact(), 6)
display_value = jinja2.Markup(
"{:~P}".format(value).replace(" ", " ")
)
else:
display_value = str(value)
if truncate_cells and len(display_value) > truncate_cells:
display_value = display_value[:truncate_cells] + u"\u2026"
cells.append({"column": column, "value": display_value})
cell_rows.append(cells)
if link_column:
# Add the link column header.
# If it's a simple primary key, we have to remove and re-add that column name at
# the beginning of the header row.
if len(pks) == 1:
columns = [col for col in columns if col["name"] != pks[0]]
columns = [
{"name": pks[0] if len(pks) == 1 else "Link", "sortable": len(pks) == 1}
] + columns
return columns, cell_rows | Returns columns, rows for specified table - including fancy foreign key treatment | Below is the instruction that describes the task:
### Input:
Returns columns, rows for specified table - including fancy foreign key treatment
### Response:
async def display_columns_and_rows(
self,
database,
table,
description,
rows,
link_column=False,
truncate_cells=0,
):
"Returns columns, rows for specified table - including fancy foreign key treatment"
table_metadata = self.ds.table_metadata(database, table)
sortable_columns = await self.sortable_columns_for_table(database, table, True)
columns = [
{"name": r[0], "sortable": r[0] in sortable_columns} for r in description
]
pks = await self.ds.execute_against_connection_in_thread(
database, lambda conn: detect_primary_keys(conn, table)
)
column_to_foreign_key_table = {
fk["column"]: fk["other_table"]
for fk in await self.ds.foreign_keys_for_table(database, table)
}
cell_rows = []
for row in rows:
cells = []
# Unless we are a view, the first column is a link - either to the rowid
# or to the simple or compound primary key
if link_column:
cells.append(
{
"column": pks[0] if len(pks) == 1 else "Link",
"value": jinja2.Markup(
'<a href="/{database}/{table}/{flat_pks_quoted}">{flat_pks}</a>'.format(
database=database,
table=urllib.parse.quote_plus(table),
flat_pks=str(
jinja2.escape(
path_from_row_pks(row, pks, not pks, False)
)
),
flat_pks_quoted=path_from_row_pks(row, pks, not pks),
)
),
}
)
for value, column_dict in zip(row, columns):
column = column_dict["name"]
if link_column and len(pks) == 1 and column == pks[0]:
# If there's a simple primary key, don't repeat the value as it's
# already shown in the link column.
continue
# First let the plugins have a go
# pylint: disable=no-member
plugin_display_value = pm.hook.render_cell(
value=value,
column=column,
table=table,
database=database,
datasette=self.ds,
)
if plugin_display_value is not None:
display_value = plugin_display_value
elif isinstance(value, dict):
# It's an expanded foreign key - display link to other row
label = value["label"]
value = value["value"]
# The table we link to depends on the column
other_table = column_to_foreign_key_table[column]
link_template = (
LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
)
display_value = jinja2.Markup(link_template.format(
database=database,
table=urllib.parse.quote_plus(other_table),
link_id=urllib.parse.quote_plus(str(value)),
id=str(jinja2.escape(value)),
label=str(jinja2.escape(label)),
))
elif value in ("", None):
display_value = jinja2.Markup(" ")
elif is_url(str(value).strip()):
display_value = jinja2.Markup(
'<a href="{url}">{url}</a>'.format(
url=jinja2.escape(value.strip())
)
)
elif column in table_metadata.get("units", {}) and value != "":
# Interpret units using pint
value = value * ureg(table_metadata["units"][column])
# Pint uses floating point which sometimes introduces errors in the compact
# representation, which we have to round off to avoid ugliness. In the vast
# majority of cases this rounding will be inconsequential. I hope.
value = round(value.to_compact(), 6)
display_value = jinja2.Markup(
"{:~P}".format(value).replace(" ", " ")
)
else:
display_value = str(value)
if truncate_cells and len(display_value) > truncate_cells:
display_value = display_value[:truncate_cells] + u"\u2026"
cells.append({"column": column, "value": display_value})
cell_rows.append(cells)
if link_column:
# Add the link column header.
# If it's a simple primary key, we have to remove and re-add that column name at
# the beginning of the header row.
if len(pks) == 1:
columns = [col for col in columns if col["name"] != pks[0]]
columns = [
{"name": pks[0] if len(pks) == 1 else "Link", "sortable": len(pks) == 1}
] + columns
return columns, cell_rows |
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
"""
Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
Expected dtype is uint8.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
max_value : float, optional
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter `min_value` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps object.
"""
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value) | Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
Expected dtype is uint8.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
max_value : float, optional
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter `min_value` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps object. | Below is the instruction that describes the task:
### Input:
Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
Expected dtype is uint8.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
max_value : float, optional
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter `min_value` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps object.
### Response:
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
"""
Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
Expected dtype is uint8.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
max_value : float, optional
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter `min_value` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps object.
"""
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value) |
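An illustrative call to `from_uint8`, assuming it is exposed as a static method on `HeatmapsOnImage` (as in imgaug) and that numpy is installed:

```python
import numpy as np

# A made-up 64x64 single-channel uint8 heatmap placed on a 64x64 RGB image.
arr = np.random.randint(0, 256, size=(64, 64, 1), dtype=np.uint8)
heatmaps = HeatmapsOnImage.from_uint8(arr, shape=(64, 64, 3))
print(heatmaps.get_arr().min(), heatmaps.get_arr().max())  # values are back in [0.0, 1.0]
```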
def with_mfa(self, mfa_token):
"""Set the MFA token for the next request.
`mfa_token`s are only good for one request. Use this method to chain into
the protected action you want to perform.
Note: Only useful for Application authentication.
Usage:
account.with_mfa(application.totp.now()).pay(...)
Args:
mfa_token (str/function, optional): TOTP token for the Application
OR a callable/function which will generate such a token when called.
Returns:
self
"""
if hasattr(mfa_token, '__call__'): # callable() is unsupported by 3.1 and 3.2
self.context.mfa_token = mfa_token.__call__()
else:
self.context.mfa_token = mfa_token
return self | Set the MFA token for the next request.
`mfa_token`s are only good for one request. Use this method to chain into
the protected action you want to perform.
Note: Only useful for Application authentication.
Usage:
account.with_mfa(application.totp.now()).pay(...)
Args:
mfa_token (str/function, optional): TOTP token for the Application
OR a callable/function which will generate such a token when called.
Returns:
self | Below is the instruction that describes the task:
### Input:
Set the MFA token for the next request.
`mfa_token`s are only good for one request. Use this method to chain into
the protected action you want to perform.
Note: Only useful for Application authentication.
Usage:
account.with_mfa(application.totp.now()).pay(...)
Args:
mfa_token (str/function, optional): TOTP token for the Application
OR a callable/function which will generate such a token when called.
Returns:
self
### Response:
def with_mfa(self, mfa_token):
"""Set the MFA token for the next request.
`mfa_token`s are only good for one request. Use this method to chain into
the protected action you want to perform.
Note: Only useful for Application authentication.
Usage:
account.with_mfa(application.totp.now()).pay(...)
Args:
mfa_token (str/function, optional): TOTP token for the Application
OR a callable/function which will generate such a token when called.
Returns:
self
"""
if hasattr(mfa_token, '__call__'): # callable() is unsupported by 3.1 and 3.2
self.context.mfa_token = mfa_token.__call__()
else:
self.context.mfa_token = mfa_token
return self |
def get_best_splitting_attr(self):
"""
Returns the name of the attribute with the highest gain.
"""
best = (-1e999999, None)
for attr in self.attributes:
best = max(best, (self.get_gain(attr), attr))
best_gain, best_attr = best
return best_attr | Returns the name of the attribute with the highest gain. | Below is the instruction that describes the task:
### Input:
Returns the name of the attribute with the highest gain.
### Response:
def get_best_splitting_attr(self):
"""
Returns the name of the attribute with the highest gain.
"""
best = (-1e999999, None)
for attr in self.attributes:
best = max(best, (self.get_gain(attr), attr))
best_gain, best_attr = best
return best_attr |
def decrease_writes_in_units(
current_provisioning, units, min_provisioned_writes, log_tag):
""" Decrease the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we decrease with
:returns: int -- New provisioning value
:type min_provisioned_writes: int
:param min_provisioned_writes: Configured min provisioned writes
:type log_tag: str
:param log_tag: Prefix for the log
"""
updated_provisioning = int(current_provisioning) - int(units)
min_provisioned_writes = __get_min_writes(
current_provisioning,
min_provisioned_writes,
log_tag)
if updated_provisioning < min_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes min limit: {1:d}'.format(
log_tag,
int(min_provisioned_writes)))
return min_provisioned_writes
logger.debug(
'{0} - Write provisioning will be decreased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning | Decrease the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we decrease with
:returns: int -- New provisioning value
:type min_provisioned_writes: int
:param min_provisioned_writes: Configured min provisioned writes
:type log_tag: str
:param log_tag: Prefix for the log | Below is the instruction that describes the task:
### Input:
Decrease the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we decrease with
:returns: int -- New provisioning value
:type min_provisioned_writes: int
:param min_provisioned_writes: Configured min provisioned writes
:type log_tag: str
:param log_tag: Prefix for the log
### Response:
def decrease_writes_in_units(
current_provisioning, units, min_provisioned_writes, log_tag):
""" Decrease the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we decrease with
:returns: int -- New provisioning value
:type min_provisioned_writes: int
:param min_provisioned_writes: Configured min provisioned writes
:type log_tag: str
:param log_tag: Prefix for the log
"""
updated_provisioning = int(current_provisioning) - int(units)
min_provisioned_writes = __get_min_writes(
current_provisioning,
min_provisioned_writes,
log_tag)
if updated_provisioning < min_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes min limit: {1:d}'.format(
log_tag,
int(min_provisioned_writes)))
return min_provisioned_writes
logger.debug(
'{0} - Write provisioning will be decreased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning |
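A worked example of the clamping behaviour above (a sketch only; it assumes the module-level `logger` and `__get_min_writes` helper are available and that the configured minimum comes back unchanged):

```python
new_units = decrease_writes_in_units(
    current_provisioning=100,
    units=30,
    min_provisioned_writes=50,
    log_tag='table: my-hypothetical-table')
# 100 - 30 = 70, which stays above the 50-unit floor, so new_units == 70.
# With units=80 the result would instead be clamped to the configured minimum of 50.
```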
def safe_call(cls, method, *args):
""" Call a remote api method but don't raise if an error occurred."""
return cls.call(method, *args, safe=True) | Call a remote api method but don't raise if an error occurred. | Below is the instruction that describes the task:
### Input:
Call a remote api method but don't raise if an error occurred.
### Response:
def safe_call(cls, method, *args):
""" Call a remote api method but don't raise if an error occurred."""
return cls.call(method, *args, safe=True) |
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value | Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.' | Below is the instruction that describes the task:
### Input:
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
### Response:
def ap_state(value, failure_string=None):
"""
Converts a state's name, postal abbreviation or FIPS to A.P. style.
Example usage:
>> ap_state("California")
'Calif.'
"""
try:
return statestyle.get(value).ap
except:
if failure_string:
return failure_string
else:
return value |
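Example calls for `ap_state`, assuming the `statestyle` package is installed; the expected AP abbreviations are shown as comments:

```python
print(ap_state("California"))                    # Calif.
print(ap_state("CA"))                            # Calif.
print(ap_state("Narnia", failure_string="N/A"))  # N/A  (lookup fails, fallback returned)
print(ap_state("Narnia"))                        # Narnia  (no fallback, input echoed back)
```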
def _get_path(entity_id):
'''Get the entity_id as a string if it is a Reference.
@param entity_id The ID either a reference or a string of the entity
to get.
@return entity_id as a string
'''
try:
path = entity_id.path()
except AttributeError:
path = entity_id
if path.startswith('cs:'):
path = path[3:]
return path | Get the entity_id as a string if it is a Reference.
@param entity_id The ID either a reference or a string of the entity
to get.
@return entity_id as a string | Below is the instruction that describes the task:
### Input:
Get the entity_id as a string if it is a Reference.
@param entity_id The ID either a reference or a string of the entity
to get.
@return entity_id as a string
### Response:
def _get_path(entity_id):
'''Get the entity_id as a string if it is a Reference.
@param entity_id The ID either a reference or a string of the entity
to get.
@return entity_id as a string
'''
try:
path = entity_id.path()
except AttributeError:
path = entity_id
if path.startswith('cs:'):
path = path[3:]
return path |
def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
"""
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _ | Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply. | Below is the instruction that describes the task:
### Input:
Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
### Response:
def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
"""
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _ |
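A hypothetical migration showing how the `downgrades` decorator is meant to be applied; the table and column names are invented, and the generated wrapper later receives the connection and version-info table from the migration runner:

```python
@downgrades(7)
def _downgrade_v7(op):
    # Runs when moving the schema from version 7 back to version 6.
    op.drop_column('equities', 'hypothetical_column')
```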
def observe(self, seconds=None):
""" Begins the observer loop (synchronously).
Loops for ``seconds`` or until this region's stopObserver() method is called.
If ``seconds`` is None, the observer loop cycles until stopped. If this
method is called while the observer loop is already running, it returns False.
Returns True if the observer could be started, False otherwise.
"""
# Check if observer is already running
if self._observer.isRunning:
return False # Could not start
# Set timeout
if seconds is not None:
timeout = time.time() + seconds
else:
timeout = None
# Start observe loop
while (not self._observer.isStopped) and (seconds is None or time.time() < timeout):
# Check registered events
self._observer.check_events()
# Sleep for scan rate
time.sleep(1/self.getObserveScanRate())
return True | Begins the observer loop (synchronously).
Loops for ``seconds`` or until this region's stopObserver() method is called.
If ``seconds`` is None, the observer loop cycles until stopped. If this
method is called while the observer loop is already running, it returns False.
Returns True if the observer could be started, False otherwise. | Below is the instruction that describes the task:
### Input:
Begins the observer loop (synchronously).
Loops for ``seconds`` or until this region's stopObserver() method is called.
If ``seconds`` is None, the observer loop cycles until stopped. If this
method is called while the observer loop is already running, it returns False.
Returns True if the observer could be started, False otherwise.
### Response:
def observe(self, seconds=None):
""" Begins the observer loop (synchronously).
Loops for ``seconds`` or until this region's stopObserver() method is called.
If ``seconds`` is None, the observer loop cycles until stopped. If this
method is called while the observer loop is already running, it returns False.
Returns True if the observer could be started, False otherwise.
"""
# Check if observer is already running
if self._observer.isRunning:
return False # Could not start
# Set timeout
if seconds is not None:
timeout = time.time() + seconds
else:
timeout = None
# Start observe loop
while (not self._observer.isStopped) and (seconds is None or time.time() < timeout):
# Check registered events
self._observer.check_events()
# Sleep for scan rate
time.sleep(1/self.getObserveScanRate())
return True |
def infer_named_tuple(node, context=None):
"""Specific inference function for namedtuple Call node"""
tuple_base_name = nodes.Name(name="tuple", parent=node.root())
class_node, name, attributes = infer_func_form(
node, tuple_base_name, context=context
)
call_site = arguments.CallSite.from_call(node)
func = next(extract_node("import collections; collections.namedtuple").infer())
try:
rename = next(call_site.infer_argument(func, "rename", context)).bool_value()
except InferenceError:
rename = False
if rename:
attributes = _get_renamed_namedtuple_attributes(attributes)
replace_args = ", ".join("{arg}=None".format(arg=arg) for arg in attributes)
field_def = (
" {name} = property(lambda self: self[{index:d}], "
"doc='Alias for field number {index:d}')"
)
field_defs = "\n".join(
field_def.format(name=name, index=index)
for index, name in enumerate(attributes)
)
fake = AstroidBuilder(MANAGER).string_build(
"""
class %(name)s(tuple):
__slots__ = ()
_fields = %(fields)r
def _asdict(self):
return self.__dict__
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
return new(cls, iterable)
def _replace(self, %(replace_args)s):
return self
def __getnewargs__(self):
return tuple(self)
%(field_defs)s
"""
% {
"name": name,
"fields": attributes,
"field_defs": field_defs,
"replace_args": replace_args,
}
)
class_node.locals["_asdict"] = fake.body[0].locals["_asdict"]
class_node.locals["_make"] = fake.body[0].locals["_make"]
class_node.locals["_replace"] = fake.body[0].locals["_replace"]
class_node.locals["_fields"] = fake.body[0].locals["_fields"]
for attr in attributes:
class_node.locals[attr] = fake.body[0].locals[attr]
# we use UseInferenceDefault, we can't be a generator so return an iterator
return iter([class_node]) | Specific inference function for namedtuple Call node | Below is the instruction that describes the task:
### Input:
Specific inference function for namedtuple Call node
### Response:
def infer_named_tuple(node, context=None):
"""Specific inference function for namedtuple Call node"""
tuple_base_name = nodes.Name(name="tuple", parent=node.root())
class_node, name, attributes = infer_func_form(
node, tuple_base_name, context=context
)
call_site = arguments.CallSite.from_call(node)
func = next(extract_node("import collections; collections.namedtuple").infer())
try:
rename = next(call_site.infer_argument(func, "rename", context)).bool_value()
except InferenceError:
rename = False
if rename:
attributes = _get_renamed_namedtuple_attributes(attributes)
replace_args = ", ".join("{arg}=None".format(arg=arg) for arg in attributes)
field_def = (
" {name} = property(lambda self: self[{index:d}], "
"doc='Alias for field number {index:d}')"
)
field_defs = "\n".join(
field_def.format(name=name, index=index)
for index, name in enumerate(attributes)
)
fake = AstroidBuilder(MANAGER).string_build(
"""
class %(name)s(tuple):
__slots__ = ()
_fields = %(fields)r
def _asdict(self):
return self.__dict__
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
return new(cls, iterable)
def _replace(self, %(replace_args)s):
return self
def __getnewargs__(self):
return tuple(self)
%(field_defs)s
"""
% {
"name": name,
"fields": attributes,
"field_defs": field_defs,
"replace_args": replace_args,
}
)
class_node.locals["_asdict"] = fake.body[0].locals["_asdict"]
class_node.locals["_make"] = fake.body[0].locals["_make"]
class_node.locals["_replace"] = fake.body[0].locals["_replace"]
class_node.locals["_fields"] = fake.body[0].locals["_fields"]
for attr in attributes:
class_node.locals[attr] = fake.body[0].locals[attr]
# we use UseInferenceDefault, we can't be a generator so return an iterator
return iter([class_node]) |
def all_devices(cl_device_type=None, platform=None):
"""Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
"""
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
runtime_list = []
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
if cl_device_type:
devices = platform.get_devices(device_type=cl_device_type)
else:
devices = platform.get_devices()
for device in devices:
if device_supports_double(device):
env = CLEnvironment(platform, device)
runtime_list.append(env)
return runtime_list | Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments. | Below is the instruction that describes the task:
### Input:
Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
### Response:
def all_devices(cl_device_type=None, platform=None):
"""Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
"""
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
runtime_list = []
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
if cl_device_type:
devices = platform.get_devices(device_type=cl_device_type)
else:
devices = platform.get_devices()
for device in devices:
if device_supports_double(device):
env = CLEnvironment(platform, device)
runtime_list.append(env)
return runtime_list |
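A small usage sketch for `all_devices`, assuming pyopencl is installed and at least one double-precision device is present:

```python
# Prefer double-precision GPU environments; fall back to CPUs if none are found.
envs = all_devices(cl_device_type='GPU') or all_devices(cl_device_type='CPU')
for env in envs:
    print(env)
```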
def resolve_polytomy(
self,
dist=1.0,
support=100,
recursive=True):
"""
Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place.
"""
nself = self.copy()
nself.treenode.resolve_polytomy(
default_dist=dist,
default_support=support,
recursive=recursive)
nself._coords.update()
return nself | Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place. | Below is the instruction that describes the task:
### Input:
Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place.
### Response:
def resolve_polytomy(
self,
dist=1.0,
support=100,
recursive=True):
"""
Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place.
"""
nself = self.copy()
nself.treenode.resolve_polytomy(
default_dist=dist,
default_support=support,
recursive=recursive)
nself._coords.update()
return nself |
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
"""
pos = np.array(fg_vals)
neg = np.array(bg_vals)
s = scoreatpercentile(neg, 100 - fpr * 100)
neg_matches = float(len(neg[neg >= s]))
if neg_matches == 0:
return float("inf")
return len(pos[pos >= s]) / neg_matches * len(neg) / float(len(pos)) | Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR. | Below is the instruction that describes the task:
### Input:
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
### Response:
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
"""
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
"""
pos = np.array(fg_vals)
neg = np.array(bg_vals)
s = scoreatpercentile(neg, 100 - fpr * 100)
neg_matches = float(len(neg[neg >= s]))
if neg_matches == 0:
return float("inf")
return len(pos[pos >= s]) / neg_matches * len(neg) / float(len(pos)) |
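A toy example of the enrichment computation above (assumes numpy and the scipy `scoreatpercentile` import used by the function are available):

```python
import numpy as np

rng = np.random.RandomState(0)
fg_vals = rng.normal(loc=2.0, size=200)    # positives tend to score higher
bg_vals = rng.normal(loc=0.0, size=10000)  # negatives

# Ratio of the positive hit rate to the negative hit rate at the negatives' 99th percentile.
print(enr_at_fpr(fg_vals, bg_vals, fpr=0.01))
```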
def main(params=None):
"""
Main function to launch usufy.
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `getParser()`.
Args:
-----
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
--------
A list of i3visio entities.
"""
if params == None:
parser = getParser()
args = parser.parse_args(params)
else:
args = params
results = []
print(general.title(banner.text))
sayingHello = """
Searchfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018
This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{}>.
""".format(general.LICENSE_URL)
print(general.info(sayingHello))
if args.license:
general.showLicense()
else:
# Showing the execution time...
startTime= dt.datetime.now()
print(str(startTime) + "\tStarting search in different platform(s)... Relax!\n")
print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))
# Performing the search
try:
results = performSearch(platformNames=args.platforms, queries=args.queries, process=args.process, excludePlatformNames=args.exclude)
except KeyboardInterrupt:
print(general.error("\n[!] Process manually stopped by the user. Workers terminated without providing any result.\n"))
results = []
# Generating summary files for each ...
if args.extension:
# Verifying if the outputPath exists
if not os.path.exists (args.output_folder):
os.makedirs(args.output_folder)
# Grabbing the results
fileHeader = os.path.join(args.output_folder, args.file_header)
# Iterating through the given extensions to print its values
for ext in args.extension:
# Generating output files
general.exportUsufy(results, ext, fileHeader)
# Printing the results if requested
now = dt.datetime.now()
print("\n{}\tResults obtained:\n".format(str(now)))
print(general.success(general.usufyToTextExport(results)))
if args.web_browser:
general.openResultsInBrowser(results)
now = dt.datetime.now()
print("\n{date}\tYou can find all the information collected in the following files:".format(date=str(now)))
for ext in args.extension:
# Showing the output files
print("\t" + general.emphasis(fileHeader + "." + ext))
# Showing the execution time...
endTime= dt.datetime.now()
print("\n{date}\tFinishing execution...\n".format(date=str(endTime)))
print("Total time used:\t" + general.emphasis(str(endTime-startTime)))
print("Average seconds/query:\t" + general.emphasis(str((endTime-startTime).total_seconds()/len(args.platforms))) +" seconds\n")
# Urging users to place an issue on Github...
print(banner.footer)
if params:
return results | Main function to launch usufy.
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `getParser()`.
Args:
-----
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
--------
A list of i3visio entities. | Below is the instruction that describes the task:
### Input:
Main function to launch usufy.
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `getParser()`.
Args:
-----
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
--------
A list of i3visio entities.
### Response:
def main(params=None):
"""
Main function to launch usufy.
The function is created in this way so as to let other applications make
use of the full configuration capabilities of the application. The
parameters received are used as parsed by this modules `getParser()`.
Args:
-----
params: A list with the parameters as grabbed by the terminal. It is
None when this is called by an entry_point. If it is called by osrf
the data is already parsed.
Returns:
--------
A list of i3visio entities.
"""
if params == None:
parser = getParser()
args = parser.parse_args(params)
else:
args = params
results = []
print(general.title(banner.text))
sayingHello = """
Searchfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018
This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{}>.
""".format(general.LICENSE_URL)
print(general.info(sayingHello))
if args.license:
general.showLicense()
else:
# Showing the execution time...
startTime= dt.datetime.now()
print(str(startTime) + "\tStarting search in different platform(s)... Relax!\n")
print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))
# Performing the search
try:
results = performSearch(platformNames=args.platforms, queries=args.queries, process=args.process, excludePlatformNames=args.exclude)
except KeyboardInterrupt:
print(general.error("\n[!] Process manually stopped by the user. Workers terminated without providing any result.\n"))
results = []
# Generating summary files for each ...
if args.extension:
# Verifying if the outputPath exists
if not os.path.exists (args.output_folder):
os.makedirs(args.output_folder)
# Grabbing the results
fileHeader = os.path.join(args.output_folder, args.file_header)
# Iterating through the given extensions to print its values
for ext in args.extension:
# Generating output files
general.exportUsufy(results, ext, fileHeader)
# Printing the results if requested
now = dt.datetime.now()
print("\n{}\tResults obtained:\n".format(str(now)))
print(general.success(general.usufyToTextExport(results)))
if args.web_browser:
general.openResultsInBrowser(results)
now = dt.datetime.now()
print("\n{date}\tYou can find all the information collected in the following files:".format(date=str(now)))
for ext in args.extension:
# Showing the output files
print("\t" + general.emphasis(fileHeader + "." + ext))
# Showing the execution time...
endTime= dt.datetime.now()
print("\n{date}\tFinishing execution...\n".format(date=str(endTime)))
print("Total time used:\t" + general.emphasis(str(endTime-startTime)))
print("Average seconds/query:\t" + general.emphasis(str((endTime-startTime).total_seconds()/len(args.platforms))) +" seconds\n")
# Urging users to place an issue on Github...
print(banner.footer)
if params:
return results |
def _find_ancestor(self, task_spec):
"""
Returns the ancestor that has the given task spec assigned.
If no such ancestor was found, the root task is returned.
:type task_spec: TaskSpec
:param task_spec: The wanted task spec.
:rtype: Task
:returns: The ancestor.
"""
if self.parent is None:
return self
if self.parent.task_spec == task_spec:
return self.parent
return self.parent._find_ancestor(task_spec) | Returns the ancestor that has the given task spec assigned.
If no such ancestor was found, the root task is returned.
:type task_spec: TaskSpec
:param task_spec: The wanted task spec.
:rtype: Task
:returns: The ancestor. | Below is the instruction that describes the task:
### Input:
Returns the ancestor that has the given task spec assigned.
If no such ancestor was found, the root task is returned.
:type task_spec: TaskSpec
:param task_spec: The wanted task spec.
:rtype: Task
:returns: The ancestor.
### Response:
def _find_ancestor(self, task_spec):
"""
Returns the ancestor that has the given task spec assigned.
If no such ancestor was found, the root task is returned.
:type task_spec: TaskSpec
:param task_spec: The wanted task spec.
:rtype: Task
:returns: The ancestor.
"""
if self.parent is None:
return self
if self.parent.task_spec == task_spec:
return self.parent
return self.parent._find_ancestor(task_spec) |
def det_curve(y_true, scores, distances=False):
"""DET curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
thresholds : numpy array
Corresponding thresholds
eer : float
Equal error rate
"""
if distances:
scores = -scores
# compute false positive and false negative rates
# (a.k.a. false alarm and false rejection rates)
fpr, tpr, thresholds = sklearn.metrics.roc_curve(
y_true, scores, pos_label=True)
fnr = 1 - tpr
if distances:
thresholds = -thresholds
# estimate equal error rate
eer_index = np.where(fpr > fnr)[0][0]
eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +
fnr[eer_index-1] + fnr[eer_index])
return fpr, fnr, thresholds, eer | DET curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
thresholds : numpy array
Corresponding thresholds
eer : float
Equal error rate | Below is the instruction that describes the task:
### Input:
DET curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
thresholds : numpy array
Corresponding thresholds
eer : float
Equal error rate
### Response:
def det_curve(y_true, scores, distances=False):
"""DET curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
thresholds : numpy array
Corresponding thresholds
eer : float
Equal error rate
"""
if distances:
scores = -scores
# compute false positive and false negative rates
# (a.k.a. false alarm and false rejection rates)
fpr, tpr, thresholds = sklearn.metrics.roc_curve(
y_true, scores, pos_label=True)
fnr = 1 - tpr
if distances:
thresholds = -thresholds
# estimate equal error rate
eer_index = np.where(fpr > fnr)[0][0]
eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +
fnr[eer_index-1] + fnr[eer_index])
return fpr, fnr, thresholds, eer |
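An illustrative call to `det_curve` (assumes numpy and scikit-learn are importable, matching the function's own dependencies):

```python
import numpy as np

rng = np.random.RandomState(0)
y_true = np.array([True] * 100 + [False] * 100)
scores = np.concatenate([rng.normal(1.0, 1.0, 100), rng.normal(-1.0, 1.0, 100)])

fpr, fnr, thresholds, eer = det_curve(y_true, scores)
print("equal error rate:", eer)
```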
def slice(self, start, stop=None, axis=0):
"""Restrict histogram to bins whose data values (not bin numbers) along axis are between start and stop
(both inclusive). Returns d dimensional histogram."""
if stop is None:
# Make a 1=bin slice
stop = start
axis = self.get_axis_number(axis)
start_bin = max(0, self.get_axis_bin_index(start, axis))
stop_bin = min(len(self.bin_centers(axis)) - 1, # TODO: test off by one!
self.get_axis_bin_index(stop, axis))
new_bin_edges = self.bin_edges.copy()
new_bin_edges[axis] = new_bin_edges[axis][start_bin:stop_bin + 2] # TODO: Test off by one here!
return Histdd.from_histogram(np.take(self.histogram, np.arange(start_bin, stop_bin + 1), axis=axis),
bin_edges=new_bin_edges, axis_names=self.axis_names) | Restrict histogram to bins whose data values (not bin numbers) along axis are between start and stop
(both inclusive). Returns d dimensional histogram. | Below is the instruction that describes the task:
### Input:
Restrict histogram to bins whose data values (not bin numbers) along axis are between start and stop
(both inclusive). Returns d dimensional histogram.
### Response:
def slice(self, start, stop=None, axis=0):
"""Restrict histogram to bins whose data values (not bin numbers) along axis are between start and stop
(both inclusive). Returns d dimensional histogram."""
if stop is None:
# Make a 1=bin slice
stop = start
axis = self.get_axis_number(axis)
start_bin = max(0, self.get_axis_bin_index(start, axis))
stop_bin = min(len(self.bin_centers(axis)) - 1, # TODO: test off by one!
self.get_axis_bin_index(stop, axis))
new_bin_edges = self.bin_edges.copy()
new_bin_edges[axis] = new_bin_edges[axis][start_bin:stop_bin + 2] # TODO: Test off by one here!
return Histdd.from_histogram(np.take(self.histogram, np.arange(start_bin, stop_bin + 1), axis=axis),
bin_edges=new_bin_edges, axis_names=self.axis_names) |
def get_between_times(self, t1, t2, target=None):
"""
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
None, but set's state of the object to have new query results stored
in self.obsids.
"""
try:
# checking if times have isoformat() method (datetimes have)
t1 = t1.isoformat()
t2 = t2.isoformat()
except AttributeError:
# if not, should already be a string, so do nothing.
pass
myquery = self._get_time_query(t1, t2)
if target is not None:
myquery["target"] = target
self.create_files_request(myquery, fmt="json")
self.unpack_json_response() | Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
None, but set's state of the object to have new query results stored
in self.obsids. | Below is the instruction that describes the task:
### Input:
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
None, but set's state of the object to have new query results stored
in self.obsids.
### Response:
def get_between_times(self, t1, t2, target=None):
"""
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
None, but set's state of the object to have new query results stored
in self.obsids.
"""
try:
# checking if times have isoformat() method (datetimes have)
t1 = t1.isoformat()
t2 = t2.isoformat()
except AttributeError:
# if not, should already be a string, so do nothing.
pass
myquery = self._get_time_query(t1, t2)
if target is not None:
myquery["target"] = target
self.create_files_request(myquery, fmt="json")
self.unpack_json_response() |
def filter_queryset(self, request, queryset, view):
"""Apply the relevant behaviors to the view queryset."""
start_value = self.get_start(request)
if start_value:
queryset = self.apply_published_filter(queryset, "after", start_value)
end_value = self.get_end(request)
if end_value:
# Forces the end_value to be the last second of the date provided in the query.
# Necessary currently as our Published filter for es only applies to gte & lte.
queryset = self.apply_published_filter(queryset, "before", end_value)
return queryset | Apply the relevant behaviors to the view queryset. | Below is the instruction that describes the task:
### Input:
Apply the relevant behaviors to the view queryset.
### Response:
def filter_queryset(self, request, queryset, view):
"""Apply the relevant behaviors to the view queryset."""
start_value = self.get_start(request)
if start_value:
queryset = self.apply_published_filter(queryset, "after", start_value)
end_value = self.get_end(request)
if end_value:
# Forces the end_value to be the last second of the date provided in the query.
# Necessary currently as our Published filter for es only applies to gte & lte.
queryset = self.apply_published_filter(queryset, "before", end_value)
return queryset |
def read(self, nrml_file, validate=False,
simple_fault_spacing=1.0, complex_mesh_spacing=5.0,
mfd_spacing=0.1):
"""
Build the source model from nrml format
"""
self.source_file = nrml_file
if validate:
converter = SourceConverter(1.0, simple_fault_spacing,
complex_mesh_spacing,
mfd_spacing,
10.0)
converter.fname = nrml_file
root = nrml.read(nrml_file)
if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
sg_nodes = [root.sourceModel.nodes]
else: # NRML 0.5
sg_nodes = root.sourceModel.nodes
sources = []
for sg_node in sg_nodes:
for no, src_node in enumerate(sg_node, 1):
if validate:
print("Validating Source %s" % src_node.attrib["id"])
converter.convert_node(src_node)
sources.append(src_node)
return SourceModel(sources) | Build the source model from nrml format | Below is the instruction that describes the task:
### Input:
Build the source model from nrml format
### Response:
def read(self, nrml_file, validate=False,
simple_fault_spacing=1.0, complex_mesh_spacing=5.0,
mfd_spacing=0.1):
"""
Build the source model from nrml format
"""
self.source_file = nrml_file
if validate:
converter = SourceConverter(1.0, simple_fault_spacing,
complex_mesh_spacing,
mfd_spacing,
10.0)
converter.fname = nrml_file
root = nrml.read(nrml_file)
if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
sg_nodes = [root.sourceModel.nodes]
else: # NRML 0.5
sg_nodes = root.sourceModel.nodes
sources = []
for sg_node in sg_nodes:
for no, src_node in enumerate(sg_node, 1):
if validate:
print("Validating Source %s" % src_node.attrib["id"])
converter.convert_node(src_node)
sources.append(src_node)
return SourceModel(sources) |
def local_temp_dir():
"""
Creates a local temporary directory. The directory is removed when no longer needed. Failure to do
so will be ignored.
:return: Path to the temporary directory.
:rtype: unicode
"""
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path, ignore_errors=True) | Creates a local temporary directory. The directory is removed when no longer needed. Failure to do
so will be ignored.
:return: Path to the temporary directory.
:rtype: unicode | Below is the instruction that describes the task:
### Input:
Creates a local temporary directory. The directory is removed when no longer needed. Failure to do
so will be ignored.
:return: Path to the temporary directory.
:rtype: unicode
### Response:
def local_temp_dir():
"""
Creates a local temporary directory. The directory is removed when no longer needed. Failure to do
so will be ignored.
:return: Path to the temporary directory.
:rtype: unicode
"""
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path, ignore_errors=True) |
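A minimal usage sketch for local_temp_dir. As written it is a plain generator function, so the contextlib.contextmanager wrapping below is an assumption made for illustration (the source package presumably applies an equivalent decorator):
import contextlib
import os
with contextlib.contextmanager(local_temp_dir)() as tmp:  # assumes local_temp_dir from above is in scope
    print(os.path.isdir(tmp))  # True inside the block; the directory is removed afterwards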
def _incr_exceptions(self, conn):
"""Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
"""
self._pool_manager.get_connection(self.pid, conn).exceptions += 1 | Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection | Below is the instruction that describes the task:
### Input:
Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
### Response:
def _incr_exceptions(self, conn):
"""Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
"""
self._pool_manager.get_connection(self.pid, conn).exceptions += 1 |
def QRatio(s1, s2, force_ascii=True, full_process=True):
"""
Quick ratio comparison between two strings.
Runs full_process from utils on both strings
Short circuits if either of the strings is empty after processing.
:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters (Default: True)
:full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
:return: similarity ratio
"""
if full_process:
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
else:
p1 = s1
p2 = s2
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
return ratio(p1, p2) | Quick ratio comparison between two strings.
Runs full_process from utils on both strings
Short circuits if either of the strings is empty after processing.
:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters (Default: True)
:full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
:return: similarity ratio | Below is the instruction that describes the task:
### Input:
Quick ratio comparison between two strings.
Runs full_process from utils on both strings
Short circuits if either of the strings is empty after processing.
:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters (Default: True)
:full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
:return: similarity ratio
### Response:
def QRatio(s1, s2, force_ascii=True, full_process=True):
"""
Quick ratio comparison between two strings.
Runs full_process from utils on both strings
Short circuits if either of the strings is empty after processing.
:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters (Default: True)
:full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
:return: similarity ratio
"""
if full_process:
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
else:
p1 = s1
p2 = s2
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
return ratio(p1, p2) |
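A quick sanity check of QRatio. This assumes the function above (it mirrors fuzzywuzzy's fuzz.QRatio) is importable from the fuzzywuzzy package; the scores shown are the expected ones for these inputs:
from fuzzywuzzy import fuzz  # assumption: the surrounding module is fuzzywuzzy's fuzz
print(fuzz.QRatio("new york mets", "new york mets"))  # 100 for identical strings
print(fuzz.QRatio("new york mets", ""))               # 0, short-circuits on an empty string after processing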
def _parse_handler_result(self, result):
"""Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest of the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer.
"""
if isinstance(result, (list, tuple)):
payload = result[0]
list_result = list(result)
else:
payload = result
list_result = [""]
return payload, list_result | Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest of the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer. | Below is the instruction that describes the task:
### Input:
Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest of the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer.
### Response:
def _parse_handler_result(self, result):
"""Parses the item(s) returned by your handler implementation.
Handlers may return a single item (payload), or a tuple that gets
passed to the Response class __init__ method of your HTTP layer.
_parse_handler_result separates the payload from the rest of the tuple,
as well as providing the tuple so that it can be re-composed after
the payload has been run through the `_returns` Resource's renderer.
"""
if isinstance(result, (list, tuple)):
payload = result[0]
list_result = list(result)
else:
payload = result
list_result = [""]
return payload, list_result |
def merge_keywords(x,y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z | Given two dicts, merge them into a new dict as a shallow copy. | Below is the instruction that describes the task:
### Input:
Given two dicts, merge them into a new dict as a shallow copy.
### Response:
def merge_keywords(x,y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z |
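A minimal example of merge_keywords, assuming the function above is in scope; values from the second dict win on key collisions and neither input is mutated:
defaults = {"timeout": 30, "retries": 3}
overrides = {"retries": 5}
print(merge_keywords(defaults, overrides))  # {'timeout': 30, 'retries': 5}
print(defaults)                             # {'timeout': 30, 'retries': 3} (unchanged)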
def get_function_from_config(item):
"""
Import the function to get profile by handle.
"""
config = get_configuration()
func_path = config.get(item)
module_path, func_name = func_path.rsplit(".", 1)
module = importlib.import_module(module_path)
func = getattr(module, func_name)
return func | Import the function to get profile by handle. | Below is the instruction that describes the task:
### Input:
Import the function to get profile by handle.
### Response:
def get_function_from_config(item):
"""
Import the function to get profile by handle.
"""
config = get_configuration()
func_path = config.get(item)
module_path, func_name = func_path.rsplit(".", 1)
module = importlib.import_module(module_path)
func = getattr(module, func_name)
return func |
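The heart of get_function_from_config is a dotted-path import; a self-contained sketch of that pattern, using os.path.join purely as a stand-in for a configured path:
import importlib
func_path = "os.path.join"  # hypothetical dotted path taken from configuration
module_path, func_name = func_path.rsplit(".", 1)
func = getattr(importlib.import_module(module_path), func_name)
print(func("profiles", "handle.json"))  # 'profiles/handle.json' on POSIX systems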
def split_bezier(bpoints, t):
"""Uses deCasteljau's recursion to split the Bezier curve at t into two
Bezier curves of the same order."""
def split_bezier_recursion(bpoints_left_, bpoints_right_, bpoints_, t_):
if len(bpoints_) == 1:
bpoints_left_.append(bpoints_[0])
bpoints_right_.append(bpoints_[0])
else:
new_points = [None]*(len(bpoints_) - 1)
bpoints_left_.append(bpoints_[0])
bpoints_right_.append(bpoints_[-1])
for i in range(len(bpoints_) - 1):
new_points[i] = (1 - t_)*bpoints_[i] + t_*bpoints_[i + 1]
bpoints_left_, bpoints_right_ = split_bezier_recursion(
bpoints_left_, bpoints_right_, new_points, t_)
return bpoints_left_, bpoints_right_
bpoints_left = []
bpoints_right = []
bpoints_left, bpoints_right = \
split_bezier_recursion(bpoints_left, bpoints_right, bpoints, t)
bpoints_right.reverse()
return bpoints_left, bpoints_right | Uses deCasteljau's recursion to split the Bezier curve at t into two
Bezier curves of the same order. | Below is the instruction that describes the task:
### Input:
Uses deCasteljau's recursion to split the Bezier curve at t into two
Bezier curves of the same order.
### Response:
def split_bezier(bpoints, t):
"""Uses deCasteljau's recursion to split the Bezier curve at t into two
Bezier curves of the same order."""
def split_bezier_recursion(bpoints_left_, bpoints_right_, bpoints_, t_):
if len(bpoints_) == 1:
bpoints_left_.append(bpoints_[0])
bpoints_right_.append(bpoints_[0])
else:
new_points = [None]*(len(bpoints_) - 1)
bpoints_left_.append(bpoints_[0])
bpoints_right_.append(bpoints_[-1])
for i in range(len(bpoints_) - 1):
new_points[i] = (1 - t_)*bpoints_[i] + t_*bpoints_[i + 1]
bpoints_left_, bpoints_right_ = split_bezier_recursion(
bpoints_left_, bpoints_right_, new_points, t_)
return bpoints_left_, bpoints_right_
bpoints_left = []
bpoints_right = []
bpoints_left, bpoints_right = \
split_bezier_recursion(bpoints_left, bpoints_right, bpoints, t)
bpoints_right.reverse()
return bpoints_left, bpoints_right |
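A worked example, assuming split_bezier above is in scope and that control points are complex numbers (x + y*1j), as in svgpathtools:
quad = [0 + 0j, 1 + 1j, 2 + 0j]  # control points of a quadratic Bezier curve
left, right = split_bezier(quad, 0.5)
print(left)   # [0j, (0.5+0.5j), (1+0.5j)]
print(right)  # [(1+0.5j), (1.5+0.5j), (2+0j)]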
def get_parser(segmenter, **options):
"""Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
"""
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter)) | Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified. | Below is the instruction that describes the task:
### Input:
Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
### Response:
def get_parser(segmenter, **options):
"""Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
"""
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter)) |
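A small sketch of the dispatch behaviour, assuming the budou parser classes referenced above are importable; the error branch is fully determined by the code:
parser = get_parser('tinysegmenter')  # returns a TinysegmenterParser instance
try:
    get_parser('unknown-segmenter')
except ValueError as exc:
    print(exc)  # Segmenter unknown-segmenter is not supported.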
def _read_opt_type(self, kind):
"""Read option type field.
Positional arguments:
* kind -- int, option kind value
Returns:
* dict -- extracted IPv6_Opts option
Structure of option type field [RFC 791]:
Octets Bits Name Descriptions
0 0 ipv6_opts.opt.type.value Option Number
0 0 ipv6_opts.opt.type.action Action (00-11)
0 2 ipv6_opts.opt.type.change Change Flag (0/1)
"""
bin_ = bin(kind)[2:].zfill(8)
type_ = dict(
value=kind,
action=_IPv6_Opts_ACT.get(bin_[:2]),
change=True if int(bin_[2], base=2) else False,
)
return type_ | Read option type field.
Positional arguments:
* kind -- int, option kind value
Returns:
* dict -- extracted IPv6_Opts option
Structure of option type field [RFC 791]:
Octets Bits Name Descriptions
0 0 ipv6_opts.opt.type.value Option Number
0 0 ipv6_opts.opt.type.action Action (00-11)
0 2 ipv6_opts.opt.type.change Change Flag (0/1) | Below is the instruction that describes the task:
### Input:
Read option type field.
Positional arguments:
* kind -- int, option kind value
Returns:
* dict -- extracted IPv6_Opts option
Structure of option type field [RFC 791]:
Octets Bits Name Descriptions
0 0 ipv6_opts.opt.type.value Option Number
0 0 ipv6_opts.opt.type.action Action (00-11)
0 2 ipv6_opts.opt.type.change Change Flag (0/1)
### Response:
def _read_opt_type(self, kind):
"""Read option type field.
Positional arguments:
* kind -- int, option kind value
Returns:
* dict -- extracted IPv6_Opts option
Structure of option type field [RFC 791]:
Octets Bits Name Descriptions
0 0 ipv6_opts.opt.type.value Option Number
0 0 ipv6_opts.opt.type.action Action (00-11)
0 2 ipv6_opts.opt.type.change Change Flag (0/1)
"""
bin_ = bin(kind)[2:].zfill(8)
type_ = dict(
value=kind,
action=_IPv6_Opts_ACT.get(bin_[:2]),
change=True if int(bin_[2], base=2) else False,
)
return type_ |
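The option-type decoding is plain bit slicing; a standalone sketch of the same logic for one hypothetical kind value (the _IPv6_Opts_ACT action lookup table lives in the surrounding module and is omitted here):
kind = 0xC2                       # hypothetical option kind, 0b11000010
bits = bin(kind)[2:].zfill(8)
action_bits = bits[:2]            # '11' -> the two highest-order bits select the action
change_flag = bool(int(bits[2], base=2))
print(action_bits, change_flag)   # 11 False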
def find_types_removed_from_unions(
old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
"""Find types removed from unions.
Given two schemas, returns a list containing descriptions of any breaking changes
in the new_schema related to removing types from a union type.
"""
old_type_map = old_schema.type_map
new_type_map = new_schema.type_map
types_removed_from_union = []
for old_type_name, old_type in old_type_map.items():
new_type = new_type_map.get(old_type_name)
if not (is_union_type(old_type) and is_union_type(new_type)):
continue
old_type = cast(GraphQLUnionType, old_type)
new_type = cast(GraphQLUnionType, new_type)
type_names_in_new_union = {type_.name for type_ in new_type.types}
for type_ in old_type.types:
type_name = type_.name
if type_name not in type_names_in_new_union:
types_removed_from_union.append(
BreakingChange(
BreakingChangeType.TYPE_REMOVED_FROM_UNION,
f"{type_name} was removed from union type {old_type_name}.",
)
)
return types_removed_from_union | Find types removed from unions.
Given two schemas, returns a list containing descriptions of any breaking changes
in the new_schema related to removing types from a union type. | Below is the instruction that describes the task:
### Input:
Find types removed from unions.
Given two schemas, returns a list containing descriptions of any breaking changes
in the new_schema related to removing types from a union type.
### Response:
def find_types_removed_from_unions(
old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
"""Find types removed from unions.
Given two schemas, returns a list containing descriptions of any breaking changes
in the new_schema related to removing types from a union type.
"""
old_type_map = old_schema.type_map
new_type_map = new_schema.type_map
types_removed_from_union = []
for old_type_name, old_type in old_type_map.items():
new_type = new_type_map.get(old_type_name)
if not (is_union_type(old_type) and is_union_type(new_type)):
continue
old_type = cast(GraphQLUnionType, old_type)
new_type = cast(GraphQLUnionType, new_type)
type_names_in_new_union = {type_.name for type_ in new_type.types}
for type_ in old_type.types:
type_name = type_.name
if type_name not in type_names_in_new_union:
types_removed_from_union.append(
BreakingChange(
BreakingChangeType.TYPE_REMOVED_FROM_UNION,
f"{type_name} was removed from union type {old_type_name}.",
)
)
return types_removed_from_union |
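A sketch of exercising this check with graphql-core's SDL builder, assuming graphql-core is installed and the function above is in scope:
from graphql import build_schema
old_schema = build_schema('''
    type A { id: ID }
    type B { id: ID }
    union U = A | B
    type Query { u: U }
''')
new_schema = build_schema('''
    type A { id: ID }
    type B { id: ID }
    union U = A
    type Query { u: U }
''')
for change in find_types_removed_from_unions(old_schema, new_schema):
    print(change.description)  # B was removed from union type U.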
def strip_possessives(self, word):
"""
Get rid of apostrophes indicating possession.
"""
if word.endswith("'s'"):
return word[:-3]
elif word.endswith("'s"):
return word[:-2]
elif word.endswith("'"):
return word[:-1]
else:
return word | Get rid of apostrophes indicating possession. | Below is the instruction that describes the task:
### Input:
Get rid of apostrophes indicating possession.
### Response:
def strip_possessives(self, word):
"""
Get rid of apostrophes indicating possession.
"""
if word.endswith("'s'"):
return word[:-3]
elif word.endswith("'s"):
return word[:-2]
elif word.endswith("'"):
return word[:-1]
else:
return word |
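strip_possessives is written as an instance method, so a quick check needs an object to hang it on; the throwaway class below exists purely for illustration:
class _Demo:
    strip_possessives = strip_possessives  # reuse the function defined above
d = _Demo()
print(d.strip_possessives("dog's"))  # dog
print(d.strip_possessives("dogs'"))  # dogs
print(d.strip_possessives("cat"))    # cat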
def list_snapshots(self):
"""
Returns a list of all snapshots of this volume.
"""
return [snap for snap in self.manager.list_snapshots()
if snap.volume_id == self.id] | Returns a list of all snapshots of this volume. | Below is the instruction that describes the task:
### Input:
Returns a list of all snapshots of this volume.
### Response:
def list_snapshots(self):
"""
Returns a list of all snapshots of this volume.
"""
return [snap for snap in self.manager.list_snapshots()
if snap.volume_id == self.id] |
def matches(self, pattern, flags=0):
"""
Ensures :attr:`subject` matches regular expression *pattern*.
"""
if not re.match(pattern, self._subject, flags):
raise self._error_factory(_format("Expected {} to match {}", self._subject, pattern))
return ChainInspector(self._subject) | Ensures :attr:`subject` matches regular expression *pattern*. | Below is the instruction that describes the task:
### Input:
Ensures :attr:`subject` matches regular expression *pattern*.
### Response:
def matches(self, pattern, flags=0):
"""
Ensures :attr:`subject` matches regular expression *pattern*.
"""
if not re.match(pattern, self._subject, flags):
raise self._error_factory(_format("Expected {} to match {}", self._subject, pattern))
return ChainInspector(self._subject) |
def describe_config_variable(self, config_id):
"""Describe the config variable by its id."""
config = self._config_variables.get(config_id)
if config is None:
return [Error.INVALID_ARRAY_KEY, 0, 0, 0, 0]
packed_size = config.total_size
packed_size |= int(config.variable) << 15
return [0, 0, 0, config_id, packed_size] | Describe the config variable by its id. | Below is the instruction that describes the task:
### Input:
Describe the config variable by its id.
### Response:
def describe_config_variable(self, config_id):
"""Describe the config variable by its id."""
config = self._config_variables.get(config_id)
if config is None:
return [Error.INVALID_ARRAY_KEY, 0, 0, 0, 0]
packed_size = config.total_size
packed_size |= int(config.variable) << 15
return [0, 0, 0, config_id, packed_size] |
def loadPng(varNumVol, tplPngSize, strPathPng):
"""Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data.
"""
print('------Load PNGs')
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png')
# Load png files. The png data will be saved in a numpy array of the
# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The
# image loaded via Image.open may contain three values per pixel (RGB),
# but since the stimuli are black-and-white, any one of these is sufficient
# and we discard the others.
aryPngData = np.zeros((tplPngSize[0],
tplPngSize[1],
varNumVol))
for idx01 in range(0, varNumVol):
aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01]))
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 0).astype(int)
return aryPngData | Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data. | Below is the instruction that describes the task:
### Input:
Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data.
### Response:
def loadPng(varNumVol, tplPngSize, strPathPng):
"""Load PNG files.
Parameters
----------
varNumVol : float
Number of volumes, i.e. number of time points in all runs.
tplPngSize : tuple
Shape of the stimulus image (i.e. png).
strPathPng: str
Path to the folder containing the png files.
Returns
-------
aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
Stack of stimulus data.
"""
print('------Load PNGs')
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png')
# Load png files. The png data will be saved in a numpy array of the
# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The
# image loaded via Image.open may contain three values per pixel (RGB),
# but since the stimuli are black-and-white, any one of these is sufficient
# and we discard the others.
aryPngData = np.zeros((tplPngSize[0],
tplPngSize[1],
varNumVol))
for idx01 in range(0, varNumVol):
aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01]))
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 0).astype(int)
return aryPngData |
def print_output(self, per_identity_data: 'RDD') -> None:
"""
Basic helper function to write data to stdout. If window BTS was provided then the window
BTS output is written, otherwise, the streaming BTS output is written to stdout.
WARNING - For large datasets this will be extremely slow.
:param per_identity_data: Output of the `execute()` call.
"""
if not self._window_bts:
data = per_identity_data.flatMap(
lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()])
else:
# Convert to a DataFrame first so that the data can be saved as a CSV
data = per_identity_data.map(
lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder))
for row in data.collect():
print(row) | Basic helper function to write data to stdout. If window BTS was provided then the window
BTS output is written, otherwise, the streaming BTS output is written to stdout.
WARNING - For large datasets this will be extremely slow.
:param per_identity_data: Output of the `execute()` call. | Below is the instruction that describes the task:
### Input:
Basic helper function to write data to stdout. If window BTS was provided then the window
BTS output is written, otherwise, the streaming BTS output is written to stdout.
WARNING - For large datasets this will be extremely slow.
:param per_identity_data: Output of the `execute()` call.
### Response:
def print_output(self, per_identity_data: 'RDD') -> None:
"""
Basic helper function to write data to stdout. If window BTS was provided then the window
BTS output is written, otherwise, the streaming BTS output is written to stdout.
WARNING - For large datasets this will be extremely slow.
:param per_identity_data: Output of the `execute()` call.
"""
if not self._window_bts:
data = per_identity_data.flatMap(
lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()])
else:
# Convert to a DataFrame first so that the data can be saved as a CSV
data = per_identity_data.map(
lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder))
for row in data.collect():
print(row) |
def _type_string(label, case=None):
"""Shortcut for string like fields"""
return label, abstractSearch.in_string, lambda s: abstractRender.default(s, case=case), "" | Shortcut for string like fields | Below is the instruction that describes the task:
### Input:
Shortcut for string like fields
### Response:
def _type_string(label, case=None):
"""Shortcut for string like fields"""
return label, abstractSearch.in_string, lambda s: abstractRender.default(s, case=case), "" |
def serialize(self, data=None):
"""
Transforms the object into an acceptable format for transmission.
@throws ValueError
To indicate this serializer does not support the encoding of the
specified object.
"""
if data is not None and self.response is not None:
# Set the content type.
self.response['Content-Type'] = self.media_types[0]
# Write the encoded and prepared data to the response.
self.response.write(data)
# Return the serialized data.
# This has normally been transformed by a base class.
return data | Transforms the object into an acceptable format for transmission.
@throws ValueError
To indicate this serializer does not support the encoding of the
specified object. | Below is the instruction that describes the task:
### Input:
Transforms the object into an acceptable format for transmission.
@throws ValueError
To indicate this serializer does not support the encoding of the
specified object.
### Response:
def serialize(self, data=None):
"""
Transforms the object into an acceptable format for transmission.
@throws ValueError
To indicate this serializer does not support the encoding of the
specified object.
"""
if data is not None and self.response is not None:
# Set the content type.
self.response['Content-Type'] = self.media_types[0]
# Write the encoded and prepared data to the response.
self.response.write(data)
# Return the serialized data.
# This has normally been transformed by a base class.
return data |
def is_vert_aligned(c):
"""Return true if all the components of c are vertically aligned.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
:rtype: boolean
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) | Return true if all the components of c are vertically aligned.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
:rtype: boolean | Below is the instruction that describes the task:
### Input:
Return true if all the components of c are vertically aligned.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
:rtype: boolean
### Response:
def is_vert_aligned(c):
"""Return true if all the components of c are vertically aligned.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
:rtype: boolean
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) |
def from_function(cls, function):
"""Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
"""
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
# If we are running a script or are in IPython, include the source
# code in the hash.
source = inspect.getsource(function)
if sys.version_info[0] >= 3:
source = source.encode()
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
# Source code may not be available:
# e.g. Cython or Python interpreter.
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash) | Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function. | Below is the instruction that describes the task:
### Input:
Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
### Response:
def from_function(cls, function):
"""Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
"""
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
# If we are running a script or are in IPython, include the source
# code in the hash.
source = inspect.getsource(function)
if sys.version_info[0] >= 3:
source = source.encode()
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
# Source code may not be available:
# e.g. Cython or Python interpreter.
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash) |
def write_to_file(filename, content):
"""
Writes content to the given file. The file's directory will be created if needed.
:param filename: name of the output file, relative to the "destination folder" provided by the user
:param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each
line will be appended with a "\n". Lines containing None will be skipped.
"""
if not config["destdir"]:
print("{destdir} config variable not present. Did you forget to run init()?")
sys.exit(8)
abs_filename = os.path.abspath(config["destdir"] + "/" + filename)
abs_filepath = os.path.dirname(abs_filename)
if not os.path.exists(abs_filepath):
try:
os.makedirs(abs_filepath)
except OSError as e:
print("Cannot create directory " + abs_filepath)
print("Error %d: %s" % (e.errno, e.strerror))
sys.exit(6)
with codecs.open(abs_filename, "w", "utf-8") as out:
if isinstance(content, str): content = [content]
for line in content:
if line is not None:
out.write(line)
out.write("\n") | Writes content to the given file. The file's directory will be created if needed.
:param filename: name of the output file, relative to the "destination folder" provided by the user
:param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each
line will be appended with a "\n". Lines containing None will be skipped. | Below is the instruction that describes the task:
### Input:
Writes content to the given file. The file's directory will be created if needed.
:param filename: name of the output file, relative to the "destination folder" provided by the user
:param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each
line will be appended with a "\n". Lines containing None will be skipped.
### Response:
def write_to_file(filename, content):
"""
Writes content to the given file. The file's directory will be created if needed.
:param filename: name of the output file, relative to the "destination folder" provided by the user
:param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each
line will be appended with a "\n". Lines containing None will be skipped.
"""
if not config["destdir"]:
print("{destdir} config variable not present. Did you forget to run init()?")
sys.exit(8)
abs_filename = os.path.abspath(config["destdir"] + "/" + filename)
abs_filepath = os.path.dirname(abs_filename)
if not os.path.exists(abs_filepath):
try:
os.makedirs(abs_filepath)
except OSError as e:
print("Cannot create directory " + abs_filepath)
print("Error %d: %s" % (e.errno, e.strerror))
sys.exit(6)
with codecs.open(abs_filename, "w", "utf-8") as out:
if isinstance(content, str): content = [content]
for line in content:
if line is not None:
out.write(line)
out.write("\n") |
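A minimal sketch of calling write_to_file, assuming the module-level config dict used above; the destination folder is hypothetical:
config["destdir"] = "/tmp/demo-site"  # hypothetical destination folder normally set by init()
write_to_file("reports/summary.txt", ["line 1", None, "line 2"])
# /tmp/demo-site/reports/summary.txt now holds "line 1" and "line 2"; the None entry is skipped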
def namer(cls, imageUrl, pageUrl):
"""Use strip index number for image name."""
index = int(compile(r'id=(\d+)').search(pageUrl).group(1))
ext = imageUrl.rsplit('.', 1)[1]
return "SnowFlakes-%d.%s" % (index, ext) | Use strip index number for image name. | Below is the the instruction that describes the task:
### Input:
Use strip index number for image name.
### Response:
def namer(cls, imageUrl, pageUrl):
"""Use strip index number for image name."""
index = int(compile(r'id=(\d+)').search(pageUrl).group(1))
ext = imageUrl.rsplit('.', 1)[1]
return "SnowFlakes-%d.%s" % (index, ext) |
def visit(self, event):
"""
Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with another trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False
"""
to_visit = False
if event.arr_time_ut <= self.min_transfer_time+self.get_min_visit_time():
to_visit = True
else:
for ve in self.visit_events:
if (event.trip_I == ve.trip_I) and event.arr_time_ut < ve.arr_time_ut:
to_visit = True
if to_visit:
self.visit_events.append(event)
min_time = self.get_min_visit_time()
# remove any visits that are 'too old'
self.visit_events = [v for v in self.visit_events if v.arr_time_ut <= min_time+self.min_transfer_time]
return to_visit | Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with another trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False | Below is the instruction that describes the task:
### Input:
Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with another trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False
### Response:
def visit(self, event):
"""
Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with another trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False
"""
to_visit = False
if event.arr_time_ut <= self.min_transfer_time+self.get_min_visit_time():
to_visit = True
else:
for ve in self.visit_events:
if (event.trip_I == ve.trip_I) and event.arr_time_ut < ve.arr_time_ut:
to_visit = True
if to_visit:
self.visit_events.append(event)
min_time = self.get_min_visit_time()
# remove any visits that are 'too old'
self.visit_events = [v for v in self.visit_events if v.arr_time_ut <= min_time+self.min_transfer_time]
return to_visit |
def autocommit(data_access):
"""Make statements autocommit.
:param data_access: a DataAccess instance
"""
if not data_access.autocommit:
data_access.commit()
old_autocommit = data_access.autocommit
data_access.autocommit = True
try:
yield data_access
finally:
data_access.autocommit = old_autocommit | Make statements autocommit.
:param data_access: a DataAccess instance | Below is the instruction that describes the task:
### Input:
Make statements autocommit.
:param data_access: a DataAccess instance
### Response:
def autocommit(data_access):
"""Make statements autocommit.
:param data_access: a DataAccess instance
"""
if not data_access.autocommit:
data_access.commit()
old_autocommit = data_access.autocommit
data_access.autocommit = True
try:
yield data_access
finally:
data_access.autocommit = old_autocommit |
def _node_le(self, node_self, node_other):
'''_node_le
Low-level api: Return True if all descendants of one node exist in the
other node. Otherwise False. This is a recursive method.
Parameters
----------
node_self : `Element`
A node to be compared.
node_other : `Element`
Another node to be compared.
Returns
-------
bool
True if all descendants of node_self exist in node_other, otherwise
False.
'''
for x in ['tag', 'text', 'tail']:
if node_self.__getattribute__(x) != node_other.__getattribute__(x):
return False
for a in node_self.attrib:
if a not in node_other.attrib or \
node_self.attrib[a] != node_other.attrib[a]:
return False
for child in node_self.getchildren():
peers = self._get_peers(child, node_other)
if len(peers) < 1:
return False
elif len(peers) > 1:
raise ConfigError('not unique peer of node {}' \
.format(self.device.get_xpath(child)))
else:
schma_node = self.device.get_schema_node(child)
if schma_node.get('ordered-by') == 'user' and \
schma_node.get('type') == 'leaf-list' or \
schma_node.get('ordered-by') == 'user' and \
schma_node.get('type') == 'list':
elder_siblings = list(child.itersiblings(tag=child.tag,
preceding=True))
if elder_siblings:
immediate_elder_sibling = elder_siblings[0]
peers_of_immediate_elder_sibling = \
self._get_peers(immediate_elder_sibling,
node_other)
if len(peers_of_immediate_elder_sibling) < 1:
return False
elif len(peers_of_immediate_elder_sibling) > 1:
p = self.device.get_xpath(immediate_elder_sibling)
raise ConfigError('not unique peer of node {}' \
.format(p))
elder_siblings_of_peer = \
list(peers[0].itersiblings(tag=child.tag,
preceding=True))
if peers_of_immediate_elder_sibling[0] not in \
elder_siblings_of_peer:
return False
if not self._node_le(child, peers[0]):
return False
return True | _node_le
Low-level api: Return True if all descendants of one node exist in the
other node. Otherwise False. This is a recursive method.
Parameters
----------
node_self : `Element`
A node to be compared.
node_other : `Element`
Another node to be compared.
Returns
-------
bool
True if all descendants of node_self exist in node_other, otherwise
False. | Below is the instruction that describes the task:
### Input:
_node_le
Low-level api: Return True if all descendants of one node exist in the
other node. Otherwise False. This is a recursive method.
Parameters
----------
node_self : `Element`
A node to be compared.
node_other : `Element`
Another node to be compared.
Returns
-------
bool
True if all descendants of node_self exist in node_other, otherwise
False.
### Response:
def _node_le(self, node_self, node_other):
'''_node_le
Low-level api: Return True if all descendants of one node exist in the
other node. Otherwise False. This is a recursive method.
Parameters
----------
node_self : `Element`
A node to be compared.
node_other : `Element`
Another node to be compared.
Returns
-------
bool
True if all descendants of node_self exist in node_other, otherwise
False.
'''
for x in ['tag', 'text', 'tail']:
if node_self.__getattribute__(x) != node_other.__getattribute__(x):
return False
for a in node_self.attrib:
if a not in node_other.attrib or \
node_self.attrib[a] != node_other.attrib[a]:
return False
for child in node_self.getchildren():
peers = self._get_peers(child, node_other)
if len(peers) < 1:
return False
elif len(peers) > 1:
raise ConfigError('not unique peer of node {}' \
.format(self.device.get_xpath(child)))
else:
schma_node = self.device.get_schema_node(child)
if schma_node.get('ordered-by') == 'user' and \
schma_node.get('type') == 'leaf-list' or \
schma_node.get('ordered-by') == 'user' and \
schma_node.get('type') == 'list':
elder_siblings = list(child.itersiblings(tag=child.tag,
preceding=True))
if elder_siblings:
immediate_elder_sibling = elder_siblings[0]
peers_of_immediate_elder_sibling = \
self._get_peers(immediate_elder_sibling,
node_other)
if len(peers_of_immediate_elder_sibling) < 1:
return False
elif len(peers_of_immediate_elder_sibling) > 1:
p = self.device.get_xpath(immediate_elder_sibling)
raise ConfigError('not unique peer of node {}' \
.format(p))
elder_siblings_of_peer = \
list(peers[0].itersiblings(tag=child.tag,
preceding=True))
if peers_of_immediate_elder_sibling[0] not in \
elder_siblings_of_peer:
return False
if not self._node_le(child, peers[0]):
return False
return True |
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Adds a new load balancer service."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
if len(ip_record) > 0:
ip_address_id = ip_record['id']
mgr.add_service(loadbal_id,
group_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
env.fout('Load balancer service is being added!') | Adds a new load balancer service. | Below is the instruction that describes the task:
### Input:
Adds a new load balancer service.
### Response:
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Adds a new load balancer service."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
if len(ip_record) > 0:
ip_address_id = ip_record['id']
mgr.add_service(loadbal_id,
group_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
env.fout('Load balancer service is being added!') |
def update_checkplotdict_nbrlcs(
checkplotdict,
timecol, magcol, errcol,
lcformat='hat-sql',
lcformatdir=None,
verbose=True,
):
'''For all neighbors in a checkplotdict, make LCs and phased LCs.
Parameters
----------
checkplotdict : dict
This is the checkplot to process. The light curves for the neighbors to
the object here will be extracted from the stored file paths, and this
function will make plots of these time-series. If the object has 'best'
periods and epochs generated by period-finder functions in this
checkplotdict, phased light curve plots of each neighbor will be made
using these to check the effects of blending.
timecol,magcol,errcol : str
The timecol, magcol, and errcol keys used to generate this object's
checkplot. This is used to extract the correct times-series from the
neighbors' light curves.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
dict
The input checkplotdict is returned with the neighbor light curve plots
added in.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return checkplotdict
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return checkplotdict
if not ('neighbors' in checkplotdict and
checkplotdict['neighbors'] and
len(checkplotdict['neighbors']) > 0):
LOGERROR('no neighbors for %s, not updating...' %
(checkplotdict['objectid']))
return checkplotdict
# get our object's magkeys to compare to the neighbor
objmagkeys = {}
# handle diff generations of checkplots
if 'available_bands' in checkplotdict['objectinfo']:
mclist = checkplotdict['objectinfo']['available_bands']
else:
mclist = ('bmag','vmag','rmag','imag','jmag','hmag','kmag',
'sdssu','sdssg','sdssr','sdssi','sdssz')
for mc in mclist:
if (mc in checkplotdict['objectinfo'] and
checkplotdict['objectinfo'][mc] is not None and
np.isfinite(checkplotdict['objectinfo'][mc])):
objmagkeys[mc] = checkplotdict['objectinfo'][mc]
# if there are actually neighbors, go through them in order
for nbr in checkplotdict['neighbors']:
objectid, lcfpath = (nbr['objectid'],
nbr['lcfpath'])
# get the light curve
if not os.path.exists(lcfpath):
LOGERROR('objectid: %s, neighbor: %s, '
'lightcurve: %s not found, skipping...' %
(checkplotdict['objectid'], objectid, lcfpath))
continue
lcdict = readerfunc(lcfpath)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# 0. get this neighbor's magcols and get the magdiff and colordiff
# between it and the object
nbrmagkeys = {}
for mc in objmagkeys:
if (('objectinfo' in lcdict) and
(isinstance(lcdict['objectinfo'], dict)) and
(mc in lcdict['objectinfo']) and
(lcdict['objectinfo'][mc] is not None) and
(np.isfinite(lcdict['objectinfo'][mc]))):
nbrmagkeys[mc] = lcdict['objectinfo'][mc]
# now calculate the magdiffs
magdiffs = {}
for omc in objmagkeys:
if omc in nbrmagkeys:
magdiffs[omc] = objmagkeys[omc] - nbrmagkeys[omc]
# calculate colors and colordiffs
colordiffs = {}
# generate the list of colors to get
# NOTE: here, we don't really bother with new/old gen checkplots
# maybe change this later to handle arbitrary colors
for ctrio in (['bmag','vmag','bvcolor'],
['vmag','kmag','vkcolor'],
['jmag','kmag','jkcolor'],
['sdssi','jmag','ijcolor'],
['sdssg','kmag','gkcolor'],
['sdssg','sdssr','grcolor']):
m1, m2, color = ctrio
if (m1 in objmagkeys and
m2 in objmagkeys and
m1 in nbrmagkeys and
m2 in nbrmagkeys):
objcolor = objmagkeys[m1] - objmagkeys[m2]
nbrcolor = nbrmagkeys[m1] - nbrmagkeys[m2]
colordiffs[color] = objcolor - nbrcolor
# finally, add all the color and magdiff info to the nbr dict
nbr.update({'magdiffs':magdiffs,
'colordiffs':colordiffs})
#
# process magcols
#
# normalize using the special function if specified
if normfunc is not None:
lcdict = normfunc(lcdict)
try:
# get the times, mags, and errs
# dereference the columns and get them from the lcdict
if '.' in timecol:
timecolget = timecol.split('.')
else:
timecolget = [timecol]
times = _dict_get(lcdict, timecolget)
if '.' in magcol:
magcolget = magcol.split('.')
else:
magcolget = [magcol]
mags = _dict_get(lcdict, magcolget)
if '.' in errcol:
errcolget = errcol.split('.')
else:
errcolget = [errcol]
errs = _dict_get(lcdict, errcolget)
except KeyError:
LOGERROR('LC for neighbor: %s (target object: %s) does not '
'have one or more of the required columns: %s, '
'skipping...' %
(objectid, checkplotdict['objectid'],
', '.join([timecol, magcol, errcol])))
continue
# filter the input times, mags, errs; do sigclipping and normalization
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=4.0)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
stimes, smags,
magsarefluxes=magsarefluxes
)
xtimes, xmags, xerrs = ntimes, nmags, serrs
else:
xtimes, xmags, xerrs = stimes, smags, serrs
# check if this neighbor has enough finite points in its LC
# fail early if not enough light curve points
if ((xtimes is None) or (xmags is None) or (xerrs is None) or
(xtimes.size < 49) or (xmags.size < 49) or (xerrs.size < 49)):
LOGERROR("one or more of times, mags, errs appear to be None "
"after sig-clipping. are the measurements all nan? "
"can't make neighbor light curve plots "
"for target: %s, neighbor: %s, neighbor LC: %s" %
(checkplotdict['objectid'],
nbr['objectid'],
nbr['lcfpath']))
continue
#
# now we can start doing stuff if everything checks out
#
# make an unphased mag-series plot
nbrdict = _pkl_magseries_plot(xtimes,
xmags,
xerrs,
magsarefluxes=magsarefluxes)
# update the nbr
nbr.update(nbrdict)
# for each lspmethod in the checkplot, make a corresponding plot for
# this neighbor
# figure out the period finder methods present
if 'pfmethods' in checkplotdict:
pfmethods = checkplotdict['pfmethods']
else:
pfmethods = []
for cpkey in checkplotdict:
for pfkey in PFMETHODS:
if pfkey in cpkey:
pfmethods.append(pfkey)
for lspt in pfmethods:
# initialize this lspmethod entry
nbr[lspt] = {}
# we only care about the best period and its options
operiod, oepoch = (checkplotdict[lspt][0]['period'],
checkplotdict[lspt][0]['epoch'])
(ophasewrap, ophasesort, ophasebin,
ominbinelems, oplotxlim) = (
checkplotdict[lspt][0]['phasewrap'],
checkplotdict[lspt][0]['phasesort'],
checkplotdict[lspt][0]['phasebin'],
checkplotdict[lspt][0]['minbinelems'],
checkplotdict[lspt][0]['plotxlim'],
)
# make the phasedlc plot for this period
nbr = _pkl_phased_magseries_plot(
nbr,
lspt.split('-')[1], # this splits '<pfindex>-<pfmethod>'
0,
xtimes, xmags, xerrs,
operiod, oepoch,
phasewrap=ophasewrap,
phasesort=ophasesort,
phasebin=ophasebin,
minbinelems=ominbinelems,
plotxlim=oplotxlim,
magsarefluxes=magsarefluxes,
verbose=verbose,
override_pfmethod=lspt
)
# at this point, this neighbor's dict should be up to date with all
# info, magseries plot, and all phased LC plots
# return the updated checkplotdict
return checkplotdict | For all neighbors in a checkplotdict, make LCs and phased LCs.
Parameters
----------
checkplotdict : dict
This is the checkplot to process. The light curves for the neighbors to
the object here will be extracted from the stored file paths, and this
function will make plots of these time-series. If the object has 'best'
periods and epochs generated by period-finder functions in this
checkplotdict, phased light curve plots of each neighbor will be made
using these to check the effects of blending.
timecol,magcol,errcol : str
The timecol, magcol, and errcol keys used to generate this object's
checkplot. This is used to extract the correct times-series from the
neighbors' light curves.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
dict
The input checkplotdict is returned with the neighbor light curve plots
added in. | Below is the instruction that describes the task:
### Input:
For all neighbors in a checkplotdict, make LCs and phased LCs.
Parameters
----------
checkplotdict : dict
This is the checkplot to process. The light curves for the neighbors to
the object here will be extracted from the stored file paths, and this
function will make plots of these time-series. If the object has 'best'
periods and epochs generated by period-finder functions in this
checkplotdict, phased light curve plots of each neighbor will be made
using these to check the effects of blending.
timecol,magcol,errcol : str
The timecol, magcol, and errcol keys used to generate this object's
checkplot. This is used to extract the correct times-series from the
neighbors' light curves.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
dict
The input checkplotdict is returned with the neighbor light curve plots
added in.
### Response:
def update_checkplotdict_nbrlcs(
checkplotdict,
timecol, magcol, errcol,
lcformat='hat-sql',
lcformatdir=None,
verbose=True,
):
'''For all neighbors in a checkplotdict, make LCs and phased LCs.
Parameters
----------
checkplotdict : dict
This is the checkplot to process. The light curves for the neighbors to
the object here will be extracted from the stored file paths, and this
function will make plots of these time-series. If the object has 'best'
periods and epochs generated by period-finder functions in this
checkplotdict, phased light curve plots of each neighbor will be made
using these to check the effects of blending.
timecol,magcol,errcol : str
The timecol, magcol, and errcol keys used to generate this object's
checkplot. This is used to extract the correct times-series from the
neighbors' light curves.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
dict
The input checkplotdict is returned with the neighbor light curve plots
added in.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return checkplotdict
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return checkplotdict
if not ('neighbors' in checkplotdict and
checkplotdict['neighbors'] and
len(checkplotdict['neighbors']) > 0):
LOGERROR('no neighbors for %s, not updating...' %
(checkplotdict['objectid']))
return checkplotdict
# get our object's magkeys to compare to the neighbor
objmagkeys = {}
# handle diff generations of checkplots
if 'available_bands' in checkplotdict['objectinfo']:
mclist = checkplotdict['objectinfo']['available_bands']
else:
mclist = ('bmag','vmag','rmag','imag','jmag','hmag','kmag',
'sdssu','sdssg','sdssr','sdssi','sdssz')
for mc in mclist:
if (mc in checkplotdict['objectinfo'] and
checkplotdict['objectinfo'][mc] is not None and
np.isfinite(checkplotdict['objectinfo'][mc])):
objmagkeys[mc] = checkplotdict['objectinfo'][mc]
# if there are actually neighbors, go through them in order
for nbr in checkplotdict['neighbors']:
objectid, lcfpath = (nbr['objectid'],
nbr['lcfpath'])
# get the light curve
if not os.path.exists(lcfpath):
LOGERROR('objectid: %s, neighbor: %s, '
'lightcurve: %s not found, skipping...' %
(checkplotdict['objectid'], objectid, lcfpath))
continue
lcdict = readerfunc(lcfpath)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# 0. get this neighbor's magcols and get the magdiff and colordiff
# between it and the object
nbrmagkeys = {}
for mc in objmagkeys:
if (('objectinfo' in lcdict) and
(isinstance(lcdict['objectinfo'], dict)) and
(mc in lcdict['objectinfo']) and
(lcdict['objectinfo'][mc] is not None) and
(np.isfinite(lcdict['objectinfo'][mc]))):
nbrmagkeys[mc] = lcdict['objectinfo'][mc]
# now calculate the magdiffs
magdiffs = {}
for omc in objmagkeys:
if omc in nbrmagkeys:
magdiffs[omc] = objmagkeys[omc] - nbrmagkeys[omc]
# calculate colors and colordiffs
colordiffs = {}
# generate the list of colors to get
# NOTE: here, we don't really bother with new/old gen checkplots
# maybe change this later to handle arbitrary colors
for ctrio in (['bmag','vmag','bvcolor'],
['vmag','kmag','vkcolor'],
['jmag','kmag','jkcolor'],
['sdssi','jmag','ijcolor'],
['sdssg','kmag','gkcolor'],
['sdssg','sdssr','grcolor']):
m1, m2, color = ctrio
if (m1 in objmagkeys and
m2 in objmagkeys and
m1 in nbrmagkeys and
m2 in nbrmagkeys):
objcolor = objmagkeys[m1] - objmagkeys[m2]
nbrcolor = nbrmagkeys[m1] - nbrmagkeys[m2]
colordiffs[color] = objcolor - nbrcolor
# finally, add all the color and magdiff info to the nbr dict
nbr.update({'magdiffs':magdiffs,
'colordiffs':colordiffs})
#
# process magcols
#
# normalize using the special function if specified
if normfunc is not None:
lcdict = normfunc(lcdict)
try:
# get the times, mags, and errs
# dereference the columns and get them from the lcdict
if '.' in timecol:
timecolget = timecol.split('.')
else:
timecolget = [timecol]
times = _dict_get(lcdict, timecolget)
if '.' in magcol:
magcolget = magcol.split('.')
else:
magcolget = [magcol]
mags = _dict_get(lcdict, magcolget)
if '.' in errcol:
errcolget = errcol.split('.')
else:
errcolget = [errcol]
errs = _dict_get(lcdict, errcolget)
except KeyError:
LOGERROR('LC for neighbor: %s (target object: %s) does not '
'have one or more of the required columns: %s, '
'skipping...' %
(objectid, checkplotdict['objectid'],
', '.join([timecol, magcol, errcol])))
continue
# filter the input times, mags, errs; do sigclipping and normalization
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=4.0)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
stimes, smags,
magsarefluxes=magsarefluxes
)
xtimes, xmags, xerrs = ntimes, nmags, serrs
else:
xtimes, xmags, xerrs = stimes, smags, serrs
# check if this neighbor has enough finite points in its LC
# fail early if not enough light curve points
if ((xtimes is None) or (xmags is None) or (xerrs is None) or
(xtimes.size < 49) or (xmags.size < 49) or (xerrs.size < 49)):
LOGERROR("one or more of times, mags, errs appear to be None "
"after sig-clipping. are the measurements all nan? "
"can't make neighbor light curve plots "
"for target: %s, neighbor: %s, neighbor LC: %s" %
(checkplotdict['objectid'],
nbr['objectid'],
nbr['lcfpath']))
continue
#
# now we can start doing stuff if everything checks out
#
# make an unphased mag-series plot
nbrdict = _pkl_magseries_plot(xtimes,
xmags,
xerrs,
magsarefluxes=magsarefluxes)
# update the nbr
nbr.update(nbrdict)
# for each lspmethod in the checkplot, make a corresponding plot for
# this neighbor
# figure out the period finder methods present
if 'pfmethods' in checkplotdict:
pfmethods = checkplotdict['pfmethods']
else:
pfmethods = []
for cpkey in checkplotdict:
for pfkey in PFMETHODS:
if pfkey in cpkey:
pfmethods.append(pfkey)
for lspt in pfmethods:
# initialize this lspmethod entry
nbr[lspt] = {}
# we only care about the best period and its options
operiod, oepoch = (checkplotdict[lspt][0]['period'],
checkplotdict[lspt][0]['epoch'])
(ophasewrap, ophasesort, ophasebin,
ominbinelems, oplotxlim) = (
checkplotdict[lspt][0]['phasewrap'],
checkplotdict[lspt][0]['phasesort'],
checkplotdict[lspt][0]['phasebin'],
checkplotdict[lspt][0]['minbinelems'],
checkplotdict[lspt][0]['plotxlim'],
)
# make the phasedlc plot for this period
nbr = _pkl_phased_magseries_plot(
nbr,
lspt.split('-')[1], # this splits '<pfindex>-<pfmethod>'
0,
xtimes, xmags, xerrs,
operiod, oepoch,
phasewrap=ophasewrap,
phasesort=ophasesort,
phasebin=ophasebin,
minbinelems=ominbinelems,
plotxlim=oplotxlim,
magsarefluxes=magsarefluxes,
verbose=verbose,
override_pfmethod=lspt
)
# at this point, this neighbor's dict should be up to date with all
# info, magseries plot, and all phased LC plots
# return the updated checkplotdict
return checkplotdict |
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'):
""" Parse a field object from yaml into a sqlalchemy expression """
# An aggregation is a callable that takes a single field expression
# None will perform no aggregation
aggregation_lookup = {
'sum': func.sum,
'min': func.min,
'max': func.max,
'avg': func.avg,
'count': func.count,
'count_distinct': lambda fld: func.count(distinct(fld)),
'month': lambda fld: func.date_trunc('month', fld),
'week': lambda fld: func.date_trunc('week', fld),
'year': lambda fld: func.date_trunc('year', fld),
'quarter': lambda fld: func.date_trunc('quarter', fld),
'age': lambda fld: func.date_part('year', func.age(fld)),
None: lambda fld: fld,
}
# Ensure that the dictionary contains:
# {
# 'value': str,
# 'aggregation': str|None,
# 'condition': dict|None
# }
if isinstance(fld, basestring):
fld = {
'value': fld,
}
if not isinstance(fld, dict):
raise BadIngredient('fields must be a string or a dict')
if 'value' not in fld:
raise BadIngredient('fields must contain a value')
if not isinstance(fld['value'], basestring):
raise BadIngredient('field value must be a string')
# Ensure a condition
if 'condition' in fld:
if not isinstance(fld['condition'], dict) and \
not fld['condition'] is None:
raise BadIngredient('condition must be null or an object')
else:
fld['condition'] = None
# Ensure an aggregation
initial_aggregation = default_aggregation if aggregated else None
if 'aggregation' in fld:
if not isinstance(fld['aggregation'], basestring) and \
not fld['aggregation'] is None:
            raise BadIngredient('aggregation must be null or a string')
if fld['aggregation'] is None:
fld['aggregation'] = initial_aggregation
else:
fld['aggregation'] = initial_aggregation
value = fld.get('value', None)
if value is None:
raise BadIngredient('field value is not defined')
field_parts = []
for word in tokenize(value):
if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'):
field_parts.append(word)
else:
field_parts.append(find_column(selectable, word))
    if len(field_parts) == 0:  # nothing was parsed from the field value
raise BadIngredient('field is not defined.')
# Fields should have an odd number of parts
if len(field_parts) % 2 != 1:
raise BadIngredient('field does not have the right number of parts')
field = field_parts[0]
if len(field_parts) > 1:
# if we need to add and subtract from the field
# join the field parts into pairs, for instance if field parts is
# [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]
# we will get two pairs here
# [('MINUS', MyTable.second), ('PLUS', MyTable.third)]
for operator, other_field in zip(field_parts[1::2], field_parts[2::2]):
if operator == 'PLUS':
field = field.__add__(other_field)
elif operator == 'MINUS':
field = field.__sub__(other_field)
elif operator == 'DIVIDE':
field = field.__div__(other_field)
elif operator == 'MULTIPLY':
field = field.__mul__(other_field)
else:
raise BadIngredient('Unknown operator {}'.format(operator))
# Handle the aggregator
aggr = fld.get('aggregation', 'sum')
if aggr is not None:
aggr = aggr.strip()
if aggr not in aggregation_lookup:
raise BadIngredient('unknown aggregation {}'.format(aggr))
aggregator = aggregation_lookup[aggr]
condition = parse_condition(
fld.get('condition', None),
selectable,
aggregated=False,
default_aggregation=default_aggregation
)
if condition is not None:
field = case([(condition, field)])
    return aggregator(field) | Parse a field object from yaml into a sqlalchemy expression | Below is the instruction that describes the task:
### Input:
Parse a field object from yaml into a sqlalchemy expression
### Response:
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'):
""" Parse a field object from yaml into a sqlalchemy expression """
# An aggregation is a callable that takes a single field expression
# None will perform no aggregation
aggregation_lookup = {
'sum': func.sum,
'min': func.min,
'max': func.max,
'avg': func.avg,
'count': func.count,
'count_distinct': lambda fld: func.count(distinct(fld)),
'month': lambda fld: func.date_trunc('month', fld),
'week': lambda fld: func.date_trunc('week', fld),
'year': lambda fld: func.date_trunc('year', fld),
'quarter': lambda fld: func.date_trunc('quarter', fld),
'age': lambda fld: func.date_part('year', func.age(fld)),
None: lambda fld: fld,
}
# Ensure that the dictionary contains:
# {
# 'value': str,
# 'aggregation': str|None,
# 'condition': dict|None
# }
if isinstance(fld, basestring):
fld = {
'value': fld,
}
if not isinstance(fld, dict):
raise BadIngredient('fields must be a string or a dict')
if 'value' not in fld:
raise BadIngredient('fields must contain a value')
if not isinstance(fld['value'], basestring):
raise BadIngredient('field value must be a string')
# Ensure a condition
if 'condition' in fld:
if not isinstance(fld['condition'], dict) and \
not fld['condition'] is None:
raise BadIngredient('condition must be null or an object')
else:
fld['condition'] = None
# Ensure an aggregation
initial_aggregation = default_aggregation if aggregated else None
if 'aggregation' in fld:
if not isinstance(fld['aggregation'], basestring) and \
not fld['aggregation'] is None:
            raise BadIngredient('aggregation must be null or a string')
if fld['aggregation'] is None:
fld['aggregation'] = initial_aggregation
else:
fld['aggregation'] = initial_aggregation
value = fld.get('value', None)
if value is None:
raise BadIngredient('field value is not defined')
field_parts = []
for word in tokenize(value):
if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'):
field_parts.append(word)
else:
field_parts.append(find_column(selectable, word))
    if len(field_parts) == 0:  # nothing was parsed from the field value
raise BadIngredient('field is not defined.')
# Fields should have an odd number of parts
if len(field_parts) % 2 != 1:
raise BadIngredient('field does not have the right number of parts')
field = field_parts[0]
if len(field_parts) > 1:
# if we need to add and subtract from the field
# join the field parts into pairs, for instance if field parts is
# [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]
# we will get two pairs here
# [('MINUS', MyTable.second), ('PLUS', MyTable.third)]
for operator, other_field in zip(field_parts[1::2], field_parts[2::2]):
if operator == 'PLUS':
field = field.__add__(other_field)
elif operator == 'MINUS':
field = field.__sub__(other_field)
elif operator == 'DIVIDE':
field = field.__div__(other_field)
elif operator == 'MULTIPLY':
field = field.__mul__(other_field)
else:
raise BadIngredient('Unknown operator {}'.format(operator))
# Handle the aggregator
aggr = fld.get('aggregation', 'sum')
if aggr is not None:
aggr = aggr.strip()
if aggr not in aggregation_lookup:
raise BadIngredient('unknown aggregation {}'.format(aggr))
aggregator = aggregation_lookup[aggr]
condition = parse_condition(
fld.get('condition', None),
selectable,
aggregated=False,
default_aggregation=default_aggregation
)
if condition is not None:
field = case([(condition, field)])
return aggregator(field) |
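A minimal sketch of the two field shapes this parser accepts; it only mirrors the normalisation step above, since calling parse_field itself needs a real SQLAlchemy selectable plus the tokenize/find_column/parse_condition helpers, and the column names here are invented.
shorthand_field = 'revenue'                  # a bare string becomes {'value': 'revenue'}
explicit_field = {
    'value': 'revenue MINUS refunds',        # arithmetic between two columns
    'aggregation': 'sum',
    'condition': None,
}
for fld in (shorthand_field, explicit_field):
    if isinstance(fld, str):
        fld = {'value': fld}
    fld.setdefault('aggregation', 'sum')     # the default_aggregation fallback
    fld.setdefault('condition', None)
    print(fld)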
def stop(opts, bot, event):
"""Usage: stop [--name=<name>] [--notify=<slack_username>]
Stop a timer.
_name_ works the same as for `start`.
If given _slack_username_, reply with an at-mention to the given user.
"""
name = opts['--name']
slack_username = opts['--notify']
now = datetime.datetime.now()
delta = now - bot.timers.pop(name)
response = bot.stop_fmt.format(delta)
if slack_username:
mention = ''
# The slack api (provided by https://github.com/os/slacker) is available on all bots.
users = bot.slack.users.list().body['members']
for user in users:
if user['name'] == slack_username:
mention = "<@%s>" % user['id']
break
response = "%s: %s" % (mention, response)
return response | Usage: stop [--name=<name>] [--notify=<slack_username>]
Stop a timer.
_name_ works the same as for `start`.
If given _slack_username_, reply with an at-mention to the given user. | Below is the instruction that describes the task:
### Input:
Usage: stop [--name=<name>] [--notify=<slack_username>]
Stop a timer.
_name_ works the same as for `start`.
If given _slack_username_, reply with an at-mention to the given user.
### Response:
def stop(opts, bot, event):
"""Usage: stop [--name=<name>] [--notify=<slack_username>]
Stop a timer.
_name_ works the same as for `start`.
If given _slack_username_, reply with an at-mention to the given user.
"""
name = opts['--name']
slack_username = opts['--notify']
now = datetime.datetime.now()
delta = now - bot.timers.pop(name)
response = bot.stop_fmt.format(delta)
if slack_username:
mention = ''
# The slack api (provided by https://github.com/os/slacker) is available on all bots.
users = bot.slack.users.list().body['members']
for user in users:
if user['name'] == slack_username:
mention = "<@%s>" % user['id']
break
response = "%s: %s" % (mention, response)
return response |
def _gpdfit(x):
"""Estimate the parameters for the Generalized Pareto Distribution (GPD).
Empirical Bayes estimate for the parameters of the generalized Pareto
distribution given the data.
Parameters
----------
x : array
sorted 1D data array
Returns
-------
k : float
estimated shape parameter
sigma : float
estimated scale parameter
"""
prior_bs = 3
prior_k = 10
len_x = len(x)
m_est = 30 + int(len_x ** 0.5)
b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5))
b_ary /= prior_bs * x[int(len_x / 4 + 0.5) - 1]
b_ary += 1 / x[-1]
k_ary = np.log1p(-b_ary[:, None] * x).mean(axis=1) # pylint: disable=no-member
len_scale = len_x * (np.log(-(b_ary / k_ary)) - k_ary - 1)
weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1)
# remove negligible weights
real_idxs = weights >= 10 * np.finfo(float).eps
if not np.all(real_idxs):
weights = weights[real_idxs]
b_ary = b_ary[real_idxs]
# normalise weights
weights /= weights.sum()
# posterior mean for b
b_post = np.sum(b_ary * weights)
# estimate for k
k_post = np.log1p(-b_post * x).mean() # pylint: disable=invalid-unary-operand-type,no-member
# add prior for k_post
k_post = (len_x * k_post + prior_k * 0.5) / (len_x + prior_k)
sigma = -k_post / b_post
return k_post, sigma | Estimate the parameters for the Generalized Pareto Distribution (GPD).
Empirical Bayes estimate for the parameters of the generalized Pareto
distribution given the data.
Parameters
----------
x : array
sorted 1D data array
Returns
-------
k : float
estimated shape parameter
sigma : float
    estimated scale parameter | Below is the instruction that describes the task:
### Input:
Estimate the parameters for the Generalized Pareto Distribution (GPD).
Empirical Bayes estimate for the parameters of the generalized Pareto
distribution given the data.
Parameters
----------
x : array
sorted 1D data array
Returns
-------
k : float
estimated shape parameter
sigma : float
estimated scale parameter
### Response:
def _gpdfit(x):
"""Estimate the parameters for the Generalized Pareto Distribution (GPD).
Empirical Bayes estimate for the parameters of the generalized Pareto
distribution given the data.
Parameters
----------
x : array
sorted 1D data array
Returns
-------
k : float
estimated shape parameter
sigma : float
estimated scale parameter
"""
prior_bs = 3
prior_k = 10
len_x = len(x)
m_est = 30 + int(len_x ** 0.5)
b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5))
b_ary /= prior_bs * x[int(len_x / 4 + 0.5) - 1]
b_ary += 1 / x[-1]
k_ary = np.log1p(-b_ary[:, None] * x).mean(axis=1) # pylint: disable=no-member
len_scale = len_x * (np.log(-(b_ary / k_ary)) - k_ary - 1)
weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1)
# remove negligible weights
real_idxs = weights >= 10 * np.finfo(float).eps
if not np.all(real_idxs):
weights = weights[real_idxs]
b_ary = b_ary[real_idxs]
# normalise weights
weights /= weights.sum()
# posterior mean for b
b_post = np.sum(b_ary * weights)
# estimate for k
k_post = np.log1p(-b_post * x).mean() # pylint: disable=invalid-unary-operand-type,no-member
# add prior for k_post
k_post = (len_x * k_post + prior_k * 0.5) / (len_x + prior_k)
sigma = -k_post / b_post
return k_post, sigma |
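A usage sketch, assuming _gpdfit above is in scope and numpy is installed; the simulated tail sample is illustrative and, as the docstring requires, sorted ascending.
import numpy as np
rng = np.random.default_rng(42)
tail = np.sort(rng.pareto(3.0, size=500))   # stand-in for a sorted 1D tail sample
k_hat, sigma_hat = _gpdfit(tail)
print('shape k =', k_hat, 'scale sigma =', sigma_hat)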
def l2traceroute_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute = ET.Element("l2traceroute")
config = l2traceroute
input = ET.SubElement(l2traceroute, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def l2traceroute_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute = ET.Element("l2traceroute")
config = l2traceroute
input = ET.SubElement(l2traceroute, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def to_grpc_address(target: str) -> str:
"""Converts a standard gRPC target to one that is supported by grpcio
:param target: the server address.
:returns: the converted address.
"""
u = urlparse(target)
if u.scheme == "dns":
raise ValueError("dns:// not supported")
if u.scheme == "unix":
return "unix:"+u.path
return u.netloc | Converts a standard gRPC target to one that is supported by grpcio
:param target: the server address.
    :returns: the converted address. | Below is the instruction that describes the task:
### Input:
Converts a standard gRPC target to one that is supported by grpcio
:param target: the server address.
:returns: the converted address.
### Response:
def to_grpc_address(target: str) -> str:
"""Converts a standard gRPC target to one that is supported by grpcio
:param target: the server address.
:returns: the converted address.
"""
u = urlparse(target)
if u.scheme == "dns":
raise ValueError("dns:// not supported")
if u.scheme == "unix":
return "unix:"+u.path
return u.netloc |
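A quick usage sketch, assuming to_grpc_address above is importable; the addresses are examples only.
print(to_grpc_address('unix:///tmp/grpc.sock'))    # -> 'unix:/tmp/grpc.sock'
print(to_grpc_address('http://localhost:50051'))   # -> 'localhost:50051'
try:
    to_grpc_address('dns:///example.com:443')
except ValueError as exc:
    print(exc)                                     # dns:// not supported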
def download(ctx):
"""Download code of the current project."""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
PolyaxonClient().project.download_repo(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download code for project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
    Printer.print_success('Files downloaded.') | Download code of the current project. | Below is the instruction that describes the task:
### Input:
Download code of the current project.
### Response:
def download(ctx):
"""Download code of the current project."""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
PolyaxonClient().project.download_repo(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download code for project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success('Files downloaded.') |
def passed(self):
"""
Return all the passing testcases
:return:
"""
return [test for test in self.all() if not test.failed() and not test.skipped()] | Return all the passing testcases
    :return: | Below is the instruction that describes the task:
### Input:
Return all the passing testcases
:return:
### Response:
def passed(self):
"""
Return all the passing testcases
:return:
"""
return [test for test in self.all() if not test.failed() and not test.skipped()] |
def user_identity_show(self, user_id, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/user_identities#show-identity"
api_path = "/api/v2/users/{user_id}/identities/{id}.json"
api_path = api_path.format(user_id=user_id, id=id)
    return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/user_identities#show-identity | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/user_identities#show-identity
### Response:
def user_identity_show(self, user_id, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/user_identities#show-identity"
api_path = "/api/v2/users/{user_id}/identities/{id}.json"
api_path = api_path.format(user_id=user_id, id=id)
return self.call(api_path, **kwargs) |
def bell(self, percent = 0, onerror = None):
"""Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent) | Ring the bell at the volume percent which is relative the base
    volume. See XBell(3X11). | Below is the instruction that describes the task:
### Input:
Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11).
### Response:
def bell(self, percent = 0, onerror = None):
"""Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent) |
def _validate_and_parse(self, batch_object):
"""
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
"""
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs | Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
            If a database object cannot be found an ObjectDoesNotExist is raised. | Below is the instruction that describes the task:
### Input:
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
### Response:
def _validate_and_parse(self, batch_object):
"""
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
"""
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs |
def rgb_to_websafe(r, g=None, b=None, alt=False):
"""Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
        The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0)
'(1, 0.6, 0)'
"""
if type(r) in [list,tuple]:
r, g, b = r
websafeComponent = _websafe_component
return tuple((websafeComponent(v, alt) for v in (r, g, b))) | Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
    The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0)
    '(1, 0.6, 0)' | Below is the instruction that describes the task:
### Input:
Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
    The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0)
'(1, 0.6, 0)'
### Response:
def rgb_to_websafe(r, g=None, b=None, alt=False):
"""Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
        The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0)
'(1, 0.6, 0)'
"""
if type(r) in [list,tuple]:
r, g, b = r
websafeComponent = _websafe_component
return tuple((websafeComponent(v, alt) for v in (r, g, b))) |
def add_device_callback(self, devices, callback):
"""Register a device callback."""
if not devices:
return False
if not isinstance(devices, (tuple, list)):
devices = [devices]
for device in devices:
# Device may be a device_id
device_id = device
# If they gave us an actual device, get that devices ID
if isinstance(device, AbodeDevice):
device_id = device.device_id
# Validate the device is valid
if not self._abode.get_device(device_id):
raise AbodeException((ERROR.EVENT_DEVICE_INVALID))
_LOGGER.debug(
"Subscribing to updated for device_id: %s", device_id)
self._device_callbacks[device_id].append((callback))
        return True | Register a device callback. | Below is the instruction that describes the task:
### Input:
Register a device callback.
### Response:
def add_device_callback(self, devices, callback):
"""Register a device callback."""
if not devices:
return False
if not isinstance(devices, (tuple, list)):
devices = [devices]
for device in devices:
# Device may be a device_id
device_id = device
# If they gave us an actual device, get that devices ID
if isinstance(device, AbodeDevice):
device_id = device.device_id
# Validate the device is valid
if not self._abode.get_device(device_id):
raise AbodeException((ERROR.EVENT_DEVICE_INVALID))
_LOGGER.debug(
"Subscribing to updated for device_id: %s", device_id)
self._device_callbacks[device_id].append((callback))
return True |
def calf(self, spec):
"""
Typical safe usage is this, which sets everything that could be
problematic up.
Requires the filename which everything will be produced to.
"""
if not isinstance(spec, Spec):
raise TypeError('spec must be of type Spec')
if not spec.get(BUILD_DIR):
tempdir = realpath(mkdtemp())
spec.advise(CLEANUP, shutil.rmtree, tempdir)
build_dir = join(tempdir, 'build')
mkdir(build_dir)
spec[BUILD_DIR] = build_dir
else:
build_dir = self.realpath(spec, BUILD_DIR)
if not isdir(build_dir):
logger.error("build_dir '%s' is not a directory", build_dir)
raise_os_error(errno.ENOTDIR, build_dir)
self.realpath(spec, EXPORT_TARGET)
# Finally, handle setup which may set up the deferred advices,
# as all the toolchain (and its runtime and/or its parent
# runtime and related toolchains) spec advises should have been
# done.
spec.handle(SETUP)
try:
process = ('prepare', 'compile', 'assemble', 'link', 'finalize')
for p in process:
spec.handle('before_' + p)
getattr(self, p)(spec)
spec.handle('after_' + p)
spec.handle(SUCCESS)
except ToolchainCancel:
# quietly handle the issue and move on out of here.
pass
finally:
spec.handle(CLEANUP) | Typical safe usage is this, which sets everything that could be
problematic up.
        Requires the filename which everything will be produced to. | Below is the instruction that describes the task:
### Input:
Typical safe usage is this, which sets everything that could be
problematic up.
Requires the filename which everything will be produced to.
### Response:
def calf(self, spec):
"""
Typical safe usage is this, which sets everything that could be
problematic up.
Requires the filename which everything will be produced to.
"""
if not isinstance(spec, Spec):
raise TypeError('spec must be of type Spec')
if not spec.get(BUILD_DIR):
tempdir = realpath(mkdtemp())
spec.advise(CLEANUP, shutil.rmtree, tempdir)
build_dir = join(tempdir, 'build')
mkdir(build_dir)
spec[BUILD_DIR] = build_dir
else:
build_dir = self.realpath(spec, BUILD_DIR)
if not isdir(build_dir):
logger.error("build_dir '%s' is not a directory", build_dir)
raise_os_error(errno.ENOTDIR, build_dir)
self.realpath(spec, EXPORT_TARGET)
# Finally, handle setup which may set up the deferred advices,
# as all the toolchain (and its runtime and/or its parent
# runtime and related toolchains) spec advises should have been
# done.
spec.handle(SETUP)
try:
process = ('prepare', 'compile', 'assemble', 'link', 'finalize')
for p in process:
spec.handle('before_' + p)
getattr(self, p)(spec)
spec.handle('after_' + p)
spec.handle(SUCCESS)
except ToolchainCancel:
# quietly handle the issue and move on out of here.
pass
finally:
spec.handle(CLEANUP) |
def hops(node1, node2):
"""returns # of hops it takes to get from node1 to node2, 1 means they're on the same link"""
if node1 == node2:
return 0
elif set(node1.interfaces) & set(node2.interfaces):
# they share a common interface
return 1
else:
# Not implemented yet, graphsearch to find min hops between two nodes
        return 0 | returns # of hops it takes to get from node1 to node2, 1 means they're on the same link | Below is the instruction that describes the task:
### Input:
returns # of hops it takes to get from node1 to node2, 1 means they're on the same link
### Response:
def hops(node1, node2):
"""returns # of hops it takes to get from node1 to node2, 1 means they're on the same link"""
if node1 == node2:
return 0
elif set(node1.interfaces) & set(node2.interfaces):
# they share a common interface
return 1
else:
# Not implemented yet, graphsearch to find min hops between two nodes
return 0 |
def qnh_estimate(self):
'''estimate QNH pressure from GPS altitude and scaled pressure'''
alt_gps = self.master.field('GPS_RAW_INT', 'alt', 0) * 0.001
pressure2 = self.master.field('SCALED_PRESSURE', 'press_abs', 0)
ground_temp = self.get_mav_param('GND_TEMP', 21)
temp = ground_temp + 273.15
pressure1 = pressure2 / math.exp(math.log(1.0 - (alt_gps / (153.8462 * temp))) / 0.190259)
        return pressure1 | estimate QNH pressure from GPS altitude and scaled pressure | Below is the instruction that describes the task:
### Input:
estimate QNH pressure from GPS altitude and scaled pressure
### Response:
def qnh_estimate(self):
'''estimate QNH pressure from GPS altitude and scaled pressure'''
alt_gps = self.master.field('GPS_RAW_INT', 'alt', 0) * 0.001
pressure2 = self.master.field('SCALED_PRESSURE', 'press_abs', 0)
ground_temp = self.get_mav_param('GND_TEMP', 21)
temp = ground_temp + 273.15
pressure1 = pressure2 / math.exp(math.log(1.0 - (alt_gps / (153.8462 * temp))) / 0.190259)
return pressure1 |
def device_statistics(fritz, args):
"""Command that prints the device statistics."""
stats = fritz.get_device_statistics(args.ain)
    print(stats) | Command that prints the device statistics. | Below is the instruction that describes the task:
### Input:
Command that prints the device statistics.
### Response:
def device_statistics(fritz, args):
"""Command that prints the device statistics."""
stats = fritz.get_device_statistics(args.ain)
print(stats) |
def get_role_by_code(role_code,**kwargs):
"""
Get a role by its code
"""
try:
role = db.DBSession.query(Role).filter(Role.code==role_code).one()
return role
except NoResultFound:
        raise ResourceNotFoundError("Role not found (role_code={})".format(role_code)) | Get a role by its code | Below is the instruction that describes the task:
### Input:
Get a role by its code
### Response:
def get_role_by_code(role_code,**kwargs):
"""
Get a role by its code
"""
try:
role = db.DBSession.query(Role).filter(Role.code==role_code).one()
return role
except NoResultFound:
raise ResourceNotFoundError("Role not found (role_code={})".format(role_code)) |
def is_text_visible(driver, text, selector, by=By.CSS_SELECTOR):
"""
Returns whether the specified text is visible in the specified selector.
@Params
driver - the webdriver object (required)
text - the text string to search for
selector - the locator that is used (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
@Returns
Boolean (is text visible)
"""
try:
element = driver.find_element(by=by, value=selector)
return element.is_displayed() and text in element.text
except Exception:
return False | Returns whether the specified text is visible in the specified selector.
@Params
driver - the webdriver object (required)
text - the text string to search for
selector - the locator that is used (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
@Returns
    Boolean (is text visible) | Below is the instruction that describes the task:
### Input:
Returns whether the specified text is visible in the specified selector.
@Params
driver - the webdriver object (required)
text - the text string to search for
selector - the locator that is used (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
@Returns
Boolean (is text visible)
### Response:
def is_text_visible(driver, text, selector, by=By.CSS_SELECTOR):
"""
Returns whether the specified text is visible in the specified selector.
@Params
driver - the webdriver object (required)
text - the text string to search for
selector - the locator that is used (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
@Returns
Boolean (is text visible)
"""
try:
element = driver.find_element(by=by, value=selector)
return element.is_displayed() and text in element.text
except Exception:
return False |
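A hypothetical usage sketch; it assumes a working Selenium WebDriver and a reachable page, so it will not run without a browser installed.
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
driver.get('https://example.com')
print(is_text_visible(driver, 'Example Domain', 'h1', by=By.CSS_SELECTOR))
driver.quit()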
def get_active_for(self, user, user_agent=_MARK, ip_address=_MARK):
"""Return last known session for given user.
:param user: user session
:type user: `abilian.core.models.subjects.User`
:param user_agent: *exact* user agent string to lookup, or `None` to have
user_agent extracted from request object. If not provided at all, no
filtering on user_agent.
:type user_agent: string or None, or absent
:param ip_address: client IP, or `None` to have ip_address extracted from
request object (requires header 'X-Forwarded-For'). If not provided at
all, no filtering on ip_address.
:type ip_address: string or None, or absent
:rtype: `LoginSession` or `None` if no session is found.
"""
conditions = [LoginSession.user == user]
if user_agent is not _MARK:
if user_agent is None:
user_agent = request.environ.get("HTTP_USER_AGENT", "")
conditions.append(LoginSession.user_agent == user_agent)
if ip_address is not _MARK:
if ip_address is None:
ip_addresses = request.headers.getlist("X-Forwarded-For")
ip_address = ip_addresses[0] if ip_addresses else request.remote_addr
conditions.append(LoginSession.ip_address == ip_address)
session = (
LoginSession.query.filter(*conditions)
.order_by(LoginSession.id.desc())
.first()
)
return session | Return last known session for given user.
:param user: user session
:type user: `abilian.core.models.subjects.User`
:param user_agent: *exact* user agent string to lookup, or `None` to have
user_agent extracted from request object. If not provided at all, no
filtering on user_agent.
:type user_agent: string or None, or absent
:param ip_address: client IP, or `None` to have ip_address extracted from
request object (requires header 'X-Forwarded-For'). If not provided at
all, no filtering on ip_address.
:type ip_address: string or None, or absent
    :rtype: `LoginSession` or `None` if no session is found. | Below is the instruction that describes the task:
### Input:
Return last known session for given user.
:param user: user session
:type user: `abilian.core.models.subjects.User`
:param user_agent: *exact* user agent string to lookup, or `None` to have
user_agent extracted from request object. If not provided at all, no
filtering on user_agent.
:type user_agent: string or None, or absent
:param ip_address: client IP, or `None` to have ip_address extracted from
request object (requires header 'X-Forwarded-For'). If not provided at
all, no filtering on ip_address.
:type ip_address: string or None, or absent
:rtype: `LoginSession` or `None` if no session is found.
### Response:
def get_active_for(self, user, user_agent=_MARK, ip_address=_MARK):
"""Return last known session for given user.
:param user: user session
:type user: `abilian.core.models.subjects.User`
:param user_agent: *exact* user agent string to lookup, or `None` to have
user_agent extracted from request object. If not provided at all, no
filtering on user_agent.
:type user_agent: string or None, or absent
:param ip_address: client IP, or `None` to have ip_address extracted from
request object (requires header 'X-Forwarded-For'). If not provided at
all, no filtering on ip_address.
:type ip_address: string or None, or absent
:rtype: `LoginSession` or `None` if no session is found.
"""
conditions = [LoginSession.user == user]
if user_agent is not _MARK:
if user_agent is None:
user_agent = request.environ.get("HTTP_USER_AGENT", "")
conditions.append(LoginSession.user_agent == user_agent)
if ip_address is not _MARK:
if ip_address is None:
ip_addresses = request.headers.getlist("X-Forwarded-For")
ip_address = ip_addresses[0] if ip_addresses else request.remote_addr
conditions.append(LoginSession.ip_address == ip_address)
session = (
LoginSession.query.filter(*conditions)
.order_by(LoginSession.id.desc())
.first()
)
return session |
def clean_headers(headers):
"""Forces header keys and values to be strings, i.e not unicode.
The httplib module just concats the header keys and values in a way that
may make the message header a unicode string, which, if it then tries to
    concatenate to a binary request body may result in a unicode decode error.
Args:
headers: dict, A dictionary of headers.
Returns:
The same dictionary but with all the keys converted to strings.
"""
clean = {}
try:
for k, v in six.iteritems(headers):
if not isinstance(k, six.binary_type):
k = str(k)
if not isinstance(v, six.binary_type):
v = str(v)
clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)
except UnicodeEncodeError:
from oauth2client.client import NonAsciiHeaderError
raise NonAsciiHeaderError(k, ': ', v)
return clean | Forces header keys and values to be strings, i.e not unicode.
The httplib module just concats the header keys and values in a way that
may make the message header a unicode string, which, if it then tries to
    concatenate to a binary request body may result in a unicode decode error.
Args:
headers: dict, A dictionary of headers.
Returns:
        The same dictionary but with all the keys converted to strings. | Below is the instruction that describes the task:
### Input:
Forces header keys and values to be strings, i.e not unicode.
The httplib module just concats the header keys and values in a way that
may make the message header a unicode string, which, if it then tries to
    concatenate to a binary request body may result in a unicode decode error.
Args:
headers: dict, A dictionary of headers.
Returns:
The same dictionary but with all the keys converted to strings.
### Response:
def clean_headers(headers):
"""Forces header keys and values to be strings, i.e not unicode.
The httplib module just concats the header keys and values in a way that
may make the message header a unicode string, which, if it then tries to
    concatenate to a binary request body may result in a unicode decode error.
Args:
headers: dict, A dictionary of headers.
Returns:
The same dictionary but with all the keys converted to strings.
"""
clean = {}
try:
for k, v in six.iteritems(headers):
if not isinstance(k, six.binary_type):
k = str(k)
if not isinstance(v, six.binary_type):
v = str(v)
clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)
except UnicodeEncodeError:
from oauth2client.client import NonAsciiHeaderError
raise NonAsciiHeaderError(k, ': ', v)
return clean |
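A small usage sketch, assuming the six/_helpers imports used by clean_headers are available (i.e. oauth2client is installed); the header names and values are examples.
headers = {u'Content-Type': u'application/json', u'X-Retry-Count': 3}
print(clean_headers(headers))   # every key and value is coerced to bytes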
def get_step(self, grad):
"""Computes the 'step' to take for the next gradient descent update.
Returns the step rather than performing the update so that
parameters can be updated in place rather than overwritten.
Examples
--------
>>> gradient = # ...
>>> optimizer = AdaGradOptimizer(0.01)
>>> params -= optimizer.get_step(gradient)
Parameters
----------
grad
Returns
-------
np.array
Size matches `grad`.
"""
if self._momentum is None:
self._momentum = self.initial_accumulator_value * np.ones_like(grad)
self._momentum += grad ** 2
return self.learning_rate * grad / np.sqrt(self._momentum) | Computes the 'step' to take for the next gradient descent update.
Returns the step rather than performing the update so that
parameters can be updated in place rather than overwritten.
Examples
--------
>>> gradient = # ...
>>> optimizer = AdaGradOptimizer(0.01)
>>> params -= optimizer.get_step(gradient)
Parameters
----------
grad
Returns
-------
np.array
        Size matches `grad`. | Below is the instruction that describes the task:
### Input:
Computes the 'step' to take for the next gradient descent update.
Returns the step rather than performing the update so that
parameters can be updated in place rather than overwritten.
Examples
--------
>>> gradient = # ...
>>> optimizer = AdaGradOptimizer(0.01)
>>> params -= optimizer.get_step(gradient)
Parameters
----------
grad
Returns
-------
np.array
Size matches `grad`.
### Response:
def get_step(self, grad):
"""Computes the 'step' to take for the next gradient descent update.
Returns the step rather than performing the update so that
parameters can be updated in place rather than overwritten.
Examples
--------
>>> gradient = # ...
>>> optimizer = AdaGradOptimizer(0.01)
>>> params -= optimizer.get_step(gradient)
Parameters
----------
grad
Returns
-------
np.array
Size matches `grad`.
"""
if self._momentum is None:
self._momentum = self.initial_accumulator_value * np.ones_like(grad)
self._momentum += grad ** 2
return self.learning_rate * grad / np.sqrt(self._momentum) |
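A hedged sketch of the in-place update pattern from the docstring; it assumes the surrounding AdaGradOptimizer class stores learning_rate and initial_accumulator_value in its constructor.
import numpy as np
opt = AdaGradOptimizer(0.01)
params = np.zeros(3)
target = np.array([1.0, 2.0, 3.0])
for _ in range(100):
    grad = 2.0 * (params - target)   # gradient of ||params - target||**2
    params -= opt.get_step(grad)
print(params)                        # drifts toward target as steps accumulate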
def parse_args(self, args, scope):
"""Parse arguments to mixin. Add them to scope
        as variables. Sets up special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
SyntaxError
"""
arguments = list(zip(args,
[' '] * len(args))) if args and args[0] else None
zl = itertools.zip_longest if sys.version_info[
0] == 3 else itertools.izip_longest
if self.args:
parsed = [
v if hasattr(v, 'parse') else v for v in copy.copy(self.args)
]
args = args if isinstance(args, list) else [args]
vars = [
self._parse_arg(var, arg, scope)
for arg, var in zl([a for a in args], parsed)
]
for var in vars:
if var:
var.parse(scope)
if not arguments:
arguments = [v.value for v in vars if v]
if not arguments:
arguments = ''
Variable(['@arguments', None, arguments]).parse(scope) | Parse arguments to mixin. Add them to scope
        as variables. Sets up special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
        SyntaxError | Below is the instruction that describes the task:
### Input:
Parse arguments to mixin. Add them to scope
        as variables. Sets up special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
SyntaxError
### Response:
def parse_args(self, args, scope):
"""Parse arguments to mixin. Add them to scope
        as variables. Sets up special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
SyntaxError
"""
arguments = list(zip(args,
[' '] * len(args))) if args and args[0] else None
zl = itertools.zip_longest if sys.version_info[
0] == 3 else itertools.izip_longest
if self.args:
parsed = [
v if hasattr(v, 'parse') else v for v in copy.copy(self.args)
]
args = args if isinstance(args, list) else [args]
vars = [
self._parse_arg(var, arg, scope)
for arg, var in zl([a for a in args], parsed)
]
for var in vars:
if var:
var.parse(scope)
if not arguments:
arguments = [v.value for v in vars if v]
if not arguments:
arguments = ''
Variable(['@arguments', None, arguments]).parse(scope) |
def put_settings(self, body=None, params=None):
"""
Update cluster wide specific settings.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_
:arg body: The settings to be updated. Can be either `transient` or
`persistent` (survives cluster restart).
:arg flat_settings: Return settings in flat format (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
"""
return self.transport.perform_request('PUT', '/_cluster/settings',
params=params, body=body) | Update cluster wide specific settings.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_
:arg body: The settings to be updated. Can be either `transient` or
`persistent` (survives cluster restart).
:arg flat_settings: Return settings in flat format (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
    :arg timeout: Explicit operation timeout | Below is the instruction that describes the task:
### Input:
Update cluster wide specific settings.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_
:arg body: The settings to be updated. Can be either `transient` or
`persistent` (survives cluster restart).
:arg flat_settings: Return settings in flat format (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
### Response:
def put_settings(self, body=None, params=None):
"""
Update cluster wide specific settings.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_
:arg body: The settings to be updated. Can be either `transient` or
`persistent` (survives cluster restart).
:arg flat_settings: Return settings in flat format (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
"""
return self.transport.perform_request('PUT', '/_cluster/settings',
params=params, body=body) |
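A hypothetical call, assuming an elasticsearch-py client; the setting shown is only an example.
from elasticsearch import Elasticsearch
es = Elasticsearch()                 # assumes a reachable cluster
es.cluster.put_settings(
    body={'transient': {'cluster.routing.allocation.enable': 'all'}}
)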
def get_distribute_verbatim_metadata(self):
"""Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['distribute_verbatim'])
metadata.update({'existing_boolean_values': self._my_map['distributeVerbatim']})
return Metadata(**metadata) | Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
    *compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_distribute_verbatim_metadata(self):
"""Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['distribute_verbatim'])
metadata.update({'existing_boolean_values': self._my_map['distributeVerbatim']})
return Metadata(**metadata) |
def moveoutletstostrm(np, flowdir, streamRaster, outlet, modifiedOutlet,
workingdir=None, mpiexedir=None,
exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run move the given outlets to stream"""
fname = TauDEM.func_name('moveoutletstostrm')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-src': streamRaster, '-o': outlet},
workingdir,
None,
{'-om': modifiedOutlet},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file}) | Run move the given outlets to stream | Below is the instruction that describes the task:
### Input:
Run move the given outlets to stream
### Response:
def moveoutletstostrm(np, flowdir, streamRaster, outlet, modifiedOutlet,
workingdir=None, mpiexedir=None,
exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run move the given outlets to stream"""
fname = TauDEM.func_name('moveoutletstostrm')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-src': streamRaster, '-o': outlet},
workingdir,
None,
{'-om': modifiedOutlet},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) |
def find_primitive(cell, symprec=1e-5):
"""
A primitive cell is searched in the input cell. When a primitive
cell is found, an object of Atoms class of the primitive cell is
returned. When not, None is returned.
"""
lattice, positions, numbers = spg.find_primitive(cell.totuple(), symprec)
if lattice is None:
return None
else:
return Atoms(numbers=numbers,
scaled_positions=positions,
cell=lattice,
pbc=True) | A primitive cell is searched in the input cell. When a primitive
cell is found, an object of Atoms class of the primitive cell is
    returned. When not, None is returned. | Below is the instruction that describes the task:
### Input:
A primitive cell is searched in the input cell. When a primitive
cell is found, an object of Atoms class of the primitive cell is
returned. When not, None is returned.
### Response:
def find_primitive(cell, symprec=1e-5):
"""
A primitive cell is searched in the input cell. When a primitive
cell is found, an object of Atoms class of the primitive cell is
returned. When not, None is returned.
"""
lattice, positions, numbers = spg.find_primitive(cell.totuple(), symprec)
if lattice is None:
return None
else:
return Atoms(numbers=numbers,
scaled_positions=positions,
cell=lattice,
pbc=True) |
def output(self, value):
"""SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
            Expression: Output assignment expression that is valid in the context of this operator.
"""
return super(Map, self).output(self.stream, value) | SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
        Expression: Output assignment expression that is valid in the context of this operator. | Below is the instruction that describes the task:
### Input:
SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
        Expression: Output assignment expression that is valid in the context of this operator.
### Response:
def output(self, value):
"""SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
            Expression: Output assignment expression that is valid in the context of this operator.
"""
return super(Map, self).output(self.stream, value) |
def get(self, name, acc=None, default=None):
"""Return the named config for the given account.
If an account is given, first checks the account space for the name.
If no account given, or if the name not found in the account space,
look for the name in the global config space. If still not found,
return the default, if given, otherwise ``None``.
"""
if acc in self.data['accounts'] and name in self.data['accounts'][acc]:
return self.data['accounts'][acc][name]
if name in self.data:
return self.data[name]
return default | Return the named config for the given account.
If an account is given, first checks the account space for the name.
If no account given, or if the name not found in the account space,
look for the name in the global config space. If still not found,
    return the default, if given, otherwise ``None``. | Below is the instruction that describes the task:
### Input:
Return the named config for the given account.
If an account is given, first checks the account space for the name.
If no account given, or if the name not found in the account space,
look for the name in the global config space. If still not found,
return the default, if given, otherwise ``None``.
### Response:
def get(self, name, acc=None, default=None):
"""Return the named config for the given account.
If an account is given, first checks the account space for the name.
If no account given, or if the name not found in the account space,
look for the name in the global config space. If still not found,
return the default, if given, otherwise ``None``.
"""
if acc in self.data['accounts'] and name in self.data['accounts'][acc]:
return self.data['accounts'][acc][name]
if name in self.data:
return self.data[name]
return default |
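A runnable sketch of the lookup precedence, assuming the get function above is in scope; SimpleNamespace stands in for the real config object and the keys are invented.
from types import SimpleNamespace
cfg = SimpleNamespace(data={'timeout': 30, 'accounts': {'work': {'timeout': 5}}})
print(get(cfg, 'timeout', acc='work'))      # 5  -- the account-level value wins
print(get(cfg, 'timeout'))                  # 30 -- falls back to the global space
print(get(cfg, 'missing', default='n/a'))   # n/a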
def shell_sqlalchemy(session: SqlalchemySession, backend: ShellBackend):
"""
This command includes SQLAlchemy DB Session
"""
namespace = {
'session': session
}
namespace.update(backend.get_namespace())
    embed(user_ns=namespace, header=backend.header) | This command includes SQLAlchemy DB Session | Below is the instruction that describes the task:
### Input:
This command includes SQLAlchemy DB Session
### Response:
def shell_sqlalchemy(session: SqlalchemySession, backend: ShellBackend):
"""
This command includes SQLAlchemy DB Session
"""
namespace = {
'session': session
}
namespace.update(backend.get_namespace())
embed(user_ns=namespace, header=backend.header) |
def count_leases_by_owner(self, leases): # pylint: disable=no-self-use
"""
Returns a dictionary of leases by current owner.
"""
owners = [l.owner for l in leases]
        return dict(Counter(owners)) | Returns a dictionary of leases by current owner. | Below is the instruction that describes the task:
### Input:
Returns a dictionary of leases by current owner.
### Response:
def count_leases_by_owner(self, leases): # pylint: disable=no-self-use
"""
Returns a dictionary of leases by current owner.
"""
owners = [l.owner for l in leases]
return dict(Counter(owners)) |
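A runnable sketch, assuming count_leases_by_owner above is in scope; any objects exposing an owner attribute will do, and since self is unused None can be passed for it.
from collections import namedtuple
Lease = namedtuple('Lease', ['owner'])
leases = [Lease('alice'), Lease('bob'), Lease('alice')]
print(count_leases_by_owner(None, leases))   # {'alice': 2, 'bob': 1}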
def _get_network(project_id, network_name, service):
'''
Fetch network selfLink from network name.
'''
return service.networks().get(project=project_id,
                                  network=network_name).execute() | Fetch network selfLink from network name. | Below is the instruction that describes the task:
### Input:
Fetch network selfLink from network name.
### Response:
def _get_network(project_id, network_name, service):
'''
Fetch network selfLink from network name.
'''
return service.networks().get(project=project_id,
network=network_name).execute() |
def get_attrs(cls):
"""
Get all class attributes ordered by definition
"""
ignore = dir(type('dummy', (object,), {})) + ['__metaclass__']
attrs = [
item for item in inspect.getmembers(cls) if item[0] not in ignore
and not isinstance(
item[1], (
types.FunctionType,
types.MethodType,
classmethod,
staticmethod,
property))]
# sort by idx and use attribute name to break ties
attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
    return attrs | Get all class attributes ordered by definition | Below is the instruction that describes the task:
### Input:
Get all class attributes ordered by definition
### Response:
def get_attrs(cls):
"""
Get all class attributes ordered by definition
"""
ignore = dir(type('dummy', (object,), {})) + ['__metaclass__']
attrs = [
item for item in inspect.getmembers(cls) if item[0] not in ignore
and not isinstance(
item[1], (
types.FunctionType,
types.MethodType,
classmethod,
staticmethod,
property))]
# sort by idx and use attribute name to break ties
attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
return attrs |
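A small self-contained sketch of the ordering behaviour, using hypothetical attribute objects that carry the ``idx`` marker the sort key looks for:

class Col(object):
    """Hypothetical attribute holder with an explicit definition index."""
    def __init__(self, idx):
        self.idx = idx

class Model(object):
    name = Col(1)
    age = Col(2)

# Attributes come back ordered by idx, with the attribute name as tie-breaker.
print([attr_name for attr_name, _ in get_attrs(Model)])   # -> ['name', 'age']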
def update_dataset_marker(self):
"""Update markers which are in the dataset. It always updates the list
of events. Depending on the settings, it might add the markers to
overview and traces.
"""
start_time = self.parent.overview.start_time
markers = []
if self.parent.info.markers is not None:
markers = self.parent.info.markers
self.idx_marker.clearContents()
self.idx_marker.setRowCount(len(markers))
for i, mrk in enumerate(markers):
abs_time = (start_time +
timedelta(seconds=mrk['start'])).strftime('%H:%M:%S')
dur = timedelta(seconds=mrk['end'] - mrk['start'])
duration = '{0:02d}.{1:03d}'.format(dur.seconds,
round(dur.microseconds / 1000))
item_time = QTableWidgetItem(abs_time)
item_duration = QTableWidgetItem(duration)
item_name = QTableWidgetItem(mrk['name'])
color = self.parent.value('marker_color')
item_time.setForeground(QColor(color))
item_duration.setForeground(QColor(color))
item_name.setForeground(QColor(color))
self.idx_marker.setItem(i, 0, item_time)
self.idx_marker.setItem(i, 1, item_duration)
self.idx_marker.setItem(i, 2, item_name)
# store information about the time as list (easy to access)
marker_start = [mrk['start'] for mrk in markers]
marker_end = [mrk['end'] for mrk in markers]
self.idx_marker.setProperty('start', marker_start)
self.idx_marker.setProperty('end', marker_end)
if self.parent.traces.data is not None:
self.parent.traces.display()
self.parent.overview.display_markers() | Update markers which are in the dataset. It always updates the list
of events. Depending on the settings, it might add the markers to
overview and traces. | Below is the the instruction that describes the task:
### Input:
Update markers which are in the dataset. It always updates the list
of events. Depending on the settings, it might add the markers to
overview and traces.
### Response:
def update_dataset_marker(self):
"""Update markers which are in the dataset. It always updates the list
of events. Depending on the settings, it might add the markers to
overview and traces.
"""
start_time = self.parent.overview.start_time
markers = []
if self.parent.info.markers is not None:
markers = self.parent.info.markers
self.idx_marker.clearContents()
self.idx_marker.setRowCount(len(markers))
for i, mrk in enumerate(markers):
abs_time = (start_time +
timedelta(seconds=mrk['start'])).strftime('%H:%M:%S')
dur = timedelta(seconds=mrk['end'] - mrk['start'])
duration = '{0:02d}.{1:03d}'.format(dur.seconds,
round(dur.microseconds / 1000))
item_time = QTableWidgetItem(abs_time)
item_duration = QTableWidgetItem(duration)
item_name = QTableWidgetItem(mrk['name'])
color = self.parent.value('marker_color')
item_time.setForeground(QColor(color))
item_duration.setForeground(QColor(color))
item_name.setForeground(QColor(color))
self.idx_marker.setItem(i, 0, item_time)
self.idx_marker.setItem(i, 1, item_duration)
self.idx_marker.setItem(i, 2, item_name)
# store information about the time as list (easy to access)
marker_start = [mrk['start'] for mrk in markers]
marker_end = [mrk['end'] for mrk in markers]
self.idx_marker.setProperty('start', marker_start)
self.idx_marker.setProperty('end', marker_end)
if self.parent.traces.data is not None:
self.parent.traces.display()
self.parent.overview.display_markers() |
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
        contents: String representing a Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = ' '.join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(toks[0])
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
title=title, functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag) | Creates GaussianInput from a string.
Args:
        contents: String representing a Gaussian input file.
Returns:
GaussianInput object | Below is the the instruction that describes the task:
### Input:
Creates GaussianInput from a string.
Args:
        contents: String representing a Gaussian input file.
Returns:
GaussianInput object
### Response:
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
        contents: String representing a Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = ' '.join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(toks[0])
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
title=title, functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag) |
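An illustrative round-trip through the parser; the file path is a placeholder for any well-formed Gaussian input deck:

# Hypothetical input file on disk.
with open('water.gjf') as f:
    gau_input = GaussianInput.from_string(f.read())
# gau_input now carries the parsed molecule plus charge, spin, route and Link0 data.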
def sequence(context, data):
"""Generate a sequence of numbers.
It is the memorious equivalent of the xrange function, accepting the
``start``, ``stop`` and ``step`` parameters.
This can run in two ways:
* As a single function generating all numbers in the given range.
* Recursively, generating numbers one by one with an optional ``delay``.
The latter mode is useful in order to generate very large sequences
without completely clogging up the user queue.
If an optional ``tag`` is given, each number will be emitted only once
across multiple runs of the crawler.
"""
number = data.get('number', context.params.get('start', 1))
stop = context.params.get('stop')
step = context.params.get('step', 1)
delay = context.params.get('delay')
prefix = context.params.get('tag')
while True:
tag = None if prefix is None else '%s:%s' % (prefix, number)
if tag is None or not context.check_tag(tag):
context.emit(data={'number': number})
if tag is not None:
context.set_tag(tag, True)
number = number + step
if step > 0 and number >= stop:
break
if step < 0 and number <= stop:
break
if delay is not None:
context.recurse(data={'number': number}, delay=delay)
break | Generate a sequence of numbers.
It is the memorious equivalent of the xrange function, accepting the
``start``, ``stop`` and ``step`` parameters.
This can run in two ways:
* As a single function generating all numbers in the given range.
* Recursively, generating numbers one by one with an optional ``delay``.
The latter mode is useful in order to generate very large sequences
without completely clogging up the user queue.
If an optional ``tag`` is given, each number will be emitted only once
across multiple runs of the crawler. | Below is the the instruction that describes the task:
### Input:
Generate a sequence of numbers.
It is the memorious equivalent of the xrange function, accepting the
``start``, ``stop`` and ``step`` parameters.
This can run in two ways:
* As a single function generating all numbers in the given range.
* Recursively, generating numbers one by one with an optional ``delay``.
The latter mode is useful in order to generate very large sequences
without completely clogging up the user queue.
If an optional ``tag`` is given, each number will be emitted only once
across multiple runs of the crawler.
### Response:
def sequence(context, data):
"""Generate a sequence of numbers.
It is the memorious equivalent of the xrange function, accepting the
``start``, ``stop`` and ``step`` parameters.
This can run in two ways:
* As a single function generating all numbers in the given range.
* Recursively, generating numbers one by one with an optional ``delay``.
The latter mode is useful in order to generate very large sequences
without completely clogging up the user queue.
If an optional ``tag`` is given, each number will be emitted only once
across multiple runs of the crawler.
"""
number = data.get('number', context.params.get('start', 1))
stop = context.params.get('stop')
step = context.params.get('step', 1)
delay = context.params.get('delay')
prefix = context.params.get('tag')
while True:
tag = None if prefix is None else '%s:%s' % (prefix, number)
if tag is None or not context.check_tag(tag):
context.emit(data={'number': number})
if tag is not None:
context.set_tag(tag, True)
number = number + step
if step > 0 and number >= stop:
break
if step < 0 and number <= stop:
break
if delay is not None:
context.recurse(data={'number': number}, delay=delay)
break |
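A standalone re-creation of the single-shot arithmetic with made-up start/stop/step values; the recursive mode differs only in emitting one number per run and re-scheduling itself via context.recurse:

# Hypothetical parameters: start=1, stop=10, step=3.
number, stop, step = 1, 10, 3
emitted = []
while True:
    emitted.append(number)
    number += step
    if step > 0 and number >= stop:
        break
print(emitted)   # -> [1, 4, 7]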
def _updateWordSet(self):
"""Make a set of words, which shall be completed, from text
"""
self._wordSet = set(self._keywords) | set(self._customCompletions)
start = time.time()
for line in self._qpart.lines:
for match in _wordRegExp.findall(line):
self._wordSet.add(match)
if time.time() - start > self._WORD_SET_UPDATE_MAX_TIME_SEC:
"""It is better to have incomplete word set, than to freeze the GUI"""
break | Make a set of words, which shall be completed, from text | Below is the the instruction that describes the task:
### Input:
Make a set of words, which shall be completed, from text
### Response:
def _updateWordSet(self):
"""Make a set of words, which shall be completed, from text
"""
self._wordSet = set(self._keywords) | set(self._customCompletions)
start = time.time()
for line in self._qpart.lines:
for match in _wordRegExp.findall(line):
self._wordSet.add(match)
if time.time() - start > self._WORD_SET_UPDATE_MAX_TIME_SEC:
"""It is better to have incomplete word set, than to freeze the GUI"""
break |
def rename_acquisition(self, plate_name, name, new_name):
'''Renames an acquisition.
Parameters
----------
plate_name: str
name of the parent plate
name: str
name of the acquisition that should be renamed
new_name: str
name that should be given to the acquisition
See also
--------
:func:`tmserver.api.acquisition.update_acquisition`
:class:`tmlib.models.acquisition.Acquisition`
'''
logger.info(
        'rename acquisition "%s" of experiment "%s", plate "%s"',
name, self.experiment_name, plate_name
)
content = {'name': new_name}
acquisition_id = self._get_acquisition_id(plate_name, name)
url = self._build_api_url(
'/experiments/{experiment_id}/acquisitions/{acquisition_id}'.format(
experiment_id=self._experiment_id, acquisition_id=acquisition_id
)
)
res = self._session.put(url, json=content)
res.raise_for_status() | Renames an acquisition.
Parameters
----------
plate_name: str
name of the parent plate
name: str
name of the acquisition that should be renamed
new_name: str
name that should be given to the acquisition
See also
--------
:func:`tmserver.api.acquisition.update_acquisition`
:class:`tmlib.models.acquisition.Acquisition` | Below is the the instruction that describes the task:
### Input:
Renames an acquisition.
Parameters
----------
plate_name: str
name of the parent plate
name: str
name of the acquisition that should be renamed
new_name: str
name that should be given to the acquisition
See also
--------
:func:`tmserver.api.acquisition.update_acquisition`
:class:`tmlib.models.acquisition.Acquisition`
### Response:
def rename_acquisition(self, plate_name, name, new_name):
'''Renames an acquisition.
Parameters
----------
plate_name: str
name of the parent plate
name: str
name of the acquisition that should be renamed
new_name: str
name that should be given to the acquisition
See also
--------
:func:`tmserver.api.acquisition.update_acquisition`
:class:`tmlib.models.acquisition.Acquisition`
'''
logger.info(
        'rename acquisition "%s" of experiment "%s", plate "%s"',
name, self.experiment_name, plate_name
)
content = {'name': new_name}
acquisition_id = self._get_acquisition_id(plate_name, name)
url = self._build_api_url(
'/experiments/{experiment_id}/acquisitions/{acquisition_id}'.format(
experiment_id=self._experiment_id, acquisition_id=acquisition_id
)
)
res = self._session.put(url, json=content)
res.raise_for_status() |
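An illustrative call, assuming ``client`` is an instance of the API wrapper this method belongs to; the plate and acquisition names are placeholders:

# Hypothetical names; raise_for_status() surfaces any server-side rejection.
client.rename_acquisition('plate1', 'acq_2019_01', 'acq_2019_01_rescan')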
def backlink(node):
"""Given a CFG with outgoing links, create incoming links."""
seen = set()
to_see = [node]
while to_see:
node = to_see.pop()
seen.add(node)
for succ in node.next:
succ.prev.add(node)
if succ not in seen:
to_see.append(succ) | Given a CFG with outgoing links, create incoming links. | Below is the the instruction that describes the task:
### Input:
Given a CFG with outgoing links, create incoming links.
### Response:
def backlink(node):
"""Given a CFG with outgoing links, create incoming links."""
seen = set()
to_see = [node]
while to_see:
node = to_see.pop()
seen.add(node)
for succ in node.next:
succ.prev.add(node)
if succ not in seen:
to_see.append(succ) |
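A self-contained sketch using a minimal node type with the ``next``/``prev`` sets the function relies on; the graph shape is made up:

class Node(object):
    """Hypothetical CFG node with outgoing and incoming link sets."""
    def __init__(self, label):
        self.label = label
        self.next = set()
        self.prev = set()

a, b = Node('a'), Node('b')
a.next.add(b)          # only the outgoing link exists so far
backlink(a)
print(a in b.prev)     # -> True, the incoming link has been created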
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
""" Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
"""
if isPostGreSQLDialect(ormSession.bind):
return column.in_(values)
if not isMssqlDialect(ormSession.bind):
raise NotImplementedError()
sql = _createMssqlSqlText(values)
sub_qry = ormSession.query(column) # Any column, it just assigns a name
sub_qry = sub_qry.from_statement(sql)
return column.in_(sub_qry) | Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values | Below is the the instruction that describes the task:
### Input:
Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
### Response:
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
""" Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
"""
if isPostGreSQLDialect(ormSession.bind):
return column.in_(values)
if not isMssqlDialect(ormSession.bind):
raise NotImplementedError()
sql = _createMssqlSqlText(values)
sub_qry = ormSession.query(column) # Any column, it just assigns a name
sub_qry = sub_qry.from_statement(sql)
return column.in_(sub_qry) |
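An illustrative filter built from the helper; ``Item`` stands for any declarative model and the value list is arbitrary:

# Hypothetical model and values; the returned condition plugs straight into filter().
condition = makeOrmValuesSubqueryCondition(ormSession, Item.name, ['alpha', 'beta'])
rows = ormSession.query(Item).filter(condition).all()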
def hide(self):
"""Hides all annotation artists associated with the DataCursor. Returns
self to allow "chaining". (e.g. ``datacursor.hide().disable()``)"""
self._hidden = True
for artist in self.annotations.values():
artist.set_visible(False)
for fig in self.figures:
fig.canvas.draw()
return self | Hides all annotation artists associated with the DataCursor. Returns
self to allow "chaining". (e.g. ``datacursor.hide().disable()``) | Below is the the instruction that describes the task:
### Input:
Hides all annotation artists associated with the DataCursor. Returns
self to allow "chaining". (e.g. ``datacursor.hide().disable()``)
### Response:
def hide(self):
"""Hides all annotation artists associated with the DataCursor. Returns
self to allow "chaining". (e.g. ``datacursor.hide().disable()``)"""
self._hidden = True
for artist in self.annotations.values():
artist.set_visible(False)
for fig in self.figures:
fig.canvas.draw()
return self |
def _straight_line_vertices(adjacency_mat, node_coords, directed=False):
"""
Generate the vertices for straight lines between nodes.
If it is a directed graph, it also generates the vertices which can be
passed to an :class:`ArrowVisual`.
Parameters
----------
adjacency_mat : array
The adjacency matrix of the graph
node_coords : array
The current coordinates of all nodes in the graph
directed : bool
        Whether the graph is directed. If this is true it will also generate
the vertices for arrows which can be passed to :class:`ArrowVisual`.
Returns
-------
vertices : tuple
        Returns a tuple containing (`line_vertices`,
`arrow_vertices`)
"""
if not issparse(adjacency_mat):
adjacency_mat = np.asarray(adjacency_mat, float)
if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] !=
adjacency_mat.shape[1]):
raise ValueError("Adjacency matrix should be square.")
arrow_vertices = np.array([])
edges = _get_edges(adjacency_mat)
line_vertices = node_coords[edges.ravel()]
if directed:
arrows = np.array(list(_get_directed_edges(adjacency_mat)))
arrow_vertices = node_coords[arrows.ravel()]
        arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)//2, 4))
return line_vertices, arrow_vertices | Generate the vertices for straight lines between nodes.
If it is a directed graph, it also generates the vertices which can be
passed to an :class:`ArrowVisual`.
Parameters
----------
adjacency_mat : array
The adjacency matrix of the graph
node_coords : array
The current coordinates of all nodes in the graph
directed : bool
        Whether the graph is directed. If this is true it will also generate
the vertices for arrows which can be passed to :class:`ArrowVisual`.
Returns
-------
vertices : tuple
        Returns a tuple containing (`line_vertices`,
`arrow_vertices`) | Below is the the instruction that describes the task:
### Input:
Generate the vertices for straight lines between nodes.
If it is a directed graph, it also generates the vertices which can be
passed to an :class:`ArrowVisual`.
Parameters
----------
adjacency_mat : array
The adjacency matrix of the graph
node_coords : array
The current coordinates of all nodes in the graph
directed : bool
        Whether the graph is directed. If this is true it will also generate
the vertices for arrows which can be passed to :class:`ArrowVisual`.
Returns
-------
vertices : tuple
        Returns a tuple containing (`line_vertices`,
`arrow_vertices`)
### Response:
def _straight_line_vertices(adjacency_mat, node_coords, directed=False):
"""
Generate the vertices for straight lines between nodes.
If it is a directed graph, it also generates the vertices which can be
passed to an :class:`ArrowVisual`.
Parameters
----------
adjacency_mat : array
The adjacency matrix of the graph
node_coords : array
The current coordinates of all nodes in the graph
directed : bool
        Whether the graph is directed. If this is true it will also generate
the vertices for arrows which can be passed to :class:`ArrowVisual`.
Returns
-------
vertices : tuple
        Returns a tuple containing (`line_vertices`,
`arrow_vertices`)
"""
if not issparse(adjacency_mat):
adjacency_mat = np.asarray(adjacency_mat, float)
if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] !=
adjacency_mat.shape[1]):
raise ValueError("Adjacency matrix should be square.")
arrow_vertices = np.array([])
edges = _get_edges(adjacency_mat)
line_vertices = node_coords[edges.ravel()]
if directed:
arrows = np.array(list(_get_directed_edges(adjacency_mat)))
arrow_vertices = node_coords[arrows.ravel()]
        arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)//2, 4))
return line_vertices, arrow_vertices |
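A small sketch feeding the helper a made-up three-node directed graph; it assumes the module-level edge helpers this function calls are available alongside it:

import numpy as np

# Hypothetical graph: edges 0 -> 1 and 1 -> 2, with 2-D node positions.
adjacency = np.array([[0., 1., 0.],
                      [0., 0., 1.],
                      [0., 0., 0.]])
coords = np.array([[0., 0.], [1., 0.], [1., 1.]])
line_verts, arrow_verts = _straight_line_vertices(adjacency, coords, directed=True)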
def list(self,table, **kparams):
"""
get a collection of records by table name.
returns a dict (the json map) for python 3.4
"""
result = self.table_api_get(table, **kparams)
return self.to_records(result, table) | get a collection of records by table name.
returns a dict (the json map) for python 3.4 | Below is the the instruction that describes the task:
### Input:
get a collection of records by table name.
returns a dict (the json map) for python 3.4
### Response:
def list(self,table, **kparams):
"""
get a collection of records by table name.
returns a dict (the json map) for python 3.4
"""
result = self.table_api_get(table, **kparams)
return self.to_records(result, table) |
def have_thumbnail(self, fitsimage, image):
"""Returns True if we already have a thumbnail version of this image
cached, False otherwise.
"""
chname = self.fv.get_channel_name(fitsimage)
# Look up our version of the thumb
idx = image.get('idx', None)
path = image.get('path', None)
if path is not None:
path = os.path.abspath(path)
name = iohelper.name_image_from_path(path, idx=idx)
else:
name = 'NoName'
# get image name
name = image.get('name', name)
thumbkey = self.get_thumb_key(chname, name, path)
with self.thmblock:
return thumbkey in self.thumb_dict | Returns True if we already have a thumbnail version of this image
cached, False otherwise. | Below is the the instruction that describes the task:
### Input:
Returns True if we already have a thumbnail version of this image
cached, False otherwise.
### Response:
def have_thumbnail(self, fitsimage, image):
"""Returns True if we already have a thumbnail version of this image
cached, False otherwise.
"""
chname = self.fv.get_channel_name(fitsimage)
# Look up our version of the thumb
idx = image.get('idx', None)
path = image.get('path', None)
if path is not None:
path = os.path.abspath(path)
name = iohelper.name_image_from_path(path, idx=idx)
else:
name = 'NoName'
# get image name
name = image.get('name', name)
thumbkey = self.get_thumb_key(chname, name, path)
with self.thmblock:
return thumbkey in self.thumb_dict |
def setupNodding(self):
"""
Setup Nodding for GTC
"""
g = get_root(self).globals
if not self.nod():
# re-enable clear mode box if not drift
if not self.isDrift():
self.clear.enable()
# clear existing nod pattern
self.nodPattern = {}
self.check()
return
# Do nothing if we're not at the GTC
if g.cpars['telins_name'] != 'GTC':
messagebox.showerror('Error', 'Cannot dither WHT')
self.nod.set(False)
self.nodPattern = {}
return
# check for drift mode and bomb out
if self.isDrift():
messagebox.showerror('Error', 'Cannot dither telescope in drift mode')
self.nod.set(False)
self.nodPattern = {}
return
# check for clear not enabled and warn
if not self.clear():
if not messagebox.askokcancel('Warning',
'Dithering telescope will enable clear mode. Continue?'):
self.nod.set(False)
self.nodPattern = {}
return
# Ask for nod pattern
try:
home = expanduser('~')
fname = filedialog.askopenfilename(
title='Open offsets text file',
defaultextension='.txt',
filetypes=[('text files', '.txt')],
initialdir=home)
if not fname:
g.clog.warn('Aborted load from disk')
raise ValueError
ra, dec = np.loadtxt(fname).T
if len(ra) != len(dec):
g.clog.warn('Mismatched lengths of RA and Dec offsets')
raise ValueError
data = dict(
ra=ra.tolist(),
dec=dec.tolist()
)
except:
g.clog.warn('Setting dither pattern failed. Disabling dithering')
self.nod.set(False)
self.nodPattern = {}
return
# store nodding on ipars object
self.nodPattern = data
# enable clear mode
self.clear.set(True)
# update
self.check() | Setup Nodding for GTC | Below is the the instruction that describes the task:
### Input:
Setup Nodding for GTC
### Response:
def setupNodding(self):
"""
Setup Nodding for GTC
"""
g = get_root(self).globals
if not self.nod():
# re-enable clear mode box if not drift
if not self.isDrift():
self.clear.enable()
# clear existing nod pattern
self.nodPattern = {}
self.check()
return
# Do nothing if we're not at the GTC
if g.cpars['telins_name'] != 'GTC':
messagebox.showerror('Error', 'Cannot dither WHT')
self.nod.set(False)
self.nodPattern = {}
return
# check for drift mode and bomb out
if self.isDrift():
messagebox.showerror('Error', 'Cannot dither telescope in drift mode')
self.nod.set(False)
self.nodPattern = {}
return
# check for clear not enabled and warn
if not self.clear():
if not messagebox.askokcancel('Warning',
'Dithering telescope will enable clear mode. Continue?'):
self.nod.set(False)
self.nodPattern = {}
return
# Ask for nod pattern
try:
home = expanduser('~')
fname = filedialog.askopenfilename(
title='Open offsets text file',
defaultextension='.txt',
filetypes=[('text files', '.txt')],
initialdir=home)
if not fname:
g.clog.warn('Aborted load from disk')
raise ValueError
ra, dec = np.loadtxt(fname).T
if len(ra) != len(dec):
g.clog.warn('Mismatched lengths of RA and Dec offsets')
raise ValueError
data = dict(
ra=ra.tolist(),
dec=dec.tolist()
)
except:
g.clog.warn('Setting dither pattern failed. Disabling dithering')
self.nod.set(False)
self.nodPattern = {}
return
# store nodding on ipars object
self.nodPattern = data
# enable clear mode
self.clear.set(True)
# update
self.check() |
def copy(self, key):
"""Copy the set to another key and return the new Set.
WARNING: If the key exists, it overwrites it.
"""
copy = Set(key=key, db=self.db)
copy.clear()
copy |= self
return copy | Copy the set to another key and return the new Set.
WARNING: If the key exists, it overwrites it. | Below is the the instruction that describes the task:
### Input:
Copy the set to another key and return the new Set.
WARNING: If the key exists, it overwrites it.
### Response:
def copy(self, key):
"""Copy the set to another key and return the new Set.
WARNING: If the key exists, it overwrites it.
"""
copy = Set(key=key, db=self.db)
copy.clear()
copy |= self
return copy |
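An illustrative copy, reusing the constructor call visible in the method itself; the key names and ``db`` handle are placeholders:

tags = Set(key='article:42:tags', db=db)
backup = tags.copy('article:42:tags:backup')   # overwrites the target key if it already exists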
def parameterize(
self,
country: Optional[str] = "South Sudan",
state: Optional[str] = None,
year: Optional[int] = None,
month: Optional[int] = None,
unit: Optional[str] = None,
fallback_aggaxes: List[str] = ["year", "month"],
aggfunc: Callable = np.mean,
):
""" Parameterize the analysis graph.
Args:
country
year
month
fallback_aggaxes:
An iterable of strings denoting the axes upon which to perform
fallback aggregation if the desired constraints cannot be met.
aggfunc: The function that will be called to perform the
aggregation if there are multiple matches.
"""
valid_axes = ("country", "state", "year", "month")
if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)):
raise ValueError(
"All elements of the fallback_aggaxes set must be one of the "
f"following: {valid_axes}"
)
for n in self.nodes(data=True):
for indicator in n[1]["indicators"].values():
indicator.mean, indicator.unit = get_indicator_value(
indicator,
country,
state,
year,
month,
unit,
fallback_aggaxes,
aggfunc,
)
indicator.stdev = 0.1 * abs(indicator.mean) | Parameterize the analysis graph.
Args:
country
year
month
fallback_aggaxes:
An iterable of strings denoting the axes upon which to perform
fallback aggregation if the desired constraints cannot be met.
aggfunc: The function that will be called to perform the
aggregation if there are multiple matches. | Below is the the instruction that describes the task:
### Input:
Parameterize the analysis graph.
Args:
country
year
month
fallback_aggaxes:
An iterable of strings denoting the axes upon which to perform
fallback aggregation if the desired constraints cannot be met.
aggfunc: The function that will be called to perform the
aggregation if there are multiple matches.
### Response:
def parameterize(
self,
country: Optional[str] = "South Sudan",
state: Optional[str] = None,
year: Optional[int] = None,
month: Optional[int] = None,
unit: Optional[str] = None,
fallback_aggaxes: List[str] = ["year", "month"],
aggfunc: Callable = np.mean,
):
""" Parameterize the analysis graph.
Args:
country
year
month
fallback_aggaxes:
An iterable of strings denoting the axes upon which to perform
fallback aggregation if the desired constraints cannot be met.
aggfunc: The function that will be called to perform the
aggregation if there are multiple matches.
"""
valid_axes = ("country", "state", "year", "month")
if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)):
raise ValueError(
"All elements of the fallback_aggaxes set must be one of the "
f"following: {valid_axes}"
)
for n in self.nodes(data=True):
for indicator in n[1]["indicators"].values():
indicator.mean, indicator.unit = get_indicator_value(
indicator,
country,
state,
year,
month,
unit,
fallback_aggaxes,
aggfunc,
)
indicator.stdev = 0.1 * abs(indicator.mean) |
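An illustrative call on an assembled analysis graph ``G``, following the signature above; the spatial and temporal values are placeholders and numpy is assumed to be imported as np:

G.parameterize(country='South Sudan', year=2017, month=4,
               fallback_aggaxes=['year', 'month'], aggfunc=np.mean)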
def kill_process(procname, scriptname):
"""kill WSGI processes that may be running in development"""
# from http://stackoverflow.com/a/2940878
import signal
import subprocess
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.decode().splitlines():
if procname in line and scriptname in line:
pid = int(line.split()[1])
info('Stopping %s %s %d' % (procname, scriptname, pid))
os.kill(pid, signal.SIGKILL) | kill WSGI processes that may be running in development | Below is the the instruction that describes the task:
### Input:
kill WSGI processes that may be running in development
### Response:
def kill_process(procname, scriptname):
"""kill WSGI processes that may be running in development"""
# from http://stackoverflow.com/a/2940878
import signal
import subprocess
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.decode().splitlines():
if procname in line and scriptname in line:
pid = int(line.split()[1])
info('Stopping %s %s %d' % (procname, scriptname, pid))
os.kill(pid, signal.SIGKILL) |
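An illustrative invocation; the process and script names are placeholders for whatever development server is running:

# Hypothetical names; every matching 'ps aux' line is killed with SIGKILL.
kill_process('httpd', 'myapp.wsgi')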
def check_snmp(self):
    """Check if SNMP is available on the server."""
# Import the SNMP client class
from glances.snmp import GlancesSNMPClient
# Create an instance of the SNMP client
clientsnmp = GlancesSNMPClient(host=self.args.client,
port=self.args.snmp_port,
version=self.args.snmp_version,
community=self.args.snmp_community,
user=self.args.snmp_user,
auth=self.args.snmp_auth)
# If we cannot grab the hostname, then exit...
ret = clientsnmp.get_by_oid("1.3.6.1.2.1.1.5.0") != {}
if ret:
# Get the OS name (need to grab the good OID...)
oid_os_name = clientsnmp.get_by_oid("1.3.6.1.2.1.1.1.0")
try:
self.system_name = self.get_system_name(oid_os_name['1.3.6.1.2.1.1.1.0'])
logger.info("SNMP system name detected: {}".format(self.system_name))
except KeyError:
self.system_name = None
logger.warning("Cannot detect SNMP system name")
    return ret | Check if SNMP is available on the server. | Below is the the instruction that describes the task:
### Input:
Check if SNMP is available on the server.
### Response:
def check_snmp(self):
    """Check if SNMP is available on the server."""
# Import the SNMP client class
from glances.snmp import GlancesSNMPClient
# Create an instance of the SNMP client
clientsnmp = GlancesSNMPClient(host=self.args.client,
port=self.args.snmp_port,
version=self.args.snmp_version,
community=self.args.snmp_community,
user=self.args.snmp_user,
auth=self.args.snmp_auth)
# If we cannot grab the hostname, then exit...
ret = clientsnmp.get_by_oid("1.3.6.1.2.1.1.5.0") != {}
if ret:
# Get the OS name (need to grab the good OID...)
oid_os_name = clientsnmp.get_by_oid("1.3.6.1.2.1.1.1.0")
try:
self.system_name = self.get_system_name(oid_os_name['1.3.6.1.2.1.1.1.0'])
logger.info("SNMP system name detected: {}".format(self.system_name))
except KeyError:
self.system_name = None
logger.warning("Cannot detect SNMP system name")
return ret |
def next_page(self, max_=None):
"""
Return a query set which requests the page after this response.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the next page.
Must be called on a result set which has :attr:`last` set.
"""
result = type(self)()
result.after = After(self.last.value)
result.max_ = max_
return result | Return a query set which requests the page after this response.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the next page.
Must be called on a result set which has :attr:`last` set. | Below is the the instruction that describes the task:
### Input:
Return a query set which requests the page after this response.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the next page.
Must be called on a result set which has :attr:`last` set.
### Response:
def next_page(self, max_=None):
"""
Return a query set which requests the page after this response.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the next page.
Must be called on a result set which has :attr:`last` set.
"""
result = type(self)()
result.after = After(self.last.value)
result.max_ = max_
return result |
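A sketch of paging with the metadata object; ``rsm`` stands for the result-set metadata returned by an earlier query and must have ``last`` populated:

if rsm.last is not None:
    follow_up = rsm.next_page(max_=50)
    # follow_up.after now points just past the last item of the previous page.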
def read(self):
"""Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled"""
if self._is_initialized:
return
self._is_initialized = True
if not isinstance(self._file_or_files, (tuple, list)):
files_to_read = [self._file_or_files]
else:
files_to_read = list(self._file_or_files)
# end assure we have a copy of the paths to handle
seen = set(files_to_read)
num_read_include_files = 0
while files_to_read:
file_path = files_to_read.pop(0)
fp = file_path
file_ok = False
if hasattr(fp, "seek"):
self._read(fp, fp.name)
else:
# assume a path if it is not a file-object
try:
with open(file_path, 'rb') as fp:
file_ok = True
self._read(fp, fp.name)
except IOError:
continue
# Read includes and append those that we didn't handle yet
# We expect all paths to be normalized and absolute (and will assure that is the case)
if self._has_includes():
for _, include_path in self.items('include'):
if include_path.startswith('~'):
include_path = osp.expanduser(include_path)
if not osp.isabs(include_path):
if not file_ok:
continue
# end ignore relative paths if we don't know the configuration file path
assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
include_path = osp.join(osp.dirname(file_path), include_path)
# end make include path absolute
include_path = osp.normpath(include_path)
if include_path in seen or not os.access(include_path, os.R_OK):
continue
seen.add(include_path)
# insert included file to the top to be considered first
files_to_read.insert(0, include_path)
num_read_include_files += 1
# each include path in configuration file
# end handle includes
# END for each file object to read
# If there was no file included, we can safely write back (potentially) the configuration file
    # without altering its meaning
if num_read_include_files == 0:
self._merge_includes = False | Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled | Below is the the instruction that describes the task:
### Input:
Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled
### Response:
def read(self):
"""Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled"""
if self._is_initialized:
return
self._is_initialized = True
if not isinstance(self._file_or_files, (tuple, list)):
files_to_read = [self._file_or_files]
else:
files_to_read = list(self._file_or_files)
# end assure we have a copy of the paths to handle
seen = set(files_to_read)
num_read_include_files = 0
while files_to_read:
file_path = files_to_read.pop(0)
fp = file_path
file_ok = False
if hasattr(fp, "seek"):
self._read(fp, fp.name)
else:
# assume a path if it is not a file-object
try:
with open(file_path, 'rb') as fp:
file_ok = True
self._read(fp, fp.name)
except IOError:
continue
# Read includes and append those that we didn't handle yet
# We expect all paths to be normalized and absolute (and will assure that is the case)
if self._has_includes():
for _, include_path in self.items('include'):
if include_path.startswith('~'):
include_path = osp.expanduser(include_path)
if not osp.isabs(include_path):
if not file_ok:
continue
# end ignore relative paths if we don't know the configuration file path
assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
include_path = osp.join(osp.dirname(file_path), include_path)
# end make include path absolute
include_path = osp.normpath(include_path)
if include_path in seen or not os.access(include_path, os.R_OK):
continue
seen.add(include_path)
# insert included file to the top to be considered first
files_to_read.insert(0, include_path)
num_read_include_files += 1
# each include path in configuration file
# end handle includes
# END for each file object to read
# If there was no file included, we can safely write back (potentially) the configuration file
    # without altering its meaning
if num_read_include_files == 0:
self._merge_includes = False |