Convert pandas.Styler translated style to CSS.
Parameters
----------
style_type : str
Either "table_styles" or "cell_style".
style : dict
pandas.Styler translated style.
uuid : str
pandas.Styler uuid.
separator : str
A string separator used between table and cell selectors.
|
def _pandas_style_to_css(
style_type: str,
style: Mapping[str, Any],
uuid: str,
separator: str = "",
) -> str:
"""Convert pandas.Styler translated style to CSS.
Parameters
----------
style_type : str
Either "table_styles" or "cell_style".
style : dict
pandas.Styler translated style.
uuid : str
pandas.Styler uuid.
separator : str
A string separator used between table and cell selectors.
"""
declarations = []
for css_property, css_value in style["props"]:
declaration = css_property.strip() + ": " + css_value.strip()
declarations.append(declaration)
table_selector = f"#T_{uuid}"
# In pandas >= 1.1.0
# translated_style["cellstyle"] has the following shape:
# [
# {
# "props": [("color", " black"), ("background-color", "orange"), ("", "")],
# "selectors": ["row0_col0"]
# }
# ...
# ]
if style_type == "table_styles":
cell_selectors = [style["selector"]]
else:
cell_selectors = style["selectors"]
selectors = []
for cell_selector in cell_selectors:
selectors.append(table_selector + separator + cell_selector)
selector = ", ".join(selectors)
declaration_block = "; ".join(declarations)
rule_set = selector + " { " + declaration_block + " }"
return rule_set
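# Illustrative only (made-up uuid and props): a "table_styles" entry uses the
# single "selector" key, while a "cell_style" entry uses the "selectors" list.
_pandas_style_to_css(
    "table_styles",
    {"selector": "th", "props": [("color", "red")]},
    uuid="abc123",
    separator=" ",
)
# -> "#T_abc123 th { color: red }"
_pandas_style_to_css(
    "cell_style",
    {"selectors": ["row0_col0"], "props": [("background-color", "orange")]},
    uuid="abc123",
)
# -> "#T_abc123row0_col0 { background-color: orange }"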
|
Marshall pandas.Styler display values into an Arrow proto.
Parameters
----------
proto : proto.Arrow
Output. The protobuf for Streamlit Arrow proto.
df : pandas.DataFrame
A dataframe with original values.
styles : dict
pandas.Styler translated styles.
|
def _marshall_display_values(
proto: ArrowProto, df: DataFrame, styles: Mapping[str, Any]
) -> None:
"""Marshall pandas.Styler display values into an Arrow proto.
Parameters
----------
proto : proto.Arrow
Output. The protobuf for Streamlit Arrow proto.
df : pandas.DataFrame
A dataframe with original values.
styles : dict
pandas.Styler translated styles.
"""
new_df = _use_display_values(df, styles)
proto.styler.display_values = type_util.data_frame_to_bytes(new_df)
|
Create a new pandas.DataFrame where display values are used instead of original ones.
Parameters
----------
df : pandas.DataFrame
A dataframe with original values.
styles : dict
pandas.Styler translated styles.
|
def _use_display_values(df: DataFrame, styles: Mapping[str, Any]) -> DataFrame:
"""Create a new pandas.DataFrame where display values are used instead of original ones.
Parameters
----------
df : pandas.DataFrame
A dataframe with original values.
styles : dict
pandas.Styler translated styles.
"""
import re
# If values in a column are not of the same type, Arrow
# serialization would fail. Thus, we need to cast all values
# of the dataframe to strings before assigning them display values.
new_df = df.astype(str)
cell_selector_regex = re.compile(r"row(\d+)_col(\d+)")
if "body" in styles:
rows = styles["body"]
for row in rows:
for cell in row:
if "id" in cell:
if match := cell_selector_regex.match(cell["id"]):
r, c = map(int, match.groups())
new_df.iat[r, c] = str(cell["display_value"])
return new_df
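# Illustrative only (made-up dataframe and styles): a formatted display value
# from the translated Styler replaces the original cell content as a string.
import pandas as pd
_df = pd.DataFrame({"a": [0.123456]})
_styles = {"body": [[{"id": "row0_col0", "display_value": "0.12"}]]}
_use_display_values(_df, _styles)  # cell (0, 0) of the result is the string "0.12"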
|
Configure the Streamlit chart theme for Plotly.
The theme is only configured if Plotly is installed.
|
def configure_streamlit_plotly_theme() -> None:
"""Configure the Streamlit chart theme for Plotly.
The theme is only configured if Plotly is installed.
"""
# We do nothing if Plotly is not installed. This is expected since Plotly is an optional dependency.
with contextlib.suppress(ImportError):
import plotly.graph_objects as go
import plotly.io as pio
# This is the Streamlit theme for Plotly, built from a template.data
# and a template.layout.
# Template.data changes default properties of specific trace types,
# such as Contour plots or Waterfall plots.
# Template.layout changes general layout properties, such as the x axis,
# fonts, and other settings that apply to all graphs.
# We pass temporary colors to the frontend, and the frontend replaces them,
# because the final colors depend on the app's background color.
# Start at #000001 because developers are likely to use #000000
CATEGORY_0 = "#000001"
CATEGORY_1 = "#000002"
CATEGORY_2 = "#000003"
CATEGORY_3 = "#000004"
CATEGORY_4 = "#000005"
CATEGORY_5 = "#000006"
CATEGORY_6 = "#000007"
CATEGORY_7 = "#000008"
CATEGORY_8 = "#000009"
CATEGORY_9 = "#000010"
SEQUENTIAL_0 = "#000011"
SEQUENTIAL_1 = "#000012"
SEQUENTIAL_2 = "#000013"
SEQUENTIAL_3 = "#000014"
SEQUENTIAL_4 = "#000015"
SEQUENTIAL_5 = "#000016"
SEQUENTIAL_6 = "#000017"
SEQUENTIAL_7 = "#000018"
SEQUENTIAL_8 = "#000019"
SEQUENTIAL_9 = "#000020"
DIVERGING_0 = "#000021"
DIVERGING_1 = "#000022"
DIVERGING_2 = "#000023"
DIVERGING_3 = "#000024"
DIVERGING_4 = "#000025"
DIVERGING_5 = "#000026"
DIVERGING_6 = "#000027"
DIVERGING_7 = "#000028"
DIVERGING_8 = "#000029"
DIVERGING_9 = "#000030"
DIVERGING_10 = "#000031"
INCREASING = "#000032"
DECREASING = "#000033"
TOTAL = "#000034"
GRAY_70 = "#000036"
GRAY_90 = "#000037"
BG_COLOR = "#000038"
FADED_TEXT_05 = "#000039"
BG_MIX = "#000040"
# Plotly represents a continuous colorscale as an array of [position, color] pairs.
# A pair's first element is the starting point, and the next pair's first element is the end point.
# A pair's second element is the starting color, and the next pair's second element is the end color.
# For more information, please refer to https://plotly.com/python/colorscales/
streamlit_colorscale = [
[0.0, SEQUENTIAL_0],
[0.1111111111111111, SEQUENTIAL_1],
[0.2222222222222222, SEQUENTIAL_2],
[0.3333333333333333, SEQUENTIAL_3],
[0.4444444444444444, SEQUENTIAL_4],
[0.5555555555555556, SEQUENTIAL_5],
[0.6666666666666666, SEQUENTIAL_6],
[0.7777777777777778, SEQUENTIAL_7],
[0.8888888888888888, SEQUENTIAL_8],
[1.0, SEQUENTIAL_9],
]
pio.templates["streamlit"] = go.layout.Template(
data=go.layout.template.Data(
candlestick=[
go.layout.template.data.Candlestick(
decreasing=go.candlestick.Decreasing(
line=go.candlestick.decreasing.Line(color=DECREASING)
),
increasing=go.candlestick.Increasing(
line=go.candlestick.increasing.Line(color=INCREASING)
),
)
],
contour=[
go.layout.template.data.Contour(colorscale=streamlit_colorscale)
],
contourcarpet=[
go.layout.template.data.Contourcarpet(
colorscale=streamlit_colorscale
)
],
heatmap=[
go.layout.template.data.Heatmap(colorscale=streamlit_colorscale)
],
histogram2d=[
go.layout.template.data.Histogram2d(colorscale=streamlit_colorscale)
],
icicle=[
go.layout.template.data.Icicle(
textfont=go.icicle.Textfont(color="white")
)
],
sankey=[
go.layout.template.data.Sankey(
textfont=go.sankey.Textfont(color=GRAY_70)
)
],
scatter=[
go.layout.template.data.Scatter(
marker=go.scatter.Marker(line=go.scatter.marker.Line(width=0))
)
],
table=[
go.layout.template.data.Table(
cells=go.table.Cells(
fill=go.table.cells.Fill(color=BG_COLOR),
font=go.table.cells.Font(color=GRAY_90),
line=go.table.cells.Line(color=FADED_TEXT_05),
),
header=go.table.Header(
font=go.table.header.Font(color=GRAY_70),
line=go.table.header.Line(color=FADED_TEXT_05),
fill=go.table.header.Fill(color=BG_MIX),
),
)
],
waterfall=[
go.layout.template.data.Waterfall(
increasing=go.waterfall.Increasing(
marker=go.waterfall.increasing.Marker(color=INCREASING)
),
decreasing=go.waterfall.Decreasing(
marker=go.waterfall.decreasing.Marker(color=DECREASING)
),
totals=go.waterfall.Totals(
marker=go.waterfall.totals.Marker(color=TOTAL)
),
connector=go.waterfall.Connector(
line=go.waterfall.connector.Line(color=GRAY_70, width=2)
),
)
],
),
layout=go.Layout(
colorway=[
CATEGORY_0,
CATEGORY_1,
CATEGORY_2,
CATEGORY_3,
CATEGORY_4,
CATEGORY_5,
CATEGORY_6,
CATEGORY_7,
CATEGORY_8,
CATEGORY_9,
],
colorscale=go.layout.Colorscale(
sequential=streamlit_colorscale,
sequentialminus=streamlit_colorscale,
diverging=[
[0.0, DIVERGING_0],
[0.1, DIVERGING_1],
[0.2, DIVERGING_2],
[0.3, DIVERGING_3],
[0.4, DIVERGING_4],
[0.5, DIVERGING_5],
[0.6, DIVERGING_6],
[0.7, DIVERGING_7],
[0.8, DIVERGING_8],
[0.9, DIVERGING_9],
[1.0, DIVERGING_10],
],
),
coloraxis=go.layout.Coloraxis(colorscale=streamlit_colorscale),
),
)
pio.templates.default = "streamlit"
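# Illustrative only: once this has run, pio.templates.default == "streamlit",
# so newly created figures render with this template unless they override
# layout.template, e.g. (assuming Plotly is installed):
# import plotly.graph_objects as go
# go.Figure(go.Scatter(x=[1, 2, 3], y=[1, 4, 9])).show()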
|
Convert subtitles from SubRip (.srt) format to WebVTT (.vtt) format.
This function accepts the content of the .srt file either as a string
or as bytes.
Parameters
----------
srt_data : str or bytes
The content of the .srt file as a string or a bytes stream.
Returns
-------
bytes
The content converted into .vtt format.
|
def _srt_to_vtt(srt_data: str | bytes) -> bytes:
"""
Convert subtitles from SubRip (.srt) format to WebVTT (.vtt) format.
This function accepts the content of the .srt file either as a string
or as bytes.
Parameters
----------
srt_data : str or bytes
The content of the .srt file as a string or a bytes stream.
Returns
-------
bytes
The content converted into .vtt format.
"""
# If the input is a bytes stream, convert it to a string
if isinstance(srt_data, bytes):
# Decode the bytes to a UTF-8 string
try:
srt_data = srt_data.decode("utf-8")
except UnicodeDecodeError as e:
raise ValueError("Could not decode the input stream as UTF-8.") from e
if not isinstance(srt_data, str):
# If it's not a string by this point, something is wrong.
raise TypeError(
f"Input must be a string or a bytes stream, not {type(srt_data)}."
)
# Replace SubRip timing with WebVTT timing
vtt_data = re.sub(SRT_CONVERSION_REGEX, r"\1.\2", srt_data)
# Add WebVTT file header
vtt_content = "WEBVTT\n\n" + vtt_data
# Convert the vtt content to bytes
vtt_content = vtt_content.strip().encode("utf-8")
return vtt_content
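# Illustrative only: SRT_CONVERSION_REGEX is defined elsewhere in this module;
# a plausible form (assumed here) rewrites the comma in SRT timestamps to the
# dot WebVTT expects, e.g. r"(\d{2}:\d{2}:\d{2}),(\d{3})", so that:
# _srt_to_vtt("1\n00:00:01,000 --> 00:00:02,000\nHello\n")
# -> b"WEBVTT\n\n1\n00:00:01.000 --> 00:00:02.000\nHello"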
|
Handles string data, either as a file path or raw content.
|
def _handle_string_or_path_data(data_or_path: str | Path) -> bytes:
"""Handles string data, either as a file path or raw content."""
if os.path.isfile(data_or_path):
path = Path(data_or_path)
file_extension = path.suffix.lower()
if file_extension not in SUBTITLE_ALLOWED_FORMATS:
raise ValueError(
f"Incorrect subtitle format {file_extension}. Subtitles must be in "
f"one of the following formats: {', '.join(SUBTITLE_ALLOWED_FORMATS)}"
)
with open(data_or_path, "rb") as file:
content = file.read()
return _srt_to_vtt(content) if file_extension == ".srt" else content
elif isinstance(data_or_path, Path):
raise ValueError(f"File {data_or_path} does not exist.")
content_string = data_or_path.strip()
if content_string.startswith("WEBVTT") or content_string == "":
return content_string.encode("utf-8")
elif _is_srt(content_string):
return _srt_to_vtt(content_string)
raise ValueError("The provided string neither matches valid VTT nor SRT format.")
|
Handles io.BytesIO data, converting SRT to VTT content if needed.
|
def _handle_stream_data(stream: io.BytesIO) -> bytes:
"""Handles io.BytesIO data, converting SRT to VTT content if needed."""
stream.seek(0)
stream_data = stream.getvalue()
return _srt_to_vtt(stream_data) if _is_srt(stream) else stream_data
|
Handles bytes data, converting SRT to VTT content if needed.
|
def _handle_bytes_data(data: bytes) -> bytes:
"""Handles io.BytesIO data, converting SRT to VTT content if needed."""
return _srt_to_vtt(data) if _is_srt(data) else data
|
Detects the avatar type and prepares the avatar data for the frontend.
Parameters
----------
avatar :
The avatar that was provided by the user.
delta_path : str
The delta path is used as media ID when a local image is served via the media
file manager.
Returns
-------
Tuple[AvatarType, str]
The detected avatar type and the prepared avatar data.
|
def _process_avatar_input(
avatar: str | AtomicImage | None, delta_path: str
) -> tuple[BlockProto.ChatMessage.AvatarType.ValueType, str]:
"""Detects the avatar type and prepares the avatar data for the frontend.
Parameters
----------
avatar :
The avatar that was provided by the user.
delta_path : str
The delta path is used as media ID when a local image is served via the media
file manager.
Returns
-------
Tuple[AvatarType, str]
The detected avatar type and the prepared avatar data.
"""
AvatarType = BlockProto.ChatMessage.AvatarType
if avatar is None:
return AvatarType.ICON, ""
elif isinstance(avatar, str) and avatar in {item.value for item in PresetNames}:
# On the frontend, we only support "assistant" and "user" for the avatar.
return (
AvatarType.ICON,
(
"assistant"
if avatar in [PresetNames.AI, PresetNames.ASSISTANT]
else "user"
),
)
elif isinstance(avatar, str) and is_emoji(avatar):
return AvatarType.EMOJI, avatar
elif isinstance(avatar, str) and avatar.startswith(":material"):
return AvatarType.ICON, validate_material_icon(avatar)
else:
try:
return AvatarType.IMAGE, image_to_url(
avatar,
width=WidthBehaviour.ORIGINAL,
clamp=False,
channels="RGB",
output_format="auto",
image_id=delta_path,
)
except Exception as ex:
raise StreamlitAPIException(
"Failed to load the provided avatar value as an image."
) from ex
|
Convert a value to the correct type.
Parameters
----------
value : str | int | float | bool | None
The value to convert.
column_data_kind : ColumnDataKind
The determined data kind of the column. The column data kind refers to the
shared data type of the values in the column (e.g. int, float, str).
Returns
-------
The converted value.
|
def _parse_value(
value: str | int | float | bool | None,
column_data_kind: ColumnDataKind,
) -> Any:
"""Convert a value to the correct type.
Parameters
----------
value : str | int | float | bool | None
The value to convert.
column_data_kind : ColumnDataKind
The determined data kind of the column. The column data kind refers to the
shared data type of the values in the column (e.g. int, float, str).
Returns
-------
The converted value.
"""
if value is None:
return None
import pandas as pd
try:
if column_data_kind == ColumnDataKind.STRING:
return str(value)
if column_data_kind == ColumnDataKind.INTEGER:
return int(value)
if column_data_kind == ColumnDataKind.FLOAT:
return float(value)
if column_data_kind == ColumnDataKind.BOOLEAN:
return bool(value)
if column_data_kind == ColumnDataKind.DECIMAL:
# Decimal theoretically can also be initialized via number values.
# However, using number values here seems to cause issues with Arrow
# serialization, once you try to render the returned dataframe.
return Decimal(str(value))
if column_data_kind == ColumnDataKind.TIMEDELTA:
return pd.Timedelta(value)
if column_data_kind in [
ColumnDataKind.DATETIME,
ColumnDataKind.DATE,
ColumnDataKind.TIME,
]:
datetime_value = pd.Timestamp(value)
if datetime_value is pd.NaT:
return None
if column_data_kind == ColumnDataKind.DATETIME:
return datetime_value
if column_data_kind == ColumnDataKind.DATE:
return datetime_value.date()
if column_data_kind == ColumnDataKind.TIME:
return datetime_value.time()
except (ValueError, pd.errors.ParserError) as ex:
_LOGGER.warning(
"Failed to parse value %s as %s. Exception: %s", value, column_data_kind, ex
)
return None
return value
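# Illustrative only, assuming the ColumnDataKind members referenced above are
# in scope: edited cell values arrive as JSON primitives and are coerced back
# to the column's data kind.
# _parse_value("42", ColumnDataKind.INTEGER)          # -> 42
# _parse_value("2024-01-31", ColumnDataKind.DATE)     # -> datetime.date(2024, 1, 31)
# _parse_value("not a number", ColumnDataKind.FLOAT)  # -> None (failure is logged)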
|
Apply cell edits to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the cell edits to.
edited_rows : Mapping[int, Mapping[str, str | int | float | bool | None]]
A hierarchical mapping based on row position -> column name -> value
dataframe_schema: DataframeSchema
The schema of the dataframe.
|
def _apply_cell_edits(
df: pd.DataFrame,
edited_rows: Mapping[int, Mapping[str, str | int | float | bool | None]],
dataframe_schema: DataframeSchema,
) -> None:
"""Apply cell edits to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the cell edits to.
edited_rows : Mapping[int, Mapping[str, str | int | float | bool | None]]
A hierarchical mapping based on row position -> column name -> value
dataframe_schema: DataframeSchema
The schema of the dataframe.
"""
for row_id, row_changes in edited_rows.items():
row_pos = int(row_id)
for col_name, value in row_changes.items():
if col_name == INDEX_IDENTIFIER:
# The edited cell is part of the index
# TODO(lukasmasuch): To support multi-index in the future:
# use a tuple of values here instead of a single value
df.index.values[row_pos] = _parse_value(
value, dataframe_schema[INDEX_IDENTIFIER]
)
else:
col_pos = df.columns.get_loc(col_name)
df.iat[row_pos, col_pos] = _parse_value(
value, dataframe_schema[col_name]
)
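# Illustrative only (made-up values): edited_rows maps row position -> column
# name -> new value; the reserved "_index" key targets the index cell.
# _apply_cell_edits(
#     df,
#     {0: {"name": "Ada", "_index": 10}},
#     dataframe_schema,  # e.g. {"_index": ColumnDataKind.INTEGER, "name": ColumnDataKind.STRING}
# )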
|
Apply row additions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row additions to.
added_rows : List[Dict[str, Any]]
A list of row additions. Each row addition is a dictionary with the
column name as key and the new cell value as value.
dataframe_schema: DataframeSchema
The schema of the dataframe.
|
def _apply_row_additions(
df: pd.DataFrame,
added_rows: list[dict[str, Any]],
dataframe_schema: DataframeSchema,
) -> None:
"""Apply row additions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row additions to.
added_rows : List[Dict[str, Any]]
A list of row additions. Each row addition is a dictionary with the
column name as key and the new cell value as value.
dataframe_schema: DataframeSchema
The schema of the dataframe.
"""
if not added_rows:
return
import pandas as pd
# This is only used if the dataframe has a range index:
# There seems to be a bug in older pandas versions with RangeIndex in
# combination with loc. As a workaround, we manually track the values here:
range_index_stop = None
range_index_step = None
if isinstance(df.index, pd.RangeIndex):
range_index_stop = df.index.stop
range_index_step = df.index.step
for added_row in added_rows:
index_value = None
new_row: list[Any] = [None for _ in range(df.shape[1])]
for col_name in added_row.keys():
value = added_row[col_name]
if col_name == INDEX_IDENTIFIER:
# TODO(lukasmasuch): To support multi-index in the future:
# use a tuple of values here instead of a single value
index_value = _parse_value(value, dataframe_schema[INDEX_IDENTIFIER])
else:
col_pos = df.columns.get_loc(col_name)
new_row[col_pos] = _parse_value(value, dataframe_schema[col_name])
# Append the new row to the dataframe
if range_index_stop is not None:
df.loc[range_index_stop, :] = new_row
# Increment to the next range index value
range_index_stop += range_index_step
elif index_value is not None:
# TODO(lukasmasuch): we are only adding rows that have a non-None index
# value to prevent issues in the frontend component. Also, it just overwrites
# the row in case the index value already exists in the dataframe.
# In the future, it would be better to require users to provide unique
# non-None values for the index with some kind of visual indications.
df.loc[index_value, :] = new_row
|
Apply row deletions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row deletions to.
deleted_rows : List[int]
A list of row numbers to delete.
|
def _apply_row_deletions(df: pd.DataFrame, deleted_rows: list[int]) -> None:
"""Apply row deletions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row deletions to.
deleted_rows : List[int]
A list of row numbers to delete.
"""
# Drop rows based on numeric row positions
df.drop(df.index[deleted_rows], inplace=True)
|
Apply edits to the provided dataframe (inplace).
This includes cell edits, row additions and row deletions.
Parameters
----------
df : pd.DataFrame
The dataframe to apply the edits to.
data_editor_state : EditingState
The editing state of the data editor component.
dataframe_schema: DataframeSchema
The schema of the dataframe.
|
def _apply_dataframe_edits(
df: pd.DataFrame,
data_editor_state: EditingState,
dataframe_schema: DataframeSchema,
) -> None:
"""Apply edits to the provided dataframe (inplace).
This includes cell edits, row additions and row deletions.
Parameters
----------
df : pd.DataFrame
The dataframe to apply the edits to.
data_editor_state : EditingState
The editing state of the data editor component.
dataframe_schema: DataframeSchema
The schema of the dataframe.
"""
if data_editor_state.get("edited_rows"):
_apply_cell_edits(df, data_editor_state["edited_rows"], dataframe_schema)
if data_editor_state.get("added_rows"):
_apply_row_additions(df, data_editor_state["added_rows"], dataframe_schema)
if data_editor_state.get("deleted_rows"):
_apply_row_deletions(df, data_editor_state["deleted_rows"])
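# Illustrative only (made-up values): a full editing-state payload combines the
# three kinds of edits and is applied in the order cell edits, row additions,
# row deletions.
# _apply_dataframe_edits(
#     df,
#     {
#         "edited_rows": {0: {"name": "Ada"}},
#         "added_rows": [{"name": "Grace", "age": 36}],
#         "deleted_rows": [2],
#     },
#     dataframe_schema,
# )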
|
Check if the index is supported by the data editor component.
Parameters
----------
df_index : pd.Index
The index to check.
Returns
-------
bool
True if the index is supported, False otherwise.
|
def _is_supported_index(df_index: pd.Index) -> bool:
"""Check if the index is supported by the data editor component.
Parameters
----------
df_index : pd.Index
The index to check.
Returns
-------
bool
True if the index is supported, False otherwise.
"""
import pandas as pd
return (
type(df_index)
in [
pd.RangeIndex,
pd.Index,
pd.DatetimeIndex,
# Categorical index doesn't work since arrow
# doesn't serialize the options:
# pd.CategoricalIndex,
# Interval type isn't editable currently:
# pd.IntervalIndex,
# Period type isn't editable currently:
# pd.PeriodIndex,
]
# We need to check these index types without importing, since they are deprecated
# and planned to be removed soon.
or is_type(df_index, "pandas.core.indexes.numeric.Int64Index")
or is_type(df_index, "pandas.core.indexes.numeric.Float64Index")
or is_type(df_index, "pandas.core.indexes.numeric.UInt64Index")
)
|
Fix the column headers of the provided dataframe inplace to work
correctly for data editing.
|
def _fix_column_headers(data_df: pd.DataFrame) -> None:
"""Fix the column headers of the provided dataframe inplace to work
correctly for data editing."""
import pandas as pd
if isinstance(data_df.columns, pd.MultiIndex):
# Flatten hierarchical column headers to a single level:
data_df.columns = [
"_".join(map(str, header)) for header in data_df.columns.to_flat_index()
]
elif pd.api.types.infer_dtype(data_df.columns) != "string":
# If the column names are not all strings, we need to convert them to strings
# to avoid issues with editing:
data_df.rename(
columns={column: str(column) for column in data_df.columns},
inplace=True,
)
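# Illustrative only (made-up dataframe): hierarchical headers are flattened
# with "_" and non-string headers are converted to strings.
import pandas as pd
_df = pd.DataFrame([[1, 2]], columns=pd.MultiIndex.from_tuples([("a", 1), ("a", 2)]))
_fix_column_headers(_df)
# _df.columns is now ["a_1", "a_2"]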
|
Check if the column names in the provided dataframe are valid.
It's not allowed to have duplicate column names or column names that are
named ``_index``. If the column names are not valid, a ``StreamlitAPIException``
is raised.
|
def _check_column_names(data_df: pd.DataFrame):
"""Check if the column names in the provided dataframe are valid.
It's not allowed to have duplicate column names or column names that are
named ``_index``. If the column names are not valid, a ``StreamlitAPIException``
is raised.
"""
if data_df.columns.empty:
return
# Check if the column names are unique and raise an exception if not.
# Add the names of the duplicated columns to the exception message.
duplicated_columns = data_df.columns[data_df.columns.duplicated()]
if len(duplicated_columns) > 0:
raise StreamlitAPIException(
f"All column names are required to be unique for usage with data editor. "
f"The following column names are duplicated: {list(duplicated_columns)}. "
f"Please rename the duplicated columns in the provided data."
)
# Check if the column names are not named "_index" and raise an exception if so.
if INDEX_IDENTIFIER in data_df.columns:
raise StreamlitAPIException(
f"The column name '{INDEX_IDENTIFIER}' is reserved for the index column "
f"and can't be used for data columns. Please rename the column in the "
f"provided data."
)
|
Check column type to data type compatibility.
Iterates the index and all columns of the dataframe to check if
the configured column types are compatible with the underlying data types.
Parameters
----------
data_df : pd.DataFrame
The dataframe to check the type compatibilities for.
columns_config : ColumnConfigMapping
A mapping of column to column configurations.
dataframe_schema : DataframeSchema
The schema of the dataframe.
Raises
------
StreamlitAPIException
If a configured column type is editable and not compatible with the
underlying data type.
|
def _check_type_compatibilities(
data_df: pd.DataFrame,
columns_config: ColumnConfigMapping,
dataframe_schema: DataframeSchema,
):
"""Check column type to data type compatibility.
Iterates the index and all columns of the dataframe to check if
the configured column types are compatible with the underlying data types.
Parameters
----------
data_df : pd.DataFrame
The dataframe to check the type compatibilities for.
columns_config : ColumnConfigMapping
A mapping of column to column configurations.
dataframe_schema : DataframeSchema
The schema of the dataframe.
Raises
------
StreamlitAPIException
If a configured column type is editable and not compatible with the
underlying data type.
"""
# TODO(lukasmasuch): Update this here to support multi-index in the future:
indices = [(INDEX_IDENTIFIER, data_df.index)]
for column in indices + list(data_df.items()):
column_name, _ = column
column_data_kind = dataframe_schema[column_name]
# TODO(lukasmasuch): support column config via numerical index here?
if column_name in columns_config:
column_config = columns_config[column_name]
if column_config.get("disabled") is True:
# Disabled columns are not checked for compatibility.
# This might change in the future.
continue
type_config = column_config.get("type_config")
if type_config is None:
continue
configured_column_type = type_config.get("type")
if configured_column_type is None:
continue
if is_type_compatible(configured_column_type, column_data_kind) is False:
raise StreamlitAPIException(
f"The configured column type `{configured_column_type}` for column "
f"`{column_name}` is not compatible for editing the underlying "
f"data type `{column_data_kind}`.\n\nYou have following options to "
f"fix this: 1) choose a compatible type 2) disable the column "
f"3) convert the column into a compatible data type."
)
|
Perform validation checks and return indices based on the default values.
|
def _check_and_convert_to_indices(
opt: Sequence[Any], default_values: Sequence[Any] | Any | None
) -> list[int] | None:
"""Perform validation checks and return indices based on the default values."""
if default_values is None and None not in opt:
return None
if not isinstance(default_values, list):
# This if is done before others because calling if not x (done
# right below) when x is of type pd.Series() or np.array() throws a
# ValueError exception.
if is_type(default_values, "numpy.ndarray") or is_type(
default_values, "pandas.core.series.Series"
):
default_values = list(cast(Sequence[Any], default_values))
elif (
isinstance(default_values, (tuple, set))
or default_values
and default_values not in opt
):
default_values = list(default_values)
else:
default_values = [default_values]
for value in default_values:
if value not in opt:
raise StreamlitAPIException(
f"The default value '{value}' is part of the options. "
"Please make sure that every default values also exists in the options."
)
return [opt.index(value) for value in default_values]
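# Illustrative only (made-up options): defaults are validated against the
# options and mapped to their positional indices.
_check_and_convert_to_indices(["a", "b", "c"], ["c", "a"])  # -> [2, 0]
_check_and_convert_to_indices(["a", "b", "c"], "b")         # -> [1]
_check_and_convert_to_indices(["a", "b", "c"], None)        # -> None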
|
Restore times/datetimes to original timezone (dates are always naive)
|
def _micros_to_datetime(micros: int, orig_tz: tzinfo | None) -> datetime:
"""Restore times/datetimes to original timezone (dates are always naive)"""
utc_dt = UTC_EPOCH + timedelta(microseconds=micros)
# Add the original timezone. No conversion is required here,
# since in the serialization, we also just replace the timestamp with UTC.
return utc_dt.replace(tzinfo=orig_tz)
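# Illustrative only, assuming UTC_EPOCH (defined elsewhere) is the naive Unix
# epoch datetime(1970, 1, 1): one second past the epoch, restored to UTC:
# _micros_to_datetime(1_000_000, timezone.utc)
# -> datetime(1970, 1, 1, 0, 0, 1, tzinfo=timezone.utc)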
|
Convert newline characters to markdown newline sequences
(space, space, newline).
|
def _convert_newlines(text: str) -> str:
"""Convert newline characters to markdown newline sequences
(space, space, newline).
"""
return text.replace("\n", "  \n")
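# Illustrative only: the two trailing spaces force a markdown line break.
# _convert_newlines("line one\nline two")  # -> "line one  \nline two"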
|
Showing the code of the demo.
|
def show_code(demo):
"""Showing the code of the demo."""
show_code = st.sidebar.checkbox("Show code", True)
if show_code:
# Showing the code of the demo.
st.markdown("## Code")
sourcelines, _ = inspect.getsourcelines(demo)
st.code(textwrap.dedent("".join(sourcelines[1:])))
|
Randomly generate a unique ID for a script execution.
|
def _generate_scriptrun_id() -> str:
"""Randomly generate a unique ID for a script execution."""
return str(uuid.uuid4())
|
Create an instance of connection_class with the given name and kwargs.
The weird implementation of this function with the @cache_resource annotated
function defined internally is done to:
* Always @gather_metrics on the call even if the return value is a cached one.
* Allow the user to specify ttl and max_entries when calling st.connection.
|
def _create_connection(
name: str,
connection_class: type[ConnectionClass],
max_entries: int | None = None,
ttl: float | timedelta | None = None,
**kwargs,
) -> ConnectionClass:
"""Create an instance of connection_class with the given name and kwargs.
The weird implementation of this function with the @cache_resource annotated
function defined internally is done to:
* Always @gather_metrics on the call even if the return value is a cached one.
* Allow the user to specify ttl and max_entries when calling st.connection.
"""
def __create_connection(
name: str, connection_class: type[ConnectionClass], **kwargs
) -> ConnectionClass:
return connection_class(connection_name=name, **kwargs)
if not issubclass(connection_class, BaseConnection):
raise StreamlitAPIException(
f"{connection_class} is not a subclass of BaseConnection!"
)
# We modify our helper function's `__qualname__` here to work around default
# `@st.cache_resource` behavior. Otherwise, `st.connection` being called with
# different `ttl` or `max_entries` values will reset the cache with each call.
ttl_str = str(ttl).replace( # Avoid adding extra `.` characters to `__qualname__`
".", "_"
)
__create_connection.__qualname__ = (
f"{__create_connection.__qualname__}_{ttl_str}_{max_entries}"
)
__create_connection = cache_resource(
max_entries=max_entries,
show_spinner="Running `st.connection(...)`.",
ttl=ttl,
)(__create_connection)
return __create_connection(name, connection_class, **kwargs)
|
Create a new connection to a data store or API, or return an existing one.
Config options, credentials, secrets, etc. for connections are taken from various
sources:
- Any connection-specific configuration files.
- An app's ``secrets.toml`` files.
- The kwargs passed to this function.
Parameters
----------
name : str
The connection name used for secrets lookup in ``[connections.<name>]``.
Type will be inferred from passing ``"sql"``, ``"snowflake"``, or ``"snowpark"``.
type : str, connection class, or None
The type of connection to create. It can be a keyword (``"sql"``, ``"snowflake"``,
or ``"snowpark"``), a path to an importable class, or an imported class reference.
All classes must extend ``st.connections.BaseConnection`` and implement the
``_connect()`` method. If the type kwarg is None, a ``type`` field must be set in
the connection's section in ``secrets.toml``.
max_entries : int or None
The maximum number of connections to keep in the cache, or None
for an unbounded cache. (When a new entry is added to a full cache,
the oldest cached entry will be removed.) The default is None.
ttl : float, timedelta, or None
The maximum number of seconds to keep results in the cache, or
None if cached results should not expire. The default is None.
**kwargs : any
Additional connection specific kwargs that are passed to the Connection's
``_connect()`` method. Learn more from the specific Connection's documentation.
Returns
-------
Connection object
An initialized Connection object of the specified type.
Examples
--------
The easiest way to create a first-party (SQL, Snowflake, or Snowpark) connection is
to use their default names and define corresponding sections in your ``secrets.toml``
file.
>>> import streamlit as st
>>> conn = st.connection("sql") # Config section defined in [connections.sql] in secrets.toml.
Creating a SQLConnection with a custom name requires you to explicitly specify the
type. If type is not passed as a kwarg, it must be set in the appropriate section of
``secrets.toml``.
>>> import streamlit as st
>>> conn1 = st.connection("my_sql_connection", type="sql") # Config section defined in [connections.my_sql_connection].
>>> conn2 = st.connection("my_other_sql_connection") # type must be set in [connections.my_other_sql_connection].
Passing the full module path to the connection class that you want to use can be
useful, especially when working with a custom connection:
>>> import streamlit as st
>>> conn = st.connection("my_sql_connection", type="streamlit.connections.SQLConnection")
Finally, you can pass the connection class to use directly to this function. Doing
so allows static type checking tools such as ``mypy`` to infer the exact return
type of ``st.connection``.
>>> import streamlit as st
>>> from streamlit.connections import SQLConnection
>>> conn = st.connection("my_sql_connection", type=SQLConnection)
|
def connection_factory(
name,
type=None,
max_entries=None,
ttl=None,
**kwargs,
):
"""Create a new connection to a data store or API, or return an existing one.
Config options, credentials, secrets, etc. for connections are taken from various
sources:
- Any connection-specific configuration files.
- An app's ``secrets.toml`` files.
- The kwargs passed to this function.
Parameters
----------
name : str
The connection name used for secrets lookup in ``[connections.<name>]``.
Type will be inferred from passing ``"sql"``, ``"snowflake"``, or ``"snowpark"``.
type : str, connection class, or None
The type of connection to create. It can be a keyword (``"sql"``, ``"snowflake"``,
or ``"snowpark"``), a path to an importable class, or an imported class reference.
All classes must extend ``st.connections.BaseConnection`` and implement the
``_connect()`` method. If the type kwarg is None, a ``type`` field must be set in
the connection's section in ``secrets.toml``.
max_entries : int or None
The maximum number of connections to keep in the cache, or None
for an unbounded cache. (When a new entry is added to a full cache,
the oldest cached entry will be removed.) The default is None.
ttl : float, timedelta, or None
The maximum number of seconds to keep results in the cache, or
None if cached results should not expire. The default is None.
**kwargs : any
Additional connection specific kwargs that are passed to the Connection's
``_connect()`` method. Learn more from the specific Connection's documentation.
Returns
-------
Connection object
An initialized Connection object of the specified type.
Examples
--------
The easiest way to create a first-party (SQL, Snowflake, or Snowpark) connection is
to use their default names and define corresponding sections in your ``secrets.toml``
file.
>>> import streamlit as st
>>> conn = st.connection("sql") # Config section defined in [connections.sql] in secrets.toml.
Creating a SQLConnection with a custom name requires you to explicitly specify the
type. If type is not passed as a kwarg, it must be set in the appropriate section of
``secrets.toml``.
>>> import streamlit as st
>>> conn1 = st.connection("my_sql_connection", type="sql") # Config section defined in [connections.my_sql_connection].
>>> conn2 = st.connection("my_other_sql_connection") # type must be set in [connections.my_other_sql_connection].
Passing the full module path to the connection class that you want to use can be
useful, especially when working with a custom connection:
>>> import streamlit as st
>>> conn = st.connection("my_sql_connection", type="streamlit.connections.SQLConnection")
Finally, you can pass the connection class to use directly to this function. Doing
so allows static type checking tools such as ``mypy`` to infer the exact return
type of ``st.connection``.
>>> import streamlit as st
>>> from streamlit.connections import SQLConnection
>>> conn = st.connection("my_sql_connection", type=SQLConnection)
"""
USE_ENV_PREFIX = "env:"
if name.startswith(USE_ENV_PREFIX):
# It'd be nice to use str.removeprefix() here, but we won't be able to do that
# until the minimum Python version we support is 3.9.
envvar_name = name[len(USE_ENV_PREFIX) :]
name = os.environ[envvar_name]
if type is None:
if name in FIRST_PARTY_CONNECTIONS:
# We allow users to simply write `st.connection("sql")` instead of
# `st.connection("sql", type="sql")`.
type = _get_first_party_connection(name)
else:
# The user didn't specify a type, so we try to pull it out from their
# secrets.toml file. NOTE: we're okay with any of the dict lookups below
# exploding with a KeyError since, if type isn't explicitly specified here,
# it must be the case that it's defined in secrets.toml and should raise an
# Exception otherwise.
secrets_singleton.load_if_toml_exists()
type = secrets_singleton["connections"][name]["type"]
# type is a nice kwarg name for the st.connection user but is annoying to work with
# since it conflicts with the builtin function name and thus gets syntax
# highlighted.
connection_class = type
if isinstance(connection_class, str):
# We assume that a connection_class specified via string is either the fully
# qualified name of a class (its module and exported classname) or the string
# literal shorthand for one of our first party connections. In the former case,
# connection_class will always contain a "." in its name.
if "." in connection_class:
parts = connection_class.split(".")
classname = parts.pop()
import importlib
connection_module = importlib.import_module(".".join(parts))
connection_class = getattr(connection_module, classname)
else:
connection_class = _get_first_party_connection(connection_class)
# At this point, connection_class should be of type Type[ConnectionClass].
try:
conn = _create_connection(
name, connection_class, max_entries=max_entries, ttl=ttl, **kwargs
)
if isinstance(conn, SnowparkConnection):
conn = deprecate_obj_name(
conn,
'connection("snowpark")',
'connection("snowflake")',
"2024-04-01",
)
return conn
except ModuleNotFoundError as e:
err_string = str(e)
missing_module = re.search(MODULE_EXTRACTION_REGEX, err_string)
extra_info = "You may be missing a dependency required to use this connection."
if missing_module:
pypi_package = MODULES_TO_PYPI_PACKAGES.get(missing_module.group(1))
if pypi_package:
extra_info = f"You need to install the '{pypi_package}' package to use this connection."
raise ModuleNotFoundError(f"{str(e)}. {extra_info}")
|
Send the user's email to segment.io, if submitted
|
def _send_email(email: str) -> None:
"""Send the user's email to segment.io, if submitted"""
import requests
if email is None or "@" not in email:
return
headers = {
"authority": "api.segment.io",
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "text/plain",
"origin": "localhost:8501",
"referer": "localhost:8501/",
}
dt = datetime.utcnow().isoformat() + "+00:00"
data = {
"anonymous_id": None,
"context": {
"library": {"name": "analytics-python", "version": "2.2.2"},
},
"messageId": str(uuid4()),
"timestamp": dt,
"event": "submittedEmail",
"traits": {
"authoremail": email,
"source": "provided_email",
},
"type": "track",
"userId": email,
"writeKey": "iCkMy7ymtJ9qYzQRXkQpnAJEq7D4NyMU",
}
response = requests.post(
"https://api.segment.io/v1/t",
headers=headers,
data=json.dumps(data).encode(),
)
response.raise_for_status()
|
Verify the user's email address.
The email can either be an empty string (if the user chooses not to enter
it), or a string with a single '@' somewhere in it.
Parameters
----------
email : str
Returns
-------
_Activation
An _Activation object. Its 'is_valid' property will be True only if
the email was validated.
|
def _verify_email(email: str) -> _Activation:
"""Verify the user's email address.
The email can either be an empty string (if the user chooses not to enter
it), or a string with a single '@' somewhere in it.
Parameters
----------
email : str
Returns
-------
_Activation
An _Activation object. Its 'is_valid' property will be True only if
the email was validated.
"""
email = email.strip()
# We deliberately use simple email validation here
# since we do not use email address anywhere to send emails.
if len(email) > 0 and email.count("@") != 1:
_LOGGER.error("That doesn't look like an email :(")
return _Activation(None, False)
return _Activation(email, True)
|
Exit program with error.
|
def _exit(message: str) -> NoReturn:
"""Exit program with error."""
_LOGGER.error(message)
sys.exit(-1)
|
Check credentials and potentially activate.
Note
----
If there is no credential file and we are in headless mode, we should not
check, since credential would be automatically set to an empty string.
|
def check_credentials() -> None:
"""Check credentials and potentially activate.
Note
----
If there is no credential file and we are in headless mode, we should not
check, since credential would be automatically set to an empty string.
"""
from streamlit import config
if not _check_credential_file_exists() and config.get_option("server.headless"):
if not config.is_manually_set("browser.gatherUsageStats"):
# If not manually defined, show short message about usage stats gathering.
cli_util.print_to_cli(_TELEMETRY_HEADLESS_TEXT)
return
Credentials.get_current()._check_activated()
|
Computes and assigns the unique hash for a ForwardMsg.
If the ForwardMsg already has a hash, this is a no-op.
Parameters
----------
msg : ForwardMsg
Returns
-------
string
The message's hash, returned here for convenience. (The hash
will also be assigned to the ForwardMsg; callers do not need
to do this.)
|
def populate_hash_if_needed(msg: ForwardMsg) -> str:
"""Computes and assigns the unique hash for a ForwardMsg.
If the ForwardMsg already has a hash, this is a no-op.
Parameters
----------
msg : ForwardMsg
Returns
-------
string
The message's hash, returned here for convenience. (The hash
will also be assigned to the ForwardMsg; callers do not need
to do this.)
"""
if msg.hash == "":
# Move the message's metadata aside. It's not part of the
# hash calculation.
metadata = msg.metadata
msg.ClearField("metadata")
# MD5 is good enough for what we need, which is uniqueness.
hasher = hashlib.md5(**HASHLIB_KWARGS)
hasher.update(msg.SerializeToString())
msg.hash = hasher.hexdigest()
# Restore metadata.
msg.metadata.CopyFrom(metadata)
return msg.hash
|
Create a ForwardMsg that refers to the given message via its hash.
The reference message will also get a copy of the source message's
metadata.
Parameters
----------
msg : ForwardMsg
The ForwardMsg to create the reference to.
Returns
-------
ForwardMsg
A new ForwardMsg that "points" to the original message via the
ref_hash field.
|
def create_reference_msg(msg: ForwardMsg) -> ForwardMsg:
"""Create a ForwardMsg that refers to the given message via its hash.
The reference message will also get a copy of the source message's
metadata.
Parameters
----------
msg : ForwardMsg
The ForwardMsg to create the reference to.
Returns
-------
ForwardMsg
A new ForwardMsg that "points" to the original message via the
ref_hash field.
"""
ref_msg = ForwardMsg()
ref_msg.ref_hash = populate_hash_if_needed(msg)
ref_msg.metadata.CopyFrom(msg.metadata)
return ref_msg
|
True if the ForwardMsg is potentially composable with other ForwardMsgs.
|
def _is_composable_message(msg: ForwardMsg) -> bool:
"""True if the ForwardMsg is potentially composable with other ForwardMsgs."""
if not msg.HasField("delta"):
# Non-delta messages are never composable.
return False
# We never compose add_rows messages in Python, because the add_rows
# operation can raise errors, and we don't have a good way of handling
# those errors in the message queue.
delta_type = msg.delta.WhichOneof("type")
return delta_type != "add_rows" and delta_type != "arrow_add_rows"
|
Combines new_delta onto old_delta if possible.
If the combination takes place, the function returns a new Delta that
should replace old_delta in the queue.
If the new_delta is incompatible with old_delta, the function returns None.
In this case, the new_delta should just be appended to the queue as normal.
|
def _maybe_compose_deltas(old_delta: Delta, new_delta: Delta) -> Delta | None:
"""Combines new_delta onto old_delta if possible.
If the combination takes place, the function returns a new Delta that
should replace old_delta in the queue.
If the new_delta is incompatible with old_delta, the function returns None.
In this case, the new_delta should just be appended to the queue as normal.
"""
old_delta_type = old_delta.WhichOneof("type")
if old_delta_type == "add_block":
# We never replace add_block deltas, because blocks can have
# other dependent deltas later in the queue. For example:
#
# placeholder = st.empty()
# placeholder.columns(1)
# placeholder.empty()
#
# The call to "placeholder.columns(1)" creates two blocks, a parent
# container with delta_path (0, 0), and a column child with
# delta_path (0, 0, 0). If the final "placeholder.empty()" Delta
# is composed with the parent container Delta, the frontend will
# throw an error when it tries to add that column child to what is
# now just an element, and not a block.
return None
new_delta_type = new_delta.WhichOneof("type")
if new_delta_type == "new_element":
return new_delta
if new_delta_type == "add_block":
return new_delta
return None
|
Decorator to turn a function into a fragment which can rerun independently of the full script.
When a user interacts with an input widget created by a fragment, Streamlit
only reruns the fragment instead of the full script. If ``run_every`` is set,
Streamlit will also rerun the fragment at the specified interval while the
session is active, even if the user is not interacting with your app.
To trigger a full script rerun from inside a fragment, call ``st.rerun()``
directly. Any values from the fragment that need to be accessed from
the wider app should generally be stored in Session State.
When Streamlit element commands are called directly in a fragment, the
elements are cleared and redrawn on each fragment rerun, just like all
elements are redrawn on each full-script rerun. The rest of the app is
persisted during a fragment rerun. When a fragment renders elements into
externally created containers, the elements will not be cleared with each
fragment rerun. In this case, elements will accumulate in those containers
with each fragment rerun, until the next full-script rerun.
Calling `st.sidebar` in a fragment is not supported. To write elements to
the sidebar with a fragment, call your fragment function inside a
`with st.sidebar` context manager.
Fragment code can interact with Session State, imported modules, and
other Streamlit elements created outside the fragment. Note that these
interactions are additive across multiple fragment reruns. You are
responsible for handling any side effects of that behavior.
Parameters
----------
func: callable
The function to turn into a fragment.
run_every: int, float, timedelta, str, or None
The time interval between automatic fragment reruns. This can be one of
the following:
* ``None`` (default).
* An ``int`` or ``float`` specifying the interval in seconds.
* A string specifying the time in a format supported by `Pandas'
Timedelta constructor <https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html>`_,
e.g. ``"1d"``, ``"1.5 days"``, or ``"1h23s"``.
* A ``timedelta`` object from `Python's built-in datetime library
<https://docs.python.org/3/library/datetime.html#timedelta-objects>`_,
e.g. ``timedelta(days=1)``.
If ``run_every`` is ``None``, the fragment will only rerun from
user-triggered events.
Examples
--------
The following example demonstrates basic usage of ``@st.experimental_fragment``. In
this app, clicking "Rerun full script" will increment both counters and
update all values displayed in the app. In contrast, clicking "Rerun fragment"
will only increment the counter within the fragment. In this case, the
``st.write`` command inside the fragment will update the app's frontend,
but the two ``st.write`` commands outside the fragment will not update the
frontend.
>>> import streamlit as st
>>>
>>> if "script_runs" not in st.session_state:
>>> st.session_state.script_runs = 0
>>> st.session_state.fragment_runs = 0
>>>
>>> @st.experimental_fragment
>>> def fragment():
>>> st.session_state.fragment_runs += 1
>>> st.button("Rerun fragment")
>>> st.write(f"Fragment says it ran {st.session_state.fragment_runs} times.")
>>>
>>> st.session_state.script_runs += 1
>>> fragment()
>>> st.button("Rerun full script")
>>> st.write(f"Full script says it ran {st.session_state.script_runs} times.")
>>> st.write(f"Full script sees that fragment ran {st.session_state.fragment_runs} times.")
.. output::
https://doc-fragment.streamlit.app/
height: 400px
You can also trigger a full-script rerun from inside a fragment by calling
``st.rerun``.
>>> import streamlit as st
>>>
>>> if "clicks" not in st.session_state:
>>> st.session_state.clicks = 0
>>>
>>> @st.experimental_fragment
>>> def count_to_five():
>>> if st.button("Plus one!"):
>>> st.session_state.clicks += 1
>>> if st.session_state.clicks % 5 == 0:
>>> st.rerun()
>>> return
>>>
>>> count_to_five()
>>> st.header(f"Multiples of five clicks: {st.session_state.clicks // 5}")
>>>
>>> if st.button("Check click count"):
>>> st.toast(f"## Total clicks: {st.session_state.clicks}")
.. output::
https://doc-fragment-rerun.streamlit.app/
height: 400px
|
def fragment(
func: F | None = None,
*,
run_every: int | float | timedelta | str | None = None,
) -> Callable[[F], F] | F:
"""Decorator to turn a function into a fragment which can rerun independently\
of the full script.
When a user interacts with an input widget created by a fragment, Streamlit
only reruns the fragment instead of the full script. If ``run_every`` is set,
Streamlit will also rerun the fragment at the specified interval while the
session is active, even if the user is not interacting with your app.
To trigger a full script rerun from inside a fragment, call ``st.rerun()``
directly. Any values from the fragment that need to be accessed from
the wider app should generally be stored in Session State.
When Streamlit element commands are called directly in a fragment, the
elements are cleared and redrawn on each fragment rerun, just like all
elements are redrawn on each full-script rerun. The rest of the app is
persisted during a fragment rerun. When a fragment renders elements into
externally created containers, the elements will not be cleared with each
fragment rerun. In this case, elements will accumulate in those containers
with each fragment rerun, until the next full-script rerun.
Calling `st.sidebar` in a fragment is not supported. To write elements to
the sidebar with a fragment, call your fragment function inside a
`with st.sidebar` context manager.
Fragment code can interact with Session State, imported modules, and
other Streamlit elements created outside the fragment. Note that these
interactions are additive across multiple fragment reruns. You are
responsible for handling any side effects of that behavior.
Parameters
----------
func: callable
The function to turn into a fragment.
run_every: int, float, timedelta, str, or None
The time interval between automatic fragment reruns. This can be one of
the following:
* ``None`` (default).
* An ``int`` or ``float`` specifying the interval in seconds.
* A string specifying the time in a format supported by `Pandas'
Timedelta constructor <https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html>`_,
e.g. ``"1d"``, ``"1.5 days"``, or ``"1h23s"``.
* A ``timedelta`` object from `Python's built-in datetime library
<https://docs.python.org/3/library/datetime.html#timedelta-objects>`_,
e.g. ``timedelta(days=1)``.
If ``run_every`` is ``None``, the fragment will only rerun from
user-triggered events.
Examples
--------
The following example demonstrates basic usage of ``@st.experimental_fragment``. In
this app, clicking "Rerun full script" will increment both counters and
update all values displayed in the app. In contrast, clicking "Rerun fragment"
will only increment the counter within the fragment. In this case, the
``st.write`` command inside the fragment will update the app's frontend,
but the two ``st.write`` commands outside the fragment will not update the
frontend.
>>> import streamlit as st
>>>
>>> if "script_runs" not in st.session_state:
>>> st.session_state.script_runs = 0
>>> st.session_state.fragment_runs = 0
>>>
>>> @st.experimental_fragment
>>> def fragment():
>>> st.session_state.fragment_runs += 1
>>> st.button("Rerun fragment")
>>> st.write(f"Fragment says it ran {st.session_state.fragment_runs} times.")
>>>
>>> st.session_state.script_runs += 1
>>> fragment()
>>> st.button("Rerun full script")
>>> st.write(f"Full script says it ran {st.session_state.script_runs} times.")
>>> st.write(f"Full script sees that fragment ran {st.session_state.fragment_runs} times.")
.. output::
https://doc-fragment.streamlit.app/
height: 400px
You can also trigger a full-script rerun from inside a fragment by calling
``st.rerun``.
>>> import streamlit as st
>>>
>>> if "clicks" not in st.session_state:
>>> st.session_state.clicks = 0
>>>
>>> @st.experimental_fragment
>>> def count_to_five():
>>> if st.button("Plus one!"):
>>> st.session_state.clicks += 1
>>> if st.session_state.clicks % 5 == 0:
>>> st.rerun()
>>> return
>>>
>>> count_to_five()
>>> st.header(f"Multiples of five clicks: {st.session_state.clicks // 5}")
>>>
>>> if st.button("Check click count"):
>>> st.toast(f"## Total clicks: {st.session_state.clicks}")
.. output::
https://doc-fragment-rerun.streamlit.app/
height: 400px
"""
if func is None:
# Support passing the params via function decorator
def wrapper(f: F) -> F:
return fragment(
func=f,
run_every=run_every,
)
return wrapper
else:
non_optional_func = func
@wraps(non_optional_func)
def wrap(*args, **kwargs):
from streamlit.delta_generator import dg_stack
ctx = get_script_run_ctx()
if ctx is None:
return
cursors_snapshot = deepcopy(ctx.cursors)
dg_stack_snapshot = deepcopy(dg_stack.get())
active_dg = dg_stack_snapshot[-1]
h = hashlib.new("md5")
h.update(
f"{non_optional_func.__module__}.{non_optional_func.__qualname__}{active_dg._get_delta_path_str()}".encode(
"utf-8"
)
)
fragment_id = h.hexdigest()
def wrapped_fragment():
import streamlit as st
# NOTE: We need to call get_script_run_ctx here again and can't just use the
# value of ctx from above captured by the closure because subsequent
# fragment runs will generally run in a new script run, thus we'll have a
# new ctx.
ctx = get_script_run_ctx(suppress_warning=True)
assert ctx is not None
if ctx.fragment_ids_this_run:
# This script run is a run of one or more fragments. We restore the
# state of ctx.cursors and dg_stack to the snapshots we took when this
# fragment was declared.
ctx.cursors = deepcopy(cursors_snapshot)
dg_stack.set(deepcopy(dg_stack_snapshot))
else:
# Otherwise, we must be in a full script run. We need to temporarily set
# ctx.current_fragment_id so that elements corresponding to this
# fragment get tagged with the appropriate ID. ctx.current_fragment_id
# gets reset after the fragment function finishes running.
ctx.current_fragment_id = fragment_id
try:
with st.container():
result = non_optional_func(*args, **kwargs)
finally:
ctx.current_fragment_id = None
return result
ctx.fragment_storage.set(fragment_id, wrapped_fragment)
if run_every:
msg = ForwardMsg()
msg.auto_rerun.interval = time_to_seconds(run_every)
msg.auto_rerun.fragment_id = fragment_id
ctx.enqueue(msg)
return wrapped_fragment()
with contextlib.suppress(AttributeError):
# Make this a well-behaved decorator by preserving important function
# attributes.
wrap.__dict__.update(non_optional_func.__dict__)
wrap.__signature__ = inspect.signature(non_optional_func) # type: ignore
return wrap
|
Get the active AppSession's session_id.
|
def _get_session_id() -> str:
"""Get the active AppSession's session_id."""
from streamlit.runtime.scriptrunner import get_script_run_ctx
ctx = get_script_run_ctx()
if ctx is None:
# This is only None when running "python myscript.py" rather than
# "streamlit run myscript.py". In which case the session ID doesn't
# matter and can just be a constant, as there's only ever "session".
return "dontcare"
else:
return ctx.session_id
|
Hash data, mimetype, and an optional filename to generate a stable file ID.
Parameters
----------
data
Content of in-memory file in bytes. Other types will throw TypeError.
mimetype
Any string. Will be converted to bytes and used to compute a hash.
filename
Any string. Will be converted to bytes and used to compute a hash.
|
def _calculate_file_id(data: bytes, mimetype: str, filename: str | None = None) -> str:
"""Hash data, mimetype, and an optional filename to generate a stable file ID.
Parameters
----------
data
Content of in-memory file in bytes. Other types will throw TypeError.
mimetype
Any string. Will be converted to bytes and used to compute a hash.
filename
Any string. Will be converted to bytes and used to compute a hash.
"""
filehash = hashlib.new("sha224", **HASHLIB_KWARGS)
filehash.update(data)
filehash.update(bytes(mimetype.encode()))
if filename is not None:
filehash.update(bytes(filename.encode()))
return filehash.hexdigest()
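# Hedged, standalone sketch of the ID scheme above (HASHLIB_KWARGS is omitted and
# the helper name is illustrative, not part of the Streamlit API):
import hashlib
def _file_id_sketch(data, mimetype, filename=None):
    h = hashlib.sha224()
    h.update(data)
    h.update(mimetype.encode())
    if filename is not None:
        h.update(filename.encode())
    return h.hexdigest()
# Identical inputs always produce the same ID, so the same bytes/mimetype pair
# maps to the same stored file:
assert _file_id_sketch(b"abc", "text/plain") == _file_id_sketch(b"abc", "text/plain")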
|
Get the machine ID.
This is a unique identifier for a user, used for tracking metrics in Segment.
It is broken in different ways in some Linux distros and Docker images:
- at times it is just a hash of '', so many machines map to the same ID
- at times it is a hash of the same string when running in a Docker container
|
def _get_machine_id_v3() -> str:
"""Get the machine ID
This is a unique identifier for a user for tracking metrics in Segment,
that is broken in different ways in some Linux distros and Docker images.
- at times just a hash of '', which means many machines map to the same ID
- at times a hash of the same string, when running in a Docker container
"""
machine_id = str(uuid.getnode())
if os.path.isfile(_ETC_MACHINE_ID_PATH):
with open(_ETC_MACHINE_ID_PATH) as f:
machine_id = f.read()
elif os.path.isfile(_DBUS_MACHINE_ID_PATH):
with open(_DBUS_MACHINE_ID_PATH) as f:
machine_id = f.read()
return machine_id
|
Get a simplified name for the type of the given object.
|
def _get_type_name(obj: object) -> str:
"""Get a simplified name for the type of the given object."""
with contextlib.suppress(Exception):
obj_type = obj if inspect.isclass(obj) else type(obj)
type_name = "unknown"
if hasattr(obj_type, "__qualname__"):
type_name = obj_type.__qualname__
elif hasattr(obj_type, "__name__"):
type_name = obj_type.__name__
if obj_type.__module__ != "builtins":
# Add the full module path
type_name = f"{obj_type.__module__}.{type_name}"
if type_name in _OBJECT_NAME_MAPPING:
type_name = _OBJECT_NAME_MAPPING[type_name]
return type_name
return "failed"
|
Get the top level module for the given function.
|
def _get_top_level_module(func: Callable[..., Any]) -> str:
"""Get the top level module for the given function."""
module = inspect.getmodule(func)
if module is None or not module.__name__:
return "unknown"
return module.__name__.split(".")[0]
|
Get metadata information related to the value of the given object.
|
def _get_arg_metadata(arg: object) -> str | None:
"""Get metadata information related to the value of the given object."""
with contextlib.suppress(Exception):
        if isinstance(arg, bool):
return f"val:{arg}"
if isinstance(arg, Sized):
return f"len:{len(arg)}"
return None
|
Get telemetry information for the given callable and its arguments.
|
def _get_command_telemetry(
_command_func: Callable[..., Any], _command_name: str, *args, **kwargs
) -> Command:
"""Get telemetry information for the given callable and its arguments."""
arg_keywords = inspect.getfullargspec(_command_func).args
self_arg: Any | None = None
arguments: list[Argument] = []
is_method = inspect.ismethod(_command_func)
name = _command_name
for i, arg in enumerate(args):
pos = i
if is_method:
# If func is a method, ignore the first argument (self)
i = i + 1
keyword = arg_keywords[i] if len(arg_keywords) > i else f"{i}"
if keyword == "self":
self_arg = arg
continue
argument = Argument(k=keyword, t=_get_type_name(arg), p=pos)
arg_metadata = _get_arg_metadata(arg)
if arg_metadata:
argument.m = arg_metadata
arguments.append(argument)
for kwarg, kwarg_value in kwargs.items():
argument = Argument(k=kwarg, t=_get_type_name(kwarg_value))
arg_metadata = _get_arg_metadata(kwarg_value)
if arg_metadata:
argument.m = arg_metadata
arguments.append(argument)
top_level_module = _get_top_level_module(_command_func)
if top_level_module != "streamlit":
# If the gather_metrics decorator is used outside of streamlit library
# we enforce a prefix to be added to the tracked command:
name = f"external:{top_level_module}:{name}"
if (
name == "create_instance"
and self_arg
and hasattr(self_arg, "name")
and self_arg.name
):
name = f"component:{self_arg.name}"
return Command(name=name, args=arguments)
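# Small illustration of the argument-name mapping used above; the command below is
# hypothetical and only stdlib inspect behavior is assumed:
import inspect
def _demo_command(url, timeout=5):
    return url
inspect.getfullargspec(_demo_command).args  # -> ["url", "timeout"]
# A call like _demo_command("https://example.com", timeout=10) would therefore be
# recorded as a positional Argument ("url", type "str") plus a keyword Argument
# ("timeout", type "int"), each with optional "val:..." / "len:..." metadata.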
|
Convert seconds into microseconds.
|
def to_microseconds(seconds: float) -> int:
"""Convert seconds into microseconds."""
return int(seconds * 1_000_000)
|
Function decorator to add telemetry tracking to commands.
Parameters
----------
func : callable
The function to track for telemetry.
name : str or None
Overwrite the function name with a custom name that is used for telemetry tracking.
Example
-------
>>> @st.gather_metrics
... def my_command(url):
... return url
>>> @st.gather_metrics(name="custom_name")
... def my_command(url):
... return url
|
def gather_metrics(name: str, func: F | None = None) -> Callable[[F], F] | F:
"""Function decorator to add telemetry tracking to commands.
Parameters
----------
func : callable
The function to track for telemetry.
name : str or None
Overwrite the function name with a custom name that is used for telemetry tracking.
Example
-------
>>> @st.gather_metrics
... def my_command(url):
... return url
>>> @st.gather_metrics(name="custom_name")
... def my_command(url):
... return url
"""
if not name:
_LOGGER.warning("gather_metrics: name is empty")
name = "undefined"
if func is None:
# Support passing the params via function decorator
def wrapper(f: F) -> F:
return gather_metrics(
name=name,
func=f,
)
return wrapper
else:
# To make mypy type narrow F | None -> F
non_optional_func = func
@wraps(non_optional_func)
def wrapped_func(*args, **kwargs):
from timeit import default_timer as timer
exec_start = timer()
# Local imports to prevent circular dependencies
from streamlit.runtime.scriptrunner import get_script_run_ctx
from streamlit.runtime.scriptrunner.script_runner import RerunException
ctx = get_script_run_ctx(suppress_warning=True)
tracking_activated = (
ctx is not None
and ctx.gather_usage_stats
and not ctx.command_tracking_deactivated
and len(ctx.tracked_commands)
< _MAX_TRACKED_COMMANDS # Prevent too much memory usage
)
command_telemetry: Command | None = None
if ctx and tracking_activated:
try:
command_telemetry = _get_command_telemetry(
non_optional_func, name, *args, **kwargs
)
if (
command_telemetry.name not in ctx.tracked_commands_counter
or ctx.tracked_commands_counter[command_telemetry.name]
< _MAX_TRACKED_PER_COMMAND
):
ctx.tracked_commands.append(command_telemetry)
ctx.tracked_commands_counter.update([command_telemetry.name])
# Deactivate tracking to prevent calls inside already tracked commands
ctx.command_tracking_deactivated = True
except Exception as ex:
# Always capture all exceptions since we want to make sure that
# the telemetry never causes any issues.
_LOGGER.debug("Failed to collect command telemetry", exc_info=ex)
try:
result = non_optional_func(*args, **kwargs)
except RerunException as ex:
# Duplicated from below, because static analysis tools get confused
# by deferring the rethrow.
if tracking_activated and command_telemetry:
command_telemetry.time = to_microseconds(timer() - exec_start)
raise ex
finally:
                # Reset the tracking flag; this finally block runs whether or not
                # the command raised, so subsequent commands are tracked again.
if ctx:
ctx.command_tracking_deactivated = False
if tracking_activated and command_telemetry:
# Set the execution time to the measured value
command_telemetry.time = to_microseconds(timer() - exec_start)
return result
with contextlib.suppress(AttributeError):
# Make this a well-behaved decorator by preserving important function
# attributes.
wrapped_func.__dict__.update(non_optional_func.__dict__)
wrapped_func.__signature__ = inspect.signature(non_optional_func) # type: ignore
return cast(F, wrapped_func)
|
Create and return the full PageProfile ForwardMsg.
|
def create_page_profile_message(
commands: list[Command],
exec_time: int,
prep_time: int,
uncaught_exception: str | None = None,
) -> ForwardMsg:
"""Create and return the full PageProfile ForwardMsg."""
# Local import to prevent circular dependencies
from streamlit.runtime.scriptrunner import get_script_run_ctx
msg = ForwardMsg()
page_profile = msg.page_profile
page_profile.commands.extend(commands)
page_profile.exec_time = exec_time
page_profile.prep_time = prep_time
page_profile.headless = config.get_option("server.headless")
# Collect all config options that have been manually set
config_options: set[str] = set()
if config._config_options:
for option_name in config._config_options.keys():
if not config.is_manually_set(option_name):
# We only care about manually defined options
continue
config_option = config._config_options[option_name]
if config_option.is_default:
option_name = f"{option_name}:default"
config_options.add(option_name)
page_profile.config.extend(config_options)
# Check the predefined set of modules for attribution
attributions: set[str] = {
attribution
for attribution in _ATTRIBUTIONS_TO_CHECK
if attribution in sys.modules
}
page_profile.os = str(sys.platform)
page_profile.timezone = str(time.tzname)
page_profile.attributions.extend(attributions)
if uncaught_exception:
page_profile.uncaught_exception = uncaught_exception
if ctx := get_script_run_ctx():
page_profile.is_fragment_run = bool(ctx.current_fragment_id)
return msg
|
True if the given message qualifies for caching.
|
def is_cacheable_msg(msg: ForwardMsg) -> bool:
"""True if the given message qualifies for caching."""
if msg.WhichOneof("type") in {"ref_hash", "initialize"}:
# Some message types never get cached
return False
return msg.ByteSize() >= int(config.get_option("global.minCachedMessageSize"))
|
Serialize a ForwardMsg to send to a client.
If the message is too large, it will be converted to an exception message
instead.
|
def serialize_forward_msg(msg: ForwardMsg) -> bytes:
"""Serialize a ForwardMsg to send to a client.
If the message is too large, it will be converted to an exception message
instead.
"""
populate_hash_if_needed(msg)
msg_str = msg.SerializeToString()
if len(msg_str) > get_max_message_size_bytes():
import streamlit.elements.exception as exception
# Overwrite the offending ForwardMsg.delta with an error to display.
# This assumes that the size limit wasn't exceeded due to metadata.
exception.marshall(msg.delta.new_element.exception, MessageSizeError(msg_str))
msg_str = msg.SerializeToString()
return msg_str
|
Returns the max websocket message size in bytes.
This will lazyload the value from the config and store it in the global symbol table.
|
def get_max_message_size_bytes() -> int:
"""Returns the max websocket message size in bytes.
This will lazyload the value from the config and store it in the global symbol table.
"""
global _max_message_size_bytes
if _max_message_size_bytes is None:
_max_message_size_bytes = config.get_option("server.maxMessageSize") * int(1e6)
return _max_message_size_bytes
|
Group a list of CacheStats by category_name and cache_name and sum byte_length
|
def group_stats(stats: list[CacheStat]) -> list[CacheStat]:
"""Group a list of CacheStats by category_name and cache_name and sum byte_length"""
def key_function(individual_stat):
return individual_stat.category_name, individual_stat.cache_name
result: list[CacheStat] = []
sorted_stats = sorted(stats, key=key_function)
grouped_stats = itertools.groupby(sorted_stats, key=key_function)
for (category_name, cache_name), single_group_stats in grouped_stats:
result.append(
CacheStat(
category_name=category_name,
cache_name=cache_name,
byte_length=sum(map(lambda item: item.byte_length, single_group_stats)),
)
)
return result
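# Self-contained illustration of the sort-then-groupby pattern above, with plain
# tuples standing in for CacheStat objects (values are made up):
import itertools
def _group_key(stat):
    return (stat[0], stat[1])
raw = [("st_cache_data", "load", 10), ("st_cache_resource", "conn", 5), ("st_cache_data", "load", 7)]
grouped = [
    (category, name, sum(s[2] for s in group))
    for (category, name), group in itertools.groupby(sorted(raw, key=_group_key), key=_group_key)
]
# grouped == [("st_cache_data", "load", 17), ("st_cache_resource", "conn", 5)]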
|
Return the singleton Runtime instance. Raise an Error if the
Runtime hasn't been created yet.
|
def get_instance() -> Runtime:
"""Return the singleton Runtime instance. Raise an Error if the
Runtime hasn't been created yet.
"""
return Runtime.instance()
|
True if the singleton Runtime instance has been created.
When a Streamlit app is running in "raw mode" - that is, when the
app is run via `python app.py` instead of `streamlit run app.py` -
the Runtime will not exist, and various Streamlit functions need
to adapt.
|
def exists() -> bool:
"""True if the singleton Runtime instance has been created.
When a Streamlit app is running in "raw mode" - that is, when the
app is run via `python app.py` instead of `streamlit run app.py` -
the Runtime will not exist, and various Streamlit functions need
to adapt.
"""
return Runtime.exists()
|
Replay the st element function calls that happened when executing a
cache-decorated function.
When a cache function is executed, we record the element and block messages
produced, and use those to reproduce the DeltaGenerator calls, so the elements
will appear in the web app even when execution of the function is skipped
because the result was cached.
To make this work, for each st function call we record an identifier for the
DG it was effectively called on (see Note [DeltaGenerator method invocation]).
We also record the identifier for each DG returned by an st function call, if
it returns one. Then, for each recorded message, we get the current DG instance
corresponding to the DG the message was originally called on, and enqueue the
message using that, recording any new DGs produced in case a later st function
call is on one of them.
|
def replay_cached_messages(
result: CachedResult, cache_type: CacheType, cached_func: types.FunctionType
) -> None:
"""Replay the st element function calls that happened when executing a
cache-decorated function.
When a cache function is executed, we record the element and block messages
produced, and use those to reproduce the DeltaGenerator calls, so the elements
will appear in the web app even when execution of the function is skipped
because the result was cached.
To make this work, for each st function call we record an identifier for the
DG it was effectively called on (see Note [DeltaGenerator method invocation]).
We also record the identifier for each DG returned by an st function call, if
it returns one. Then, for each recorded message, we get the current DG instance
corresponding to the DG the message was originally called on, and enqueue the
message using that, recording any new DGs produced in case a later st function
call is on one of them.
"""
from streamlit.delta_generator import DeltaGenerator
from streamlit.runtime.state.widgets import register_widget_from_metadata
# Maps originally recorded dg ids to this script run's version of that dg
returned_dgs: dict[str, DeltaGenerator] = {}
returned_dgs[result.main_id] = st._main
returned_dgs[result.sidebar_id] = st.sidebar
ctx = get_script_run_ctx()
try:
for msg in result.messages:
if isinstance(msg, ElementMsgData):
if msg.widget_metadata is not None:
register_widget_from_metadata(
msg.widget_metadata.metadata,
ctx,
None,
msg.delta_type,
)
if msg.media_data is not None:
for data in msg.media_data:
runtime.get_instance().media_file_mgr.add(
data.media, data.mimetype, data.media_id
)
dg = returned_dgs[msg.id_of_dg_called_on]
maybe_dg = dg._enqueue(msg.delta_type, msg.message)
if isinstance(maybe_dg, DeltaGenerator):
returned_dgs[msg.returned_dgs_id] = maybe_dg
elif isinstance(msg, BlockMsgData):
dg = returned_dgs[msg.id_of_dg_called_on]
new_dg = dg._block(msg.message)
returned_dgs[msg.returned_dgs_id] = new_dg
except KeyError:
raise CacheReplayClosureError(cache_type, cached_func)
|
Generate a key for the given list of widgets used in a cache-decorated function.
Keys are generated by hashing the IDs and values of the widgets in the given list.
|
def _make_widget_key(widgets: list[tuple[str, Any]], cache_type: CacheType) -> str:
"""Generate a key for the given list of widgets used in a cache-decorated function.
Keys are generated by hashing the IDs and values of the widgets in the given list.
"""
func_hasher = hashlib.new("md5", **HASHLIB_KWARGS)
for widget_id_val in widgets:
update_hash(widget_id_val, func_hasher, cache_type)
return func_hasher.hexdigest()
|
Return the StatsProvider for all @st.cache_data functions.
|
def get_data_cache_stats_provider() -> CacheStatsProvider:
"""Return the StatsProvider for all @st.cache_data functions."""
return _data_caches
|
Get markdown representation of the function name.
|
def get_cached_func_name_md(func: Any) -> str:
"""Get markdown representation of the function name."""
if hasattr(func, "__name__"):
return "`%s()`" % func.__name__
elif hasattr(type(func), "__name__"):
return f"`{type(func).__name__}`"
return f"`{type(func)}`"
|
True if the two validate functions are equal for the purposes of
determining whether a given function cache needs to be recreated.
|
def _equal_validate_funcs(a: ValidateFunc | None, b: ValidateFunc | None) -> bool:
"""True if the two validate functions are equal for the purposes of
determining whether a given function cache needs to be recreated.
"""
# To "properly" test for function equality here, we'd need to compare function bytecode.
    # For performance reasons, we've decided not to do that for now.
return (a is None and b is None) or (a is not None and b is not None)
|
Return the StatsProvider for all @st.cache_resource functions.
|
def get_resource_cache_stats_provider() -> CacheStatsProvider:
"""Return the StatsProvider for all @st.cache_resource functions."""
return _resource_caches
|
Return the name of the public decorator API for the given CacheType.
|
def get_decorator_api_name(cache_type: CacheType) -> str:
"""Return the name of the public decorator API for the given CacheType."""
if cache_type is CacheType.DATA:
return "cache_data"
if cache_type is CacheType.RESOURCE:
return "cache_resource"
raise RuntimeError(f"Unrecognized CacheType '{cache_type}'")
|
Create a callable wrapper around a CachedFunctionInfo.
Calling the wrapper will return the cached value if it's already been
computed, and will call the underlying function to compute and cache the
value otherwise.
The wrapper also has a `clear` function that can be called to clear
some or all of the wrapper's cached values.
|
def make_cached_func_wrapper(info: CachedFuncInfo) -> Callable[..., Any]:
"""Create a callable wrapper around a CachedFunctionInfo.
Calling the wrapper will return the cached value if it's already been
computed, and will call the underlying function to compute and cache the
value otherwise.
The wrapper also has a `clear` function that can be called to clear
some or all of the wrapper's cached values.
"""
cached_func = CachedFunc(info)
# We'd like to simply return `cached_func`, which is already a Callable.
# But using `functools.update_wrapper` on the CachedFunc instance
# itself results in errors when our caching decorators are used to decorate
# member functions. (See https://github.com/streamlit/streamlit/issues/6109)
@functools.wraps(info.func)
def wrapper(*args, **kwargs):
return cached_func(*args, **kwargs)
# Give our wrapper its `clear` function.
# (This results in a spurious mypy error that we suppress.)
wrapper.clear = cached_func.clear # type: ignore
return wrapper
|
Create the key for a value within a cache.
This key is generated from the function's arguments. All arguments
will be hashed, except for those named with a leading "_".
Raises
------
StreamlitAPIException
Raised (with a nicely-formatted explanation message) if we encounter
an un-hashable arg.
|
def _make_value_key(
cache_type: CacheType,
func: types.FunctionType,
func_args: tuple[Any, ...],
func_kwargs: dict[str, Any],
hash_funcs: HashFuncsDict | None,
) -> str:
"""Create the key for a value within a cache.
This key is generated from the function's arguments. All arguments
will be hashed, except for those named with a leading "_".
Raises
------
StreamlitAPIException
Raised (with a nicely-formatted explanation message) if we encounter
an un-hashable arg.
"""
# Create a (name, value) list of all *args and **kwargs passed to the
# function.
arg_pairs: list[tuple[str | None, Any]] = []
for arg_idx in range(len(func_args)):
arg_name = _get_positional_arg_name(func, arg_idx)
arg_pairs.append((arg_name, func_args[arg_idx]))
for kw_name, kw_val in func_kwargs.items():
# **kwargs ordering is preserved, per PEP 468
# https://www.python.org/dev/peps/pep-0468/, so this iteration is
# deterministic.
arg_pairs.append((kw_name, kw_val))
# Create the hash from each arg value, except for those args whose name
# starts with "_". (Underscore-prefixed args are deliberately excluded from
# hashing.)
args_hasher = hashlib.new("md5", **HASHLIB_KWARGS)
for arg_name, arg_value in arg_pairs:
if arg_name is not None and arg_name.startswith("_"):
_LOGGER.debug("Not hashing %s because it starts with _", arg_name)
continue
try:
update_hash(
arg_name,
hasher=args_hasher,
cache_type=cache_type,
hash_source=func,
)
# we call update_hash twice here, first time for `arg_name`
# without `hash_funcs`, and second time for `arg_value` with hash_funcs
# to evaluate user defined `hash_funcs` only for computing `arg_value` hash.
update_hash(
arg_value,
hasher=args_hasher,
cache_type=cache_type,
hash_funcs=hash_funcs,
hash_source=func,
)
except UnhashableTypeError as exc:
raise UnhashableParamError(cache_type, func, arg_name, arg_value, exc)
value_key = args_hasher.hexdigest()
_LOGGER.debug("Cache key: %s", value_key)
return value_key
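# Standalone sketch of the "skip underscore-prefixed args" rule; plain repr()-based
# hashing stands in for update_hash / hash_funcs here:
import hashlib
def _value_key_sketch(arg_pairs):
    h = hashlib.md5()
    for name, value in arg_pairs:
        if name is not None and name.startswith("_"):
            continue  # underscore-prefixed args never contribute to the key
        h.update(repr(name).encode())
        h.update(repr(value).encode())
    return h.hexdigest()
# Two calls that differ only in an underscore-prefixed argument share a key
# (and therefore a cache entry):
assert _value_key_sketch([("path", "data.csv"), ("_conn", "a")]) == _value_key_sketch(
    [("path", "data.csv"), ("_conn", "b")]
)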
|
Create the unique key for a function's cache.
A function's key is stable across reruns of the app, and changes when
the function's source code changes.
|
def _make_function_key(cache_type: CacheType, func: types.FunctionType) -> str:
"""Create the unique key for a function's cache.
A function's key is stable across reruns of the app, and changes when
the function's source code changes.
"""
func_hasher = hashlib.new("md5", **HASHLIB_KWARGS)
# Include the function's __module__ and __qualname__ strings in the hash.
# This means that two identical functions in different modules
# will not share a hash; it also means that two identical *nested*
# functions in the same module will not share a hash.
update_hash(
(func.__module__, func.__qualname__),
hasher=func_hasher,
cache_type=cache_type,
hash_source=func,
)
# Include the function's source code in its hash. If the source code can't
# be retrieved, fall back to the function's bytecode instead.
source_code: str | bytes
try:
source_code = inspect.getsource(func)
except OSError as e:
_LOGGER.debug(
"Failed to retrieve function's source code when building its key; falling back to bytecode. err={0}",
e,
)
source_code = func.__code__.co_code
update_hash(
source_code, hasher=func_hasher, cache_type=cache_type, hash_source=func
)
cache_key = func_hasher.hexdigest()
return cache_key
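# Quick illustration of the source-or-bytecode fallback (hypothetical helper; only
# stdlib inspect / __code__ behavior is assumed):
import inspect
def _source_or_bytecode(func):
    try:
        return inspect.getsource(func)  # normal case: hash the source text
    except OSError:
        return func.__code__.co_code  # e.g. functions defined in a REPL
_source_or_bytecode(_source_or_bytecode)  # returns this helper's own source when run from a file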
|
Return the name of a function's positional argument.
If arg_index is out of range, or refers to a parameter that is not a
named positional argument (e.g. an *args, **kwargs, or keyword-only param),
return None instead.
|
def _get_positional_arg_name(func: types.FunctionType, arg_index: int) -> str | None:
"""Return the name of a function's positional argument.
If arg_index is out of range, or refers to a parameter that is not a
named positional argument (e.g. an *args, **kwargs, or keyword-only param),
return None instead.
"""
if arg_index < 0:
return None
params: list[inspect.Parameter] = list(inspect.signature(func).parameters.values())
if arg_index >= len(params):
return None
if params[arg_index].kind in (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
):
return params[arg_index].name
return None
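# Hedged illustration of how inspect.signature exposes parameter kinds (the demo
# function is hypothetical):
import inspect
def _demo(a, b, *args, c=1, **kwargs):
    pass
params = list(inspect.signature(_demo).parameters.values())
assert params[0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD  # -> "a" is returned
assert params[2].kind is inspect.Parameter.VAR_POSITIONAL  # -> *args, so None
assert params[3].kind is inspect.Parameter.KEYWORD_ONLY  # -> keyword-only, so None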
|
Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
|
def update_hash(
val: Any,
hasher,
cache_type: CacheType,
hash_source: Callable[..., Any] | None = None,
hash_funcs: HashFuncsDict | None = None,
) -> None:
"""Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
"""
hash_stacks.current.hash_source = hash_source
ch = _CacheFuncHasher(cache_type, hash_funcs)
ch.update(hasher, val)
|
Return key for memoization.
|
def _key(obj: Any | None) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or isinstance(obj, uuid.UUID)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if inspect.isbuiltin(obj) or inspect.isroutine(obj) or inspect.iscode(obj):
return id(obj)
return NoResult
|
Save the message for an element to a thread-local callstack, so it can
be used later to replay the element when a cache-decorated function's
execution is skipped.
|
def save_element_message(
delta_type: str,
element_proto: Message,
invoked_dg_id: str,
used_dg_id: str,
returned_dg_id: str,
) -> None:
"""Save the message for an element to a thread-local callstack, so it can
be used later to replay the element when a cache-decorated function's
execution is skipped.
"""
CACHE_DATA_MESSAGE_REPLAY_CTX.save_element_message(
delta_type, element_proto, invoked_dg_id, used_dg_id, returned_dg_id
)
CACHE_RESOURCE_MESSAGE_REPLAY_CTX.save_element_message(
delta_type, element_proto, invoked_dg_id, used_dg_id, returned_dg_id
)
|
Save the message for a block to a thread-local callstack, so it can
be used later to replay the block when a cache-decorated function's
execution is skipped.
|
def save_block_message(
block_proto: Block,
invoked_dg_id: str,
used_dg_id: str,
returned_dg_id: str,
) -> None:
"""Save the message for a block to a thread-local callstack, so it can
be used later to replay the block when a cache-decorated function's
execution is skipped.
"""
CACHE_DATA_MESSAGE_REPLAY_CTX.save_block_message(
block_proto, invoked_dg_id, used_dg_id, returned_dg_id
)
CACHE_RESOURCE_MESSAGE_REPLAY_CTX.save_block_message(
block_proto, invoked_dg_id, used_dg_id, returned_dg_id
)
|
Save a widget's metadata to a thread-local callstack, so the widget
can be registered again when that widget is replayed.
|
def save_widget_metadata(metadata: WidgetMetadata[Any]) -> None:
"""Save a widget's metadata to a thread-local callstack, so the widget
can be registered again when that widget is replayed.
"""
CACHE_DATA_MESSAGE_REPLAY_CTX.save_widget_metadata(metadata)
CACHE_RESOURCE_MESSAGE_REPLAY_CTX.save_widget_metadata(metadata)
|
Build a deprecation warning string for a cache function that has returned the given
value.
|
def _make_deprecation_warning(cached_value: Any) -> str:
"""Build a deprecation warning string for a cache function that has returned the given
value.
"""
typename = type(cached_value).__qualname__
cache_type_rec = NEW_CACHE_FUNC_RECOMMENDATIONS.get(typename)
if cache_type_rec is not None:
# We have a recommended cache func for the cached value:
return (
f"`st.cache` is deprecated. Please use one of Streamlit's new caching commands,\n"
f"`st.cache_data` or `st.cache_resource`. Based on this function's return value\n"
f"of type `{typename}`, we recommend using `st.{get_decorator_api_name(cache_type_rec)}`.\n\n"
f"More information [in our docs]({CACHE_DOCS_URL})."
)
# We do not have a recommended cache func for the cached value:
return (
f"`st.cache` is deprecated. Please use one of Streamlit's new caching commands,\n"
f"`st.cache_data` or `st.cache_resource`.\n\n"
f"More information [in our docs]({CACHE_DOCS_URL})."
)
|
Read a value from the cache.
Our goal is to read from memory if possible. If the data was mutated (hash
changed), we show a warning. If reading from memory fails, we either read
from disk or rerun the code.
|
def _read_from_cache(
mem_cache: MemCache,
key: str,
persist: bool,
allow_output_mutation: bool,
func_or_code: Callable[..., Any],
hash_funcs: HashFuncsDict | None = None,
) -> Any:
"""Read a value from the cache.
Our goal is to read from memory if possible. If the data was mutated (hash
changed), we show a warning. If reading from memory fails, we either read
from disk or rerun the code.
"""
try:
return _read_from_mem_cache(
mem_cache, key, allow_output_mutation, func_or_code, hash_funcs
)
except CachedObjectMutationError as e:
handle_uncaught_app_exception(CachedObjectMutationWarning(e))
return e.cached_value
except CacheKeyNotFoundError as e:
if persist:
value = _read_from_disk_cache(key)
_write_to_mem_cache(
mem_cache, key, value, allow_output_mutation, func_or_code, hash_funcs
)
return value
raise e
|
Function decorator to memoize function executions.
Parameters
----------
func : callable
The function to cache. Streamlit hashes the function and dependent code.
persist : bool
Whether to persist the cache on disk.
allow_output_mutation : bool
Streamlit shows a warning when return values are mutated, as that
can have unintended consequences. This is done by hashing the return value internally.
If you know what you're doing and would like to override this warning, set this to True.
show_spinner : bool
Enable the spinner. Default is True to show a spinner when there is
a cache miss.
suppress_st_warning : bool
Suppress warnings about calling Streamlit commands from within
the cached function.
hash_funcs : dict or None
Mapping of types or fully qualified names to hash functions. This is used to override
the behavior of the hasher inside Streamlit's caching mechanism: when the hasher
encounters an object, it will first check to see if its type matches a key in this
dict and, if so, will use the provided function to generate a hash for it. See below
for an example of how this can be used.
max_entries : int or None
The maximum number of entries to keep in the cache, or None
for an unbounded cache. (When a new entry is added to a full cache,
the oldest cached entry will be removed.) The default is None.
ttl : float or None
The maximum number of seconds to keep an entry in the cache, or
None if cache entries should not expire. The default is None.
Example
-------
>>> import streamlit as st
>>>
>>> @st.cache
... def fetch_and_clean_data(url):
... # Fetch data from URL here, and then clean it up.
... return data
...
>>> d1 = fetch_and_clean_data(DATA_URL_1)
>>> # Actually executes the function, since this is the first time it was
>>> # encountered.
>>>
>>> d2 = fetch_and_clean_data(DATA_URL_1)
>>> # Does not execute the function. Instead, returns its previously computed
>>> # value. This means that now the data in d1 is the same as in d2.
>>>
>>> d3 = fetch_and_clean_data(DATA_URL_2)
>>> # This is a different URL, so the function executes.
To set the ``persist`` parameter, use this command as follows:
>>> @st.cache(persist=True)
... def fetch_and_clean_data(url):
... # Fetch data from URL here, and then clean it up.
... return data
To disable hashing return values, set the ``allow_output_mutation`` parameter to ``True``:
>>> @st.cache(allow_output_mutation=True)
... def fetch_and_clean_data(url):
... # Fetch data from URL here, and then clean it up.
... return data
To override the default hashing behavior, pass a custom hash function.
You can do that by mapping a type (e.g. ``MongoClient``) to a hash function (``id``) like this:
>>> @st.cache(hash_funcs={MongoClient: id})
... def connect_to_database(url):
... return MongoClient(url)
Alternatively, you can map the type's fully-qualified name
(e.g. ``"pymongo.mongo_client.MongoClient"``) to the hash function instead:
>>> @st.cache(hash_funcs={"pymongo.mongo_client.MongoClient": id})
... def connect_to_database(url):
... return MongoClient(url)
|
def cache(
func: F | None = None,
persist: bool = False,
allow_output_mutation: bool = False,
show_spinner: bool = True,
suppress_st_warning: bool = False,
hash_funcs: HashFuncsDict | None = None,
max_entries: int | None = None,
ttl: float | None = None,
) -> Callable[[F], F] | F:
"""Function decorator to memoize function executions.
Parameters
----------
func : callable
The function to cache. Streamlit hashes the function and dependent code.
persist : bool
Whether to persist the cache on disk.
allow_output_mutation : bool
Streamlit shows a warning when return values are mutated, as that
can have unintended consequences. This is done by hashing the return value internally.
If you know what you're doing and would like to override this warning, set this to True.
show_spinner : bool
Enable the spinner. Default is True to show a spinner when there is
a cache miss.
suppress_st_warning : bool
Suppress warnings about calling Streamlit commands from within
the cached function.
hash_funcs : dict or None
Mapping of types or fully qualified names to hash functions. This is used to override
the behavior of the hasher inside Streamlit's caching mechanism: when the hasher
encounters an object, it will first check to see if its type matches a key in this
dict and, if so, will use the provided function to generate a hash for it. See below
for an example of how this can be used.
max_entries : int or None
The maximum number of entries to keep in the cache, or None
for an unbounded cache. (When a new entry is added to a full cache,
the oldest cached entry will be removed.) The default is None.
ttl : float or None
The maximum number of seconds to keep an entry in the cache, or
None if cache entries should not expire. The default is None.
Example
-------
>>> import streamlit as st
>>>
>>> @st.cache
... def fetch_and_clean_data(url):
... # Fetch data from URL here, and then clean it up.
... return data
...
>>> d1 = fetch_and_clean_data(DATA_URL_1)
>>> # Actually executes the function, since this is the first time it was
>>> # encountered.
>>>
>>> d2 = fetch_and_clean_data(DATA_URL_1)
>>> # Does not execute the function. Instead, returns its previously computed
>>> # value. This means that now the data in d1 is the same as in d2.
>>>
>>> d3 = fetch_and_clean_data(DATA_URL_2)
>>> # This is a different URL, so the function executes.
To set the ``persist`` parameter, use this command as follows:
>>> @st.cache(persist=True)
... def fetch_and_clean_data(url):
... # Fetch data from URL here, and then clean it up.
... return data
To disable hashing return values, set the ``allow_output_mutation`` parameter to ``True``:
>>> @st.cache(allow_output_mutation=True)
... def fetch_and_clean_data(url):
... # Fetch data from URL here, and then clean it up.
... return data
To override the default hashing behavior, pass a custom hash function.
You can do that by mapping a type (e.g. ``MongoClient``) to a hash function (``id``) like this:
>>> @st.cache(hash_funcs={MongoClient: id})
... def connect_to_database(url):
... return MongoClient(url)
Alternatively, you can map the type's fully-qualified name
(e.g. ``"pymongo.mongo_client.MongoClient"``) to the hash function instead:
>>> @st.cache(hash_funcs={"pymongo.mongo_client.MongoClient": id})
... def connect_to_database(url):
... return MongoClient(url)
"""
_LOGGER.debug("Entering st.cache: %s", func)
# Support passing the params via function decorator, e.g.
# @st.cache(persist=True, allow_output_mutation=True)
if func is None:
def wrapper(f: F) -> F:
return cache(
func=f,
persist=persist,
allow_output_mutation=allow_output_mutation,
show_spinner=show_spinner,
suppress_st_warning=suppress_st_warning,
hash_funcs=hash_funcs,
max_entries=max_entries,
ttl=ttl,
)
return wrapper
else:
# To make mypy type narrow Optional[F] -> F
non_optional_func = func
cache_key = None
@functools.wraps(non_optional_func)
def wrapped_func(*args, **kwargs):
"""Wrapper function that only calls the underlying function on a cache miss.
Cached objects are stored in the cache/ directory.
"""
if not config.get_option("client.caching"):
_LOGGER.debug("Purposefully skipping cache")
return non_optional_func(*args, **kwargs)
name = non_optional_func.__qualname__
if len(args) == 0 and len(kwargs) == 0:
message = "Running `%s()`." % name
else:
message = "Running `%s(...)`." % name
def get_or_create_cached_value():
nonlocal cache_key
if cache_key is None:
# Delay generating the cache key until the first call.
# This way we can see values of globals, including functions
# defined after this one.
# If we generated the key earlier we would only hash those
# globals by name, and miss changes in their code or value.
cache_key = _hash_func(non_optional_func, hash_funcs)
# First, get the cache that's attached to this function.
# This cache's key is generated (above) from the function's code.
mem_cache = _mem_caches.get_cache(cache_key, max_entries, ttl)
# Next, calculate the key for the value we'll be searching for
# within that cache. This key is generated from both the function's
# code and the arguments that are passed into it. (Even though this
# key is used to index into a per-function cache, it must be
# globally unique, because it is *also* used for a global on-disk
# cache that is *not* per-function.)
value_hasher = hashlib.new("md5")
if args:
update_hash(
args,
hasher=value_hasher,
hash_funcs=hash_funcs,
hash_reason=HashReason.CACHING_FUNC_ARGS,
hash_source=non_optional_func,
)
if kwargs:
update_hash(
kwargs,
hasher=value_hasher,
hash_funcs=hash_funcs,
hash_reason=HashReason.CACHING_FUNC_ARGS,
hash_source=non_optional_func,
)
value_key = value_hasher.hexdigest()
# Avoid recomputing the body's hash by just appending the
# previously-computed hash to the arg hash.
value_key = "{}-{}".format(value_key, cache_key)
_LOGGER.debug("Cache key: %s", value_key)
try:
return_value = _read_from_cache(
mem_cache=mem_cache,
key=value_key,
persist=persist,
allow_output_mutation=allow_output_mutation,
func_or_code=non_optional_func,
hash_funcs=hash_funcs,
)
_LOGGER.debug("Cache hit: %s", non_optional_func)
except CacheKeyNotFoundError:
_LOGGER.debug("Cache miss: %s", non_optional_func)
with _calling_cached_function(non_optional_func):
return_value = non_optional_func(*args, **kwargs)
_write_to_cache(
mem_cache=mem_cache,
key=value_key,
value=return_value,
persist=persist,
allow_output_mutation=allow_output_mutation,
func_or_code=non_optional_func,
hash_funcs=hash_funcs,
)
# st.cache is deprecated. We show a warning every time it's used.
show_deprecation_warning(_make_deprecation_warning(return_value))
return return_value
if show_spinner:
with spinner(message, _cache=True):
return get_or_create_cached_value()
else:
return get_or_create_cached_value()
# Make this a well-behaved decorator by preserving important function
# attributes.
try:
wrapped_func.__dict__.update(non_optional_func.__dict__)
except AttributeError:
# For normal functions this should never happen, but if so it's not problematic.
pass
return cast(F, wrapped_func)
|
Clear the memoization cache.
Returns
-------
boolean
True if the disk cache was cleared. False otherwise (e.g. cache file
doesn't exist on disk).
|
def clear_cache() -> bool:
"""Clear the memoization cache.
Returns
-------
boolean
True if the disk cache was cleared. False otherwise (e.g. cache file
doesn't exist on disk).
"""
_clear_mem_cache()
return _clear_disk_cache()
|
Get markdown representation of the function name.
|
def _get_cached_func_name_md(func: Callable[..., Any]) -> str:
"""Get markdown representation of the function name."""
if hasattr(func, "__name__"):
return "`%s()`" % func.__name__
else:
return "a cached function"
|
Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
|
def update_hash(
val: Any,
hasher,
hash_reason: HashReason,
hash_source: Callable[..., Any],
context: Context | None = None,
hash_funcs: HashFuncsDict | None = None,
) -> None:
"""Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
"""
hash_stacks.current.hash_reason = hash_reason
hash_stacks.current.hash_source = hash_source
ch = _CodeHasher(hash_funcs)
ch.update(hasher, val, context)
|
Return key for memoization.
|
def _key(obj: Any | None) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult
|
Get list of strings (lines of code) from lineno to lineno+3.
Ideally we'd return the exact line where the error took place, but there
are reasons why this is not possible without a lot of work, including
playing with the AST. So for now we're returning 3 lines near where
the error took place.
|
def _get_failing_lines(code, lineno: int) -> list[str]:
"""Get list of strings (lines of code) from lineno to lineno+3.
Ideally we'd return the exact line where the error took place, but there
are reasons why this is not possible without a lot of work, including
playing with the AST. So for now we're returning 3 lines near where
the error took place.
"""
source_lines, source_lineno = inspect.getsourcelines(code)
start = lineno - source_lineno
end = min(start + 3, len(source_lines))
lines = source_lines[start:end]
return lines
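# Hedged example of the slicing logic with made-up data (no real traceback needed):
source_lines = ["def f():\n", "    a = 1\n", "    b = 2\n", "    return a + b\n"]
source_lineno = 10  # f() starts at line 10 of its file
lineno = 12  # error reported at line 12
start = lineno - source_lineno  # -> 2
source_lines[start:min(start + 3, len(source_lines))]
# -> ["    b = 2\n", "    return a + b\n"]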
|
Modifies the code to support magic Streamlit commands.
Parameters
----------
code : str
The Python code.
script_path : str
The path to the script file.
Returns
-------
ast.Module
The syntax tree for the code.
|
def add_magic(code: str, script_path: str) -> Any:
"""Modifies the code to support magic Streamlit commands.
Parameters
----------
code : str
The Python code.
script_path : str
The path to the script file.
Returns
-------
ast.Module
The syntax tree for the code.
"""
# Pass script_path so we get pretty exceptions.
tree = ast.parse(code, script_path, "exec")
file_ends_in_semicolon = _does_file_end_in_semicolon(tree, code)
return _modify_ast_subtree(
tree, is_root=True, file_ends_in_semicolon=file_ends_in_semicolon
)
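# Tiny, self-contained illustration of the same idea using ast.NodeTransformer.
# This is not the real implementation (which handles nested blocks, docstrings,
# yields, semicolons, etc.); print() stands in for transparent_write:
import ast
class _WrapBareExprs(ast.NodeTransformer):
    def visit_Expr(self, node):
        call = ast.Call(
            func=ast.Name(id="print", ctx=ast.Load()),
            args=[node.value],
            keywords=[],
        )
        return ast.copy_location(ast.Expr(value=call), node)
demo_tree = ast.parse("x = 1\nx + 1\n")
demo_tree = ast.fix_missing_locations(_WrapBareExprs().visit(demo_tree))
exec(compile(demo_tree, "<demo>", "exec"))  # prints 2: the bare expression was wrapped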
|
Parses magic commands and modifies the given AST (sub)tree.
|
def _modify_ast_subtree(
tree: Any,
body_attr: str = "body",
is_root: bool = False,
file_ends_in_semicolon: bool = False,
):
"""Parses magic commands and modifies the given AST (sub)tree."""
body = getattr(tree, body_attr)
for i, node in enumerate(body):
node_type = type(node)
# Recursively parses the content of the statements
# `with`, `for` and `while`, as well as function definitions.
# Also covers their async counterparts
if (
node_type is ast.FunctionDef
or node_type is ast.With
or node_type is ast.For
or node_type is ast.While
or node_type is ast.AsyncFunctionDef
or node_type is ast.AsyncWith
or node_type is ast.AsyncFor
):
_modify_ast_subtree(node)
# Recursively parses methods in a class.
elif node_type is ast.ClassDef:
for inner_node in node.body:
if type(inner_node) in {ast.FunctionDef, ast.AsyncFunctionDef}:
_modify_ast_subtree(inner_node)
# Recursively parses the contents of try statements,
# all their handlers (except and else) and the finally body
elif node_type is ast.Try:
for j, inner_node in enumerate(node.handlers):
node.handlers[j] = _modify_ast_subtree(inner_node)
finally_node = _modify_ast_subtree(node, body_attr="finalbody")
node.finalbody = finally_node.finalbody
_modify_ast_subtree(node)
# Recursively parses if blocks, as well as their else/elif blocks
# (else/elif are both mapped to orelse)
# it intentionally does not parse the test expression.
elif node_type is ast.If:
_modify_ast_subtree(node)
_modify_ast_subtree(node, "orelse")
# Convert standalone expression nodes to st.write
elif node_type is ast.Expr:
value = _get_st_write_from_expr(
node,
i,
parent_type=type(tree),
is_root=is_root,
is_last_expr=(i == len(body) - 1),
file_ends_in_semicolon=file_ends_in_semicolon,
)
if value is not None:
node.value = value
if is_root:
# Import Streamlit so we can use it in the new_value above.
_insert_import_statement(tree)
ast.fix_missing_locations(tree)
return tree
|
Insert Streamlit import statement at the top(ish) of the tree.
|
def _insert_import_statement(tree: Any) -> None:
"""Insert Streamlit import statement at the top(ish) of the tree."""
st_import = _build_st_import_statement()
# If the 0th node is already an import statement, put the Streamlit
# import below that, so we don't break "from __future__ import".
if tree.body and type(tree.body[0]) in {ast.ImportFrom, ast.Import}:
tree.body.insert(1, st_import)
# If the 0th node is a docstring and the 1st is an import statement,
# put the Streamlit import below those, so we don't break "from
# __future__ import".
elif (
len(tree.body) > 1
and (
type(tree.body[0]) is ast.Expr
and _is_string_constant_node(tree.body[0].value)
)
and type(tree.body[1]) in {ast.ImportFrom, ast.Import}
):
tree.body.insert(2, st_import)
else:
tree.body.insert(0, st_import)
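# Illustration of the placement rules above (sketch; only stdlib ast behavior is
# assumed):
import ast
demo_tree = ast.parse('"""My app."""\nfrom __future__ import annotations\nx = 1\n')
# demo_tree.body[0] is the module docstring (an ast.Expr over a string constant) and
# demo_tree.body[1] is an ast.ImportFrom, so the second branch above inserts the
# Streamlit import at index 2, after the docstring and the __future__ import,
# keeping the __future__ import legal.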
|
Build AST node for `import magic_funcs as __streamlitmagic__`.
|
def _build_st_import_statement():
"""Build AST node for `import magic_funcs as __streamlitmagic__`."""
return ast.Import(
names=[
ast.alias(
name="streamlit.runtime.scriptrunner.magic_funcs",
asname=MAGIC_MODULE_NAME,
)
]
)
|
Build AST node for `__streamlitmagic__.transparent_write(*nodes)`.
|
def _build_st_write_call(nodes):
"""Build AST node for `__streamlitmagic__.transparent_write(*nodes)`."""
return ast.Call(
func=ast.Attribute(
attr="transparent_write",
value=ast.Name(id=MAGIC_MODULE_NAME, ctx=ast.Load()),
ctx=ast.Load(),
),
args=nodes,
keywords=[],
)
|
The function that gets magic-ified into Streamlit apps.
This is just st.write, but returns the arguments you passed to it.
|
def transparent_write(*args: Any) -> Any:
"""The function that gets magic-ified into Streamlit apps.
This is just st.write, but returns the arguments you passed to it.
"""
import streamlit as st
st.write(*args)
if len(args) == 1:
return args[0]
return args
|
Some modules are stateful, so we have to clear their state.
|
def _clean_problem_modules() -> None:
"""Some modules are stateful, so we have to clear their state."""
if "keras" in sys.modules:
try:
keras = sys.modules["keras"]
keras.backend.clear_session()
except Exception:
# We don't want to crash the app if we can't clear the Keras session.
pass
if "matplotlib.pyplot" in sys.modules:
try:
plt = sys.modules["matplotlib.pyplot"]
plt.close("all")
except Exception:
# We don't want to crash the app if we can't close matplotlib
pass
|
Adds the current ScriptRunContext to a newly-created thread.
This should be called from this thread's parent thread,
before the new thread starts.
Parameters
----------
thread : threading.Thread
The thread to attach the current ScriptRunContext to.
ctx : ScriptRunContext or None
The ScriptRunContext to add, or None to use the current thread's
ScriptRunContext.
Returns
-------
threading.Thread
The same thread that was passed in, for chaining.
|
def add_script_run_ctx(
thread: threading.Thread | None = None, ctx: ScriptRunContext | None = None
):
"""Adds the current ScriptRunContext to a newly-created thread.
This should be called from this thread's parent thread,
before the new thread starts.
Parameters
----------
thread : threading.Thread
The thread to attach the current ScriptRunContext to.
ctx : ScriptRunContext or None
The ScriptRunContext to add, or None to use the current thread's
ScriptRunContext.
Returns
-------
threading.Thread
The same thread that was passed in, for chaining.
"""
if thread is None:
thread = threading.current_thread()
if ctx is None:
ctx = get_script_run_ctx()
if ctx is not None:
setattr(thread, SCRIPT_RUN_CONTEXT_ATTR_NAME, ctx)
return thread
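# Common usage pattern (hedged sketch) using the helpers defined in this module:
# attach the current ScriptRunContext to a worker thread before starting it, so
# Streamlit calls made inside the thread are associated with the right session
# instead of triggering the missing-context warning logged below.
import threading
def _worker():
    ...  # code here may call Streamlit APIs that need a ScriptRunContext
worker_thread = threading.Thread(target=_worker)
add_script_run_ctx(worker_thread, get_script_run_ctx())  # must happen before start()
worker_thread.start()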
|
Parameters
----------
suppress_warning : bool
If True, don't log a warning if there's no ScriptRunContext.
Returns
-------
ScriptRunContext | None
The current thread's ScriptRunContext, or None if it doesn't have one.
|
def get_script_run_ctx(suppress_warning: bool = False) -> ScriptRunContext | None:
"""
Parameters
----------
suppress_warning : bool
If True, don't log a warning if there's no ScriptRunContext.
Returns
-------
ScriptRunContext | None
The current thread's ScriptRunContext, or None if it doesn't have one.
"""
thread = threading.current_thread()
ctx: ScriptRunContext | None = getattr(thread, SCRIPT_RUN_CONTEXT_ATTR_NAME, None)
if ctx is None and runtime.exists() and not suppress_warning:
# Only warn about a missing ScriptRunContext if suppress_warning is False, and
# we were started via `streamlit run`. Otherwise, the user is likely running a
# script "bare", and doesn't need to be warned about streamlit
# bits that are irrelevant when not connected to a session.
_LOGGER.warning("Thread '%s': missing ScriptRunContext", thread.name)
return ctx
|
Compute the widget id for the given widget. This id is stable: a given
set of inputs to this function will always produce the same widget id output.
Only stable, deterministic values should be used to compute widget ids. Using
nondeterministic values as inputs can cause the resulting widget id to
change between runs.
The widget id includes the user_key so widgets with identical arguments can
use it to be distinct.
The widget id includes an easily identified prefix, and the user_key as a
suffix, to make it easy to identify it and know if a key maps to it.
|
def compute_widget_id(
element_type: str,
user_key: str | None = None,
**kwargs: SAFE_VALUES | Iterable[SAFE_VALUES],
) -> str:
"""Compute the widget id for the given widget. This id is stable: a given
set of inputs to this function will always produce the same widget id output.
Only stable, deterministic values should be used to compute widget ids. Using
nondeterministic values as inputs can cause the resulting widget id to
change between runs.
The widget id includes the user_key so widgets with identical arguments can
use it to be distinct.
The widget id includes an easily identified prefix, and the user_key as a
suffix, to make it easy to identify it and know if a key maps to it.
"""
h = hashlib.new("md5", **HASHLIB_KWARGS)
h.update(element_type.encode("utf-8"))
# This will iterate in a consistent order when the provided arguments have
# consistent order; dicts are always in insertion order.
for k, v in kwargs.items():
h.update(str(k).encode("utf-8"))
h.update(str(v).encode("utf-8"))
return f"{GENERATED_WIDGET_ID_PREFIX}-{h.hexdigest()}-{user_key}"
|
Return the user key portion of a widget id, or None if the id does not
have a user key.
TODO This will incorrectly indicate no user key if the user actually provides
"None" as a key, but we can't avoid this kind of problem while storing the
string representation of the no-user-key sentinel as part of the widget id.
|
def user_key_from_widget_id(widget_id: str) -> str | None:
"""Return the user key portion of a widget id, or None if the id does not
have a user key.
TODO This will incorrectly indicate no user key if the user actually provides
"None" as a key, but we can't avoid this kind of problem while storing the
string representation of the no-user-key sentinel as part of the widget id.
"""
user_key = widget_id.split("-", maxsplit=2)[-1]
user_key = None if user_key == "None" else user_key
return user_key
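# The user key is simply everything after the second "-" in the ID (sketch values;
# the "WIDGET" prefix is only a placeholder):
"WIDGET-3f2a9c-my_key".split("-", maxsplit=2)[-1]  # -> "my_key"
"WIDGET-3f2a9c-None".split("-", maxsplit=2)[-1]  # -> "None", which is mapped to None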
|
True if the given session_state key has the structure of a widget ID.
|
def is_widget_id(key: str) -> bool:
"""True if the given session_state key has the structure of a widget ID."""
return key.startswith(GENERATED_WIDGET_ID_PREFIX)
|
True if the given session_state key has the structure of a widget ID with a user_key.
|
def is_keyed_widget_id(key: str) -> bool:
"""True if the given session_state key has the structure of a widget ID with a user_key."""
return is_widget_id(key) and not key.endswith("-None")
|
Raise an Exception if the given user_key is invalid.
|
def require_valid_user_key(key: str) -> None:
"""Raise an Exception if the given user_key is invalid."""
if is_widget_id(key):
raise StreamlitAPIException(
f"Keys beginning with {GENERATED_WIDGET_ID_PREFIX} are reserved."
)
|
Get the SessionState object for the current session.
Note that in streamlit scripts, this function should not be called
directly. Instead, SessionState objects should be accessed via
st.session_state.
|
def get_session_state() -> SafeSessionState:
"""Get the SessionState object for the current session.
Note that in streamlit scripts, this function should not be called
directly. Instead, SessionState objects should be accessed via
st.session_state.
"""
global _state_use_warning_already_displayed
from streamlit.runtime.scriptrunner import get_script_run_ctx
ctx = get_script_run_ctx()
# If there is no script run context because the script is run bare, have
# session state act as an always empty dictionary, and print a warning.
if ctx is None:
if not _state_use_warning_already_displayed:
_state_use_warning_already_displayed = True
if not runtime.exists():
_LOGGER.warning(
"Session state does not function when running a script without `streamlit run`"
)
return SafeSessionState(SessionState(), lambda: None)
return ctx.session_state
|
Register a widget with Streamlit, and return its current value.
NOTE: This function should be called after the proto has been filled.
Parameters
----------
element_type : ElementType
The type of the element as stored in proto.
element_proto : WidgetProto
The proto of the specified type (e.g. Button/Multiselect/Slider proto)
deserializer : WidgetDeserializer[T]
Called to convert a widget's protobuf value to the value returned by
its st.<widget_name> function.
serializer : WidgetSerializer[T]
Called to convert a widget's value to its protobuf representation.
ctx : ScriptRunContext or None
Used to ensure uniqueness of widget IDs, and to look up widget values.
user_key : str or None
Optional user-specified string to use as the widget ID.
If this is None, we'll generate an ID by hashing the element.
widget_func_name : str or None
The widget's DeltaGenerator function name, if it's different from
its element_type. Custom components are a special case: they all have
the element_type "component_instance", but are instantiated with
dynamically-named functions.
on_change_handler : WidgetCallback or None
An optional callback invoked when the widget's value changes.
args : WidgetArgs or None
args to pass to on_change_handler when invoked
kwargs : WidgetKwargs or None
kwargs to pass to on_change_handler when invoked
Returns
-------
register_widget_result : RegisterWidgetResult[T]
Provides information on which value to return to the widget caller,
and whether the UI needs updating.
- Unhappy path:
- Our ScriptRunContext doesn't exist (meaning that we're running
as a "bare script" outside streamlit).
- We are disconnected from the SessionState instance.
In both cases we'll return a fallback RegisterWidgetResult[T].
- Happy path:
- The widget has already been registered on a previous run but the
user hasn't interacted with it on the client. The widget will have
the default value it was first created with. We then return a
RegisterWidgetResult[T], containing this value.
- The widget has already been registered and the user *has*
interacted with it. The widget will have that most recent
user-specified value. We then return a RegisterWidgetResult[T],
containing this value.
For both paths a widget return value is provided, allowing the widgets
to be used in a non-streamlit setting.
|
def register_widget(
element_type: ElementType,
element_proto: WidgetProto,
deserializer: WidgetDeserializer[T],
serializer: WidgetSerializer[T],
ctx: ScriptRunContext | None,
user_key: str | None = None,
widget_func_name: str | None = None,
on_change_handler: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
) -> RegisterWidgetResult[T]:
"""Register a widget with Streamlit, and return its current value.
NOTE: This function should be called after the proto has been filled.
Parameters
----------
element_type : ElementType
The type of the element as stored in proto.
element_proto : WidgetProto
The proto of the specified type (e.g. Button/Multiselect/Slider proto)
deserializer : WidgetDeserializer[T]
Called to convert a widget's protobuf value to the value returned by
its st.<widget_name> function.
serializer : WidgetSerializer[T]
Called to convert a widget's value to its protobuf representation.
ctx : ScriptRunContext or None
Used to ensure uniqueness of widget IDs, and to look up widget values.
user_key : str or None
Optional user-specified string to use as the widget ID.
If this is None, we'll generate an ID by hashing the element.
widget_func_name : str or None
The widget's DeltaGenerator function name, if it's different from
its element_type. Custom components are a special case: they all have
the element_type "component_instance", but are instantiated with
dynamically-named functions.
on_change_handler : WidgetCallback or None
An optional callback invoked when the widget's value changes.
args : WidgetArgs or None
args to pass to on_change_handler when invoked
kwargs : WidgetKwargs or None
kwargs to pass to on_change_handler when invoked
Returns
-------
register_widget_result : RegisterWidgetResult[T]
Provides information on which value to return to the widget caller,
and whether the UI needs updating.
- Unhappy path:
- Our ScriptRunContext doesn't exist (meaning that we're running
as a "bare script" outside streamlit).
- We are disconnected from the SessionState instance.
In both cases we'll return a fallback RegisterWidgetResult[T].
- Happy path:
- The widget has already been registered on a previous run but the
user hasn't interacted with it on the client. The widget will have
the default value it was first created with. We then return a
RegisterWidgetResult[T], containing this value.
- The widget has already been registered and the user *has*
interacted with it. The widget will have that most recent
user-specified value. We then return a RegisterWidgetResult[T],
containing this value.
For both paths a widget return value is provided, allowing the widgets
to be used in a non-streamlit setting.
"""
# Create the widget's updated metadata, and register it with session_state.
metadata = WidgetMetadata(
element_proto.id,
deserializer,
serializer,
value_type=ELEMENT_TYPE_TO_VALUE_TYPE[element_type],
callback=on_change_handler,
callback_args=args,
callback_kwargs=kwargs,
fragment_id=ctx.current_fragment_id if ctx else None,
)
return register_widget_from_metadata(metadata, ctx, widget_func_name, element_type)
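
# A hedged, illustrative sketch of how an element implementation might call
# register_widget. The proto values, the widget id, and the deserializer/serializer
# below are assumptions for a checkbox-like element, not the exact internals of
# st.checkbox. Outside `streamlit run`, get_script_run_ctx() returns None and the
# fallback result path is taken.
from streamlit.proto.Checkbox_pb2 import Checkbox as CheckboxProto
from streamlit.runtime.scriptrunner import get_script_run_ctx

example_proto = CheckboxProto()
example_proto.label = "Enable feature"
example_proto.default = False
example_proto.id = "hypothetical-widget-id"  # normally a hash derived from the element's parameters

example_result = register_widget(
    "checkbox",
    example_proto,
    deserializer=lambda ui_value, widget_id="": (
        example_proto.default if ui_value is None else bool(ui_value)
    ),
    serializer=bool,
    ctx=get_script_run_ctx(),
)
print(example_result.value)          # the value handed back to the st.checkbox caller
print(example_result.value_changed)  # whether the outgoing proto needs updating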
|
Register a widget and return its value, using an already constructed
`WidgetMetadata`.
This is split out from `register_widget` to allow caching code to replay
widgets by saving and reusing the completed metadata.
See `register_widget` for details on what this returns.
|
def register_widget_from_metadata(
metadata: WidgetMetadata[T],
ctx: ScriptRunContext | None,
widget_func_name: str | None,
element_type: ElementType,
) -> RegisterWidgetResult[T]:
"""Register a widget and return its value, using an already constructed
`WidgetMetadata`.
This is split out from `register_widget` to allow caching code to replay
widgets by saving and reusing the completed metadata.
See `register_widget` for details on what this returns.
"""
# Local import to avoid import cycle
import streamlit.runtime.caching as caching
if ctx is None:
# Early-out if we don't have a script run context (which probably means
# we're running as a "bare" Python script, and not via `streamlit run`).
return RegisterWidgetResult.failure(deserializer=metadata.deserializer)
widget_id = metadata.id
user_key = user_key_from_widget_id(widget_id)
# Ensure another widget with the same user key hasn't already been registered.
if user_key is not None:
if user_key not in ctx.widget_user_keys_this_run:
ctx.widget_user_keys_this_run.add(user_key)
else:
raise DuplicateWidgetID(
_build_duplicate_widget_message(
widget_func_name if widget_func_name is not None else element_type,
user_key,
)
)
# Ensure another widget with the same id hasn't already been registered.
new_widget = widget_id not in ctx.widget_ids_this_run
if new_widget:
ctx.widget_ids_this_run.add(widget_id)
else:
raise DuplicateWidgetID(
_build_duplicate_widget_message(
widget_func_name if widget_func_name is not None else element_type,
user_key,
)
)
# Save the widget metadata for cached result replay
caching.save_widget_metadata(metadata)
return ctx.session_state.register_widget(metadata, user_key)
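
# Illustrative sketch of the ctx=None early-out above: in a bare `python script.py` run
# there is no ScriptRunContext, so the fallback RegisterWidgetResult is built by calling
# the deserializer with None. The id and deserializer below are hypothetical stand-ins.
example_metadata = WidgetMetadata(
    "hypothetical-widget-id",
    lambda ui_value, widget_id="": False if ui_value is None else bool(ui_value),
    bool,
    value_type="bool_value",
)
example_fallback = register_widget_from_metadata(
    example_metadata, ctx=None, widget_func_name="checkbox", element_type="checkbox"
)
assert example_fallback.value is False and example_fallback.value_changed is False
# With a live ctx, a second widget that reuses the same user key in the same run raises
# DuplicateWidgetID (e.g. two st.* calls that both pass key="dup").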
|
Coalesce an older WidgetStates into a newer one, and return a new
WidgetStates containing the result.
For most widget values, we just take the latest version.
However, any trigger_values (which are set by buttons) that are True in
`old_states` will be set to True in the coalesced result, so that button
presses don't go missing.
|
def coalesce_widget_states(
old_states: WidgetStates | None, new_states: WidgetStates | None
) -> WidgetStates | None:
"""Coalesce an older WidgetStates into a newer one, and return a new
WidgetStates containing the result.
For most widget values, we just take the latest version.
However, any trigger_values (which are set by buttons) that are True in
`old_states` will be set to True in the coalesced result, so that button
presses don't go missing.
"""
if not old_states and not new_states:
return None
elif not old_states:
return new_states
elif not new_states:
return old_states
states_by_id: dict[str, WidgetState] = {
wstate.id: wstate for wstate in new_states.widgets
}
trigger_value_types = [("trigger_value", False), ("string_trigger_value", None)]
for old_state in old_states.widgets:
for trigger_value_type, unset_value in trigger_value_types:
if (
old_state.WhichOneof("value") == trigger_value_type
and old_state.trigger_value != unset_value
):
# Ensure the corresponding new_state is also a trigger;
# otherwise, a widget that was previously a button but no longer is
# could get a bad value.
new_trigger_val = states_by_id.get(old_state.id)
if (
new_trigger_val
and new_trigger_val.WhichOneof("value") == trigger_value_type
):
states_by_id[old_state.id] = old_state
coalesced = WidgetStates()
coalesced.widgets.extend(states_by_id.values())
return coalesced
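
# A small sketch (assuming the WidgetStates proto is importable as below): a button
# press recorded in the older states survives coalescing, even though the newer states
# report the trigger as unset.
from streamlit.proto.WidgetStates_pb2 import WidgetStates as ExampleWidgetStates

old = ExampleWidgetStates()
pressed = old.widgets.add()
pressed.id = "button-1"
pressed.trigger_value = True  # the press from the previous message

new = ExampleWidgetStates()
idle = new.widgets.add()
idle.id = "button-1"
idle.trigger_value = False  # on its own, this would drop the press

merged = coalesce_widget_states(old, new)
assert merged is not None
assert merged.widgets[0].trigger_value  # the press is preserved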
|
A custom repr similar to `streamlit.util.repr_` but that shows tree
structure using indentation.
|
def repr_(self) -> str:
"""A custom repr similar to `streamlit.util.repr_` but that shows tree
structure using indentation.
"""
classname = self.__class__.__name__
defaults: list[Any] = [None, "", False, [], set(), dict()]
if is_dataclass(self):
fields_vals = (
(f.name, getattr(self, f.name))
for f in fields(self)
if f.repr
and getattr(self, f.name) != f.default
and getattr(self, f.name) not in defaults
)
else:
fields_vals = ((f, v) for (f, v) in self.__dict__.items() if v not in defaults)
reprs = []
for field, value in fields_vals:
if isinstance(value, dict):
line = f"{field}={format_dict(value)}"
else:
line = f"{field}={value!r}"
reprs.append(line)
    if reprs:
        reprs[0] = "\n" + reprs[0]
field_reprs = ",\n".join(reprs)
field_reprs = textwrap.indent(field_reprs, " " * 4)
return f"{classname}({field_reprs}\n)"
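
# Illustrative only: repr_ can be attached as __repr__ on a small dataclass (it relies on
# the module's own imports used above: dataclasses.fields/is_dataclass and textwrap).
# DemoNode is a hypothetical class; only fields that differ from their defaults are shown.
from dataclasses import dataclass

@dataclass
class DemoNode:
    label: str = ""
    depth: int = 0

    __repr__ = repr_

print(DemoNode(label="root", depth=2))
# DemoNode(
#     label='root',
#     depth=2
# )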
|
Transform a list of `ForwardMsg` into a tree matching the implicit
tree structure of blocks and elements in a streamlit app.
Returns the root of the tree, which acts as the entrypoint for the query
and interaction API.
|
def parse_tree_from_messages(messages: list[ForwardMsg]) -> ElementTree:
"""Transform a list of `ForwardMsg` into a tree matching the implicit
tree structure of blocks and elements in a streamlit app.
Returns the root of the tree, which acts as the entrypoint for the query
and interaction API.
"""
root = ElementTree()
root.children = {
0: SpecialBlock(type="main", root=root, proto=None),
1: SpecialBlock(type="sidebar", root=root, proto=None),
2: SpecialBlock(type="event", root=root, proto=None),
}
for msg in messages:
if not msg.HasField("delta"):
continue
delta_path = msg.metadata.delta_path
delta = msg.delta
if delta.WhichOneof("type") == "new_element":
elt = delta.new_element
ty = elt.WhichOneof("type")
new_node: Node
if ty == "alert":
format = elt.alert.format
if format == AlertProto.Format.ERROR:
new_node = Error(elt.alert, root=root)
elif format == AlertProto.Format.INFO:
new_node = Info(elt.alert, root=root)
elif format == AlertProto.Format.SUCCESS:
new_node = Success(elt.alert, root=root)
elif format == AlertProto.Format.WARNING:
new_node = Warning(elt.alert, root=root)
else:
raise ValueError(
f"Unknown alert type with format {elt.alert.format}"
)
elif ty == "arrow_data_frame":
new_node = Dataframe(elt.arrow_data_frame, root=root)
elif ty == "arrow_table":
new_node = Table(elt.arrow_table, root=root)
elif ty == "button":
new_node = Button(elt.button, root=root)
elif ty == "chat_input":
new_node = ChatInput(elt.chat_input, root=root)
elif ty == "checkbox":
style = elt.checkbox.type
if style == CheckboxProto.StyleType.TOGGLE:
new_node = Toggle(elt.checkbox, root=root)
else:
new_node = Checkbox(elt.checkbox, root=root)
elif ty == "code":
new_node = Code(elt.code, root=root)
elif ty == "color_picker":
new_node = ColorPicker(elt.color_picker, root=root)
elif ty == "date_input":
new_node = DateInput(elt.date_input, root=root)
elif ty == "exception":
new_node = Exception(elt.exception, root=root)
elif ty == "heading":
if elt.heading.tag == HeadingProtoTag.TITLE_TAG.value:
new_node = Title(elt.heading, root=root)
elif elt.heading.tag == HeadingProtoTag.HEADER_TAG.value:
new_node = Header(elt.heading, root=root)
elif elt.heading.tag == HeadingProtoTag.SUBHEADER_TAG.value:
new_node = Subheader(elt.heading, root=root)
else:
raise ValueError(f"Unknown heading type with tag {elt.heading.tag}")
elif ty == "json":
new_node = Json(elt.json, root=root)
elif ty == "markdown":
if elt.markdown.element_type == MarkdownProto.Type.NATIVE:
new_node = Markdown(elt.markdown, root=root)
elif elt.markdown.element_type == MarkdownProto.Type.CAPTION:
new_node = Caption(elt.markdown, root=root)
elif elt.markdown.element_type == MarkdownProto.Type.LATEX:
new_node = Latex(elt.markdown, root=root)
elif elt.markdown.element_type == MarkdownProto.Type.DIVIDER:
new_node = Divider(elt.markdown, root=root)
else:
raise ValueError(
f"Unknown markdown type {elt.markdown.element_type}"
)
elif ty == "metric":
new_node = Metric(elt.metric, root=root)
elif ty == "multiselect":
new_node = Multiselect(elt.multiselect, root=root)
elif ty == "number_input":
new_node = NumberInput(elt.number_input, root=root)
elif ty == "radio":
new_node = Radio(elt.radio, root=root)
elif ty == "selectbox":
new_node = Selectbox(elt.selectbox, root=root)
elif ty == "slider":
if elt.slider.type == SliderProto.Type.SLIDER:
new_node = Slider(elt.slider, root=root)
elif elt.slider.type == SliderProto.Type.SELECT_SLIDER:
new_node = SelectSlider(elt.slider, root=root)
else:
raise ValueError(f"Slider with unknown type {elt.slider}")
elif ty == "text":
new_node = Text(elt.text, root=root)
elif ty == "text_area":
new_node = TextArea(elt.text_area, root=root)
elif ty == "text_input":
new_node = TextInput(elt.text_input, root=root)
elif ty == "time_input":
new_node = TimeInput(elt.time_input, root=root)
elif ty == "toast":
new_node = Toast(elt.toast, root=root)
else:
new_node = UnknownElement(elt, root=root)
elif delta.WhichOneof("type") == "add_block":
block = delta.add_block
bty = block.WhichOneof("type")
if bty == "chat_message":
new_node = ChatMessage(block.chat_message, root=root)
elif bty == "column":
new_node = Column(block.column, root=root)
elif bty == "expandable":
if block.expandable.icon:
new_node = Status(block.expandable, root=root)
else:
new_node = Expander(block.expandable, root=root)
elif bty == "tab":
new_node = Tab(block.tab, root=root)
else:
new_node = Block(proto=block, root=root)
else:
# add_rows
continue
current_node: Block = root
# Every node up to the end is a Block
for idx in delta_path[:-1]:
children = current_node.children
child = children.get(idx)
if child is None:
child = Block(proto=None, root=root)
children[idx] = child
assert isinstance(child, Block)
current_node = child
# Handle a block when we already have a placeholder for that location
if isinstance(new_node, Block):
placeholder_block = current_node.children.get(delta_path[-1])
if placeholder_block is not None:
new_node.children = placeholder_block.children
current_node.children[delta_path[-1]] = new_node
return root
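
# A minimal sketch (assuming the ForwardMsg proto is importable as below): one delta
# carrying an st.text element at delta_path [0, 0] becomes the first child of the
# "main" block in the parsed tree.
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg

msg = ForwardMsg()
msg.metadata.delta_path.extend([0, 0])     # main container, first position
msg.delta.new_element.text.body = "hello"  # what st.text("hello") would produce

tree = parse_tree_from_messages([msg])
main_block = tree.children[0]              # the SpecialBlock created for "main"
print(type(main_block.children[0]).__name__)  # -> "Text"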
|
Wait for the given ScriptRunner to emit a completion event. If the timeout
is reached, the runner will be shut down and an error will be thrown.
|
def require_widgets_deltas(runner: LocalScriptRunner, timeout: float = 3) -> None:
"""Wait for the given ScriptRunner to emit a completion event. If the timeout
    is reached, the runner will be shut down and an error will be thrown.
"""
t0 = time.time()
while time.time() - t0 < timeout:
time.sleep(0.001)
if runner.script_stopped():
return
# If we get here, the runner hasn't yet completed before our
# timeout. Create an error string for debugging.
    err_string = f"AppTest script run timed out after {timeout} seconds"
# Shutdown the runner before throwing an error, so that the script
# doesn't hang forever.
runner.request_stop()
runner.join()
raise RuntimeError(err_string)
|
A context manager that overrides config options. It can
also be used as a function decorator.
Examples:
>>> with patch_config_options({"server.headless": True}):
... assert(config.get_option("server.headless") is True)
... # Other test code that relies on these options
>>> @patch_config_options({"server.headless": True})
... def test_my_thing():
... assert(config.get_option("server.headless") is True)
|
@contextmanager  # from contextlib; needed so the yield-based body below works as a context manager
def patch_config_options(config_overrides: dict[str, Any]):
"""A context manager that overrides config options. It can
also be used as a function decorator.
Examples:
>>> with patch_config_options({"server.headless": True}):
... assert(config.get_option("server.headless") is True)
... # Other test code that relies on these options
>>> @patch_config_options({"server.headless": True})
... def test_my_thing():
... assert(config.get_option("server.headless") is True)
"""
# Lazy-load for performance reasons.
from unittest.mock import patch
mock_get_option = build_mock_config_get_option(config_overrides)
with patch.object(config, "get_option", new=mock_get_option):
yield
|
Like struct.calcsize() but with 'z' for Py_ssize_t.
|
def _calcsize(fmt):
"""Like struct.calcsize() but with 'z' for Py_ssize_t."""
return calcsize(fmt.replace("z", _z_P_L))
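
# Illustrative: 'z' stands in for Py_ssize_t, so it can be mixed freely with the standard
# struct codes; the concrete byte count depends on what _z_P_L resolved to on this platform.
assert _calcsize("zz") == 2 * _calcsize("z")
assert _calcsize("z") >= calcsize("i")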
|
Return iter-/generator, preferably.
|
def _items(obj): # dict only
"""Return iter-/generator, preferably."""
o = getattr(obj, "iteritems", obj.items)
return o() if callable(o) else (o or ())
|
Return iter-/generator, preferably.
|
def _keys(obj): # dict only
"""Return iter-/generator, preferably."""
o = getattr(obj, "iterkeys", obj.keys)
return o() if callable(o) else (o or ())
|
Return iter-/generator, preferably.
|
def _values(obj): # dict only
"""Return iter-/generator, preferably."""
o = getattr(obj, "itervalues", obj.values)
return o() if callable(o) else (o or ())
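
# Illustrative: on Python 3 these helpers simply fall back to the dict's own view methods
# (iteritems/iterkeys/itervalues only existed on Python 2 dicts).
d = {"a": 1, "b": 2}
assert list(_keys(d)) == list(d.keys())
assert list(_values(d)) == list(d.values())
assert list(_items(d)) == list(d.items())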
|