Handle an exception that originated from a user app. By default, we show exceptions directly in the browser. However, if the user has disabled client error details, we display a generic warning in the frontend instead.
def handle_uncaught_app_exception(ex: BaseException) -> None:
    """Handle an exception that originated from a user app.

    By default, we show exceptions directly in the browser. However, if the
    user has disabled client error details, we display a generic warning in
    the frontend instead.
    """
    error_logged = False

    if config.get_option("logger.enableRich"):
        try:
            # Print exception via rich
            # Rich is only a soft dependency
            # -> if not installed, we will use the default traceback formatting
            _print_rich_exception(ex)
            error_logged = True
        except Exception:
            # Rich is not installed or not compatible with our config
            # -> Use normal traceback formatting as fallback
            # Catching all exceptions because we don't want to leave any
            # possibility of breaking here.
            error_logged = False

    if config.get_option("client.showErrorDetails"):
        if not error_logged:
            # TODO: Clean up the stack trace, so it doesn't include ScriptRunner.
            _LOGGER.warning("Uncaught app exception", exc_info=ex)
        st.exception(ex)
    else:
        if not error_logged:
            # Use LOGGER.error, rather than LOGGER.debug, since we don't
            # show debug logs by default.
            _LOGGER.error("Uncaught app exception", exc_info=ex)
        st.exception(UncaughtAppException(ex))
Coerce bytes to a BytesIO or a StringIO. Parameters ---------- data : bytes encoding : str Returns ------- BytesIO or StringIO If the file's data is in a well-known textual format (or if the encoding parameter is set), return a StringIO. Otherwise, return BytesIO.
def get_encoded_file_data(
    data: bytes, encoding: str = "auto"
) -> io.StringIO | io.BytesIO:
    """Coerce bytes to a BytesIO or a StringIO.

    Parameters
    ----------
    data : bytes
    encoding : str

    Returns
    -------
    BytesIO or StringIO
        If the file's data is in a well-known textual format (or if the encoding
        parameter is set), return a StringIO. Otherwise, return BytesIO.
    """
    if encoding == "auto":
        # If the file does not look like a pure binary file, assume
        # it's utf-8. It would be great if we could guess it a little
        # more smartly here, but it is what it is!
        data_encoding = None if is_binary_string(data) else "utf-8"
    else:
        data_encoding = encoding

    if data_encoding:
        return io.StringIO(data.decode(data_encoding))

    return io.BytesIO(data)
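A minimal usage sketch of the coercion behavior described above (assuming `is_binary_string` and this helper live in the same importable module):

import io

# Text bytes are decoded and wrapped in a StringIO.
buf = get_encoded_file_data(b"hello world")
assert isinstance(buf, io.StringIO) and buf.read() == "hello world"

# Bytes that look binary (e.g. containing NUL bytes) stay a BytesIO.
raw = get_encoded_file_data(b"\x00\x01\x02")
assert isinstance(raw, io.BytesIO)

# An explicit encoding always yields a StringIO.
latin = get_encoded_file_data("café".encode("latin-1"), encoding="latin-1")
assert latin.read() == "café"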
Opens a context to read this file relative to the streamlit path. For example: with streamlit_read('foo.txt') as foo: ... opens the file `.streamlit/foo.txt` path - the path to read from (within the streamlit directory) binary - set to True for binary IO
def streamlit_read(path, binary=False):
    """Opens a context to read this file relative to the streamlit path.

    For example:

    with streamlit_read('foo.txt') as foo:
        ...

    opens the file `.streamlit/foo.txt`

    path   - the path to read from (within the streamlit directory)
    binary - set to True for binary IO
    """
    filename = get_streamlit_file_path(path)
    if os.stat(filename).st_size == 0:
        raise util.Error('Read zero byte file: "%s"' % filename)

    mode = "r"
    if binary:
        mode += "b"
    with open(os.path.join(CONFIG_FOLDER_NAME, path), mode) as handle:
        yield handle
Opens a file for writing within the streamlit path, ensuring that the path exists. For example: with streamlit_write('foo/bar.txt') as bar: ... opens the file .streamlit/foo/bar.txt for writing, creating any necessary directories along the way. path - the path to write to (within the streamlit directory) binary - set to True for binary IO
def streamlit_write(path, binary=False):
    """Opens a file for writing within the streamlit path, ensuring that the
    path exists.

    For example:

    with streamlit_write('foo/bar.txt') as bar:
        ...

    opens the file .streamlit/foo/bar.txt for writing, creating any necessary
    directories along the way.

    path   - the path to write to (within the streamlit directory)
    binary - set to True for binary IO
    """
    mode = "w"
    if binary:
        mode += "b"
    path = get_streamlit_file_path(path)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    try:
        with open(path, mode) as handle:
            yield handle
    except OSError as e:
        msg = ["Unable to write file: %s" % os.path.abspath(path)]
        if e.errno == errno.EINVAL and env_util.IS_DARWIN:
            msg.append(
                "Python is limited to files below 2GB on OSX. "
                "See https://bugs.python.org/issue24658"
            )
        raise util.Error("\n".join(msg))
Get the folder where static HTML/JS/CSS files live.
def get_static_dir() -> str:
    """Get the folder where static HTML/JS/CSS files live."""
    dirname = os.path.dirname(os.path.normpath(__file__))
    return os.path.normpath(os.path.join(dirname, "static"))
Get the folder where app static files live
def get_app_static_dir(main_script_path: str) -> str:
    """Get the folder where app static files live"""
    main_script_path = Path(main_script_path)
    static_dir = main_script_path.parent / APP_STATIC_FOLDER_NAME
    return os.path.abspath(static_dir)
Return the full path to a file in ~/.streamlit. This doesn't guarantee that the file (or its directory) exists.
def get_streamlit_file_path(*filepath) -> str:
    """Return the full path to a file in ~/.streamlit.

    This doesn't guarantee that the file (or its directory) exists.
    """
    # os.path.expanduser works on OSX, Linux and Windows
    home = os.path.expanduser("~")
    if home is None:
        raise RuntimeError("No home directory.")

    return os.path.join(home, CONFIG_FOLDER_NAME, *filepath)
Return the full path to a filepath in ${CWD}/.streamlit. This doesn't guarantee that the file (or its directory) exists.
def get_project_streamlit_file_path(*filepath):
    """Return the full path to a filepath in ${CWD}/.streamlit.

    This doesn't guarantee that the file (or its directory) exists.
    """
    return os.path.join(os.getcwd(), CONFIG_FOLDER_NAME, *filepath)
Test whether a file is in some folder with globbing support. Parameters ---------- filepath : str A file path. folderpath_glob: str A path to a folder that may include globbing.
def file_is_in_folder_glob(filepath: str, folderpath_glob: str) -> bool:
    """Test whether a file is in some folder with globbing support.

    Parameters
    ----------
    filepath : str
        A file path.
    folderpath_glob: str
        A path to a folder that may include globbing.
    """
    # Make the glob always end with "/*" so we match files inside subfolders of
    # folderpath_glob.
    if not folderpath_glob.endswith("*"):
        if folderpath_glob.endswith("/"):
            folderpath_glob += "*"
        else:
            folderpath_glob += "/*"

    import fnmatch

    file_dir = os.path.dirname(filepath) + "/"
    return fnmatch.fnmatch(file_dir, folderpath_glob)
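A short usage sketch of the matching rules (the paths are purely illustrative):

# Direct children and files in subfolders both match.
assert file_is_in_folder_glob("/a/b/script.py", "/a/b")
assert file_is_in_folder_glob("/a/b/pages/page1.py", "/a/b")

# Globbing in the folder path is supported.
assert file_is_in_folder_glob("/home/user/proj/app.py", "/home/*/proj")

# Files outside the folder do not match.
assert not file_is_in_folder_glob("/other/script.py", "/a/b")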
Return the size of a directory in bytes.
def get_directory_size(directory: str) -> int:
    """Return the size of a directory in bytes."""
    total_size = 0
    for dirpath, _, filenames in os.walk(directory):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            total_size += os.path.getsize(fp)
    return total_size
Test whether a filepath is in one of the folders specified in the PYTHONPATH env variable. Parameters ---------- filepath : str An absolute file path. Returns ------- boolean True if contained in PYTHONPATH, False otherwise. False if PYTHONPATH is not defined or empty.
def file_in_pythonpath(filepath: str) -> bool:
    """Test whether a filepath is in one of the folders specified in the
    PYTHONPATH env variable.

    Parameters
    ----------
    filepath : str
        An absolute file path.

    Returns
    -------
    boolean
        True if contained in PYTHONPATH, False otherwise.
        False if PYTHONPATH is not defined or empty.
    """
    pythonpath = os.environ.get("PYTHONPATH", "")
    if len(pythonpath) == 0:
        return False

    absolute_paths = [os.path.abspath(path) for path in pythonpath.split(os.pathsep)]
    return any(
        file_is_in_folder_glob(os.path.normpath(filepath), path)
        for path in absolute_paths
    )
Return the normalized path of the joined path. Parameters ---------- *args : str The path components to join. Returns ------- str The normalized path of the joined path.
def normalize_path_join(*args): """Return the normalized path of the joined path. Parameters ---------- *args : str The path components to join. Returns ------- str The normalized path of the joined path. """ return os.path.normpath(os.path.join(*args))
Return the full path to the main script directory. Parameters ---------- main_script : str The main script path. The path can be an absolute path or a relative path. Returns ------- str The full path to the main script directory.
def get_main_script_directory(main_script): """Return the full path to the main script directory. Parameters ---------- main_script : str The main script path. The path can be an absolute path or a relative path. Returns ------- str The full path to the main script directory. """ main_script_path = normalize_path_join(os.getcwd(), main_script) return os.path.dirname(main_script_path)
Set log level.
def set_log_level(level: str | int) -> None:
    """Set log level."""
    logger = get_logger(__name__)

    if isinstance(level, str):
        level = level.upper()
    if level == "CRITICAL" or level == logging.CRITICAL:
        log_level = logging.CRITICAL
    elif level == "ERROR" or level == logging.ERROR:
        log_level = logging.ERROR
    elif level == "WARNING" or level == logging.WARNING:
        log_level = logging.WARNING
    elif level == "INFO" or level == logging.INFO:
        log_level = logging.INFO
    elif level == "DEBUG" or level == logging.DEBUG:
        log_level = logging.DEBUG
    else:
        msg = 'undefined log level "%s"' % level
        logger.critical(msg)
        sys.exit(1)

    for log in _loggers.values():
        log.setLevel(log_level)

    global _global_log_level
    _global_log_level = log_level
Set up the console formatter for a given logger.
def setup_formatter(logger: logging.Logger) -> None: """Set up the console formatter for a given logger.""" # Deregister any previous console loggers. if hasattr(logger, "streamlit_console_handler"): logger.removeHandler(logger.streamlit_console_handler) logger.streamlit_console_handler = logging.StreamHandler() # type: ignore[attr-defined] # Import here to avoid circular imports from streamlit import config if config._config_options: # logger is required in ConfigOption.set_value # Getting the config option before the config file has been parsed # can create an infinite loop message_format = config.get_option("logger.messageFormat") else: message_format = DEFAULT_LOG_MESSAGE formatter = logging.Formatter(fmt=message_format) formatter.default_msec_format = "%s.%03d" logger.streamlit_console_handler.setFormatter(formatter) # type: ignore[attr-defined] # Register the new console logger. logger.addHandler(logger.streamlit_console_handler)
Set Tornado log levels. This function does not import any Tornado code, so it's safe to call even when Server is not running.
def init_tornado_logs() -> None:
    """Set Tornado log levels.

    This function does not import any Tornado code, so it's safe to call even
    when Server is not running.
    """
    # http://www.tornadoweb.org/en/stable/log.html
    for log in ("access", "application", "general"):
        # get_logger will set the log level for the logger with the given name.
        get_logger(f"tornado.{log}")
Return a logger. Parameters ---------- name : str The name of the logger to use. You should just pass in __name__. Returns ------- Logger
def get_logger(name: str) -> logging.Logger:
    """Return a logger.

    Parameters
    ----------
    name : str
        The name of the logger to use. You should just pass in __name__.

    Returns
    -------
    Logger
    """
    if name in _loggers:
        return _loggers[name]

    if name == "root":
        logger = logging.getLogger("streamlit")
    else:
        logger = logging.getLogger(name)

    logger.setLevel(_global_log_level)
    logger.propagate = False
    setup_formatter(logger)

    _loggers[name] = logger

    return logger
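A small usage sketch of the caching behavior (the logger name is illustrative):

# get_logger configures and caches loggers; repeated calls with the same
# name return the same instance.
log_a = get_logger("my_app.module")
log_b = get_logger("my_app.module")
assert log_a is log_b
log_a.info("configured with the Streamlit formatter and the global log level")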
Get the *external* IP address of the current machine. Returns ------- string The external IPv4 address of the current machine.
def get_external_ip() -> str | None: """Get the *external* IP address of the current machine. Returns ------- string The external IPv4 address of the current machine. """ global _external_ip if _external_ip is not None: return _external_ip response = _make_blocking_http_get(_AWS_CHECK_IP, timeout=5) if response is None: response = _make_blocking_http_get(_AWS_CHECK_IP_HTTPS, timeout=5) if _looks_like_an_ip_adress(response): _external_ip = response else: _LOGGER.warning( # fmt: off "Did not auto detect external IP.\n" "Please go to %s for debugging hints.", # fmt: on util.HELP_DOC ) _external_ip = None return _external_ip
Get the *local* IP address of the current machine. From: https://stackoverflow.com/a/28950776 Returns ------- string The local IPv4 address of the current machine.
def get_internal_ip() -> str | None: """Get the *local* IP address of the current machine. From: https://stackoverflow.com/a/28950776 Returns ------- string The local IPv4 address of the current machine. """ global _internal_ip if _internal_ip is not None: return _internal_ip import socket with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: try: # Doesn't even have to be reachable s.connect(("8.8.8.8", 1)) _internal_ip = s.getsockname()[0] except Exception: _internal_ip = "127.0.0.1" return _internal_ip
Sends a string message to the parent window (when host configuration allows).
def post_parent_message(message: str) -> None:
    """Sends a string message to the parent window (when host configuration allows)."""
    ctx = get_script_run_ctx()
    if ctx is None:
        return

    fwd_msg = ForwardMsg()
    fwd_msg.parent_message.message = message
    ctx.enqueue(fwd_msg)
Open a read-only Python file taking proper care of its encoding. In Python 3, we would like all files to be opened with utf-8 encoding. However, some authors like to specify PEP 263 headers in their source files with their own encodings. In that case, we should respect the author's encoding.
def open_python_file(filename: str):
    """Open a read-only Python file taking proper care of its encoding.

    In Python 3, we would like all files to be opened with utf-8 encoding.
    However, some authors like to specify PEP 263 headers in their source files
    with their own encodings. In that case, we should respect the author's
    encoding.
    """
    import tokenize

    if hasattr(tokenize, "open"):  # Added in Python 3.2
        # Open file respecting PEP263 encoding. If no encoding header is
        # found, opens as utf-8.
        return tokenize.open(filename)
    else:
        return open(filename, encoding="utf-8")
Compute the icon and name of a page from its script path. This is *almost* the page name displayed in the nav UI, but it has underscores instead of spaces. The reason we do this is because having spaces in URLs both looks bad and is hard to deal with due to the need to URL-encode them. To solve this, we only swap the underscores for spaces right before we render page names.
def page_icon_and_name(script_path: Path) -> tuple[str, str]: """Compute the icon and name of a page from its script path. This is *almost* the page name displayed in the nav UI, but it has underscores instead of spaces. The reason we do this is because having spaces in URLs both looks bad and is hard to deal with due to the need to URL-encode them. To solve this, we only swap the underscores for spaces right before we render page names. """ extraction = re.search(PAGE_FILENAME_REGEX, script_path.name) if extraction is None: return "", "" # This cast to Any+type annotation weirdness is done because # cast(re.Match[str], ...) explodes at runtime since Python interprets it # as an attempt to index into re.Match instead of as a type annotation. extraction: re.Match[str] = cast(Any, extraction) icon_and_name = re.sub( r"[_ ]+", "_", extraction.group(2) ).strip() or extraction.group(1) return extract_leading_emoji(icon_and_name)
Decodes a string as ascii.
def decode_ascii(string: bytes) -> str: """Decodes a string as ascii.""" return string.decode("ascii")
Convert an object to text, dedent it, and strip whitespace.
def clean_text(text: SupportsStr) -> str: """Convert an object to text, dedent it, and strip whitespace.""" return textwrap.dedent(str(text)).strip()
Check if a string contains any special chars. Special chars in that case are all chars that are not alphanumeric, underscore, hyphen or whitespace.
def _contains_special_chars(text: str) -> bool: """Check if a string contains any special chars. Special chars in that case are all chars that are not alphanumeric, underscore, hyphen or whitespace. """ return re.match(_ALPHANUMERIC_CHAR_REGEX, text) is None if text else False
Check if input string is a valid emoji.
def is_emoji(text: str) -> bool: """Check if input string is a valid emoji.""" if not _contains_special_chars(text): return False from streamlit.emojis import ALL_EMOJIS return text.replace("\U0000FE0F", "") in ALL_EMOJIS
Check if input string is a valid Material icon.
def is_material_icon(maybe_icon: str) -> bool: """Check if input string is a valid Material icon.""" from streamlit.material_icon_names import ALL_MATERIAL_ICONS return maybe_icon in ALL_MATERIAL_ICONS
Validate an icon or emoji and return it in normalized format if valid.
def validate_icon_or_emoji(icon: str | None) -> str: """Validate an icon or emoji and return it in normalized format if valid.""" if icon is not None and icon.startswith(":material"): return validate_material_icon(icon) return validate_emoji(icon)
Validate a Material icon shortcode and return the icon in normalized format if valid.
def validate_material_icon(maybe_material_icon: str | None) -> str: """Validate a Material icon shortcode and return the icon in normalized format if valid.""" supported_icon_packs = [ "material", ] if maybe_material_icon is None: return "" icon_regex = r"^\s*:(.+)\/(.+):\s*$" icon_match = re.match(icon_regex, maybe_material_icon) if not icon_match: raise StreamlitAPIException( f'The value `"{maybe_material_icon}"` is not a valid Material icon. ' f"Please use a Material icon shortcode like **`:material/thumb_up:`**" ) pack_name, icon_name = icon_match.groups() if ( pack_name not in supported_icon_packs or not icon_name or not is_material_icon(icon_name) ): raise StreamlitAPIException( f'The value `"{maybe_material_icon}"` is not a valid Material icon.' f" Please use a Material icon shortcode like **`:material/thumb_up:`**. " ) return f":{pack_name}/{icon_name}:"
Return a tuple containing the first emoji found in the given string and the rest of the string (minus an optional separator between the two).
def extract_leading_emoji(text: str) -> tuple[str, str]: """Return a tuple containing the first emoji found in the given string and the rest of the string (minus an optional separator between the two). """ if not _contains_special_chars(text): # If the string only contains basic alphanumerical chars and/or # underscores, hyphen & whitespaces, then it's guaranteed that there # is no emoji in the string. return "", text from streamlit.emojis import EMOJI_EXTRACTION_REGEX re_match = re.search(EMOJI_EXTRACTION_REGEX, text) if re_match is None: return "", text # This cast to Any+type annotation weirdness is done because # cast(re.Match[str], ...) explodes at runtime since Python interprets it # as an attempt to index into re.Match instead of as a type annotation. re_match: re.Match[str] = cast(Any, re_match) return re_match.group(1), re_match.group(2)
Return the length of the longest consecutive run of a given char in a string.
def max_char_sequence(string: str, char: str) -> int:
    """Return the length of the longest consecutive run of a given char in a string."""
    max_sequence = 0
    current_sequence = 0

    for c in string:
        if c == char:
            current_sequence += 1
            max_sequence = max(max_sequence, current_sequence)
        else:
            current_sequence = 0

    return max_sequence
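A quick illustration of the expected results:

# The longest run of "b" in "aabbbab" has length 3.
assert max_char_sequence("aabbbab", "b") == 3
# A character that never occurs yields 0.
assert max_char_sequence("aabbbab", "z") == 0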
Guess whether an input bytes object is binary data (i.e., not decodable as text).
def is_binary_string(inp: bytes) -> bool:
    """Guess whether an input bytes object is binary data (i.e., not decodable as text)."""
    # From https://stackoverflow.com/a/7392391
    return bool(inp.translate(None, TEXTCHARS))
Simplify a number into a human-readable format and return it as a string.
def simplify_number(num: int) -> str:
    """Simplify a number into a human-readable format and return it as a string."""
    num_converted = float(f"{num:.2g}")
    magnitude = 0
    while abs(num_converted) >= 1000:
        magnitude += 1
        num_converted /= 1000.0
    return "{}{}".format(
        f"{num_converted:f}".rstrip("0").rstrip("."),
        ["", "k", "m", "b", "t"][magnitude],
    )
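A few illustrative values, based on the two-significant-digit rounding above:

assert simplify_number(950) == "950"
assert simplify_number(1500) == "1.5k"
assert simplify_number(1234567) == "1.2m"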
Returns True if the string looks like <foo blarg at 0x15ee6f9a0>.
def is_mem_address_str(string): """Returns True if the string looks like <foo blarg at 0x15ee6f9a0>.""" if _OBJ_MEM_ADDRESS.match(string): return True return False
Returns True if the given string contains what seem to be HTML tags. Note that false positives/negatives are possible, so this function should not be used in contexts where complete correctness is required.
def probably_contains_html_tags(s: str) -> bool: """Returns True if the given string contains what seem to be HTML tags. Note that false positives/negatives are possible, so this function should not be used in contexts where complete correctness is required.""" return bool(_RE_CONTAINS_HTML.search(s))
Add or subtract years from a date.
def adjust_years(input_date: date, years: int) -> date:
    """Add or subtract years from a date."""
    try:
        # Attempt to directly add/subtract years
        return input_date.replace(year=input_date.year + years)
    except ValueError as err:
        # Handle case for leap year date (February 29) that doesn't exist in the
        # target year by moving the date to February 28
        if input_date.month == 2 and input_date.day == 29:
            return input_date.replace(year=input_date.year + years, month=2, day=28)

        raise StreamlitAPIException(
            f"Date {input_date} does not exist in the target year {input_date.year + years}. "
            "This should never happen. Please report this bug."
        ) from err
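A short usage sketch, including the leap-year fallback:

from datetime import date

assert adjust_years(date(2022, 6, 15), 3) == date(2025, 6, 15)
# Feb 29 falls back to Feb 28 when the target year is not a leap year.
assert adjust_years(date(2020, 2, 29), 1) == date(2021, 2, 28)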
Convert a time string value to a float representing "number of seconds".
def time_to_seconds( t: float | timedelta | str | None, *, coerce_none_to_inf: bool = True ) -> float | None: """ Convert a time string value to a float representing "number of seconds". """ if coerce_none_to_inf and t is None: return math.inf if isinstance(t, timedelta): return t.total_seconds() if isinstance(t, str): import numpy as np import pandas as pd try: seconds: float = pd.Timedelta(t).total_seconds() if np.isnan(seconds): raise BadTimeStringError(t) return seconds except ValueError as ex: raise BadTimeStringError(t) from ex return t
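A hedged usage sketch (pandas is required for parsing time strings):

from datetime import timedelta

assert time_to_seconds(90) == 90
assert time_to_seconds(timedelta(minutes=2)) == 120.0
assert time_to_seconds("1.5 days") == 129600.0
# None maps to infinity unless coerce_none_to_inf is disabled.
assert time_to_seconds(None) == float("inf")
assert time_to_seconds(None, coerce_none_to_inf=False) is None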
Check type without importing expensive modules. Parameters ---------- obj : object The object to type-check. fqn_type_pattern : str or regex The fully-qualified type string or a regular expression. Regexes should start with `^` and end with `$`. Example ------- To check whether something is a Matplotlib Figure without importing matplotlib, use: >>> is_type(foo, 'matplotlib.figure.Figure')
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool: """Check type without importing expensive modules. Parameters ---------- obj : object The object to type-check. fqn_type_pattern : str or regex The fully-qualified type string or a regular expression. Regexes should start with `^` and end with `$`. Example ------- To check whether something is a Matplotlib Figure without importing matplotlib, use: >>> is_type(foo, 'matplotlib.figure.Figure') """ fqn_type = get_fqn_type(obj) if isinstance(fqn_type_pattern, str): return fqn_type_pattern == fqn_type else: return fqn_type_pattern.match(fqn_type) is not None
Get module.type_name for a given type.
def get_fqn(the_type: type) -> str: """Get module.type_name for a given type.""" return f"{the_type.__module__}.{the_type.__qualname__}"
Get module.type_name for a given object.
def get_fqn_type(obj: object) -> str: """Get module.type_name for a given object.""" return get_fqn(type(obj))
True if the object is one of the supported unevaluated data objects: Currently supported objects are: - Snowpark DataFrame / Table - PySpark DataFrame - Modin DataFrame / Series - Snowpandas DataFrame / Series Unevaluated means that the data is not yet in local memory. Unevaluated data objects are treated differently from other data objects by only requesting a subset of the data instead of loading all data into memory.
def is_unevaluated_data_object(obj: object) -> bool:
    """True if the object is one of the supported unevaluated data objects:

    Currently supported objects are:
    - Snowpark DataFrame / Table
    - PySpark DataFrame
    - Modin DataFrame / Series
    - Snowpandas DataFrame / Series

    Unevaluated means that the data is not yet in local memory.
    Unevaluated data objects are treated differently from other data objects by
    only requesting a subset of the data instead of loading all data into memory.
    """
    return (
        is_snowpark_data_object(obj)
        or is_pyspark_data_object(obj)
        or is_snowpandas_data_object(obj)
        or is_modin_data_object(obj)
    )
True if obj is a Snowpark DataFrame or Table.
def is_snowpark_data_object(obj: object) -> bool: """True if obj is a Snowpark DataFrame or Table.""" return is_type(obj, _SNOWPARK_TABLE_TYPE_STR) or is_type(obj, _SNOWPARK_DF_TYPE_STR)
True if obj is a list of snowflake.snowpark.row.Row.
def is_snowpark_row_list(obj: object) -> bool: """True if obj is a list of snowflake.snowpark.row.Row.""" if not isinstance(obj, list): return False if len(obj) < 1: return False if not hasattr(obj[0], "__class__"): return False return is_type(obj[0], _SNOWPARK_DF_ROW_TYPE_STR)
True if obj is of type pyspark.sql.dataframe.DataFrame
def is_pyspark_data_object(obj: object) -> bool: """True if obj is of type pyspark.sql.dataframe.DataFrame""" return ( is_type(obj, _PYSPARK_DF_TYPE_STR) and hasattr(obj, "toPandas") and callable(getattr(obj, "toPandas")) )
True if obj is a Modin DataFrame or Series.
def is_modin_data_object(obj: object) -> bool:
    """True if obj is a Modin DataFrame or Series."""
    return is_type(obj, _MODIN_DF_TYPE_STR) or is_type(obj, _MODIN_SERIES_TYPE_STR)
True if obj is a Snowpark Pandas DataFrame or Series.
def is_snowpandas_data_object(obj: object) -> bool: """True if obj is a Snowpark Pandas DataFrame or Series.""" return is_type(obj, _SNOWPANDAS_DF_TYPE_STR) or is_type( obj, _SNOWPANDAS_SERIES_TYPE_STR )
True if obj is of a type that can be passed to convert_anything_to_df.
def is_dataframe_compatible(obj: object) -> TypeGuard[DataFrameCompatible]:
    """True if obj is of a type that can be passed to convert_anything_to_df."""
    return is_dataframe_like(obj) or type(obj) in _DATAFRAME_COMPATIBLE_TYPES
True if the type is considered bytes-like for the purposes of protobuf data marshalling.
def is_bytes_like(obj: object) -> TypeGuard[BytesLike]: """True if the type is considered bytes-like for the purposes of protobuf data marshalling. """ return isinstance(obj, _BYTES_LIKE_TYPES)
Converts the given object to bytes. Only types for which `is_bytes_like` is true can be converted; anything else will result in an exception.
def to_bytes(obj: BytesLike) -> bytes:
    """Converts the given object to bytes. Only types for which `is_bytes_like`
    is true can be converted; anything else will result in an exception.
    """
    if isinstance(obj, bytearray):
        return bytes(obj)
    elif isinstance(obj, bytes):
        return obj

    raise RuntimeError(f"{obj} is not convertible to bytes")
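A small illustration of the accepted inputs:

assert to_bytes(b"abc") == b"abc"
assert to_bytes(bytearray(b"abc")) == b"abc"

# Anything that is not bytes-like raises.
try:
    to_bytes("not bytes")
except RuntimeError:
    pass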
True if input is a SymPy expression.
def is_sympy_expession(obj: object) -> TypeGuard[sympy.Expr]: """True if input is a SymPy expression.""" if not is_type(obj, _SYMPY_RE): return False try: import sympy return isinstance(obj, sympy.Expr) except ImportError: return False
True if input looks like an Altair chart.
def is_altair_chart(obj: object) -> bool: """True if input looks like an Altair chart.""" return is_type(obj, _ALTAIR_RE)
True if input looks like a pillow image.
def is_pillow_image(obj: object) -> bool: """True if input looks like a pillow image.""" return is_type(obj, _PILLOW_RE)
True if input looks like a Keras model.
def is_keras_model(obj: object) -> bool: """True if input looks like a Keras model.""" return ( is_type(obj, "keras.engine.sequential.Sequential") or is_type(obj, "keras.engine.training.Model") or is_type(obj, "tensorflow.python.keras.engine.sequential.Sequential") or is_type(obj, "tensorflow.python.keras.engine.training.Model") )
True if input looks like an OpenAI chat completion chunk.
def is_openai_chunk(obj: object) -> bool: """True if input looks like an OpenAI chat completion chunk.""" return is_type(obj, _OPENAI_CHUNK_RE)
Check if the list only contains scalar values.
def is_list_of_scalars(data: Iterable[Any]) -> bool:
    """Check if the list only contains scalar values."""
    from pandas.api.types import infer_dtype

    # Overview on all values that are interpreted as scalar:
    # https://pandas.pydata.org/docs/reference/api/pandas.api.types.is_scalar.html
    return infer_dtype(data, skipna=True) not in ["mixed", "unknown-array"]
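A brief illustration (pandas' infer_dtype does the heavy lifting here):

assert is_list_of_scalars([1, 2, 3])
assert is_list_of_scalars(["a", "b", None])
# Nested containers are not scalar values.
assert not is_list_of_scalars([[1, 2], [3, 4]])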
True if input looks like a Plotly chart.
def is_plotly_chart(obj: object) -> TypeGuard[Figure | list[Any] | dict[str, Any]]: """True if input looks like a Plotly chart.""" return ( is_type(obj, "plotly.graph_objs._figure.Figure") or _is_list_of_plotly_objs(obj) or _is_probably_plotly_dict(obj) )
True if input looks like a GraphViz chart.
def is_graphviz_chart( obj: object, ) -> TypeGuard[graphviz.Graph | graphviz.Digraph]: """True if input looks like a GraphViz chart.""" return ( # GraphViz < 0.18 is_type(obj, "graphviz.dot.Graph") or is_type(obj, "graphviz.dot.Digraph") # GraphViz >= 0.18 or is_type(obj, "graphviz.graphs.Graph") or is_type(obj, "graphviz.graphs.Digraph") )
True if input is from a type that lives in plotly.graph_objs.
def _is_plotly_obj(obj: object) -> bool:
    """True if input is from a type that lives in plotly.graph_objs."""
    the_type = type(obj)
    return the_type.__module__.startswith("plotly.graph_objs")
Return True if x is a function.
def is_function(x: object) -> TypeGuard[types.FunctionType]: """Return True if x is a function.""" return isinstance(x, types.FunctionType)
True if input looks like a pydeck chart.
def is_pydeck(obj: object) -> TypeGuard[Deck]: """True if input looks like a pydeck chart.""" return is_type(obj, "pydeck.bindings.deck.Deck")
True if input looks like a sequence.
def is_sequence(seq: Any) -> bool: """True if input looks like a sequence.""" if isinstance(seq, str): return False try: len(seq) except Exception: return False return True
Try to convert different formats to a Pandas Dataframe. Parameters ---------- data : ndarray, Iterable, dict, DataFrame, Styler, pa.Table, None, dict, list, or any max_unevaluated_rows: int If unevaluated data is detected this func will evaluate it, taking max_unevaluated_rows, defaults to 10k and 100 for st.table ensure_copy: bool If True, make sure to always return a copy of the data. If False, it depends on the type of the data. For example, a Pandas DataFrame will be returned as-is. allow_styler: bool If True, allows this to return a Pandas Styler object as well. If False, returns a plain Pandas DataFrame (which, of course, won't contain the Styler's styles). Returns ------- pandas.DataFrame or pandas.Styler
def convert_anything_to_df( data: Any, max_unevaluated_rows: int = MAX_UNEVALUATED_DF_ROWS, ensure_copy: bool = False, allow_styler: bool = False, ) -> DataFrame | Styler: """Try to convert different formats to a Pandas Dataframe. Parameters ---------- data : ndarray, Iterable, dict, DataFrame, Styler, pa.Table, None, dict, list, or any max_unevaluated_rows: int If unevaluated data is detected this func will evaluate it, taking max_unevaluated_rows, defaults to 10k and 100 for st.table ensure_copy: bool If True, make sure to always return a copy of the data. If False, it depends on the type of the data. For example, a Pandas DataFrame will be returned as-is. allow_styler: bool If True, allows this to return a Pandas Styler object as well. If False, returns a plain Pandas DataFrame (which, of course, won't contain the Styler's styles). Returns ------- pandas.DataFrame or pandas.Styler """ import pandas as pd if is_type(data, _PANDAS_DF_TYPE_STR): return data.copy() if ensure_copy else cast(pd.DataFrame, data) if is_pandas_styler(data): # Every Styler is a StyleRenderer. I'm casting to StyleRenderer here rather than to the more # correct Styler becayse MyPy doesn't like when we cast to Styler. It complains .data # doesn't exist, when it does in fact exist in the parent class StyleRenderer! sr = cast("StyleRenderer", data) if allow_styler: if ensure_copy: out = copy.deepcopy(sr) out.data = sr.data.copy() return cast("Styler", out) else: return data else: return cast("Styler", sr.data.copy() if ensure_copy else sr.data) if is_type(data, "numpy.ndarray"): if len(data.shape) == 0: return pd.DataFrame([]) return pd.DataFrame(data) if is_modin_data_object(data): data = data.head(max_unevaluated_rows)._to_pandas() if isinstance(data, pd.Series): data = data.to_frame() if data.shape[0] == max_unevaluated_rows: st.caption( f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} rows. " "Call `_to_pandas()` on the dataframe to show more." ) return cast(pd.DataFrame, data) if is_pyspark_data_object(data): data = data.limit(max_unevaluated_rows).toPandas() if data.shape[0] == max_unevaluated_rows: st.caption( f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} rows. " "Call `toPandas()` on the dataframe to show more." ) return cast(pd.DataFrame, data) if is_snowpark_data_object(data): data = data.limit(max_unevaluated_rows).to_pandas() if data.shape[0] == max_unevaluated_rows: st.caption( f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} rows. " "Call `to_pandas()` on the dataframe to show more." ) return cast(pd.DataFrame, data) if is_snowpandas_data_object(data): data = data.head(max_unevaluated_rows).to_pandas() if isinstance(data, pd.Series): data = data.to_frame() if data.shape[0] == max_unevaluated_rows: st.caption( f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} rows. " "Call `to_pandas()` on the dataframe to show more." ) return cast(pd.DataFrame, data) # This is inefficient when data is a pyarrow.Table as it will be converted # back to Arrow when marshalled to protobuf, but area/bar/line charts need # DataFrame magic to generate the correct output. if hasattr(data, "to_pandas"): return cast(pd.DataFrame, data.to_pandas()) # Try to convert to pandas.DataFrame. This will raise an error is df is not # compatible with the pandas.DataFrame constructor. 
try: return pd.DataFrame(data) except ValueError as ex: if isinstance(data, dict): with contextlib.suppress(ValueError): # Try to use index orient as back-up to support key-value dicts return pd.DataFrame.from_dict(data, orient="index") raise errors.StreamlitAPIException( f""" Unable to convert object of type `{type(data)}` to `pandas.DataFrame`. Offending object: ```py {data} ```""" ) from ex
Try to convert different formats to something iterable. Most inputs are assumed to be iterable, but if we have a DataFrame, we can just select the first column to iterate over. If the input is not iterable, a TypeError is raised. Parameters ---------- obj : list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame or snowflake.snowpark.table.Table Returns ------- iterable
def ensure_iterable(obj: OptionSequence[V_co] | Iterable[V_co]) -> Iterable[Any]: """Try to convert different formats to something iterable. Most inputs are assumed to be iterable, but if we have a DataFrame, we can just select the first column to iterate over. If the input is not iterable, a TypeError is raised. Parameters ---------- obj : list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame, pyspark.sql.DataFrame, snowflake.snowpark.dataframe.DataFrame or snowflake.snowpark.table.Table Returns ------- iterable """ if is_unevaluated_data_object(obj): obj = convert_anything_to_df(obj) if is_dataframe(obj): # Return first column as a pd.Series # The type of the elements in this column is not known up front, hence # the Iterable[Any] return type. return cast(Iterable[Any], obj.iloc[:, 0]) if is_iterable(obj): return obj raise TypeError( f"Object is not an iterable and could not be converted to one. Object: {obj}" )
Try to ensure a value is an indexable Sequence. If the collection already is one, it has the index method that we need. Otherwise, convert it to a list.
def ensure_indexable(obj: OptionSequence[V_co]) -> Sequence[V_co]: """Try to ensure a value is an indexable Sequence. If the collection already is one, it has the index method that we need. Otherwise, convert it to a list. """ it = ensure_iterable(obj) # This is an imperfect check because there is no guarantee that an `index` # function actually does the thing we want. index_fn = getattr(it, "index", None) if callable(index_fn): # We return a shallow copy of the Sequence here because the return value of # this function is saved in a widget serde class instance to be used in later # script runs, and we don't want mutations to the options object passed to a # widget affect the widget. # (See https://github.com/streamlit/streamlit/issues/7534) return copy.copy(cast(Sequence[V_co], it)) else: return list(it)
Check if the sequence elements support "python comparison". That means that the equality operator (==) returns a boolean value, which is not the case for e.g. numpy arrays and pandas series.
def check_python_comparable(seq: Sequence[Any]) -> None:
    """Check if the sequence elements support "python comparison".

    That means that the equality operator (==) returns a boolean value, which
    is not the case for e.g. numpy arrays and pandas series.
    """
    try:
        bool(seq[0] == seq[0])
    except LookupError:
        # In case of empty sequences, the check should not raise an exception.
        pass
    except ValueError:
        raise StreamlitAPIException(
            "Invalid option type provided. Options must be comparable, returning a "
            f"boolean when used with *==*. \n\nGot **{type(seq[0]).__name__}**, "
            "which cannot be compared. Refactor your code to use elements of "
            "comparable types as options, e.g. use indices instead."
        )
Return True if the current Pandas version is less than the input version. Parameters ---------- v : str Version string, e.g. "0.25.0" Returns ------- bool
def is_pandas_version_less_than(v: str) -> bool: """Return True if the current Pandas version is less than the input version. Parameters ---------- v : str Version string, e.g. "0.25.0" Returns ------- bool """ import pandas as pd from packaging import version return version.parse(pd.__version__) < version.parse(v)
Return True if the current Pyarrow version is less than the input version. Parameters ---------- v : str Version string, e.g. "0.25.0" Returns ------- bool
def is_pyarrow_version_less_than(v: str) -> bool: """Return True if the current Pyarrow version is less than the input version. Parameters ---------- v : str Version string, e.g. "0.25.0" Returns ------- bool """ import pyarrow as pa from packaging import version return version.parse(pa.__version__) < version.parse(v)
Experimental feature to automatically truncate tables that are larger than the maximum allowed message size. It needs to be enabled via the server.enableArrowTruncation config option. Parameters ---------- table : pyarrow.Table A table to truncate. truncated_rows : int or None The number of rows that have been truncated so far. This is used by the recursion logic to keep track of the total number of truncated rows.
def _maybe_truncate_table( table: pa.Table, truncated_rows: int | None = None ) -> pa.Table: """Experimental feature to automatically truncate tables that are larger than the maximum allowed message size. It needs to be enabled via the server.enableArrowTruncation config option. Parameters ---------- table : pyarrow.Table A table to truncate. truncated_rows : int or None The number of rows that have been truncated so far. This is used by the recursion logic to keep track of the total number of truncated rows. """ if config.get_option("server.enableArrowTruncation"): # This is an optimization problem: We don't know at what row # the perfect cut-off is to comply with the max size. But we want to figure # it out in as few iterations as possible. We almost always will cut out # more than required to keep the iterations low. # The maximum size allowed for protobuf messages in bytes: max_message_size = int(config.get_option("server.maxMessageSize") * 1e6) # We add 1 MB for other overhead related to the protobuf message. # This is a very conservative estimate, but it should be good enough. table_size = int(table.nbytes + 1 * 1e6) table_rows = table.num_rows if table_rows > 1 and table_size > max_message_size: # targeted rows == the number of rows the table should be truncated to. # Calculate an approximation of how many rows we need to truncate to. targeted_rows = math.ceil(table_rows * (max_message_size / table_size)) # Make sure to cut out at least a couple of rows to avoid running # this logic too often since it is quite inefficient and could lead # to infinity recursions without these precautions. targeted_rows = math.floor( max( min( # Cut out: # an additional 5% of the estimated num rows to cut out: targeted_rows - math.floor((table_rows - targeted_rows) * 0.05), # at least 1% of table size: table_rows - (table_rows * 0.01), # at least 5 rows: table_rows - 5, ), 1, # but it should always have at least 1 row ) ) sliced_table = table.slice(0, targeted_rows) return _maybe_truncate_table( sliced_table, (truncated_rows or 0) + (table_rows - targeted_rows) ) if truncated_rows: displayed_rows = string_util.simplify_number(table.num_rows) total_rows = string_util.simplify_number(table.num_rows + truncated_rows) if displayed_rows == total_rows: # If the simplified numbers are the same, # we just display the exact numbers. displayed_rows = str(table.num_rows) total_rows = str(table.num_rows + truncated_rows) st.caption( f"⚠️ Showing {displayed_rows} out of {total_rows} " "rows due to data size limitations." ) return table
Serialize pyarrow.Table to bytes using Apache Arrow. Parameters ---------- table : pyarrow.Table A table to convert.
def pyarrow_table_to_bytes(table: pa.Table) -> bytes: """Serialize pyarrow.Table to bytes using Apache Arrow. Parameters ---------- table : pyarrow.Table A table to convert. """ try: table = _maybe_truncate_table(table) except RecursionError as err: # This is a very unlikely edge case, but we want to make sure that # it doesn't lead to unexpected behavior. # If there is a recursion error, we just return the table as-is # which will lead to the normal message limit exceed error. _LOGGER.warning( "Recursion error while truncating Arrow table. This is not " "supposed to happen.", exc_info=err, ) import pyarrow as pa # Convert table to bytes sink = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(sink, table.schema) writer.write_table(table) writer.close() return cast(bytes, sink.getvalue().to_pybytes())
Return True if the column type is known to cause issues during Arrow conversion.
def is_colum_type_arrow_incompatible(column: Series[Any] | Index) -> bool: """Return True if the column type is known to cause issues during Arrow conversion.""" from pandas.api.types import infer_dtype, is_dict_like, is_list_like if column.dtype.kind in [ "c", # complex64, complex128, complex256 ]: return True if str(column.dtype) in { # These period types are not yet supported by our frontend impl. # See comments in Quiver.ts for more details. "period[B]", "period[N]", "period[ns]", "period[U]", "period[us]", }: return True if column.dtype == "object": # The dtype of mixed type columns is always object, the actual type of the column # values can be determined via the infer_dtype function: # https://pandas.pydata.org/docs/reference/api/pandas.api.types.infer_dtype.html inferred_type = infer_dtype(column, skipna=True) if inferred_type in [ "mixed-integer", "complex", ]: return True elif inferred_type == "mixed": # This includes most of the more complex/custom types (objects, dicts, lists, ...) if len(column) == 0 or not hasattr(column, "iloc"): # The column seems to be invalid, so we assume it is incompatible. # But this would most likely never happen since empty columns # cannot be mixed. return True # Get the first value to check if it is a supported list-like type. first_value = column.iloc[0] if ( not is_list_like(first_value) # dicts are list-like, but have issues in Arrow JS (see comments in Quiver.ts) or is_dict_like(first_value) # Frozensets are list-like, but are not compatible with pyarrow. or isinstance(first_value, frozenset) ): # This seems to be an incompatible list-like type return True return False # We did not detect an incompatible type, so we assume it is compatible: return False
Fix column types that are not supported by Arrow table. This includes mixed types (e.g. mix of integers and strings) as well as complex numbers (complex128 type). These types will cause errors during conversion of the dataframe to an Arrow table. It is fixed by converting all values of the column to strings. This is sufficient for displaying the data on the frontend. Parameters ---------- df : pandas.DataFrame A dataframe to fix. selected_columns: List[str] or None A list of columns to fix. If None, all columns are evaluated. Returns ------- The fixed dataframe.
def fix_arrow_incompatible_column_types( df: DataFrame, selected_columns: list[str] | None = None ) -> DataFrame: """Fix column types that are not supported by Arrow table. This includes mixed types (e.g. mix of integers and strings) as well as complex numbers (complex128 type). These types will cause errors during conversion of the dataframe to an Arrow table. It is fixed by converting all values of the column to strings This is sufficient for displaying the data on the frontend. Parameters ---------- df : pandas.DataFrame A dataframe to fix. selected_columns: List[str] or None A list of columns to fix. If None, all columns are evaluated. Returns ------- The fixed dataframe. """ import pandas as pd # Make a copy, but only initialize if necessary to preserve memory. df_copy: DataFrame | None = None for col in selected_columns or df.columns: if is_colum_type_arrow_incompatible(df[col]): if df_copy is None: df_copy = df.copy() df_copy[col] = df[col].astype("string") # The index can also contain mixed types # causing Arrow issues during conversion. # Skipping multi-indices since they won't return # the correct value from infer_dtype if not selected_columns and ( not isinstance( df.index, pd.MultiIndex, ) and is_colum_type_arrow_incompatible(df.index) ): if df_copy is None: df_copy = df.copy() df_copy.index = df.index.astype("string") return df_copy if df_copy is not None else df
Serialize pandas.DataFrame to bytes using Apache Arrow. Parameters ---------- df : pandas.DataFrame A dataframe to convert.
def data_frame_to_bytes(df: DataFrame) -> bytes: """Serialize pandas.DataFrame to bytes using Apache Arrow. Parameters ---------- df : pandas.DataFrame A dataframe to convert. """ import pyarrow as pa try: table = pa.Table.from_pandas(df) except (pa.ArrowTypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError) as ex: _LOGGER.info( "Serialization of dataframe to Arrow table was unsuccessful due to: %s. " "Applying automatic fixes for column types to make the dataframe Arrow-compatible.", ex, ) df = fix_arrow_incompatible_column_types(df) table = pa.Table.from_pandas(df) return pyarrow_table_to_bytes(table)
Convert bytes to pandas.DataFrame. When using this function in production, make sure that the pyarrow version is >= 14.0.1. Parameters ---------- source : bytes A bytes object to convert.
def bytes_to_data_frame(source: bytes) -> DataFrame:
    """Convert bytes to pandas.DataFrame.

    When using this function in production, make sure that the pyarrow version
    is >= 14.0.1.

    Parameters
    ----------
    source : bytes
        A bytes object to convert.
    """
    import pyarrow as pa

    reader = pa.RecordBatchStreamReader(source)
    return reader.read_pandas()
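A hedged round-trip sketch using `data_frame_to_bytes` from above; note that serialization consults Streamlit's `server.*` config options, so this assumes a configured Streamlit environment:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# Serialize with Arrow IPC and read it back.
payload = data_frame_to_bytes(df)
restored = bytes_to_data_frame(payload)
assert restored.equals(df)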
Determine the data format of the input data. Parameters ---------- input_data : Any The input data to determine the data format of. Returns ------- DataFormat The data format of the input data.
def determine_data_format(input_data: Any) -> DataFormat: """Determine the data format of the input data. Parameters ---------- input_data : Any The input data to determine the data format of. Returns ------- DataFormat The data format of the input data. """ import numpy as np import pandas as pd import pyarrow as pa if input_data is None: return DataFormat.EMPTY elif isinstance(input_data, pd.DataFrame): return DataFormat.PANDAS_DATAFRAME elif isinstance(input_data, np.ndarray): if len(input_data.shape) == 1: # For technical reasons, we need to distinguish one # one-dimensional numpy array from multidimensional ones. return DataFormat.NUMPY_LIST return DataFormat.NUMPY_MATRIX elif isinstance(input_data, pa.Table): return DataFormat.PYARROW_TABLE elif isinstance(input_data, pd.Series): return DataFormat.PANDAS_SERIES elif isinstance(input_data, pd.Index): return DataFormat.PANDAS_INDEX elif is_pandas_styler(input_data): return DataFormat.PANDAS_STYLER elif is_snowpark_data_object(input_data): return DataFormat.SNOWPARK_OBJECT elif is_modin_data_object(input_data): return DataFormat.MODIN_OBJECT elif is_snowpandas_data_object(input_data): return DataFormat.SNOWPANDAS_OBJECT elif is_pyspark_data_object(input_data): return DataFormat.PYSPARK_OBJECT elif isinstance(input_data, (list, tuple, set)): if is_list_of_scalars(input_data): # -> one-dimensional data structure if isinstance(input_data, tuple): return DataFormat.TUPLE_OF_VALUES if isinstance(input_data, set): return DataFormat.SET_OF_VALUES return DataFormat.LIST_OF_VALUES else: # -> Multi-dimensional data structure # This should always contain at least one element, # otherwise the values type from infer_dtype would have been empty first_element = next(iter(input_data)) if isinstance(first_element, dict): return DataFormat.LIST_OF_RECORDS if isinstance(first_element, (list, tuple, set)): return DataFormat.LIST_OF_ROWS elif isinstance(input_data, dict): if not input_data: return DataFormat.KEY_VALUE_DICT if len(input_data) > 0: first_value = next(iter(input_data.values())) if isinstance(first_value, dict): return DataFormat.COLUMN_INDEX_MAPPING if isinstance(first_value, (list, tuple)): return DataFormat.COLUMN_VALUE_MAPPING if isinstance(first_value, pd.Series): return DataFormat.COLUMN_SERIES_MAPPING # In the future, we could potentially also support the tight & split formats here if is_list_of_scalars(input_data.values()): # Only use the key-value dict format if the values are only scalar values return DataFormat.KEY_VALUE_DICT return DataFormat.UNKNOWN
Unify all missing values in a DataFrame to None. Pandas uses a variety of values to represent missing values, including np.nan, NaT, None, and pd.NA. This function replaces all of these values with None, which is the only missing value type that is supported by all data formats.
def _unify_missing_values(df: DataFrame) -> DataFrame:
    """Unify all missing values in a DataFrame to None.

    Pandas uses a variety of values to represent missing values, including
    np.nan, NaT, None, and pd.NA. This function replaces all of these values
    with None, which is the only missing value type that is supported by all
    data formats.
    """
    import numpy as np

    return df.fillna(np.nan).replace([np.nan], [None])
Convert a dataframe to the specified data format. Parameters ---------- df : pd.DataFrame The dataframe to convert. data_format : DataFormat The data format to convert to. Returns ------- pd.DataFrame, pd.Series, pyarrow.Table, np.ndarray, list, set, tuple, or dict. The converted dataframe.
def convert_df_to_data_format( df: DataFrame, data_format: DataFormat ) -> ( DataFrame | Series[Any] | pa.Table | np.ndarray[Any, np.dtype[Any]] | tuple[Any] | list[Any] | set[Any] | dict[str, Any] ): """Convert a dataframe to the specified data format. Parameters ---------- df : pd.DataFrame The dataframe to convert. data_format : DataFormat The data format to convert to. Returns ------- pd.DataFrame, pd.Series, pyarrow.Table, np.ndarray, list, set, tuple, or dict. The converted dataframe. """ if data_format in [ DataFormat.EMPTY, DataFormat.PANDAS_DATAFRAME, DataFormat.SNOWPARK_OBJECT, DataFormat.PYSPARK_OBJECT, DataFormat.PANDAS_INDEX, DataFormat.PANDAS_STYLER, DataFormat.MODIN_OBJECT, DataFormat.SNOWPANDAS_OBJECT, ]: return df elif data_format == DataFormat.NUMPY_LIST: import numpy as np # It's a 1-dimensional array, so we only return # the first column as numpy array # Calling to_numpy() on the full DataFrame would result in: # [[1], [2]] instead of [1, 2] return np.ndarray(0) if df.empty else df.iloc[:, 0].to_numpy() elif data_format == DataFormat.NUMPY_MATRIX: import numpy as np return np.ndarray(0) if df.empty else df.to_numpy() elif data_format == DataFormat.PYARROW_TABLE: import pyarrow as pa return pa.Table.from_pandas(df) elif data_format == DataFormat.PANDAS_SERIES: # Select first column in dataframe and create a new series based on the values if len(df.columns) != 1: raise ValueError( f"DataFrame is expected to have a single column but has {len(df.columns)}." ) return df[df.columns[0]] elif data_format == DataFormat.LIST_OF_RECORDS: return _unify_missing_values(df).to_dict(orient="records") elif data_format == DataFormat.LIST_OF_ROWS: # to_numpy converts the dataframe to a list of rows return _unify_missing_values(df).to_numpy().tolist() elif data_format == DataFormat.COLUMN_INDEX_MAPPING: return _unify_missing_values(df).to_dict(orient="dict") elif data_format == DataFormat.COLUMN_VALUE_MAPPING: return _unify_missing_values(df).to_dict(orient="list") elif data_format == DataFormat.COLUMN_SERIES_MAPPING: return df.to_dict(orient="series") elif data_format in [ DataFormat.LIST_OF_VALUES, DataFormat.TUPLE_OF_VALUES, DataFormat.SET_OF_VALUES, ]: df = _unify_missing_values(df) return_list = [] if len(df.columns) == 1: # Get the first column and convert to list return_list = df[df.columns[0]].tolist() elif len(df.columns) >= 1: raise ValueError( f"DataFrame is expected to have a single column but has {len(df.columns)}." ) if data_format == DataFormat.TUPLE_OF_VALUES: return tuple(return_list) if data_format == DataFormat.SET_OF_VALUES: return set(return_list) return return_list elif data_format == DataFormat.KEY_VALUE_DICT: df = _unify_missing_values(df) # The key is expected to be the index -> this will return the first column # as a dict with index as key. return dict() if df.empty else df.iloc[:, 0].to_dict() raise ValueError(f"Unsupported input data format: {data_format}")
Convert a tuple to a list. Leave as is if it's not a tuple.
def maybe_tuple_to_list(item: Any) -> Any: """Convert a tuple to a list. Leave as is if it's not a tuple.""" return list(item) if isinstance(item, tuple) else item
From an array-like input, infer the correct vega typecode ('ordinal', 'nominal', 'quantitative', or 'temporal') Parameters ---------- data: Numpy array or Pandas Series
def infer_vegalite_type( data: Series[Any], ) -> VegaLiteType: """ From an array-like input, infer the correct vega typecode ('ordinal', 'nominal', 'quantitative', or 'temporal') Parameters ---------- data: Numpy array or Pandas Series """ from pandas.api.types import infer_dtype # STREAMLIT MOD: I'm using infer_dtype directly here, rather than using Altair's wrapper. Their # wrapper is only there to support Pandas < 0.20, but Streamlit requires Pandas 1.3. typ = infer_dtype(data) if typ in [ "floating", "mixed-integer-float", "integer", "mixed-integer", "complex", ]: return "quantitative" elif typ == "categorical" and data.cat.ordered: # STREAMLIT MOD: The original code returns a tuple here: # return ("ordinal", data.cat.categories.tolist()) # But returning the tuple here isn't compatible with our # built-in chart implementation. And it also doesn't seem to be necessary. # Altair already extracts the correct sort order somewhere else. # More info about the issue here: https://github.com/streamlit/streamlit/issues/7776 return "ordinal" elif typ in ["string", "bytes", "categorical", "boolean", "mixed", "unicode"]: return "nominal" elif typ in [ "datetime", "datetime64", "timedelta", "timedelta64", "date", "time", "period", ]: return "temporal" else: # STREAMLIT MOD: I commented this out since Streamlit doesn't have a warnings object. # warnings.warn( # "I don't know how to infer vegalite type from '{}'. " # "Defaulting to nominal.".format(typ), # stacklevel=1, # ) return "nominal"
Attempt to coerce an Enum value to another EnumMeta. An Enum value of EnumMeta E1 is considered coercible to EnumType E2 if the EnumMeta __qualname__ values match and the names of their members match as well. (This is configurable in Streamlit config.)
def coerce_enum(from_enum_value: E1, to_enum_class: type[E2]) -> E1 | E2: """Attempt to coerce an Enum value to another EnumMeta. An Enum value of EnumMeta E1 is considered coercable to EnumType E2 if the EnumMeta __qualname__ match and the names of their members match as well. (This is configurable in streamlist configs) """ if not isinstance(from_enum_value, Enum): raise ValueError( f"Expected an Enum in the first argument. Got {type(from_enum_value)}" ) if not isinstance(to_enum_class, EnumMeta): raise ValueError( f"Expected an EnumMeta/Type in the second argument. Got {type(to_enum_class)}" ) if isinstance(from_enum_value, to_enum_class): return from_enum_value # Enum is already a member, no coersion necessary coercion_type = config.get_option("runner.enumCoercion") if coercion_type not in ALLOWED_ENUM_COERCION_CONFIG_SETTINGS: raise errors.StreamlitAPIException( "Invalid value for config option runner.enumCoercion. " f"Expected one of {ALLOWED_ENUM_COERCION_CONFIG_SETTINGS}, " f"but got '{coercion_type}'." ) if coercion_type == "off": return from_enum_value # do not attempt to coerce # We now know this is an Enum AND the user has configured coercion enabled. # Check if we do NOT meet the required conditions and log a failure message # if that is the case. from_enum_class = from_enum_value.__class__ if ( from_enum_class.__qualname__ != to_enum_class.__qualname__ or ( coercion_type == "nameOnly" and set(to_enum_class._member_names_) != set(from_enum_class._member_names_) ) or ( coercion_type == "nameAndValue" and set(to_enum_class._value2member_map_) != set(from_enum_class._value2member_map_) ) ): _LOGGER.debug("Failed to coerce %s to class %s", from_enum_value, to_enum_class) return from_enum_value # do not attempt to coerce # At this point we think the Enum is coercable, and we know # E1 and E2 have the same member names. We convert from E1 to E2 using _name_ # (since user Enum subclasses can override the .name property in 3.11) _LOGGER.debug("Coerced %s to class %s", from_enum_value, to_enum_class) return to_enum_class[from_enum_value._name_]
Check url to see if it describes a GitHub Gist "blob" URL. If so, returns a new URL to get the "raw" script. If not, returns URL unchanged.
def process_gitblob_url(url: str) -> str: """Check url to see if it describes a GitHub Gist "blob" URL. If so, returns a new URL to get the "raw" script. If not, returns URL unchanged. """ # Matches github.com and gist.github.com. Will not match githubusercontent.com. # See this regex with explainer and sample text here: https://regexr.com/4odk3 match = _GITBLOB_RE.match(url) if match: mdict = match.groupdict() # If it has "blob" in the url, replace this with "raw" and we're done. if mdict["blob_or_raw"] == "blob": return "{base}{account}raw{suffix}".format(**mdict) # If it is a "raw" url already, return untouched. if mdict["blob_or_raw"] == "raw": return url # It's a gist. Just tack "raw" on the end. return url + "/raw" return url
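A hedged sketch of the transformation, assuming _GITBLOB_RE captures the base/account/blob_or_raw/suffix groups that the format string above implies:

blob_url = "https://github.com/streamlit/demo-self-driving/blob/master/streamlit_app.py"
print(process_gitblob_url(blob_url))
# expected (roughly): https://github.com/streamlit/demo-self-driving/raw/master/streamlit_app.py

gist_url = "https://gist.github.com/someuser/0123456789abcdef"
print(process_gitblob_url(gist_url))
# expected (roughly): the same URL with "/raw" appended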
Return the hostname of a URL (with or without protocol).
def get_hostname(url: str) -> str | None: """Return the hostname of a URL (with or without protocol).""" # Just so urllib can parse the URL, make sure there's a protocol. # (The actual protocol doesn't matter to us) if "://" not in url: url = f"http://{url}" parsed = urlparse(url) return parsed.hostname
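For instance, a small illustrative sketch of the "with or without protocol" behavior:

print(get_hostname("localhost:8501"))                      # "localhost"
print(get_hostname("https://docs.streamlit.io/library"))   # "docs.streamlit.io"
print(get_hostname("192.168.0.1:80"))                       # "192.168.0.1"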
Check if a string looks like a URL.

This doesn't check if the URL is actually valid or reachable.

Parameters
----------
url : str
    The URL to check.

allowed_schemas : Tuple[str]
    The allowed URL schemas. Default is ("http", "https").
def is_url(
    url: str,
    allowed_schemas: tuple[UrlSchema, ...] = ("http", "https"),
) -> bool:
    """Check if a string looks like a URL.

    This doesn't check if the URL is actually valid or reachable.

    Parameters
    ----------
    url : str
        The URL to check.

    allowed_schemas : Tuple[str]
        The allowed URL schemas. Default is ("http", "https").
    """
    try:
        result = urlparse(str(url))
        if result.scheme not in allowed_schemas:
            return False

        if result.scheme in ["http", "https"]:
            return bool(result.netloc)
        elif result.scheme in ["mailto", "data"]:
            return bool(result.path)
    except ValueError:
        return False
    return False
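A short sketch of the behavior (assuming is_url is in scope):

print(is_url("https://streamlit.io"))                                   # True
print(is_url("streamlit.io"))                                           # False: no scheme
print(is_url("ftp://example.com", allowed_schemas=("http", "https")))   # False: scheme not allowed
print(is_url("mailto:[email protected]", allowed_schemas=("mailto",)))    # True: non-empty path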
Decorator to memoize the result of a no-args func.
def memoize(func: Callable[..., Any]) -> Callable[..., Any]: """Decorator to memoize the result of a no-args func.""" result: list[Any] = [] @functools.wraps(func) def wrapped_func(): if not result: result.append(func()) return result[0] return wrapped_func
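A tiny usage sketch; the wrapped function must take no arguments:

@memoize
def load_defaults():
    print("computing defaults...")  # runs only on the first call
    return {"theme": "dark"}

load_defaults()  # prints "computing defaults..." and caches the dict
load_defaults()  # returns the cached dict; nothing is printed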
Open a web browser pointing to a given URL.

We use this function instead of Python's `webbrowser` module because this way we can capture stdout/stderr to avoid polluting the terminal with the browser's messages. For example, Chrome always prints things like "Created new window in existing browser session", and those get in the user's way.

url : str
    The URL. Must include the protocol.
def open_browser(url: str) -> None:
    """Open a web browser pointing to a given URL.

    We use this function instead of Python's `webbrowser` module because this
    way we can capture stdout/stderr to avoid polluting the terminal with the
    browser's messages. For example, Chrome always prints things like "Created
    new window in existing browser session", and those get in the user's way.

    url : str
        The URL. Must include the protocol.
    """
    # Treat Windows separately because:
    # 1. /dev/null doesn't exist.
    # 2. subprocess.Popen(['start', url]) doesn't actually pop up the
    #    browser even though 'start url' works from the command prompt.
    #    Fun!
    # Also, use webbrowser if we are on Linux and xdg-open is not installed.
    #
    # We don't use the webbrowser module on Linux and Mac because some browsers
    # (ahem... Chrome) always print "Opening in existing browser session" to
    # the terminal, which is spammy and annoying. So instead we start the
    # browser ourselves and send all its output to /dev/null.

    if env_util.IS_WINDOWS:
        _open_browser_with_webbrowser(url)
        return
    if env_util.IS_LINUX_OR_BSD:
        if env_util.is_executable_in_path("xdg-open"):
            _open_browser_with_command("xdg-open", url)
            return
        _open_browser_with_webbrowser(url)
        return
    if env_util.IS_DARWIN:
        _open_browser_with_command("open", url)
        return

    import platform

    raise Error('Cannot open browser in platform "%s"' % platform.system())
A clean repr for a class, excluding both values that are likely defaults, and those explicitly default for dataclasses.
def repr_(self: Any) -> str:
    """A clean repr for a class, excluding both values that are likely defaults,
    and those explicitly default for dataclasses.
    """
    classname = self.__class__.__name__
    # Most of the falsy values, but excluding 0 and 0.0, since those often have
    # semantic meaning within streamlit.
    defaults: list[Any] = [None, "", False, [], set(), dict()]
    if dataclasses.is_dataclass(self):
        fields_vals = (
            (f.name, getattr(self, f.name))
            for f in dataclasses.fields(self)
            if f.repr
            and getattr(self, f.name) != f.default
            and getattr(self, f.name) not in defaults
        )
    else:
        fields_vals = ((f, v) for (f, v) in self.__dict__.items() if v not in defaults)

    field_reprs = ", ".join(f"{field}={value!r}" for field, value in fields_vals)
    return f"{classname}({field_reprs})"
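A sketch of how repr_ is typically wired in, by assigning it as __repr__ on a dataclass (this assumes repr_ is in scope). Fields that sit at their defaults or hold empty/None values drop out of the output:

import dataclasses

@dataclasses.dataclass
class Point:
    x: int = 0
    y: int = 0
    label: str = ""

    __repr__ = repr_  # dataclass keeps this because __repr__ is already defined

print(Point(x=3))             # Point(x=3)   -- y and label are omitted
print(Point(x=3, label="a"))  # Point(x=3, label='a')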
Return zero-based index of the first item whose value is equal to x. Raises a ValueError if there is no such item. We need a custom implementation instead of the built-in list .index() to be compatible with NumPy array and Pandas Series. Parameters ---------- iterable : list, tuple, numpy.ndarray, pandas.Series x : Any Returns ------- int
def index_(iterable: Iterable[_Value], x: _Value) -> int: """Return zero-based index of the first item whose value is equal to x. Raises a ValueError if there is no such item. We need a custom implementation instead of the built-in list .index() to be compatible with NumPy array and Pandas Series. Parameters ---------- iterable : list, tuple, numpy.ndarray, pandas.Series x : Any Returns ------- int """ for i, value in enumerate(iterable): if x == value: return i elif isinstance(value, float) and isinstance(x, float): if abs(x - value) < FLOAT_EQUALITY_EPSILON: return i raise ValueError(f"{str(x)} is not in iterable")
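A sketch showing exact matching alongside the float-tolerant path (assuming FLOAT_EQUALITY_EPSILON is large enough to absorb typical rounding error, e.g. on the order of 1e-12):

import numpy as np

print(index_(["a", "b", "c"], "b"))        # 1
print(index_(np.array([10, 20, 30]), 30))  # 2
print(index_([0.1, 0.2, 0.3], 0.1 + 0.2))  # 2 -- exact equality fails, epsilon check matches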
Return the md5 hash of the given string.
def calc_md5(s: bytes | str) -> str: """Return the md5 hash of the given string.""" h = hashlib.new("md5", **HASHLIB_KWARGS) b = s.encode("utf-8") if isinstance(s, str) else s h.update(b) return h.hexdigest()
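A small sketch; str and bytes inputs hash identically because strings are UTF-8 encoded first:

digest = calc_md5("hello world")
assert digest == calc_md5(b"hello world")
assert len(digest) == 32  # hex-encoded md5 digest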
Return a new dict without the keys listed in keys_to_exclude (the dict's keys are matched case-insensitively).
def exclude_keys_in_dict(
    d: dict[str, Any], keys_to_exclude: list[str]
) -> dict[str, Any]:
    """Return a new dict without the keys listed in keys_to_exclude
    (the dict's keys are matched case-insensitively)."""
    return {
        key: value for key, value in d.items() if key.lower() not in keys_to_exclude
    }
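For example (note that the entries in keys_to_exclude are expected to be lowercase, since only the dict's keys are lowercased before comparison):

params = {"Embed": ["true"], "page": ["1"]}
print(exclude_keys_in_dict(params, keys_to_exclude=["embed"]))
# {'page': ['1']}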
Extract the values of the given query param (matching its key case-insensitively) from a query-params dict, and return them as a set of lowercased strings.
def extract_key_query_params(
    query_params: dict[str, list[str]], param_key: str
) -> set[str]:
    """Extract the values of the given query param (matching its key
    case-insensitively) and return them as a set of lowercased strings."""
    return {
        item.lower()
        for sublist in [
            [value.lower() for value in query_params[key]]
            for key in query_params.keys()
            if key.lower() == param_key and query_params.get(key)
        ]
        for item in sublist
    }
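A short sketch of the behavior:

query_params = {"EMBED_OPTIONS": ["show_toolbar", "Show_Padding"], "other": ["x"]}
print(extract_key_query_params(query_params, param_key="embed_options"))
# {'show_toolbar', 'show_padding'}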
Return the installed Streamlit version, parsed from the version string specified in setup.py.

Returns
-------
Version
    The parsed version specified in setup.py.
def _get_installed_streamlit_version() -> Version:
    """Return the installed Streamlit version, parsed from the version string
    specified in setup.py.

    Returns
    -------
    Version
        The parsed version specified in setup.py.
    """
    return _version_str_to_obj(STREAMLIT_VERSION_STRING)
Request the latest Streamlit version from PyPI.

NB: this involves a network call, so it could raise an error or take a long time.

Parameters
----------
timeout : float or None
    The request timeout.

Returns
-------
Version
    The parsed version of the latest Streamlit release on PyPI.
def _get_latest_streamlit_version(timeout: float | None = None) -> Version:
    """Request the latest Streamlit version from PyPI.

    NB: this involves a network call, so it could raise an error
    or take a long time.

    Parameters
    ----------
    timeout : float or None
        The request timeout.

    Returns
    -------
    Version
        The parsed version of the latest Streamlit release on PyPI.
    """
    import requests

    rsp = requests.get(PYPI_STREAMLIT_URL, timeout=timeout)
    try:
        version_str = rsp.json()["info"]["version"]
    except Exception as e:
        raise RuntimeError("Got unexpected response from PyPI", e)

    return _version_str_to_obj(version_str)
True if streamlit should show a 'new version!' notice to the user. We need to make a network call to PyPI to determine the latest streamlit version. Since we don't want to do this every time streamlit is run, we'll only perform the check ~5% of the time. If we do make the request to PyPI and there's any sort of error, we log it and return False. Returns ------- bool True if we should tell the user that their streamlit is out of date.
def should_show_new_version_notice() -> bool: """True if streamlit should show a 'new version!' notice to the user. We need to make a network call to PyPI to determine the latest streamlit version. Since we don't want to do this every time streamlit is run, we'll only perform the check ~5% of the time. If we do make the request to PyPI and there's any sort of error, we log it and return False. Returns ------- bool True if we should tell the user that their streamlit is out of date. """ if random.random() >= CHECK_PYPI_PROBABILITY: # We don't check PyPI every time this function is called. _LOGGER.debug("Skipping PyPI version check") return False try: installed_version = _get_installed_streamlit_version() latest_version = _get_latest_streamlit_version(timeout=1) except Exception as ex: # Log this as a debug. We don't care if the user sees it. _LOGGER.debug("Failed PyPI version check.", exc_info=ex) return False return latest_version > installed_version
Stops execution immediately. Streamlit will not run any statements after `st.stop()`. We recommend rendering a message to explain why the script has stopped. Example ------- >>> import streamlit as st >>> >>> name = st.text_input('Name') >>> if not name: >>> st.warning('Please input a name.') >>> st.stop() >>> st.success('Thank you for inputting a name.')
def stop() -> NoReturn: # type: ignore[misc] """Stops execution immediately. Streamlit will not run any statements after `st.stop()`. We recommend rendering a message to explain why the script has stopped. Example ------- >>> import streamlit as st >>> >>> name = st.text_input('Name') >>> if not name: >>> st.warning('Please input a name.') >>> st.stop() >>> st.success('Thank you for inputting a name.') """ ctx = get_script_run_ctx() if ctx and ctx.script_requests: ctx.script_requests.request_stop() # Force a yield point so the runner can stop st.empty()
Rerun the script immediately. When ``st.rerun()`` is called, the script is halted - no more statements will be run, and the script will be queued to re-run from the top.
def rerun() -> NoReturn: # type: ignore[misc] """Rerun the script immediately. When ``st.rerun()`` is called, the script is halted - no more statements will be run, and the script will be queued to re-run from the top. """ ctx = get_script_run_ctx() if ctx and ctx.script_requests: query_string = ctx.query_string page_script_hash = ctx.page_script_hash ctx.script_requests.request_rerun( RerunData( query_string=query_string, page_script_hash=page_script_hash, ) ) # Force a yield point so the runner can do the rerun st.empty()
Rerun the script immediately. When ``st.experimental_rerun()`` is called, the script is halted - no more statements will be run, and the script will be queued to re-run from the top.
def experimental_rerun() -> NoReturn: """Rerun the script immediately. When ``st.experimental_rerun()`` is called, the script is halted - no more statements will be run, and the script will be queued to re-run from the top. """ msg = make_deprecated_name_warning("experimental_rerun", "rerun", "2024-04-01") # Log warning before the rerun, or else it would be interrupted # by the rerun. We do not send a frontend warning because it wouldn't # be seen. _LOGGER.warning(msg) rerun()
Programmatically switch the current page in a multipage app. When ``st.switch_page`` is called, the current page execution stops and the specified page runs as if the user clicked on it in the sidebar navigation. The specified page must be recognized by Streamlit's multipage architecture (your main Python file or a Python file in a ``pages/`` folder). Arbitrary Python scripts cannot be passed to ``st.switch_page``. Parameters ---------- page: str The file path (relative to the main script) of the page to switch to. Example ------- Consider the following example given this file structure: >>> your-repository/ >>> β”œβ”€β”€ pages/ >>> β”‚ β”œβ”€β”€ page_1.py >>> β”‚ └── page_2.py >>> └── your_app.py >>> import streamlit as st >>> >>> if st.button("Home"): >>> st.switch_page("your_app.py") >>> if st.button("Page 1"): >>> st.switch_page("pages/page_1.py") >>> if st.button("Page 2"): >>> st.switch_page("pages/page_2.py") .. output :: https://doc-switch-page.streamlit.app/ height: 350px
def switch_page(page: str) -> NoReturn: # type: ignore[misc] """Programmatically switch the current page in a multipage app. When ``st.switch_page`` is called, the current page execution stops and the specified page runs as if the user clicked on it in the sidebar navigation. The specified page must be recognized by Streamlit's multipage architecture (your main Python file or a Python file in a ``pages/`` folder). Arbitrary Python scripts cannot be passed to ``st.switch_page``. Parameters ---------- page: str The file path (relative to the main script) of the page to switch to. Example ------- Consider the following example given this file structure: >>> your-repository/ >>> β”œβ”€β”€ pages/ >>> β”‚ β”œβ”€β”€ page_1.py >>> β”‚ └── page_2.py >>> └── your_app.py >>> import streamlit as st >>> >>> if st.button("Home"): >>> st.switch_page("your_app.py") >>> if st.button("Page 1"): >>> st.switch_page("pages/page_1.py") >>> if st.button("Page 2"): >>> st.switch_page("pages/page_2.py") .. output :: https://doc-switch-page.streamlit.app/ height: 350px """ ctx = get_script_run_ctx() if not ctx or not ctx.script_requests: # This should never be the case raise NoSessionContext() main_script_directory = get_main_script_directory(ctx.main_script_path) requested_page = os.path.realpath(normalize_path_join(main_script_directory, page)) all_app_pages = source_util.get_pages(ctx.main_script_path).values() matched_pages = [p for p in all_app_pages if p["script_path"] == requested_page] if len(matched_pages) == 0: raise StreamlitAPIException( f"Could not find page: `{page}`. Must be the file path relative to the main script, from the directory: `{os.path.basename(main_script_directory)}`. Only the main app file and files in the `pages/` directory are supported." ) ctx.script_requests.request_rerun( RerunData( query_string=ctx.query_string, page_script_hash=matched_pages[0]["page_script_hash"], ) ) # Force a yield point so the runner can do the rerun st.empty()
Return the query parameters that are currently shown in the browser's URL bar.

Returns
-------
dict
    The current query parameters as a dict. "Query parameters" are the part of the URL that comes after the first "?".

Example
-------
Let's say the user's web browser is at
`http://localhost:8501/?show_map=True&selected=asia&selected=america`.
Then, you can get the query parameters using the following:

>>> import streamlit as st
>>>
>>> st.experimental_get_query_params()
{"show_map": ["True"], "selected": ["asia", "america"]}

Note that the values in the returned dict are *always* lists. This is
because we internally use Python's urllib.parse.parse_qs(), which behaves
this way. And this behavior makes sense when you consider that every item
in a query string is potentially a 1-element array.
def get_query_params() -> dict[str, list[str]]:
    """Return the query parameters that are currently shown in the browser's URL bar.

    Returns
    -------
    dict
        The current query parameters as a dict. "Query parameters" are the part of
        the URL that comes after the first "?".

    Example
    -------
    Let's say the user's web browser is at
    `http://localhost:8501/?show_map=True&selected=asia&selected=america`.
    Then, you can get the query parameters using the following:

    >>> import streamlit as st
    >>>
    >>> st.experimental_get_query_params()
    {"show_map": ["True"], "selected": ["asia", "america"]}

    Note that the values in the returned dict are *always* lists. This is
    because we internally use Python's urllib.parse.parse_qs(), which behaves
    this way. And this behavior makes sense when you consider that every item
    in a query string is potentially a 1-element array.
    """
    ctx = get_script_run_ctx()
    if ctx is None:
        return {}
    ctx.mark_experimental_query_params_used()
    # Return new query params dict, but without embed, embed_options query params
    return util.exclude_keys_in_dict(
        parse.parse_qs(ctx.query_string, keep_blank_values=True),
        keys_to_exclude=EMBED_QUERY_PARAMS_KEYS,
    )
Set the query parameters that are shown in the browser's URL bar. .. warning:: Query param `embed` cannot be set using this method. Parameters ---------- **query_params : dict The query parameters to set, as key-value pairs. Example ------- To point the user's web browser to something like "http://localhost:8501/?show_map=True&selected=asia&selected=america", you would do the following: >>> import streamlit as st >>> >>> st.experimental_set_query_params( ... show_map=True, ... selected=["asia", "america"], ... )
def set_query_params(**query_params: Any) -> None: """Set the query parameters that are shown in the browser's URL bar. .. warning:: Query param `embed` cannot be set using this method. Parameters ---------- **query_params : dict The query parameters to set, as key-value pairs. Example ------- To point the user's web browser to something like "http://localhost:8501/?show_map=True&selected=asia&selected=america", you would do the following: >>> import streamlit as st >>> >>> st.experimental_set_query_params( ... show_map=True, ... selected=["asia", "america"], ... ) """ ctx = get_script_run_ctx() if ctx is None: return ctx.mark_experimental_query_params_used() msg = ForwardMsg() msg.page_info_changed.query_string = _ensure_no_embed_params( query_params, ctx.query_string ) ctx.query_string = msg.page_info_changed.query_string ctx.enqueue(msg)
Ensure that no embed params are being set (raise a StreamlitAPIException if the caller attempts to set them), and preserve any embed param values already present in query_string.

Returns the new query string (str).
def _ensure_no_embed_params(
    query_params: dict[str, list[str] | str], query_string: str
) -> str:
    """Ensure that no embed params are being set (raise a StreamlitAPIException
    if the caller attempts to set them), and preserve any embed param values
    already present in query_string.

    Returns the new query string (str).
    """
    # Get query params dict without embed, embed_options params
    query_params_without_embed = util.exclude_keys_in_dict(
        query_params, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS
    )
    if query_params != query_params_without_embed:
        raise StreamlitAPIException(
            "Query params embed and embed_options (case-insensitive) cannot be set using the set_query_params method."
        )

    all_current_params = parse.parse_qs(query_string, keep_blank_values=True)
    current_embed_params = parse.urlencode(
        {
            EMBED_QUERY_PARAM: [
                param
                for param in util.extract_key_query_params(
                    all_current_params, param_key=EMBED_QUERY_PARAM
                )
            ],
            EMBED_OPTIONS_QUERY_PARAM: [
                param
                for param in util.extract_key_query_params(
                    all_current_params, param_key=EMBED_OPTIONS_QUERY_PARAM
                )
            ],
        },
        doseq=True,
    )
    query_string = parse.urlencode(query_params, doseq=True)

    if query_string:
        separator = "&" if current_embed_params else ""
        return separator.join([query_string, current_embed_params])

    return current_embed_params
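A hedged sketch of the two behaviors, assuming EMBED_QUERY_PARAM is "embed" and EMBED_QUERY_PARAMS_KEYS contains the embed keys:

# 1. Attempting to set an embed param raises.
try:
    _ensure_no_embed_params({"embed": "true"}, query_string="")
except StreamlitAPIException:
    print("embed params cannot be set via set_query_params")

# 2. Embed params already present in the URL are preserved.
print(_ensure_no_embed_params({"foo": "new"}, query_string="embed=true&foo=old"))
# expected (roughly): "foo=new&embed=true"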
Return the string to pass to the frontend to have it show the given PageIcon. If page_icon is a string that looks like an emoji (or an emoji shortcode), we return it as-is. Otherwise we use `image_to_url` to return a URL. (If `image_to_url` raises an error and page_icon is a string, return the unmodified page_icon string instead of re-raising the error.)
def _get_favicon_string(page_icon: PageIcon) -> str: """Return the string to pass to the frontend to have it show the given PageIcon. If page_icon is a string that looks like an emoji (or an emoji shortcode), we return it as-is. Otherwise we use `image_to_url` to return a URL. (If `image_to_url` raises an error and page_icon is a string, return the unmodified page_icon string instead of re-raising the error.) """ # Choose a random emoji. if page_icon == "random": return get_random_emoji() # If page_icon is an emoji, return it as is. if isinstance(page_icon, str) and is_emoji(page_icon): return page_icon if isinstance(page_icon, str) and page_icon.startswith(":material"): return validate_material_icon(page_icon) # Fall back to image_to_url. try: return image.image_to_url( page_icon, width=-1, # Always use full width for favicons clamp=False, channels="RGB", output_format="auto", image_id="favicon", ) except Exception: if isinstance(page_icon, str): # This fall-thru handles emoji shortcode strings (e.g. ":shark:"), # which aren't valid filenames and so will cause an Exception from # `image_to_url`. return page_icon raise