response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Fetch remote file at url_path to main_script_path
def _download_remote(main_script_path: str, url_path: str) -> None:
    """Fetch remote file at url_path to main_script_path.

    Raises
    ------
    click.BadParameter
        If the URL cannot be fetched or returns an HTTP error status.
    """
    import requests

    try:
        resp = requests.get(url_path)
        resp.raise_for_status()
    except requests.exceptions.RequestException as e:
        # Surface network/HTTP failures as a CLI usage error.
        raise click.BadParameter(f"Unable to fetch {url_path}.\n{e}")

    # Only create/truncate the destination after a successful fetch, so a
    # failed download does not leave an empty or partial file behind (the
    # previous version opened the file before issuing the request).
    with open(main_script_path, "wb") as fp:
        fp.write(resp.content)
Try out a demo with: $ streamlit hello Or use the line below to run your own script: $ streamlit run your_script.py
def main(log_level="info"):
    """Try out a demo with:

        $ streamlit hello

    Or use the line below to run your own script:

        $ streamlit run your_script.py
    """
    # Guard clause: nothing to do unless a log level was passed.
    if not log_level:
        return

    from streamlit.logger import get_logger

    # The flag is accepted for backward compatibility but no longer works.
    get_logger(__name__).warning(
        "Setting the log level using the --log_level flag is unsupported."
        "\nUse the --logger.level flag (after your streamlit command) instead."
    )
Print this help message.
def help():
    """Print this help message."""
    # _get_command_line_as_string performs error checks; its return value is
    # deliberately ignored.
    _get_command_line_as_string()

    assert len(sys.argv) == 2  # Sanity check; always true here.

    # Rewrite argv so click sees 'streamlit --help' instead of
    # 'streamlit help'.
    sys.argv[1] = "--help"
    main(prog_name="streamlit")
Print Streamlit's version number.
def main_version():
    """Print Streamlit's version number."""
    import sys

    # _get_command_line_as_string performs error checks; its return value is
    # deliberately ignored.
    _get_command_line_as_string()

    assert len(sys.argv) == 2  # Sanity check; always true here.

    # Rewrite argv so click sees 'streamlit --version' instead of
    # 'streamlit version'.
    sys.argv[1] = "--version"
    main()
Show help in browser.
def main_docs():
    """Show help in browser."""
    from streamlit import util

    print("Showing help page in browser...")
    util.open_browser("https://docs.streamlit.io")
Runs the Hello World script.
def main_hello(**kwargs):
    """Runs the Hello World script."""
    from streamlit.hello import Hello

    bootstrap.load_config_options(flag_options=kwargs)
    # The demo ships as a module; run its source file like a user script.
    _main_run(Hello.__file__, flag_options=kwargs)
Run a Python script, piping stderr to Streamlit. The script can be local or it can be an url. In the latter case, Streamlit will download the script to a temporary file and runs this file.
def main_run(target: str, args=None, **kwargs):
    """Run a Python script, piping stderr to Streamlit.

    The script can be local or it can be an url. In the latter case, Streamlit
    will download the script to a temporary file and runs this file.
    """
    from streamlit import url_util

    bootstrap.load_config_options(flag_options=kwargs)

    # Reject anything that is not a raw .py file up front.
    _, extension = os.path.splitext(target)
    if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:
        if extension[1:] == "":
            raise click.BadArgumentUsage(
                "Streamlit requires raw Python (.py) files, but the provided file has no extension.\nFor more information, please see https://docs.streamlit.io"
            )
        else:
            raise click.BadArgumentUsage(
                f"Streamlit requires raw Python (.py) files, not {extension}.\nFor more information, please see https://docs.streamlit.io"
            )

    if url_util.is_url(target):
        from streamlit.temporary_directory import TemporaryDirectory

        with TemporaryDirectory() as temp_dir:
            from urllib.parse import urlparse

            path = urlparse(target).path
            # Name the temp file after the last path segment of the URL.
            main_script_path = os.path.join(
                temp_dir, path.strip("/").rsplit("/", 1)[-1]
            )
            # if this is a GitHub/Gist blob url, convert to a raw URL first.
            target = url_util.process_gitblob_url(target)
            _download_remote(main_script_path, target)
            _main_run(main_script_path, args, flag_options=kwargs)
    else:
        if not os.path.exists(target):
            raise click.BadParameter(f"File does not exist: {target}")
        _main_run(target, args, flag_options=kwargs)
Manage the Streamlit cache.
def cache():
    """Manage the Streamlit cache."""
    # Command group container; subcommands do the actual work.
Clear st.cache, st.cache_data, and st.cache_resource caches.
def cache_clear():
    """Clear st.cache, st.cache_data, and st.cache_resource caches."""
    # Legacy st.cache: clear the on-disk cache directory and report.
    result = legacy_caching.clear_cache()
    cache_path = legacy_caching.get_cache_path()
    if result:
        print(f"Cleared directory {cache_path}.")
    else:
        print(f"Nothing to clear at {cache_path}.")

    # in this `streamlit cache clear` cli command we cannot use the
    # `cache_storage_manager from runtime (since runtime is not initialized)
    # so we create a new cache_storage_manager instance that used in runtime,
    # and call clear_all() method for it.
    # This will not remove the in-memory cache.
    cache_storage_manager = create_default_cache_storage_manager()
    cache_storage_manager.clear_all()

    # Finally clear st.cache_resource's cache.
    caching.cache_resource.clear()
Manage Streamlit's config settings.
def config():
    """Manage Streamlit's config settings."""
    # Command group container; subcommands do the actual work.
Show all of Streamlit's config settings.
def config_show(**kwargs):
    """Show all of Streamlit's config settings."""
    # Apply any CLI-provided config flags before printing the config.
    bootstrap.load_config_options(flag_options=kwargs)
    _config.show_config()
Activate Streamlit by entering your email.
def activate(ctx):
    """Activate Streamlit by entering your email."""
    # A subcommand (e.g. `activate reset`) takes precedence; only prompt when
    # `activate` was invoked on its own.
    if ctx.invoked_subcommand:
        return
    Credentials.get_current().activate()
Reset Activation Credentials.
def activate_reset():
    """Reset Activation Credentials."""
    creds = Credentials.get_current()
    creds.reset()
Internal-only commands used for testing. These commands are not included in the output of `streamlit help`.
def test():
    """Internal-only commands used for testing.

    These commands are not included in the output of `streamlit help`.
    """
    # Command group container; subcommands do the actual work.
Assert that the program name is set to `streamlit test`. This is used by our cli-smoke-tests to verify that the program name is set to `streamlit ...` whether the streamlit binary is invoked directly or via `python -m streamlit ...`.
def test_prog_name(): """Assert that the program name is set to `streamlit test`. This is used by our cli-smoke-tests to verify that the program name is set to `streamlit ...` whether the streamlit binary is invoked directly or via `python -m streamlit ...`. """ # We use _get_command_line_as_string to run some error checks but don't do # anything with its return value. _get_command_line_as_string() parent = click.get_current_context().parent assert parent is not None assert parent.command_path == "streamlit test"
True if cross-origin requests are allowed. We only allow cross-origin requests when CORS protection has been disabled with server.enableCORS=False or if using the Node server. When using the Node server, we have a dev and prod port, which count as two origins.
def allow_cross_origin_requests() -> bool:
    """True if cross-origin requests are allowed.

    We only allow cross-origin requests when CORS protection has been disabled
    with server.enableCORS=False or if using the Node server. When using the
    Node server, we have a dev and prod port, which count as two origins.
    """
    if not config.get_option("server.enableCORS"):
        # CORS protection explicitly disabled.
        return True
    # Short-circuit preserved: dev mode is only consulted when CORS is on.
    return config.get_option("global.developmentMode")
Makes the server start listening at the configured port. In case the port is already taken it tries listening to the next available port. It will error after MAX_PORT_SEARCH_RETRIES attempts.
def start_listening(app: tornado.web.Application) -> None:
    """Makes the server start listening at the configured port.

    In case the port is already taken it tries listening to the next available
    port. It will error after MAX_PORT_SEARCH_RETRIES attempts.
    """
    cert_file = config.get_option("server.sslCertFile")
    key_file = config.get_option("server.sslKeyFile")
    ssl_options = _get_ssl_options(cert_file, key_file)

    # server.maxUploadSize is configured in megabytes; tornado expects bytes.
    http_server = HTTPServer(
        app,
        max_buffer_size=config.get_option("server.maxUploadSize") * 1024 * 1024,
        ssl_options=ssl_options,
    )

    # Listen on a unix socket or a TCP port depending on the configured
    # server address.
    if server_address_is_unix_socket():
        start_listening_unix_socket(http_server)
    else:
        start_listening_tcp_socket(http_server)
Return True if URL is from allowed origins (for CORS purpose). Allowed origins: 1. localhost 2. The internal and external IP addresses of the machine where this function was called from. If `server.enableCORS` is False, this allows all origins.
def is_url_from_allowed_origins(url: str) -> bool:
    """Return True if URL is from allowed origins (for CORS purpose).

    Allowed origins:
    1. localhost
    2. The internal and external IP addresses of the machine where this
       function was called from.

    If `server.enableCORS` is False, this allows all origins.
    """
    if not config.get_option("server.enableCORS"):
        # Allow everything when CORS is disabled.
        return True

    hostname = url_util.get_hostname(url)

    allowed_domains = [  # List[Union[str, Callable[[], Optional[str]]]]
        # Check localhost first.
        "localhost",
        "0.0.0.0",
        "127.0.0.1",
        # Try to avoid making unnecessary HTTP requests by checking if the user
        # manually specified a server address.
        _get_server_address_if_manually_set,
        # Then try the options that depend on HTTP requests or opening sockets.
        net_util.get_internal_ip,
        net_util.get_external_ip,
    ]

    for allowed_domain in allowed_domains:
        if callable(allowed_domain):
            # Lazily resolve domains that require config/network lookups.
            allowed_domain = allowed_domain()

        if allowed_domain is None:
            continue

        if hostname == allowed_domain:
            return True

    return False
Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).
def make_url_path_regex(*path, **kwargs) -> str:
    """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
    # Drop falsy components, then strip surrounding slashes from each part.
    parts = [component.strip("/") for component in path if component]
    joined = "/".join(parts)
    # By default the trailing slash is optional in the resulting pattern.
    if kwargs.get("trailing_slash", True):
        return r"^/%s/?$" % joined
    return r"^/%s$" % joined
Get the URL for any app served at the given host_ip. Parameters ---------- host_ip : str The IP address of the machine that is running the Streamlit Server. Returns ------- str The URL.
def get_url(host_ip: str) -> str:
    """Get the URL for any app served at the given host_ip.

    Parameters
    ----------
    host_ip : str
        The IP address of the machine that is running the Streamlit Server.

    Returns
    -------
    str
        The URL.
    """
    scheme = "https" if config.get_option("server.sslCertFile") else "http"
    host = host_ip.strip("/")
    port = _get_browser_address_bar_port()
    # Prefix the configured base path with a single leading slash, if any.
    prefix = config.get_option("server.baseUrlPath").strip("/")
    prefix = f"/{prefix}" if prefix else ""
    return f"{scheme}://{host}:{port}{prefix}"
Get the app URL that will be shown in the browser's address bar. That is, this is the port where static assets will be served from. In dev, this is different from the URL that will be used to connect to the server-browser websocket.
def _get_browser_address_bar_port() -> int:
    """Get the app URL that will be shown in the browser's address bar.

    That is, this is the port where static assets will be served from. In dev,
    this is different from the URL that will be used to connect to the
    server-browser websocket.
    """
    if config.get_option("global.developmentMode"):
        # Dev builds serve static assets from a fixed port.
        return DEVELOPMENT_PORT
    return int(config.get_option("browser.serverPort"))
Emits the warning about deprecation of HTTP endpoint in the HTTP header.
def emit_endpoint_deprecation_notice(
    handler: tornado.web.RequestHandler, new_path: str
) -> None:
    """
    Emits the warning about deprecation of HTTP endpoint in the HTTP header.
    """
    # Non-standard "Deprecation" header, plus a Link header pointing callers
    # at the replacement endpoint.
    handler.set_header("Deprecation", True)
    origin = f"{handler.request.protocol}://{handler.request.host}"
    replacement = urljoin(origin, new_path)
    handler.set_header("Link", f'<{replacement}>; rel="alternate"')
Return a copy of the HTTP request headers for the current session's WebSocket connection. If there's no active session, return None instead. Raise an error if the server is not running. Note to the intrepid: this is an UNSUPPORTED, INTERNAL API. (We don't have plans to remove it without a replacement, but we don't consider this a production-ready function, and its signature may change without a deprecation warning.)
def _get_websocket_headers() -> dict[str, str] | None:
    """Return a copy of the HTTP request headers for the current session's
    WebSocket connection. If there's no active session, return None instead.

    Raise an error if the server is not running.

    Note to the intrepid: this is an UNSUPPORTED, INTERNAL API. (We don't have
    plans to remove it without a replacement, but we don't consider this a
    production-ready function, and its signature may change without a
    deprecation warning.)
    """
    ctx = get_script_run_ctx()
    if ctx is None:
        # Not running inside a script run; there is no session to inspect.
        return None

    session_client = runtime.get_instance().get_client(ctx.session_id)
    if session_client is None:
        return None

    if not isinstance(session_client, BrowserWebSocketHandler):
        raise RuntimeError(
            f"SessionClient is not a BrowserWebSocketHandler! ({session_client})"
        )

    # Return a copy so callers can't mutate the live request headers object.
    return dict(session_client.request.headers)
Call a function on multiple threads simultaneously and assert that no thread raises an unhandled exception. The function must take single `int` param, which will be the index of the thread it's being called on. Note that a passing multi-threaded test does not generally guarantee that the tested code is thread safe! Because threading issues tend to be non-deterministic, a flaky test that fails only occasionally is a good indicator of an underlying issue. Parameters ---------- func The function to call on each thread. num_threads The number of threads to create. timeout If the thread runs for longer than this amount of time, raise an Exception. attach_script_run_ctx If True, attach a mock ScriptRunContext to each thread before starting.
def call_on_threads(
    func: Callable[[int], Any],
    num_threads: int,
    timeout: Optional[float] = 0.25,
    attach_script_run_ctx: bool = True,
) -> None:
    """Call a function on multiple threads simultaneously and assert that no
    thread raises an unhandled exception.

    The function must take single `int` param, which will be the index of the
    thread it's being called on.

    Note that a passing multi-threaded test does not generally guarantee that
    the tested code is thread safe! Because threading issues tend to be
    non-deterministic, a flaky test that fails only occasionally is a good
    indicator of an underlying issue.

    Parameters
    ----------
    func
        The function to call on each thread.
    num_threads
        The number of threads to create.
    timeout
        If the thread runs for longer than this amount of time, raise an
        Exception.
    attach_script_run_ctx
        If True, attach a mock ScriptRunContext to each thread before starting.
    """
    threads = [
        ExceptionCapturingThread(name=f"Thread {ii}", target=func, args=[ii])
        for ii in range(num_threads)
    ]

    if attach_script_run_ctx:
        # Give each thread its own mock ScriptRunContext with a unique
        # session_id.
        for ii in range(num_threads):
            ctx = ScriptRunContext(
                session_id=f"Thread{ii}_Session",
                _enqueue=ForwardMsgQueue().enqueue,
                query_string="",
                session_state=SafeSessionState(SessionState(), lambda: None),
                uploaded_file_mgr=MemoryUploadedFileManager("/mock/upload"),
                main_script_path="",
                page_script_hash="",
                user_info={"email": "[email protected]"},
                fragment_storage=MemoryFragmentStorage(),
            )
            thread = threads[ii]
            add_script_run_ctx(thread, ctx)

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join(timeout=timeout)
        # Surface any exception captured on the worker thread.
        thread.assert_no_unhandled_exception()
Create a ScriptRunContext for use in tests.
def create_mock_script_run_ctx() -> ScriptRunContext:
    """Create a ScriptRunContext for use in tests."""
    mock_session_state = SafeSessionState(SessionState(), lambda: None)
    return ScriptRunContext(
        session_id="mock_session_id",
        _enqueue=lambda msg: None,
        query_string="mock_query_string",
        session_state=mock_session_state,
        uploaded_file_mgr=MemoryUploadedFileManager("/mock/upload"),
        main_script_path="",
        page_script_hash="mock_page_script_hash",
        user_info={"email": "[email protected]"},
        fragment_storage=MemoryFragmentStorage(),
    )
Replace newlines *inside paragraphs* with spaces. Consecutive lines of text are considered part of the same paragraph in Markdown. So this function joins those into a single line to make the test robust to changes in text wrapping. NOTE: This function doesn't attempt to be 100% grammatically correct Markdown! It's just supposed to be "correct enough" for tests to pass. For example, when we guard " " from being converted, we really should be guarding for RegEx(" +") instead. But that doesn't matter for our tests.
def normalize_md(txt: str) -> str:
    """Replace newlines *inside paragraphs* with spaces.

    Consecutive lines of text are considered part of the same paragraph in
    Markdown. So this function joins those into a single line to make the test
    robust to changes in text wrapping.

    NOTE: This function doesn't attempt to be 100% grammatically correct
    Markdown! It's just supposed to be "correct enough" for tests to pass. For
    example, when we guard "\n\n" from being converted, we really should be
    guarding for RegEx("\n\n+") instead. But that doesn't matter for our tests.
    """
    # Spans whose newline must survive, mapped to placeholder tokens.
    protect = {
        "\n\n": "OMG_NEWLINE",  # paragraph breaks
        "\n*": "OMG_STAR",      # starred list items
        "\n-": "OMG_HYPHEN",    # hyphenated list items
        "]\n(": "OMG_LINK",     # links wrapped across two lines
    }
    restore = {
        "OMG_NEWLINE": "\n\n",
        "OMG_STAR": "\n*",
        "OMG_HYPHEN": "\n-",
        # A wrapped link is rejoined with no space at all.
        "OMG_LINK": "](",
    }

    for literal, token in protect.items():
        txt = txt.replace(literal, token)

    # Any remaining newline is an intra-paragraph wrap; join with a space.
    txt = txt.replace("\n", " ")

    for token, replacement in restore.items():
        txt = txt.replace(token, replacement)

    return txt.strip()
Create a mock legacy_data_frame ForwardMsg.
def create_dataframe_msg(df: Data, id: int = 1) -> ForwardMsg:
    """Create a mock legacy_data_frame ForwardMsg."""
    msg = ForwardMsg()
    # Target the sidebar container, at delta index `id`.
    msg.metadata.delta_path[:] = make_delta_path(RootContainer.SIDEBAR, (), id)
    # Serialize the dataframe into the message's arrow payload.
    arrow.marshall(msg.delta.new_element.arrow_data_frame, df)
    return msg
Create a script_finished ForwardMsg.
def create_script_finished_message(
    status: "ForwardMsg.ScriptFinishedStatus.ValueType",
) -> ForwardMsg:
    """Create a script_finished ForwardMsg."""
    finished_msg = ForwardMsg()
    finished_msg.script_finished = status
    return finished_msg
Get version by parsing out setup.py.
def get_version():
    """Get version by parsing out setup.py.

    Returns
    -------
    str or None
        The version string from the `VERSION = "..."` line in setup.py, or
        None if no such line is found.
    """
    dirname = os.path.dirname(__file__)
    base_dir = os.path.abspath(os.path.join(dirname, "../.."))
    pattern = re.compile(r"(?:.*VERSION = \")(?P<version>.*)(?:\" # PEP-440$)")
    # Use a context manager so the file handle is closed deterministically
    # (the previous version leaked the handle until garbage collection).
    with open(os.path.join(base_dir, "setup.py")) as fp:
        for line in fp:
            m = pattern.match(line)
            if m:
                return m.group("version")
    # Implicit None when no VERSION line matches (original behavior).
Test that `adjust_years` correctly` adjusts the year of a date.
def test_adjust_years(input_date: date, years: int, expected_date: date):
    """Test that `adjust_years` correctly adjusts the year of a date."""
    result = adjust_years(input_date, years)
    assert result == expected_date
Test the various types of input that time_to_seconds accepts.
def test_time_to_seconds_coerced(_, input_value: Any, expected_seconds: float):
    """Test the various types of input that time_to_seconds accepts."""
    result = time_to_seconds(input_value)
    assert result == expected_seconds
Test the various types of input that time_to_seconds accepts.
def test_time_to_seconds_not_coerced(_, input_value: Any, expected_seconds: float):
    """Test the various types of input that time_to_seconds accepts."""
    result = time_to_seconds(input_value, coerce_none_to_inf=False)
    assert result == expected_seconds
Test that a badly-formatted time string raises an exception.
def test_time_str_exception():
    """Test that a badly-formatted time string raises an exception."""
    # Empty string and an unrecognized unit should both be rejected.
    for bad_value in ("", "1 flecond"):
        with pytest.raises(BadTimeStringError):
            time_to_seconds(bad_value)
Return a function that mocks is_type. When you do this: mock_is_type.side_effect = make_is_type_mock("foo.bar.Baz") ...then when you call mock_is_type(my_type, "foo.bar.Baz") it will return True (and False otherwise). You can also pass in a tuple.
def make_is_type_mock(true_type_matchers):
    """Return a function that mocks is_type.

    When you do this:

        mock_is_type.side_effect = make_is_type_mock("foo.bar.Baz")

    ...then when you call mock_is_type(my_type, "foo.bar.Baz") it will return
    True (and False otherwise). You can also pass in a tuple.
    """
    # Normalize a single matcher to a 1-tuple. isinstance is the idiomatic
    # check here (the original used `type(x) is not tuple`).
    if not isinstance(true_type_matchers, tuple):
        true_type_matchers = (true_type_matchers,)

    def new_is_type(obj, type_matchers):
        if not isinstance(type_matchers, tuple):
            type_matchers = (type_matchers,)
        # True if any queried matcher is one of the configured "true" ones.
        return any(matcher in true_type_matchers for matcher in type_matchers)

    return new_is_type
Get the Arrow schema for a DataFrame.
def _get_arrow_schema(df: pd.DataFrame) -> pa.Schema:
    """Get the Arrow schema for a DataFrame."""
    table = pa.Table.from_pandas(df)
    return table.schema
Test interactions with an empty date_input widget.
def test_date_input_interaction():
    """Test interactions with an empty date_input widget."""

    def script():
        import streamlit as st

        st.date_input("the label", value=None)

    at = AppTest.from_function(script).run()
    date_input = at.date_input[0]
    assert date_input.value is None

    # Set the value to a specific date
    at = date_input.set_value(date(2012, 1, 3)).run()
    date_input = at.date_input[0]
    assert date_input.value == date(2012, 1, 3)

    # Clear the value
    at = date_input.set_value(None).run()
    date_input = at.date_input[0]
    assert date_input.value is None
Patches streamlit.elements.doc_string so _get_variable_name() works outside ScriptRunner.
def patch_varname_getter():
    """
    Patches streamlit.elements.doc_string so _get_variable_name() works
    outside ScriptRunner.
    """
    import inspect

    # Frame index 2: two frames above this call, i.e. the module that
    # invoked this helper.
    caller_filename = inspect.getouterframes(inspect.currentframe())[2].filename
    return mock.patch(
        "streamlit.elements.doc_string.SCRIPTRUNNER_FILENAME", caller_filename
    )
Patches streamlit.elements.doc_string so _get_variable_name() works outside ScriptRunner.
def patch_varname_getter():
    """Patches streamlit.elements.doc_string so _get_variable_name() works outside ScriptRunner."""
    # Frame index 2: two frames above this call, i.e. the module that
    # invoked this helper.
    caller_filename = inspect.getouterframes(inspect.currentframe())[2].filename
    return patch(
        "streamlit.elements.doc_string.SCRIPTRUNNER_FILENAME", caller_filename
    )
Test E2E Enum Coercion on a selectbox.
def test_multiselect_enum_coercion():
    """Test E2E Enum Coercion on a multiselect."""

    def script():
        from enum import Enum

        import streamlit as st

        class EnumA(Enum):
            A = 1
            B = 2
            C = 3

        selected_list = st.multiselect("my_enum", EnumA, default=[EnumA.A, EnumA.C])
        st.text(id(selected_list[0].__class__))
        st.text(id(EnumA))
        st.text(all(selected in EnumA for selected in selected_list))

    at = AppTest.from_function(script).run()

    def test_enum():
        multiselect = at.multiselect[0]
        original_class = multiselect.value[0].__class__
        multiselect.set_value([original_class.A, original_class.B]).run()
        # With coercion on, the EnumA class re-declared on rerun should be
        # substituted so both ids match and membership checks succeed.
        assert at.text[0].value == at.text[1].value, "Enum Class ID not the same"
        assert at.text[2].value == "True", "Not all enums found in class"

    with patch_config_options({"runner.enumCoercion": "nameOnly"}):
        test_enum()
    with patch_config_options({"runner.enumCoercion": "off"}):
        # With coercion disabled the class ids differ, so test_enum fails.
        with pytest.raises(AssertionError):
            test_enum()
Test interactions with an empty number input widget.
def test_number_input_interaction():
    """Test interactions with an empty number input widget."""

    def script():
        import streamlit as st

        st.number_input("the label", value=None)

    at = AppTest.from_function(script).run()
    number_input = at.number_input[0]
    assert number_input.value is None

    # Set the value to 10
    at = number_input.set_value(10).run()
    number_input = at.number_input[0]
    assert number_input.value == 10

    # Increment the value
    at = number_input.increment().run()
    number_input = at.number_input[0]
    # NOTE(review): relies on the widget's default step (apparently 0.01
    # here) — confirm against st.number_input's step behavior for value=None.
    assert number_input.value == 10.01

    # Clear the value
    at = number_input.set_value(None).run()
    number_input = at.number_input[0]
    assert number_input.value is None
Test interactions with an empty radio widget.
def test_radio_interaction():
    """Test interactions with an empty radio widget."""

    def script():
        import streamlit as st

        st.radio("the label", ("m", "f"), index=None)

    at = AppTest.from_function(script).run()
    radio = at.radio[0]
    assert radio.value is None

    # Select option m
    at = radio.set_value("m").run()
    radio = at.radio[0]
    assert radio.value == "m"

    # Clear the value
    at = radio.set_value(None).run()
    radio = at.radio[0]
    assert radio.value is None
Test E2E Enum Coercion on a radio.
def test_radio_enum_coercion():
    """Test E2E Enum Coercion on a radio."""

    def script():
        from enum import Enum

        import streamlit as st

        class EnumA(Enum):
            A = 1
            B = 2
            C = 3

        selected = st.radio("my_enum", EnumA, index=0)
        st.text(id(selected.__class__))
        st.text(id(EnumA))
        st.text(selected in EnumA)

    at = AppTest.from_function(script).run()

    def test_enum():
        radio = at.radio[0]
        original_class = radio.value.__class__
        radio.set_value(original_class.C).run()
        # With coercion on, the EnumA class re-declared on rerun should be
        # substituted so both ids match and membership checks succeed.
        assert at.text[0].value == at.text[1].value, "Enum Class ID not the same"
        assert at.text[2].value == "True", "Not all enums found in class"

    with patch_config_options({"runner.enumCoercion": "nameOnly"}):
        test_enum()
    with patch_config_options({"runner.enumCoercion": "off"}):
        # With coercion disabled the class ids differ, so test_enum fails.
        with pytest.raises(AssertionError):
            test_enum()
Test interactions with an empty selectbox widget.
def test_selectbox_interaction():
    """Test interactions with an empty selectbox widget."""

    def script():
        import streamlit as st

        st.selectbox("the label", ("m", "f"), index=None)

    at = AppTest.from_function(script).run()
    selectbox = at.selectbox[0]
    assert selectbox.value is None

    # Select option m
    at = selectbox.set_value("m").run()
    selectbox = at.selectbox[0]
    assert selectbox.value == "m"

    # Clear the value
    at = selectbox.set_value(None).run()
    selectbox = at.selectbox[0]
    assert selectbox.value is None
Test E2E Enum Coercion on a selectbox.
def test_selectbox_enum_coercion():
    """Test E2E Enum Coercion on a selectbox."""

    def script():
        from enum import Enum

        import streamlit as st

        class EnumA(Enum):
            A = 1
            B = 2
            C = 3

        selected = st.selectbox("my_enum", EnumA, index=0)
        st.text(id(selected.__class__))
        st.text(id(EnumA))
        st.text(selected in EnumA)

    at = AppTest.from_function(script).run()

    def test_enum():
        selectbox = at.selectbox[0]
        original_class = selectbox.value.__class__
        selectbox.set_value(original_class.C).run()
        # With coercion on, the EnumA class re-declared on rerun should be
        # substituted so both ids match and membership checks succeed.
        assert at.text[0].value == at.text[1].value, "Enum Class ID not the same"
        assert at.text[2].value == "True", "Not all enums found in class"

    with patch_config_options({"runner.enumCoercion": "nameOnly"}):
        test_enum()
    with patch_config_options({"runner.enumCoercion": "off"}):
        # With coercion disabled the class ids differ, so test_enum fails.
        with pytest.raises(AssertionError):
            test_enum()
Test E2E Enum Coercion on a select_slider.
def test_select_slider_enum_coercion():
    """Test E2E Enum Coercion on a select_slider."""

    def script():
        from enum import Enum

        import streamlit as st

        class EnumA(Enum):
            A = 1
            B = 2
            C = 3

        selected = st.select_slider("my_enum", EnumA, value=EnumA.A)
        st.text(id(selected.__class__))
        st.text(id(EnumA))
        st.text(selected in EnumA)

    at = AppTest.from_function(script).run()

    def test_enum():
        select_slider = at.select_slider[0]
        original_class = select_slider.value.__class__
        select_slider.set_value(original_class.C).run()
        # With coercion on, the EnumA class re-declared on rerun should be
        # substituted so both ids match and membership checks succeed.
        assert at.text[0].value == at.text[1].value, "Enum Class ID not the same"
        assert at.text[2].value == "True", "Not all enums found in class"

    with patch_config_options({"runner.enumCoercion": "nameOnly"}):
        test_enum()
    with patch_config_options({"runner.enumCoercion": "off"}):
        # With coercion disabled the class ids differ, so test_enum fails.
        with pytest.raises(AssertionError):
            test_enum()
Test E2E Enum Coercion on a selectbox.
def test_select_slider_enum_coercion_multivalue():
    """Test E2E Enum Coercion on a multi-value select_slider."""

    def script():
        from enum import Enum

        import streamlit as st

        class EnumA(Enum):
            A = 1
            B = 2
            C = 3

        selected_list = st.select_slider("my_enum", EnumA, value=[EnumA.A, EnumA.C])
        st.text(id(selected_list[0].__class__))
        st.text(id(EnumA))
        st.text(all(selected in EnumA for selected in selected_list))

    at = AppTest.from_function(script).run()

    def test_enum():
        select_slider = at.select_slider[0]
        original_class = select_slider.value[0].__class__
        select_slider.set_value([original_class.A, original_class.B]).run()
        # With coercion on, the EnumA class re-declared on rerun should be
        # substituted so both ids match and membership checks succeed.
        assert at.text[0].value == at.text[1].value, "Enum Class ID not the same"
        assert at.text[2].value == "True", "Not all enums found in class"

    with patch_config_options({"runner.enumCoercion": "nameOnly"}):
        test_enum()
    with patch_config_options({"runner.enumCoercion": "off"}):
        # With coercion disabled the class ids differ, so test_enum fails.
        with pytest.raises(AssertionError):
            test_enum()
Test interactions with an empty text_area widget.
# NOTE(review): this function exercises st.text_area but is named
# test_text_input_interaction — likely intended to be
# test_text_area_interaction (and it collides with the identically-named
# text_input test). Verify the intended name against the source repo.
def test_text_input_interaction():
    """Test interactions with an empty text_area widget."""

    def script():
        import streamlit as st

        st.text_area("the label", value=None)

    at = AppTest.from_function(script).run()
    text_area = at.text_area[0]
    assert text_area.value is None

    # Input a value:
    at = text_area.input("Foo").run()
    text_area = at.text_area[0]
    assert text_area.value == "Foo"

    # Clear the value
    at = text_area.set_value(None).run()
    text_area = at.text_area[0]
    assert text_area.value is None
Test interactions with an empty text_input widget.
def test_text_input_interaction():
    """Test interactions with an empty text_input widget."""

    def script():
        import streamlit as st

        st.text_input("the label", value=None)

    at = AppTest.from_function(script).run()
    text_input = at.text_input[0]
    assert text_input.value is None

    # Input a value:
    at = text_input.input("Foo").run()
    text_input = at.text_input[0]
    assert text_input.value == "Foo"

    # Clear the value
    at = text_input.set_value(None).run()
    text_input = at.text_input[0]
    assert text_input.value is None
Test interactions with an empty time_input widget.
def test_time_input_interaction():
    """Test interactions with an empty time_input widget."""

    def script():
        import streamlit as st

        st.time_input("the label", value=None)

    at = AppTest.from_function(script).run()
    time_input = at.time_input[0]
    assert time_input.value is None

    # Input a time:
    at = time_input.set_value(time(8, 45)).run()
    time_input = at.time_input[0]
    assert time_input.value == time(8, 45)

    # Clear the value
    at = time_input.set_value(None).run()
    time_input = at.time_input[0]
    assert time_input.value is None
Get the Arrow schema field for a pandas Series.
def _get_arrow_schema_field(column: pd.Series) -> pa.Field | None:
    """Get the Arrow schema field for a pandas Series.

    Returns None for Series that Arrow cannot represent.
    """
    try:
        arrow_schema = pa.Table.from_pandas(column.to_frame()).schema
        return arrow_schema.field(0)
    except (pa.ArrowTypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
        # Arrow rejected the column's contents/dtype; signal "no field".
        return None
Load the list of CallbackRecords from a pickle file at the given path.
def load_records_from_file(path: str) -> list[CallbackRecord]:
    """Load the list of CallbackRecords from a pickle file at the given path.

    Raises RuntimeError if the unpickled object is not a list.
    """
    with open(path, "rb") as fp:
        loaded = pickle.load(fp)
        if isinstance(loaded, list):
            return loaded
        raise RuntimeError(f"Bad CallbackRecord data in {path}")
Playback a recorded list of callbacks using the given LangChain CallbackHandlers. This is useful for offline testing of LangChain callback handling logic. Parameters ---------- handlers A list of LangChain CallbackHandlers to playback the callbacks on. records_or_filename A list of CallbackRecords, or a string path to a pickled record list max_pause_time The maxmimum number of seconds to pause between callbacks. By default `playback_callbacks` sleeps between each callback for the same amount of time as the callback's recorded delay. You can use `max_pause_time` to speed up the simulation. Set `max_pause_time` to 0 to issue all callbacks "instantly", with no delay in between. Returns ------- The Agent's recorded result string.
def playback_callbacks(
    handlers: list[BaseCallbackHandler],
    records_or_filename: list[CallbackRecord] | str,
    max_pause_time: float = math.inf,
) -> str:
    """Playback a recorded list of callbacks using the given LangChain
    CallbackHandlers. This is useful for offline testing of LangChain
    callback handling logic.

    Parameters
    ----------
    handlers
        A list of LangChain CallbackHandlers to playback the callbacks on.
    records_or_filename
        A list of CallbackRecords, or a string path to a pickled record list.
    max_pause_time
        The maximum number of seconds to pause between callbacks. By default
        `playback_callbacks` sleeps between each callback for the same amount
        of time as the callback's recorded delay. You can use `max_pause_time`
        to speed up the simulation. Set `max_pause_time` to 0 to issue all
        callbacks "instantly", with no delay in between.

    Returns
    -------
    The Agent's recorded result string.
    """
    if isinstance(records_or_filename, list):
        records = records_or_filename
    else:
        records = load_records_from_file(records_or_filename)

    # Map each CallbackType onto the name of the handler method it invokes.
    handler_method_names = {
        CallbackType.ON_LLM_START: "on_llm_start",
        CallbackType.ON_LLM_NEW_TOKEN: "on_llm_new_token",
        CallbackType.ON_LLM_END: "on_llm_end",
        CallbackType.ON_LLM_ERROR: "on_llm_error",
        CallbackType.ON_TOOL_START: "on_tool_start",
        CallbackType.ON_TOOL_END: "on_tool_end",
        CallbackType.ON_TOOL_ERROR: "on_tool_error",
        CallbackType.ON_TEXT: "on_text",
        CallbackType.ON_CHAIN_START: "on_chain_start",
        CallbackType.ON_CHAIN_END: "on_chain_end",
        CallbackType.ON_CHAIN_ERROR: "on_chain_error",
        CallbackType.ON_AGENT_ACTION: "on_agent_action",
        CallbackType.ON_AGENT_FINISH: "on_agent_finish",
    }

    for record in records:
        pause_time = min(record["time_delta"], max_pause_time)
        if pause_time > 0:
            time.sleep(pause_time)

        method_name = handler_method_names.get(record["callback_type"])
        if method_name is None:
            # Unknown callback types are silently skipped, matching the
            # original if/elif chain's fall-through behavior.
            continue

        for handler in handlers:
            getattr(handler, method_name)(*record["args"], **record["kwargs"])

    # Return the agent's result
    for record in records:
        if record["callback_type"] == CallbackType.ON_AGENT_FINISH:
            return str(record["args"][0][0]["output"])

    return "[Missing Agent Result]"
Create an AppSession instance with some default mocked data.
def _create_test_session(
    event_loop: Optional[AbstractEventLoop] = None,
    session_id_override: Optional[str] = None,
) -> AppSession:
    """Create an AppSession instance with some default mocked data.

    Parameters
    ----------
    event_loop
        The event loop the AppSession should see as "running" during
        construction. Defaults to a MagicMock stand-in.
    session_id_override
        Optional explicit session ID to construct the session with.
    """
    if event_loop is None:
        event_loop = MagicMock()

    # AppSession looks up the running loop during __init__, so patch the
    # lookup to return our (possibly mocked) loop instead.
    with patch(
        "streamlit.runtime.app_session.asyncio.get_running_loop",
        return_value=event_loop,
    ):
        return AppSession(
            script_data=ScriptData("/fake/script_path.py", is_hello=False),
            uploaded_file_manager=MagicMock(),
            script_cache=MagicMock(),
            message_enqueued_callback=None,
            local_sources_watcher=MagicMock(),
            user_info={"email": "[email protected]"},
            session_id_override=session_id_override,
        )
Creates cached results for a function that returned 1 and executed `st.text(1)`.
def as_replay_test_data() -> MultiCacheResults:
    """Creates cached results for a function that returned 1
    and executed `st.text(1)`.
    """
    widget_key = _make_widget_key([], CacheType.DATA)
    d = {}
    # The single ElementMsgData records the st.text("1") element so that
    # replaying this cache entry can re-emit it into the main container.
    d[widget_key] = CachedResult(
        1,
        [ElementMsgData("text", TextProto(body="1"), st._main.id, "")],
        st._main.id,
        st.sidebar.id,
    )
    return MultiCacheResults(set(), d)
Return the byte length of the pickled value.
def get_byte_length(value):
    """Return the byte length of the pickled value.

    pickle.dumps produces the full serialized byte string; its length is
    used as a proxy for the value's storage size.
    """
    return len(pickle.dumps(value))
Return the byte length of the pickled value.
def get_byte_length(value: Any) -> int:
    """Return the estimated in-memory size of ``value`` in bytes.

    NOTE(review): the previous docstring said "pickled value", but asizeof
    measures the recursive in-memory footprint, not the pickle size.
    """
    return asizeof(value)
Creates cached results for a function that returned `value` and did not execute any elements.
def as_cached_result(value: Any, cache_type: CacheType) -> MultiCacheResults:
    """Creates cached results for a function that returned `value`
    and did not execute any elements.
    """
    # Empty element-message list: replaying this entry renders nothing.
    result = CachedResult(value, [], st._main.id, st.sidebar.id)
    widget_key = _make_widget_key([], cache_type)
    d = {widget_key: result}
    initial = MultiCacheResults(set(), d)
    return initial
Regression test for https://github.com/streamlit/streamlit/issues/6103
def test_arrow_replay():
    """Regression test for https://github.com/streamlit/streamlit/issues/6103"""
    app = AppTest.from_file("test_data/arrow_replay.py").run()
    # The script must complete without raising.
    assert not app.exception
Replace newlines *inside paragraphs* with spaces. Consecutive lines of text are considered part of the same paragraph in Markdown. So this function joins those into a single line to make the test robust to changes in text wrapping. NOTE: This function doesn't attempt to be 100% grammatically correct Markdown! It's just supposed to be "correct enough" for tests to pass. For example, when we guard " " from being converted, we really should be guarding for RegEx(" +") instead. But that doesn't matter for our tests.
def normalize_md(txt):
    """Collapse newlines inside Markdown paragraphs into spaces.

    Consecutive lines of text belong to the same paragraph in Markdown, so
    joining them makes tests robust to changes in text wrapping.  Paragraph
    breaks, list items, and links wrapped across two lines are protected
    from the conversion.

    NOTE: This doesn't attempt to be 100% grammatically correct Markdown —
    it's just "correct enough" for tests to pass. For example, when we guard
    "\n\n" from being converted, we really should be guarding for
    RegEx("\n\n+") instead. But that doesn't matter for our tests.
    """
    # Each protected pattern is swapped for a sentinel before the global
    # newline -> space conversion, then the sentinel is restored.
    protections = [
        ("\n\n", "OMG_NEWLINE"),   # paragraph break
        ("\n*", "OMG_STAR"),       # star list item
        ("\n-", "OMG_HYPHEN"),     # hyphen list item
        ("]\n(", "OMG_LINK"),      # link broken over two lines
    ]
    restorations = {
        "OMG_NEWLINE": "\n\n",
        "OMG_STAR": "\n*",
        "OMG_HYPHEN": "\n-",
        "OMG_LINK": "](",          # the newline is dropped entirely
    }

    for pattern, sentinel in protections:
        txt = txt.replace(pattern, sentinel)

    # Convert all remaining newlines into spaces.
    txt = txt.replace("\n", " ")

    for sentinel, replacement in restorations.items():
        txt = txt.replace(sentinel, replacement)

    return txt.strip()
Create a widget with the given ID.
def _create_widget(id: str, states: WidgetStates) -> WidgetState:
    """Create a widget with the given ID.

    The new WidgetState is appended to ``states.widgets`` and returned so
    the caller can populate its value fields.
    """
    states.widgets.add().id = id
    return states.widgets[-1]
Return the widget with the given ID.
def _get_widget(id: str, states: WidgetStates) -> Optional[WidgetState]:
    """Return the widget with the given ID, or None if it isn't present."""
    return next((widget for widget in states.widgets if widget.id == id), None)
Returns ------- streamlit.proto.WidgetStates_pb2.WidgetState
def _create_widget(id: str, states: WidgetStates) -> WidgetState:
    """Append a new widget with the given ID to ``states`` and return it.

    Returns
    -------
    streamlit.proto.WidgetStates_pb2.WidgetState
    """
    states.widgets.add().id = id
    return states.widgets[-1]
True if the given ScriptRunnerEvent is a 'control' event, as opposed to a 'data' event.
def _is_control_event(event: ScriptRunnerEvent) -> bool:
    """True if the given ScriptRunnerEvent is a 'control' event, as opposed
    to a 'data' event.
    """
    # ENQUEUE_FORWARD_MSG is the single data event type; everything else
    # is a control event.
    is_data_event = event == ScriptRunnerEvent.ENQUEUE_FORWARD_MSG
    return not is_data_event
Wait for the given ScriptRunners to each produce the appropriate number of deltas for widgets_script.py before a timeout. If the timeout is reached, the runners will all be shutdown and an error will be thrown.
def require_widgets_deltas(
    runners: List[TestScriptRunner], timeout: float = 15
) -> None:
    """Wait for the given ScriptRunners to each produce the appropriate
    number of deltas for widgets_script.py before a timeout. If the timeout
    is reached, the runners will all be shutdown and an error will be thrown.

    Parameters
    ----------
    runners
        The ScriptRunners to poll.
    timeout
        Maximum number of seconds to wait for all runners to finish.

    Raises
    ------
    RuntimeError
        If any runner hasn't produced the expected deltas before `timeout`.
    """
    # widgets_script.py has 8 deltas, then a 1-delta loop. If 9
    # have been emitted, we can proceed with the test..
    NUM_DELTAS = 9

    # Use a monotonic clock so the deadline isn't affected by system clock
    # adjustments (time.time() can jump forwards or backwards).
    t0 = time.monotonic()
    num_complete = 0
    while time.monotonic() - t0 < timeout:
        time.sleep(0.1)
        num_complete = sum(
            1 for runner in runners if len(runner.deltas()) >= NUM_DELTAS
        )
        if num_complete == len(runners):
            return

    # If we get here, at least 1 runner hasn't yet completed before our
    # timeout. Create an error string for debugging.
    err_string = f"require_widgets_deltas() timed out after {timeout}s ({num_complete}/{len(runners)} runners complete)"
    for runner in runners:
        if len(runner.deltas()) < NUM_DELTAS:
            err_string += f"\n- incomplete deltas: {runner.text_deltas()}"

    # Shutdown all runners before throwing an error, so that the script
    # doesn't hang forever.
    for runner in runners:
        runner.request_stop()
    for runner in runners:
        runner.join()

    raise RuntimeError(err_string)
Return a new SafeSessionState instance populated with the given query param values.
def _create_mock_session_state(
    initial_query_params_values: Dict[str, Union[List[str], str]]
) -> SafeSessionState:
    """Return a new SafeSessionState instance populated with the given
    query param values.
    """
    session_state = SessionState()
    query_params = QueryParams()
    for key, value in initial_query_params_values.items():
        query_params[key] = value
    session_state.query_params = query_params
    # A no-op callback is sufficient as SafeSessionState's second arg in tests.
    return SafeSessionState(session_state, lambda: None)
Return a new SafeSessionState instance populated with the given state values.
def _create_mock_session_state(
    initial_state_values: Dict[str, Any]
) -> SafeSessionState:
    """Build a SafeSessionState wrapping a fresh SessionState pre-populated
    with the given key/value pairs.
    """
    session_state = SessionState()
    for state_key, state_value in initial_state_values.items():
        session_state[state_key] = state_value
    # A no-op callback is sufficient as SafeSessionState's second arg in tests.
    return SafeSessionState(session_state, lambda: None)
Return the SessionState instance within the current ScriptRunContext's SafeSessionState wrapper.
def _raw_session_state() -> SessionState:
    """Return the SessionState instance within the current ScriptRunContext's
    SafeSessionState wrapper.

    Reaches into the private ``_state`` attribute; test-only helper.
    """
    return get_session_state()._state
Calling 'rerun' from within a widget callback is disallowed and results in a warning.
def test_callbacks_with_rerun():
    """Calling 'rerun' from within a widget callback is disallowed and results
    in a warning.
    """

    def script():
        import streamlit as st

        def callback():
            st.session_state["message"] = "ran callback"
            st.rerun()

        st.checkbox("cb", on_change=callback)

    at = AppTest.from_function(script).run()
    at.checkbox[0].check().run()
    # The callback itself ran (session_state was written)...
    assert at.session_state["message"] == "ran callback"
    # ...but the st.rerun() inside it was turned into a no-op with a warning.
    warning = at.warning[0]
    assert "no-op" in warning.value
When the config option is on, adding unserializable data to session state should result in an exception.
def test_serializable_check():
    """When the config option is on, adding unserializable data to session
    state should result in an exception.
    """
    with patch_config_options({"runner.enforceSerializableSessionState": True}):

        def script():
            import streamlit as st

            def unserializable_data():
                return lambda x: x

            st.session_state.unserializable = unserializable_data()

        at = AppTest.from_function(script).run()
        # Lambdas can't be pickled, so the run surfaces an exception that
        # mentions pickling.
        assert at.exception
        assert "pickle" in at.exception[0].value
When the config option is off, adding unserializable data to session state should work without errors.
def test_serializable_check_off():
    """When the config option is off, adding unserializable data to session
    state should work without errors.
    """
    with patch_config_options({"runner.enforceSerializableSessionState": False}):

        def script():
            import streamlit as st

            def unserializable_data():
                return lambda x: x

            st.session_state.unserializable = unserializable_data()

        at = AppTest.from_function(script).run()
        # With enforcement disabled, the unpicklable value is accepted.
        assert not at.exception
Return a compacted copy of the given SessionState.
def _compact_copy(state: SessionState) -> SessionState:
    """Return a compacted deep copy of the given SessionState; the original
    is left untouched.
    """
    compacted = deepcopy(state)
    compacted._compact_state()
    return compacted
Return all key-value pairs in the SessionState. The returned list is sorted by key for easier comparison.
def _sorted_items(state: SessionState) -> List[Tuple[str, Any]]:
    """Return all key-value pairs in the SessionState, sorted by key for
    easier comparison.
    """
    sorted_keys = sorted(state._keys())
    return [(state_key, state[state_key]) for state_key in sorted_keys]
A regression test for `test_map_set_del` that involves too much setup to conveniently use the hypothesis `example` decorator.
def test_map_set_del_3837_regression():
    """A regression test for `test_map_set_del` that involves too much setup
    to conveniently use the hypothesis `example` decorator."""
    # Two generated widget IDs: the first with no user key, the second
    # registered under user key "0" — which collides with the plain state
    # key "0" set below.
    meta1 = stst.mock_metadata(
        " $$GENERATED_WIDGET_ID-e3e70682-c209-4cac-629f-6fbed82c07cd-None", 0
    )
    meta2 = stst.mock_metadata(
        "$$GENERATED_WIDGET_ID-f728b4fa-4248-5e3a-0a5d-2f346baa9455-0", 0
    )

    m = SessionState()
    m["0"] = 0
    m.register_widget(metadata=meta1, user_key=None)
    # Compacting between the two registrations reproduces the state layout
    # that triggered the original bug (#3837).
    m._compact_state()
    m.register_widget(metadata=meta2, user_key="0")

    key = "0"
    value1 = 0
    m[key] = value1

    l1 = len(m)
    del m[key]
    # Deleting the key must remove it and shrink the map by exactly one.
    assert key not in m
    assert len(m) == l1 - 1
Merge 'b' into 'a'.
def _merge_states(a: SessionState, b: SessionState) -> None:
    """Merge 'b' into 'a', updating 'a' in place."""
    # Each of these internal dicts is merged the same way, so iterate
    # over the attribute names instead of spelling out four updates.
    for attr_name in (
        "_new_session_state",
        "_new_widget_state",
        "_old_state",
        "_key_id_mapping",
    ):
        getattr(a, attr_name).update(getattr(b, attr_name))
Test that the value of a widget persists, disappears, and resets appropriately, as the widget is added and removed from the script execution.
def test_widget_added_removed():
    """
    Test that the value of a widget persists, disappears, and resets
    appropriately, as the widget is added and removed from the script
    execution.
    """

    def script():
        import streamlit as st

        cb = st.radio("radio emulating a checkbox", options=["off", "on"], key="cb")
        if cb == "on":
            st.radio("radio", options=["a", "b", "c"], key="conditional")

    at = AppTest.from_function(script).run()
    # Only the "checkbox" radio exists initially.
    assert len(at.radio) == 1
    with pytest.raises(KeyError):
        at.radio(key="conditional")

    # Turning the first radio "on" adds the conditional widget with its
    # default value.
    at.radio(key="cb").set_value("on").run()
    assert len(at.radio) == 2
    assert at.radio(key="conditional").value == "a"

    # The conditional widget's value persists across reruns while present.
    at.radio(key="conditional").set_value("c").run()
    assert len(at.radio) == 2
    assert at.radio(key="conditional").value == "c"

    # Removing the widget from the script makes its value disappear...
    at.radio(key="cb").set_value("off").run()
    assert len(at.radio) == 1
    with pytest.raises(KeyError):
        at.radio(key="conditional")

    # ...and re-adding it resets it to the default value.
    at.radio(key="cb").set_value("on").run()
    assert len(at.radio) == 2
    assert at.radio(key="conditional").value == "a"
Return a mock.patch for LocalSourcesWatcher
def _patch_local_sources_watcher():
    """Return a mock.patch for LocalSourcesWatcher, so Runtime tests don't
    construct the real watcher.
    """
    return patch("streamlit.runtime.runtime.LocalSourcesWatcher")
Sort key for lists of UploadedFiles
def _get_filename(file):
    """Sort key for lists of UploadedFiles: orders by the file's name."""
    return file.name
Return the license type string for a dependency entry.
def get_license_type(package: PackageInfo) -> str:
    """Return the license type string for a dependency entry.

    PackageInfo entries are indexed sequences; index 2 holds the license
    type string.
    """
    return package[2]
Create a release from the Git Tag
def create_release():
    """Create a GitHub release from the Git tag in $GIT_TAG.

    Fetches the previous release tag, asks GitHub to generate release
    notes between the two tags, then creates the new release with those
    notes.

    Raises
    ------
    Exception
        If GIT_TAG is unset or any GitHub API call fails.
    """
    tag = os.getenv("GIT_TAG")
    access_token = os.getenv("GH_TOKEN")

    if not tag:
        raise Exception("Unable to retrieve GIT_TAG environment variable")

    url = "https://api.github.com/repos/streamlit/streamlit/releases"
    header = {"Authorization": f"token {access_token}"}

    # Get the latest release tag to compare against. A timeout keeps CI
    # from hanging forever if GitHub doesn't respond.
    response = requests.get(f"{url}/latest", headers=header, timeout=60)
    previous_tag_name = None
    if response.status_code == 200:
        previous_tag_name = response.json()["tag_name"]
    else:
        raise Exception(f"Unable to get the latest release: {response.text}")

    # Generate the automated release notes
    payload = {"tag_name": tag, "previous_tag_name": previous_tag_name}
    response = requests.post(
        f"{url}/generate-notes", json=payload, headers=header, timeout=60
    )
    body = None
    if response.status_code == 200:
        body = response.json()["body"]
    else:
        raise Exception(
            f"Unable to generate the latest release notes: {response.text}"
        )

    # Create the release with the generated release notes
    payload = {"tag_name": tag, "name": tag, "body": body}
    response = requests.post(url, json=payload, headers=header, timeout=60)
    if response.status_code == 201:
        print(f"Successfully created Release {tag}")
    else:
        raise Exception(f"Unable to create release, HTTP response: {response.text}")
Retrieve the release branch from the release PR
def get_release_branch():
    """Retrieve the release branch from the release PR.

    Returns
    -------
    str or None
        The head ref of the first open PR identified as a release PR by
        ``check_for_release_pr``, or None if no release PR is open.
    """
    url = "https://api.github.com/repos/streamlit/streamlit/pulls"
    # A timeout keeps CI from hanging forever if GitHub doesn't respond.
    response = requests.get(url, timeout=60).json()
    # Response is in an array, must map over each pull (dict)
    for pull in response:
        ref = check_for_release_pr(pull)
        if ref is not None:
            return ref
    # Make the "no release PR found" case explicit rather than falling off
    # the end of the function.
    return None
Create tag with updated version, a suffix and date.
def create_tag(): """Create tag with updated version, a suffix and date.""" # Get latest version current_version = streamlit.version._get_latest_streamlit_version() # Update micro version_with_inc_micro = ( current_version.major, current_version.minor, current_version.micro + 1, ) # Append todays date version_with_date = ( ".".join([str(x) for x in version_with_inc_micro]) + ".dev" + datetime.now(pytz.timezone("US/Pacific")).strftime("%Y%m%d") ) # Verify if version is PEP440 compliant. packaging.version.Version(version_with_date) return version_with_date
Run a list of commands, displaying them within the given section.
def run_commands(section_header, commands):
    """Run a list of commands in parallel, displaying them within the given
    section.

    Parameters
    ----------
    section_header : str
        Label displayed before each command.
    commands : list of str
        The commands to run (split on single spaces for subprocess).

    Returns
    -------
    list of str
        The commands that exited with a non-zero status.
    """
    lock = Lock()
    failed_commands = []

    def process_command(arg):
        i, command = arg

        # Display the status.
        click.secho(
            f"\nRunning {section_header} {i + 1}/{len(commands)} : "
            f"{_command_to_string(command)}",
            bold=True,
        )

        # Run the command. NOTE(review): naive split(" ") breaks on quoted
        # arguments or repeated spaces; fine for the simple commands used here.
        result = subprocess.call(
            command.split(" "), stdout=subprocess.DEVNULL, stderr=None
        )
        if result != 0:
            with lock:
                failed_commands.append(command)

    # Use the pool as a context manager so worker threads are cleaned up
    # when the map completes (the previous version never closed the pool).
    with ThreadPool(processes=4) as pool:
        pool.map(process_command, enumerate(commands))

    return failed_commands
Remove the given folder or file if it exists
def remove_if_exists(path):
    """Remove the given folder or file if it exists; do nothing otherwise."""
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)
Move a file aside if it exists; restore it on completion
def move_aside_file(path):
    """Move the file at ``path`` aside (to ``path``.bak) if it exists, and
    restore it when the generator finishes.
    """
    backup_path = f"{path}.bak"
    needs_restore = os.path.exists(path)
    if needs_restore:
        os.rename(path, backup_path)
    try:
        yield None
    finally:
        # Restore the original file even if the body raised.
        if needs_restore:
            os.rename(backup_path, path)
Writes ~/.streamlit/credentials.toml
def create_credentials_toml(contents):
    """Writes ~/.streamlit/credentials.toml with the given contents."""
    credentials_dir = dirname(CREDENTIALS_FILE)
    os.makedirs(credentials_dir, exist_ok=True)
    with open(CREDENTIALS_FILE, "w") as credentials_file:
        credentials_file.write(contents)
Kill any active `streamlit run` processes
def kill_streamlits():
    """Kill any active `streamlit run` processes (matched via pgrep)."""
    kill_with_pgrep("streamlit run")
Kill any active app servers spawned by this script.
def kill_app_servers():
    """Kill any active app servers spawned by this script (matched via
    pgrep on the marker string)."""
    kill_with_pgrep("running-streamlit-e2e-test")
Run a single e2e test. An e2e test consists of a Streamlit script that produces a result, and a Cypress test file that asserts that result is as expected. Parameters ---------- ctx : Context The Context object that contains our global testing parameters. specpath : str The path of the Cypress spec file to run. streamlit_command : list of str The Streamlit command to run (passed directly to subprocess.Popen()). Returns ------- bool True if the test succeeded.
def run_test(
    ctx: Context,
    specpath: str,
    streamlit_command: List[str],
    show_output: bool = False,
) -> bool:
    """Run a single e2e test.

    An e2e test consists of a Streamlit script that produces a result, and
    a Cypress test file that asserts that result is as expected.

    Parameters
    ----------
    ctx : Context
        The Context object that contains our global testing parameters.
    specpath : str
        The path of the Cypress spec file to run.
    streamlit_command : list of str
        The Streamlit command to run (passed directly to subprocess.Popen()).
    show_output : bool
        If True, print the Streamlit and Cypress output even on success.

    Returns
    -------
    bool
        True if the test succeeded.
    """
    # Loop-state sentinels for the interactive retry loop below.
    SUCCESS = "SUCCESS"
    RETRY = "RETRY"
    SKIP = "SKIP"
    QUIT = "QUIT"

    result = None

    # Move existing credentials file aside, and create a new one if the
    # tests call for it.
    with move_aside_file(CREDENTIALS_FILE):
        create_credentials_toml('[general]\nemail="[email protected]"')

        # Loop until the test succeeds or is skipped.
        while result not in (SUCCESS, SKIP, QUIT):
            cypress_command = ["yarn", "cy:run", "--spec", specpath]
            cypress_command.extend(ctx.cypress_flags)

            click.echo(
                f"{click.style('Running test:', fg='yellow', bold=True)}"
                f"\n{click.style(' '.join(streamlit_command), fg='yellow')}"
                f"\n{click.style(' '.join(cypress_command), fg='yellow')}"
            )

            # Start the streamlit command
            with AsyncSubprocess(streamlit_command, cwd=FRONTEND_DIR) as streamlit_proc:
                # Run the Cypress spec to completion.
                cypress_result = subprocess.run(
                    cypress_command,
                    cwd=FRONTEND_DIR,
                    capture_output=True,
                    text=True,
                )

                # Terminate the streamlit command and get its output
                streamlit_stdout = streamlit_proc.terminate()

            def print_output():
                click.echo(
                    f"\n\n{click.style('Streamlit output:', fg='yellow', bold=True)}"
                    f"\n{streamlit_stdout}"
                    f"\n\n{click.style('Cypress output:', fg='yellow', bold=True)}"
                    f"\n{cypress_result.stdout}"
                    f"\n"
                )

            if cypress_result.returncode == 0:
                result = SUCCESS
                click.echo(click.style("Success!\n", fg="green", bold=True))
                if show_output:
                    print_output()
            else:
                # The test failed. Print the output of the Streamlit command
                # and the Cypress command.
                click.echo(click.style("Failure!", fg="red", bold=True))
                print_output()

                if ctx.always_continue:
                    result = SKIP
                else:
                    # Prompt the user for what to do next.
                    user_input = click.prompt(
                        "[R]etry, [U]pdate snapshots, [S]kip, or [Q]uit?",
                        default="r",
                    )
                    key = user_input[0].lower()
                    if key == "s":
                        result = SKIP
                    elif key == "q":
                        result = QUIT
                    elif key == "r":
                        result = RETRY
                    elif key == "u":
                        # Re-run with snapshot updating enabled.
                        ctx.update_snapshots = True
                        result = RETRY
                    else:
                        # Retry if key not recognized
                        result = RETRY

        # Anything other than SUCCESS (including SKIP/QUIT) marks the whole
        # run as failed.
        if result != SUCCESS:
            ctx.any_failed = True

    if result == QUIT:
        raise QuitException()

    return result == SUCCESS
Run e2e tests. If any fail, exit with non-zero status.
def run_e2e_tests(
    always_continue: bool,
    record_results: bool,
    update_snapshots: bool,
    tests: List[str],
    verbose: bool,
):
    """Run e2e tests. If any fail, exit with non-zero status.

    Parameters
    ----------
    always_continue
        Skip failed tests instead of prompting interactively.
    record_results
        Record Cypress results (stored on the shared Context).
    update_snapshots
        Run Cypress with snapshot updating enabled.
    tests
        Explicit spec paths to run; if empty, all specs under e2e/specs run.
    verbose
        Show Streamlit/Cypress output even for passing tests.
    """
    # Start from a clean slate: no stray streamlit or app-server processes.
    kill_streamlits()
    kill_app_servers()

    app_server = run_app_server()

    # Clear reports from previous runs
    remove_if_exists("frontend/test_results/cypress")

    ctx = Context()
    ctx.always_continue = always_continue
    ctx.record_results = record_results
    ctx.update_snapshots = update_snapshots
    ctx.tests_dir_name = "e2e"

    try:
        p = Path(join(ROOT_DIR, ctx.tests_dir_name, "specs")).resolve()
        if tests:
            paths = [Path(t).resolve() for t in tests]
        else:
            paths = sorted(p.glob("*.spec.js"))

        # Each spec maps onto a matching script; a few specs need special
        # streamlit invocations, handled case-by-case below.
        for spec_path in paths:
            if basename(spec_path) == "st_hello.spec.js":
                # Test "streamlit hello" in both headless and non-headless mode.
                run_test(
                    ctx,
                    str(spec_path),
                    ["streamlit", "hello", "--server.headless=false"],
                    show_output=verbose,
                )
                run_test(
                    ctx,
                    str(spec_path),
                    ["streamlit", "hello", "--server.headless=true"],
                    show_output=verbose,
                )
            elif basename(spec_path) == "staticfiles_app.spec.js":
                # Needs static file serving enabled and a dedicated script.
                test_name, _ = splitext(basename(spec_path))
                test_name, _ = splitext(test_name)
                test_path = join(
                    ctx.tests_dir,
                    "scripts",
                    "staticfiles_apps",
                    "streamlit_static_app.py",
                )
                if os.path.exists(test_path):
                    run_test(
                        ctx,
                        str(spec_path),
                        [
                            "streamlit",
                            "run",
                            "--server.enableStaticServing=true",
                            test_path,
                        ],
                        show_output=verbose,
                    )
            elif basename(spec_path) == "hostframe.spec.js":
                # Runs against the dedicated host-frame app script.
                test_name, _ = splitext(basename(spec_path))
                test_name, _ = splitext(test_name)
                test_path = join(
                    ctx.tests_dir, "scripts", "hostframe", "hostframe_app.py"
                )
                if os.path.exists(test_path):
                    run_test(
                        ctx,
                        str(spec_path),
                        [
                            "streamlit",
                            "run",
                            test_path,
                        ],
                        show_output=verbose,
                    )
            else:
                # Default case: "<name>.spec.js" runs "scripts/<name>.py".
                test_name, _ = splitext(basename(spec_path))
                test_name, _ = splitext(test_name)
                test_path = join(ctx.tests_dir, "scripts", f"{test_name}.py")
                if os.path.exists(test_path):
                    run_test(
                        ctx,
                        str(spec_path),
                        ["streamlit", "run", test_path],
                        show_output=verbose,
                    )
    except QuitException:
        # Swallow the exception we raise if the user chooses to exit early.
        pass
    finally:
        if app_server:
            app_server.terminate()

    if ctx.any_failed:
        sys.exit(1)
Return True if the path is relative to another path or False. This function is backported from Python 3.9 - Path.relativeto.
def is_relative_to(path: Path, *other):
    """Return True if the path is relative to another path or False.

    This function is backported from Python 3.9 - Path.relative_to.
    """
    try:
        path.relative_to(*other)
    except ValueError:
        return False
    return True
Create a Slack message
def send_notification():
    """Create a slack message.

    Reads the workflow name from argv[1] and a message key from argv[2],
    builds the matching notification payload, and POSTs it to the webhook
    in $SLACK_WEBHOOK.
    """
    webhook = os.getenv("SLACK_WEBHOOK")

    if not webhook:
        raise Exception("Unable to retrieve SLACK_WEBHOOK")

    # Human-readable failure descriptions for each nightly sub-job key.
    nightly_slack_messages = {
        "tag": "to create a tag",
        "python": "on python tests",
        "js": "on javascript tests",
        "py_prod": "on python prod dependencies test",
        "cypress": "on cypress tests",
        "playwright": "on playwright tests",
        "build": "to release",
    }

    run_id = os.getenv("RUN_ID")
    workflow = sys.argv[1]
    message_key = sys.argv[2]
    payload = None

    if workflow == "nightly":
        # Nightly messages are always failures; look up the failing stage.
        failure = nightly_slack_messages[message_key]
        payload = {
            "text": f":blobonfire: Nightly build failed {failure} - <https://github.com/streamlit/streamlit/actions/runs/{run_id}|Link to run>"
        }

    if workflow == "candidate":
        if message_key == "success":
            payload = {"text": ":rocket: Release Candidate was successful!"}
        else:
            payload = {
                "text": f":blobonfire: Release Candidate failed - <https://github.com/streamlit/streamlit/actions/runs/{run_id}|Link to run>"
            }

    if workflow == "release":
        if message_key == "success":
            payload = {"text": ":rocket: Release was successful!"}
        else:
            payload = {
                "text": f":blobonfire: Release failed - <https://github.com/streamlit/streamlit/actions/runs/{run_id}|Link to run>"
            }

    # Unknown workflows produce no payload and send nothing.
    if payload:
        response = requests.post(webhook, json=payload)
        if response.status_code != 200:
            raise Exception(
                f"Unable to send slack message, HTTP response: {response.text}"
            )
Update files with new project name.
def update_files(project_name: str, files: Dict[str, str]) -> None:
    """Update files with new project name.

    Parameters
    ----------
    project_name
        The replacement project name.
    files
        Maps filename (relative to BASE_DIR) to a regex containing
        ``pre_match`` and ``post_match`` named groups surrounding the old
        name.

    Raises
    ------
    Exception
        If a file contains no line matching its regex.
    """
    for filename, regex in files.items():
        filename = os.path.join(BASE_DIR, filename)
        matched = False
        pattern = re.compile(regex)

        # fileinput with inplace=True redirects stdout into the file, so
        # the print() below rewrites each line in place.
        for line in fileinput.input(filename, inplace=True):
            line = line.rstrip()
            if pattern.match(line):
                line = re.sub(
                    regex, rf"\g<pre_match>{project_name}\g<post_match>", line
                )
                matched = True
            print(line)

        if not matched:
            # Name the actual file in the error so failures are actionable
            # (previously the message contained a placeholder instead).
            raise Exception(f'In file "{filename}", did not find regex "{regex}"')
Verify if version is PEP440 compliant. https://github.com/pypa/packaging/blob/16.7/packaging/version.py#L191 We might need pre, post, alpha, rc in the future so might as well use an object that does all that. This verifies it's a valid version.
def verify_pep440(version):
    """Verify if version is PEP440 compliant.

    https://github.com/pypa/packaging/blob/16.7/packaging/version.py#L191

    We might need pre, post, alpha, rc in the future so might as well use
    an object that does all that. This verifies it's a valid version.

    Raises
    ------
    packaging.version.InvalidVersion
        If the version string is not PEP440 compliant.
    """
    # Version() raises InvalidVersion on bad input; the previous
    # `except ... as e: raise (e)` wrapper re-raised the same exception
    # and added nothing, so it has been removed.
    return packaging.version.Version(version)
Verify if version is compliant with semantic versioning. https://semver.org/
def verify_semver(version):
    """Verify if version is compliant with semantic versioning.

    https://semver.org/

    Raises
    ------
    ValueError
        If the version string is not semver compliant.
    """
    # VersionInfo.parse raises ValueError on bad input; the previous
    # `except ... as e: raise (e)` wrapper re-raised the same exception
    # and added nothing, so it has been removed.
    return str(semver.VersionInfo.parse(version))
Update files with new version number.
def update_files(data, version):
    """Update files with new version number.

    Parameters
    ----------
    data : dict
        Maps filename (relative to BASE_DIR) to a regex containing ``pre``
        and ``post`` named groups surrounding the old version.
    version : str
        The replacement version string.

    Raises
    ------
    Exception
        If a file contains no line matching its regex.
    """
    for filename, regex in data.items():
        filename = os.path.join(BASE_DIR, filename)
        matched = False
        pattern = re.compile(regex)

        # fileinput with inplace=True redirects stdout into the file, so
        # the print() below rewrites each line in place. Strip the trailing
        # newline from every line up front: previously unmatched lines kept
        # their newline AND got print()'s newline, doubling blank lines.
        for line in fileinput.input(filename, inplace=True):
            line = line.rstrip()
            if pattern.match(line):
                matched = True
                line = re.sub(regex, rf"\g<pre>{version}\g<post>", line)
            print(line)

        if not matched:
            raise Exception(f'In file "{filename}", did not find regex "{regex}"')
Run main loop.
def main():
    """Run main loop.

    Expects exactly one CLI argument: the new version, in either semver or
    PEP440 form, and updates all Python and Node version files with it.
    """
    if len(sys.argv) != 2:
        # Raise directly instead of the old `e = Exception(...); raise (e)`.
        # (Also fixes the "semvver" typo in the message.)
        raise Exception(
            f'Specify semver version as an argument, e.g.: "{sys.argv[0]} 1.2.3"'
        )

    # We need two flavors of the version - one that's semver-compliant for Node, one that's
    # PEP440-compliant for Python. We allow for the incoming version to be either semver-compliant
    # or PEP440-compliant.
    # - `verify_pep440` automatically converts semver to PEP440-compliant
    pep440_version = verify_pep440(sys.argv[1])

    # - Attempt to convert to semver-compliant. If a failure occurs, manually attempt to convert
    #   (PEP440's "rc"/".dev" suffixes map onto semver prerelease syntax).
    try:
        semver_version = verify_semver(sys.argv[1])
    except ValueError:
        semver_version = verify_semver(
            sys.argv[1].replace("rc", "-rc.").replace(".dev", "-dev")
        )

    update_files(PYTHON, pep440_version)
    update_files(NODE_ROOT, semver_version)
    update_files(NODE_APP, semver_version)
    update_files(NODE_LIB, semver_version)
    update_files(NODE_APP_ST_LIB, semver_version)
Add breakpoint to our persisted settings
def record_breakpoint_by_full_path(file_name, line, column=-1, status='disabled', breakpointId=None):
    """ Add breakpoint to our persisted settings

    Records (or updates) the breakpoint at file_name:line with the given
    status and debugger-assigned breakpointId, then persists all
    breakpoints via save_breaks().
    """
    breaks = get_breakpoints_by_full_path(file_name)

    # The original duplicated the assignments below in both the "new line"
    # and "existing line" branches; only the dict creation differs.
    if line not in breaks:
        breaks[line] = {}

    breaks[line]['status'] = status
    breaks[line]['breakpointId'] = str(breakpointId)
    if column != -1:
        breaks[line]['column'] = str(column)

    save_breaks()
Convert a Sublime View into an SWIDebugView
def wrap_view(v):
    """ Convert a Sublime View into an SWIDebugView

    Returns ``v`` itself if it's already a SwiDebugView, the cached
    SwiDebugView for ``v``'s buffer (creating one if needed) if ``v`` is a
    sublime.View, or None for any other type.
    """
    if isinstance(v, SwiDebugView):
        return v
    if isinstance(v, sublime.View):
        id = v.buffer_id()
        # Take this opportunity to replace the wrapped view,
        # if it's against the same buffer as the previously
        # seen view
        if id in config.buffers:
            config.buffers[id].view = v
        else:
            config.buffers[id] = SwiDebugView(v)
        return config.buffers[id]
    return None
turn on/off the tracability. tracable: boolean value. if set True, tracability is enabled.
def enableTrace(tracable):
    """
    Turn traceability on or off.

    tracable: boolean value. If True, traceability is enabled.
    """
    global traceEnabled
    traceEnabled = tracable
    if not tracable:
        return
    # Lazily attach a stream handler the first time tracing is enabled.
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
Set the global timeout setting to connect. timeout: default socket timeout time. This value is second.
def setdefaulttimeout(timeout):
    """
    Set the global timeout setting to connect.

    timeout: default socket timeout time, in seconds.
    """
    global default_timeout
    default_timeout = timeout