code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def _register_data(all_df_metadata: AllDfMetadata) -> None:
    """Register the dashboard data in data manager.

    Args:
        all_df_metadata: Metadata container whose `all_df_metadata` dict maps
            dataframe names to metadata objects carrying a `.df` dataframe.

    Note:
        The original annotation claimed `-> vm.Dashboard`, but nothing is
        returned; the function only mutates the global data manager.
    """
    # Imported lazily so importing this module does not pull in vizro's managers.
    from vizro.managers import data_manager

    for name, metadata in all_df_metadata.all_df_metadata.items():
        data_manager[name] = metadata.df
Register the dashboard data in data manager.
_register_data
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/utils.py
Apache-2.0
def _extract_overall_imports_and_code( custom_charts_code: list[list[dict[str, str]]], custom_charts_imports: list[list[dict[str, str]]] ) -> tuple[set[str], set[str]]: """Extract custom functions and imports from the custom charts code. Args: custom_charts_code: A list of lists of dictionaries, where each dictionary contains the custom chart code for a component. The outer list represents pages, the inner list represents components on a page, and the dictionary maps component IDs to their code. custom_charts_imports: A list of lists of dictionaries, where each dictionary contains the custom chart imports for a component. The outer list represents pages, the inner list represents components on a page, and the dictionary maps component IDs to their imports. Returns: A tuple containing: - A set of custom function code snippets - A set of import statements """ custom_functions = { code for page_components in custom_charts_code for component_code in page_components for code in component_code.values() } imports = { component_imports for page_components in custom_charts_imports for component_code in page_components for component_imports in component_code.values() } return custom_functions, imports
Extract custom functions and imports from the custom charts code. Args: custom_charts_code: A list of lists of dictionaries, where each dictionary contains the custom chart code for a component. The outer list represents pages, the inner list represents components on a page, and the dictionary maps component IDs to their code. custom_charts_imports: A list of lists of dictionaries, where each dictionary contains the custom chart imports for a component. The outer list represents pages, the inner list represents components on a page, and the dictionary maps component IDs to their imports. Returns: A tuple containing: - A set of custom function code snippets - A set of import statements
_extract_overall_imports_and_code
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/utils.py
Apache-2.0
def _create_prompt_template(additional_info: str) -> ChatPromptTemplate:
    """Create the ChatPromptTemplate from the base prompt and additional info."""
    # `df_info` is re-escaped as "{df_info}" so it survives .format() and stays a
    # template variable for the final prompt; only additional_info is baked in here.
    system_message = BASE_PROMPT.format(df_info="{df_info}", additional_info=additional_info)
    message_specs = [
        ("system", system_message),
        ("placeholder", "{message}"),
    ]
    return ChatPromptTemplate.from_messages(message_specs)
Create the ChatPromptTemplate from the base prompt and additional info.
_create_prompt_template
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_pydantic_output.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_pydantic_output.py
Apache-2.0
def _create_message_content(
    query: str, df_info: Any, validation_error: Optional[str] = None, retry: bool = False
) -> dict:
    """Create the message content for the LLM model.

    Args:
        query: User query wrapped as a HumanMessage.
        df_info: Dataframe information passed through verbatim.
        validation_error: Error text from a previous attempt; only included on retries.
        retry: Whether this is a retry attempt.

    Returns:
        Dict of template variables for the prompt.
    """
    content: dict = {"message": [HumanMessage(content=query)], "df_info": df_info}
    # The validation_error key is only present on retries, matching the retry prompt template.
    if retry:
        content["validation_error"] = validation_error
    return content
Create the message content for the LLM model.
_create_message_content
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_pydantic_output.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_pydantic_output.py
Apache-2.0
def _get_pydantic_model(
    query: str,
    llm_model: BaseChatModel,
    response_model: BaseModel,
    df_info: Optional[Any] = None,  # TODO: this should potentially not be part of this function.
    max_retry: int = 2,
) -> BaseModel:
    # TODO: fix typing similar to instructor library, ie the return type should be the same as response_model
    # At the very least it should include the string type of the validation error
    """Get the pydantic output from the LLM model with retry logic.

    Invokes `prompt | llm_model.with_structured_output(response_model)` up to
    `max_retry` times; on a ValidationError the error text is fed back into the
    next attempt's message content, and after the last attempt the error is re-raised.

    Args:
        query: User query forwarded into the message content.
        llm_model: Chat model used to produce structured output.
        response_model: Pydantic model class the LLM output is parsed into.
        df_info: Optional dataframe info included in the message content.
        max_retry: Maximum number of attempts (first attempt included).

    Returns:
        An instance of `response_model` produced by the LLM.

    Raises:
        ValidationError: The last validation error, if all attempts fail.
    """
    # NOTE(review): if max_retry < 1 the loop never runs and the final `raise`
    # hits an unbound `last_validation_error` (NameError) — confirm callers
    # always pass max_retry >= 1.
    for attempt in range(max_retry):
        attempt_is_retry = attempt > 0
        # Retry attempts get a retry-flavoured prompt plus the previous validation error.
        prompt = _create_prompt(retry=attempt_is_retry)
        message_content = _create_message_content(
            query, df_info, str(last_validation_error) if attempt_is_retry else None, retry=attempt_is_retry
        )
        try:
            kwargs = {}
            # Only pass `method` parameter if the model's with_structured_output accepts it
            # This is determined by checking the signature of the method
            # By the time this code written, the `method` parameter is supported by
            # model providers like OpenAI, MistralAI, VertexAI, etc.
            try:
                sig = signature(llm_model.with_structured_output)
                if "method" in sig.parameters:
                    kwargs["method"] = "function_calling"  # method 'json_schema' does not work with `pattern` in Field
            except (ValueError, AttributeError):
                # signature() can fail on builtins/C-implemented callables; fall back to no kwargs.
                pass
            pydantic_llm = prompt | llm_model.with_structured_output(response_model, **kwargs)
            return pydantic_llm.invoke(message_content)
        except ValidationError as validation_error:
            # Remember the failure so the next attempt (or the final raise) can surface it.
            last_validation_error = validation_error
    # TODO: should this be shifted to logging so that that one can control what output gets shown (e.g. in public demos)
    raise last_validation_error
Get the pydantic output from the LLM model with retry logic.
_get_pydantic_model
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_pydantic_output.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_pydantic_output.py
Apache-2.0
def _continue_to_pages(state: GraphState) -> list[Send]:
    """Map-reduce logic to build pages in parallel."""
    all_df_metadata = state.all_df_metadata
    sends = []
    # Fan out one `_build_page` task per planned page; each task carries the shared df metadata.
    for page_plan in state.dashboard_plan.pages:
        sends.append(Send(node="_build_page", arg={"page_plan": page_plan, "all_df_metadata": all_df_metadata}))
    return sends
Map-reduce logic to build pages in parallel.
_continue_to_pages
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_graph/dashboard_creation.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_graph/dashboard_creation.py
Apache-2.0
def create(self, model: BaseChatModel, all_df_metadata: AllDfMetadata) -> ComponentResult:
    """Create the component based on its type.

    Args:
        model: The llm used.
        all_df_metadata: Metadata for all available dataframes.

    Returns:
        ComponentResult containing:
            - component: The created component (vm.Card, vm.AgGrid, or vm.Graph)
            - code: Optional string containing the code used to generate the component
              (for Graph type only)
    """
    # NOTE(review): if component_type is none of "Graph"/"AgGrid"/"Card" this
    # falls through and implicitly returns None — confirm component_type is
    # validated upstream.
    try:
        if self.component_type == "Graph":
            # Imported lazily to avoid a circular import with the vizro_ai package.
            from vizro_ai import VizroAI

            vizro_ai = VizroAI(model=model)
            result = vizro_ai.plot(
                df=all_df_metadata.get_df(self.df_name),
                user_input=self.component_description,
                max_debug_retry=2,  # TODO must be flexible
                return_elements=True,
                _minimal_output=True,
            )
            return ComponentResult(
                component=vm.Graph(
                    id=self.component_id,
                    figure=result.get_fig_object(chart_name=self.component_id, data_frame=self.df_name, vizro=True),
                ),
                imports=result._get_imports(vizro=True),
                code=result._get_chart_code(chart_name=self.component_id, vizro=True),
            )
        elif self.component_type == "AgGrid":
            return ComponentResult(
                component=vm.AgGrid(id=self.component_id, figure=dash_ag_grid(data_frame=self.df_name))
            )
        elif self.component_type == "Card":
            card_prompt = f"""
            The Card uses the dcc.Markdown component from Dash as its underlying text component.
            Create a card based on the card description: {self.component_description}.
            """
            result_proxy = _get_pydantic_model(query=card_prompt, llm_model=model, response_model=vm.Card)
            proxy_dict = result_proxy.model_dump()
            # Override whatever id the LLM produced with the planned component id.
            proxy_dict["id"] = self.component_id

            return ComponentResult(component=vm.Card(**proxy_dict))
    except (DebugFailure, ValidationError) as e:
        # Fall back to a placeholder card rather than failing the whole dashboard build.
        logger.warning(
            f"""
            [FALLBACK] Failed to build `Component`: {self.component_id}.
            Reason: {e}
            Relevant prompt: {self.component_description}
            """
        )
        return ComponentResult(
            component=vm.Card(id=self.component_id, text=f"Failed to build component: {self.component_id}")
        )
Create the component based on its type. Args: model: The llm used. all_df_metadata: Metadata for all available dataframes. Returns: ComponentResult containing: - component: The created component (vm.Card, vm.AgGrid, or vm.Graph) - code: Optional string containing the code used to generate the component (for Graph type only)
create
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_response_models/components.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_response_models/components.py
Apache-2.0
def validate_date_picker_column(cls, data: Any):
    """Validate the column for date picker.

    Rejects a 'date_picker' selector when the targeted column is not of a
    datetime dtype; all other selector types pass through unchanged.
    """
    column = data.get("column")
    selector = data.get("selector")
    # hasattr guard: selector may be a plain value without a `.type` attribute.
    if selector and hasattr(selector, "type") and selector.type == "date_picker":
        # `df_schema` comes from the enclosing scope (not a parameter) — presumably the
        # column->dtype mapping of the filtered dataframe; a missing column would raise
        # KeyError here — TODO confirm upstream guarantees the column exists.
        if not pd.api.types.is_datetime64_any_dtype(df_schema[column]):
            raise ValueError(
                f"""
                The column '{column}' is not of datetime type. Selector type 'date_picker' is not allowed.
                Use 'dropdown' instead.
                """
            )
    return data
Validate the column for date picker.
validate_date_picker_column
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_response_models/controls.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_response_models/controls.py
Apache-2.0
def _get_df_info(df: pd.DataFrame) -> tuple[dict[str, str], pd.DataFrame]: """Get the dataframe schema and sample.""" formatted_pairs = dict(df.dtypes.astype(str)) df_sample = df.sample(5, replace=True, random_state=19) return formatted_pairs, df_sample
Get the dataframe schema and sample.
_get_df_info
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_response_models/df_info.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_response_models/df_info.py
Apache-2.0
def _validate_component_id_unique(components_list): """Validate the component id is unique.""" component_ids = [comp.component_id for comp in components_list] duplicates = [id for id, count in Counter(component_ids).items() if count > 1] if duplicates: raise ValueError(f"Component ids must be unique. Duplicated component ids: {duplicates}") return components_list
Validate the component id is unique.
_validate_component_id_unique
python
mckinsey/vizro
vizro-ai/src/vizro_ai/dashboard/_response_models/page.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/_response_models/page.py
Apache-2.0
def _strip_markdown(code_string: str) -> str: """Remove any code block wrappers (markdown or triple quotes).""" wrappers = [("```python\n", "```"), ("```py\n", "```"), ("```\n", "```"), ('"""', '"""'), ("'''", "'''")] for start, end in wrappers: if code_string.startswith(start) and code_string.endswith(end): code_string = code_string[len(start) : -len(end)] break return code_string.strip()
Remove any code block wrappers (markdown or triple quotes).
_strip_markdown
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_response_models.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_response_models.py
Apache-2.0
def _exec_code(code: str, namespace: dict) -> dict: """Execute code and return the local dictionary.""" # Need the global namespace for the imports to work for executed code # Tried just handling it in local scope, ie getting the import statement into ldict, but it didn't work # TODO: ideally in future we properly handle process and namespace separation, or even Docke execution # TODO: this is also important as it can affect unit-tests influencing one another, which is really not good! ldict = {} exec(code, namespace, ldict) # nosec namespace.update(ldict) return namespace
Execute code and return the local dictionary.
_exec_code
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_response_models.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_response_models.py
Apache-2.0
def validator_code(v, info: ValidationInfo):
    """Test the execution of the chart code.

    Runs the safeguard static check, executes the code against a sample of the
    (closure-provided) data_frame, and verifies the result is a go.Figure.
    Returns the code string unchanged if all checks pass.
    """
    # Prepend the already-validated `imports` field so the code can run standalone.
    imports = "\n".join(info.data.get("imports", []))
    code_to_validate = imports + "\n\n" + v
    try:
        _safeguard_check(code_to_validate)
    except Exception as e:
        raise ValueError(
            f"Produced code failed the safeguard validation: <{e}>. Please check the code and try again."
        )
    try:
        # NOTE(review): executes into this module's globals() — state leaks across
        # validations (see _exec_code TODOs).
        namespace = globals()
        namespace = _exec_code(code_to_validate, namespace)
        custom_chart = namespace[f"{CUSTOM_CHART_NAME}"]
        # `data_frame` comes from the enclosing scope; a small replace-sample keeps the smoke test fast.
        fig = custom_chart(data_frame.sample(10, replace=True))
    except Exception as e:
        raise ValueError(
            f"Produced code execution failed the following error: <{e}>. Please check the code and try again, "
            f"alternatively try with a more powerful model."
        )
    assert isinstance(fig, go.Figure), (
        f"Expected chart code to return a plotly go.Figure object, but got {type(fig)}"
    )
    return v
Test the execution of the chart code.
validator_code
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_response_models.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_response_models.py
Apache-2.0
def get_fig_object(self, data_frame: Union[pd.DataFrame, str], chart_name: Optional[str] = None, vizro=True):
    """Execute code to obtain the plotly go.Figure object.

    Be sure to check code to be executed before running.

    Args:
        data_frame: Dataframe or string representation of the dataframe.
        chart_name: Name of the chart function. Defaults to `None`, in which case it
            remains as `custom_chart`.
        vizro: Whether to add decorator to make it `vizro-core` compatible. Defaults to `True`.
    """
    resolved_name = chart_name or CUSTOM_CHART_NAME
    complete_code = self._get_complete_code(chart_name=resolved_name, vizro=vizro)
    # Executes into module globals so imports made by the chart code resolve later.
    namespace = _exec_code(complete_code, globals())
    chart_function = namespace[resolved_name]
    return chart_function(data_frame)
Execute code to obtain the plotly go.Figure object. Be sure to check code to be executed before running. Args: data_frame: Dataframe or string representation of the dataframe. chart_name: Name of the chart function. Defaults to `None`, in which case it remains as `custom_chart`. vizro: Whether to add decorator to make it `vizro-core` compatible. Defaults to `True`.
get_fig_object
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_response_models.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_response_models.py
Apache-2.0
def __new__(cls, data_frame: pd.DataFrame, chart_plan: type[BaseChartPlan] = ChartPlan) -> type[BaseChartPlan]:
    """Creates a chart plan model with additional validation.

    Args:
        data_frame: DataFrame to use for validation
        chart_plan: Chart plan model to run extended validation against. Defaults to ChartPlan.

    Returns:
        Chart plan model with additional validation
    """
    # Attach an execution-based validator for chart_code, bound to the given dataframe.
    extra_validators = {
        "validator1": field_validator("chart_code")(_test_execute_chart_code(data_frame)),
    }
    return create_model(
        "ChartPlanDynamic",
        __base__=chart_plan,
        __validators__=extra_validators,
    )
Creates a chart plan model with additional validation. Args: data_frame: DataFrame to use for validation chart_plan: Chart plan model to run extended validation against. Defaults to ChartPlan. Returns: Chart plan model with additional validation
__new__
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_response_models.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_response_models.py
Apache-2.0
def _check_data_handling(node: ast.stmt):
    """Check usage of unsafe data file loading and saving.

    Raises:
        Exception: If any redlisted data-handling method appears in the statement.
    """
    code = ast.unparse(node)
    # Plain substring scan against the redlist — intentionally conservative.
    found = [method for method in REDLISTED_DATA_HANDLING if method in code]
    if found:
        methods_str = ", ".join(found)
        raise Exception(f"Unsafe loading or saving of data files is used in code: {methods_str} in line {code}")
Check usage of unsafe data file loading and saving.
_check_data_handling
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
Apache-2.0
def _check_class_method_usage(node: ast.stmt):
    """Check usage of unsafe builtin in code.

    Raises:
        Exception: If any redlisted class method appears in the statement.
    """
    code = ast.unparse(node)
    # Substring match against the redlisted class methods — conservative by design.
    flagged = [funct for funct in REDLISTED_CLASS_METHODS if funct in code]
    if flagged:
        functions_str = ", ".join(flagged)
        raise Exception(
            f"Unsafe methods {functions_str} are used in generated code line: {code} and cannot be executed."
        )
Check usage of unsafe builtin in code.
_check_class_method_usage
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
Apache-2.0
def _check_builtin_function_usage(node: ast.stmt):
    """Check usage of unsafe builtin functions.

    Raises:
        Exception: If a non-whitelisted builtin name occurs (as a whole word) in the statement.
    """
    code = ast.unparse(node)
    non_whitelisted_builtins = []
    # Word-boundary regex avoids flagging builtins that merely appear inside longer identifiers.
    for builtin_name in vars(builtins):
        if builtin_name in WHITELISTED_BUILTINS:
            continue
        if re.search(r"\b" + re.escape(builtin_name) + r"\b", code):
            non_whitelisted_builtins.append(builtin_name)
    if non_whitelisted_builtins:
        builtin_str = ", ".join(non_whitelisted_builtins)
        raise Exception(
            f"Unsafe builtin functions {builtin_str} are used in generated code line: {code} and cannot be executed. If"
            f" you require a builtin package, reach out to the Vizro team."
        )
Check usage of unsafe builtin functions.
_check_builtin_function_usage
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
Apache-2.0
def _safeguard_check(code: str):
    """Perform safeguard checks to avoid execution of malicious code.

    Parses `code` with ast (translating parse failures into ValueError) and then
    runs the node-level safety analysis over every top-level statement.
    """
    try:
        parsed = ast.parse(code)
    except (SyntaxError, UnicodeDecodeError):
        raise ValueError(f"Generated code is not valid: {code}")
    except OverflowError:
        raise ValueError(f"Generated code is too long to be parsed by ast: {code}")
    except Exception as e:
        # Catch-all so any unexpected parser failure still surfaces as a ValueError.
        raise ValueError(f"Generate code {code} cannot be parsed by ast due to error: {e}")
    else:
        for statement in parsed.body:
            _analyze_node(statement)
Perform safeguard checks to avoid execution of malicious code.
_safeguard_check
python
mckinsey/vizro
vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/plot/_utils/_safeguard.py
Apache-2.0
def _get_df_info(df: pd.DataFrame, n_sample: int = 5) -> tuple[str, str]:
    """Get the dataframe schema and head info as string.

    Args:
        df: Dataframe to describe.
        n_sample: Number of rows to include in the markdown sample. Defaults to 5.

    Returns:
        A tuple of (newline-joined "column: dtype" schema string, markdown table of sampled rows).
    """
    schema_lines = [f"{column}: {dtype}" for column, dtype in df.dtypes.items()]
    # replace=True tolerates frames smaller than n_sample; fixed seed keeps output deterministic.
    sample_markdown = df.sample(n_sample, replace=True, random_state=19).to_markdown()
    return "\n".join(schema_lines), sample_markdown
Get the dataframe schema and head info as string.
_get_df_info
python
mckinsey/vizro
vizro-ai/src/vizro_ai/utils/helper.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/utils/helper.py
Apache-2.0
def logic(  # noqa: PLR0912, PLR0913, PLR0915
    dashboard,
    model_name,
    dash_duo,
    prompt_tier,
    prompt_name,
    prompt_text,
    config: dict,
):
    """Calculates all separate scores. Creates csv report.

    Attributes:
        dashboard: VizroAI generated dashboard
        model_name: GenAI model name
        dash_duo: dash_duo fixture
        prompt_tier: complexity of the prompt
        prompt_name: short prompt description
        prompt_text: prompt text
        config: json config of the expected dashboard
    """
    # TODO: Add layout score
    report_dir = "tests/e2e/reports"
    os.makedirs(report_dir, exist_ok=True)

    # Score 1: does the generated dashboard build and serve at all?
    try:
        app = Vizro().build(dashboard).dash
        dash_duo.start_server(app)
        app_started = 1
        app_started_report = "App started!"
    except Exception as e:
        app_started = 0
        app_started_report = "App didn't start!"
        print(f"App start exception: {e}")  # noqa: T201

    # Score 2: browser console must be free of error logs.
    try:
        assert dash_duo.get_logs() == []
        no_browser_console_errors = 1
        no_browser_console_errors_report = "No error logs in browser console!"
    except AssertionError as e:
        no_browser_console_errors = 0
        no_browser_console_errors_report = "Error logs in browser console found!"
        print(f"Browser console exception: {e}")  # noqa: T201

    Vizro._reset()

    # Environment labels for the report; fall back to local markers outside CI.
    try:
        vizro_type = os.environ["VIZRO_TYPE"]
        branch = os.environ["BRANCH"]
        python_version = os.environ["PYTHON_VERSION"]
    except KeyError:
        vizro_type = "local_env"
        branch = "local"
        python_version = "local"

    # NOTE(review): `[1 if ... else 0][0]` is an immediately-indexed single-element
    # list — equivalent to the bare conditional; kept as-is throughout.
    pages_exist = [1 if dashboard.pages else 0][0]
    pages_exist_report = bool(pages_exist)
    pages_num = [1 if len(dashboard.pages) == len(config["pages"]) else 0]
    pages_num_report = [f"{len(config['pages'])} page(s) for dashboard is {bool(pages_num[0])}"]

    # Per-page component count vs expected config; IndexError means the page is missing.
    components_num = []
    components_num_report = []
    for page in range(len(config["pages"])):
        try:
            components = [
                1 if len(dashboard.pages[page].components) == len(config["pages"][page]["components"]) else 0
            ][0]
        except IndexError:
            components = 0
        components_num.append(components)
        components_num_report.append(
            f"{len(config['pages'][page]['components'])} component(s) for page {page} is {bool(components)}"
        )

    # Per-page control count vs expected config.
    controls_num = []
    controls_num_report = []
    for page in range(len(config["pages"])):
        try:
            controls = [1 if len(dashboard.pages[page].controls) == len(config["pages"][page]["controls"]) else 0][0]
        except IndexError:
            controls = 0
        controls_num.append(controls)
        controls_num_report.append(
            f"{len(config['pages'][page]['controls'])} control(s) for page {page} is {bool(controls)}"
        )

    # Per-page component *type* frequencies vs expected config.
    components_types_names = []
    components_types_names_report = []
    try:
        for page in range(len(config["pages"])):
            components_dashboard = Counter([component.type for component in dashboard.pages[page].components])
            components_config = Counter([component.type for component in config["pages"][page]["components"]])
            for component_name in components_config:
                components_types = [
                    1 if components_config[component_name] == components_dashboard[component_name] else 0
                ][0]
                components_types_names.append(components_types)
                components_types_names_report.append(
                    f"{components_config[component_name]} components_type(s) {component_name} "
                    f"for page {page} is {bool(components_types)}"
                )
    except IndexError:
        components_types = 0
        components_types_names.append(components_types)
        components_types_names_report.append("page or component does not exists")

    # Per-page control *type* frequencies vs expected config.
    controls_types_names = []
    controls_types_names_report = []
    try:
        for page in range(len(config["pages"])):
            controls_dashboard = Counter([control.type for control in dashboard.pages[page].controls])
            controls_config = Counter([control.type for control in config["pages"][page]["controls"]])
            for control_name in controls_config:
                controls_types = [1 if controls_config[control_name] == controls_dashboard[control_name] else 0][0]
                controls_types_names.append(controls_types)
                controls_types_names_report.append(
                    f"{controls_config[control_name]} controls_type(s) {control_name} "
                    f"for page {page} is {bool(controls_types)}"
                )
    except IndexError:
        controls_types = 0
        controls_types_names.append(controls_types)
        controls_types_names_report.append("page or control does not exists")

    # Every separate score has its own weight.
    scores = [
        {"score_name": "app_started_score", "weight": 0.4, "score": app_started},
        {"score_name": "no_browser_console_errors_score", "weight": 0.1, "score": no_browser_console_errors},
        {"score_name": "pages_score", "weight": 0.3, "score": pages_exist},
        {"score_name": "pages_number", "weight": 0.2, "score": score_calculator(metrics_score=pages_num)},
        {"score_name": "components_score", "weight": 0.2, "score": score_calculator(metrics_score=components_num)},
        {
            "score_name": "component_types_score",
            "weight": 0.2,
            "score": score_calculator(metrics_score=components_types_names),
        },
        {"score_name": "controls_score", "weight": 0.2, "score": score_calculator(metrics_score=controls_num)},
        {
            "score_name": "controls_types_score",
            "weight": 0.2,
            "score": score_calculator(metrics_score=controls_types_names),
        },
    ]
    scores_values = np.array([score["score"] for score in scores])
    weights = np.array([score["weight"] for score in scores])
    weighted_score = np.average(scores_values, weights=weights)

    # csv report creation
    data_rows = [
        datetime.now(),
        vizro_type,
        branch,
        python_version,
        model_name,
        prompt_tier,
        prompt_name,
        weighted_score,
    ]
    data_rows.extend(score["score"] for score in scores)
    data_rows.extend([prompt_text])

    # Outer append-mode open creates the file if it does not exist yet, so the
    # inner "r+" open (read to detect a header, then append rows) cannot fail.
    with open(f"{report_dir}/report_model_{model_name}_{vizro_type}.csv", "a", newline=""):
        with open(f"{report_dir}/report_model_{model_name}_{vizro_type}.csv", "r+", newline="") as csvfile:
            writer = csv.writer(csvfile, delimiter=",")
            first_line = csvfile.readline()
            if not first_line:
                # Empty file: write the header once before the first data row.
                header_rows = [
                    "timestamp",
                    "vizro_type",
                    "branch",
                    "python_version",
                    "model",
                    "prompt_tier",
                    "prompt_name",
                    "weighted_score",
                ]
                header_rows.extend(score["score_name"] for score in scores)
                header_rows.extend(["prompt_text"])
                writer.writerow(header_rows)
            writer.writerow(data_rows)

    # Readable report for the console output
    print(f"App started: {app_started_report}")  # noqa: T201
    print(f"Console errors: {no_browser_console_errors_report}")  # noqa: T201
    print(f"Pages exists: {pages_exist_report}")  # noqa: T201
    print(f"Correct pages number: {pages_num_report}")  # noqa: T201
    print(f"Components: {components_num_report}")  # noqa: T201
    print(f"Correct controls number: {controls_num_report}")  # noqa: T201
    print(f"Correct components types: {components_types_names_report}")  # noqa: T201
    print(f"Correct controls types: {controls_types_names_report}")  # noqa: T201
    print(f"Weighted score: {weighted_score}")  # noqa: T201
    print(f"Scores: {scores}")  # noqa: T201
Calculates all separate scores. Creates csv report. Attributes: dashboard: VizroAI generated dashboard model_name: GenAI model name dash_duo: dash_duo fixture prompt_tier: complexity of the prompt prompt_name: short prompt description prompt_text: prompt text config: json config of the expected dashboard
logic
python
mckinsey/vizro
vizro-ai/tests/e2e/test_dashboard.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/tests/e2e/test_dashboard.py
Apache-2.0
def test_chart_plan_factory_validation_failure(sample_df):
    """Test factory validation fails with invalid code."""
    validated_model = ChartPlanFactory(data_frame=sample_df, chart_plan=BaseChartPlan)
    bad_kwargs = {
        "chart_type": "scatter",
        "imports": ["import plotly.express as px"],
        "chart_code": "invalid_code",
    }
    with pytest.raises(ValueError, match="The chart code must be wrapped in a function named"):
        validated_model(**bad_kwargs)
Test factory validation fails with invalid code.
test_chart_plan_factory_validation_failure
python
mckinsey/vizro
vizro-ai/tests/unit/vizro-ai/plot/test_response_model.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/tests/unit/vizro-ai/plot/test_response_model.py
Apache-2.0
def test_chart_plan_factory_preserves_fields(sample_df, valid_chart_code):
    """Test factory preserves all fields from base class."""
    model_cls = ChartPlanFactory(data_frame=sample_df, chart_plan=ChartPlan)
    expected = {
        "chart_type": "scatter",
        "imports": ["import plotly.express as px"],
        "chart_code": valid_chart_code,
        "chart_insights": "Test insights",
        "code_explanation": "Test explanation",
    }
    instance = model_cls(**expected)

    # Check all fields are preserved
    for field_name, field_value in expected.items():
        assert getattr(instance, field_name) == field_value
Test factory preserves all fields from base class.
test_chart_plan_factory_preserves_fields
python
mckinsey/vizro
vizro-ai/tests/unit/vizro-ai/plot/test_response_model.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/tests/unit/vizro-ai/plot/test_response_model.py
Apache-2.0
def test_base_chart_plan_no_explanatory_fields(valid_chart_code):
    """Test BaseChartPlan doesn't have explanatory fields."""
    instance = BaseChartPlan(
        chart_type="scatter", imports=["import plotly.express as px"], chart_code=valid_chart_code
    )
    # Neither explanatory attribute should exist on the base model.
    for missing_attr in ("chart_insights", "code_explanation"):
        with pytest.raises(AttributeError):
            getattr(instance, missing_attr)
Test BaseChartPlan doesn't have explanatory fields.
test_base_chart_plan_no_explanatory_fields
python
mckinsey/vizro
vizro-ai/tests/unit/vizro-ai/plot/test_response_model.py
https://github.com/mckinsey/vizro/blob/master/vizro-ai/tests/unit/vizro-ai/plot/test_response_model.py
Apache-2.0
def make_subheading(label, link):
    """Creates a subheading with a link to the docs."""
    # Spaces are stripped so the slug is a valid id for the tooltip target.
    slug = label.replace(" ", "")
    doc_anchor = html.A(
        html.I(className="bi bi-book h4 ms-2"),
        href=f"{DBC_DOCS}{link}",
        target="_blank",
        id=f"tooltip_target_{slug}",
    )
    heading = html.H3(html.Span([label, doc_anchor]))
    tooltip = dbc.Tooltip(f"See {label} documentation", target=f"tooltip_target_{slug}")
    return html.Div([heading, tooltip], className="mt-3")
Creates a subheading with a link to the docs.
make_subheading
python
mckinsey/vizro
vizro-core/examples/bootstrap/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/bootstrap/app.py
Apache-2.0
def scatter_with_line(data_frame, x, y, hline=None, title=None):
    """Custom scatter chart based on px.

    Args:
        data_frame: Data to plot.
        x: Column name for the x-axis.
        y: Column name for the y-axis.
        hline: Optional y-value for a horizontal reference line. Defaults to None (no line).
        title: Optional chart title.

    Returns:
        The plotly figure produced by px.scatter, optionally with a horizontal line.
    """
    fig = px.scatter(data_frame=data_frame, x=x, y=y, title=title)
    # Previously add_hline was called unconditionally, so the default hline=None
    # raised inside plotly; only draw the line when a value is supplied.
    if hline is not None:
        fig.add_hline(y=hline, line_color="orange")
    return fig
Custom scatter chart based on px.
scatter_with_line
python
mckinsey/vizro
vizro-core/examples/dev/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/dev/app.py
Apache-2.0
def waterfall(data_frame, measure, x, y, text, title=None):
    """Custom waterfall chart based on go."""
    # Fixed brand colors for decreasing / increasing / total bars.
    marker_styles = {
        "decreasing": {"marker": {"color": "#ff5267"}},
        "increasing": {"marker": {"color": "#08bdba"}},
        "totals": {"marker": {"color": "#00b4ff"}},
    }
    trace = go.Waterfall(
        measure=data_frame[measure],
        x=data_frame[x],
        y=data_frame[y],
        text=data_frame[text],
        **marker_styles,
    )
    fig = go.Figure()
    fig.add_traces(trace)
    fig.update_layout(title=title)
    return fig
Custom waterfall chart based on go.
waterfall
python
mckinsey/vizro
vizro-core/examples/dev/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/dev/app.py
Apache-2.0
def my_custom_table(data_frame=None, chosen_columns: Optional[list[str]] = None):
    """Custom table with added logic to filter on chosen columns.

    Args:
        data_frame: Data to display in the table.
        chosen_columns: Columns to show. Defaults to None, which now means all columns
            of data_frame (previously the None default crashed the column comprehension).

    Returns:
        A dash_table.DataTable showing the chosen columns.
    """
    # Backward-compatible fix: the declared default None used to raise TypeError
    # when iterated; treat it as "show every column".
    if chosen_columns is None:
        chosen_columns = list(data_frame.columns)
    columns = [{"name": i, "id": i} for i in chosen_columns]
    defaults = {
        "style_as_list_view": True,
        "style_data": {"border_bottom": "1px solid var(--border-subtleAlpha01)", "height": "40px"},
        "style_header": {
            "border_bottom": "1px solid var(--stateOverlays-selectedHover)",
            "border_top": "None",
            "height": "32px",
        },
    }
    return dash_table.DataTable(data=data_frame.to_dict("records"), columns=columns, **defaults)
Custom table with added logic to filter on chosen columns.
my_custom_table
python
mckinsey/vizro
vizro-core/examples/dev/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/dev/app.py
Apache-2.0
def build(self):
    """Extend existing component by calling the super build and update properties."""
    built = super().build()
    # Tweak the underlying slider component: handles may not cross, tooltip always shown below.
    slider = built[self.id]
    slider.allowCross = False
    slider.tooltip = {"always_visible": True, "placement": "bottom"}
    return built
Extend existing component by calling the super build and update properties.
build
python
mckinsey/vizro
vizro-core/examples/dev/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/dev/app.py
Apache-2.0
def multiple_cards(data_frame: pd.DataFrame, n_rows: Optional[int] = 1) -> html.Div:
    """Creates a list with a variable number of `vm.Card` components from the provided data_frame.

    Args:
        data_frame: Data frame containing the data.
        n_rows: Number of rows to use from the data_frame. Defaults to 1.

    Returns:
        html.Div with a list of dbc.Card objects generated from the data.
    """
    cards = []
    # One card per row of the "text" column, numbered from 1.
    for index, text in enumerate(data_frame.head(n_rows)["text"], 1):
        cards.append(dbc.Card(dcc.Markdown(f"### Card #{index}\n{text}")))
    return html.Div(cards, className="multiple-cards-container")
Creates a list with a variable number of `vm.Card` components from the provided data_frame. Args: data_frame: Data frame containing the data. n_rows: Number of rows to use from the data_frame. Defaults to 1. Returns: html.Div with a list of dbc.Card objects generated from the data.
multiple_cards
python
mckinsey/vizro
vizro-core/examples/dev/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/dev/app.py
Apache-2.0
def bar_mean(data_frame, x, y):
    """Creates a custom bar chart with aggregated data (mean)."""
    # Collapse the data to one mean value of `y` per `x` category before plotting.
    summary = data_frame.groupby(x, as_index=False).agg({y: "mean"})
    fig = px.bar(summary, x=x, y=y, labels={"tip": "Average Tip ($)"})
    # Narrower bars than the plotly default.
    fig.update_traces(width=0.6)
    return fig
Creates a custom bar chart with aggregated data (mean).
bar_mean
python
mckinsey/vizro
vizro-core/examples/tutorial/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/tutorial/app.py
Apache-2.0
def make_chart_card(page: Union[vm.Page, IncompletePage]) -> vm.Card: """Makes a card with svg icon, linked to the right page if page is complete. Args: page: page to make card for Returns: card with svg icon, linked to the right page if page is complete. """ # There's one SVG per chart title, so that e.g. pages distribution-butterfly and deviation-butterfly, which both # have title "Butterfly", correspond to butterfly.svg. # Incomplete pages have page.path = "" so won't be linked to here. svg_name = page.title.lower().replace(" ", "-") return vm.Card( text=f""" ![](assets/images/charts/{svg_name}.svg#chart-icon) #### {page.title} """, href=page.path, )
Makes a card with svg icon, linked to the right page if page is complete. Args: page: page to make card for Returns: card with svg icon, linked to the right page if page is complete.
make_chart_card
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/app.py
Apache-2.0
def make_homepage_container(chart_group: ChartGroup) -> vm.Container: """Makes a container with cards for each completed and incomplete chart in chart_group. Args: chart_group: group of charts to make container for. Returns: container with cards for each chart in chart_group. """ # Pages are sorted in title's alphabetical order and deduplicated so that e.g. pages distribution-butterfly and # deviation-butterfly, which both have title "Butterfly", correspond to a single card. return vm.Container( title=chart_group.name, layout=vm.Grid(grid=[[0, 1, 1]], col_gap="40px"), components=[ Markdown(text=chart_group.intro_text, classname="intro-text"), vm.Container( layout=vm.Flex(direction="row", wrap=True), components=[ make_chart_card(page) for page in sorted( _remove_duplicates(chart_group.pages + chart_group.incomplete_pages), key=lambda page: page.title, ) ], ), ], )
Makes a container with cards for each completed and incomplete chart in chart_group. Args: chart_group: group of charts to make container for. Returns: container with cards for each chart in chart_group.
make_homepage_container
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/app.py
Apache-2.0
def make_navlink(chart_group: ChartGroup) -> vm.NavLink:
    """Makes a navlink with icon and links to every complete page within chart_group.

    Args:
        chart_group: chart_group to make a navlink for.

    Returns:
        navlink for chart_group.
    """
    # Pages are sorted in alphabetical order within each chart group.
    ordered_pages = sorted(chart_group.pages, key=lambda chart_page: chart_page.title)
    return vm.NavLink(
        label=chart_group.name,
        pages={chart_group.name: [chart_page.id for chart_page in ordered_pages]},
        icon=chart_group.icon,
    )
Makes a navlink with icon and links to every complete page within chart_group. Args: chart_group: chart_group to make a navlink for. Returns: navlink for chart_group.
make_navlink
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/app.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/app.py
Apache-2.0
def butterfly(data_frame: pd.DataFrame, **kwargs) -> go.Figure:
    """Creates a butterfly chart based on px.bar.

    A butterfly chart is a type of bar chart where two sets of bars are displayed back-to-back, often used to compare
    two sets of data.

    Args:
        data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/.
        **kwargs: Keyword arguments to pass into px.bar (e.g. x, y, labels).
            See https://plotly.com/python-api-reference/generated/plotly.express.bar.html.

    Returns:
        go.Figure: Butterfly chart.
    """
    fig = px.bar(data_frame, **kwargs)

    # px.bar sets orientation on the traces: "h" means bars extend along x, otherwise along y.
    orientation = fig.data[0].orientation
    x_or_y = "x" if orientation == "h" else "y"

    # Create new x or y axis with scale reversed (so going from 0 at the midpoint outwards) to do back-to-back bars.
    # NOTE(review): selector=1 moves only the second trace, so this presumably expects exactly two
    # traces (one per side) — confirm against callers.
    fig.update_traces({f"{x_or_y}axis": f"{x_or_y}2"}, selector=1)
    fig.update_layout({f"{x_or_y}axis2": fig.layout[f"{x_or_y}axis"]})
    # Left half reversed towards the midpoint, right half growing away from it.
    fig.update_layout(
        {f"{x_or_y}axis": {"autorange": "reversed", "domain": [0, 0.5]}, f"{x_or_y}axis2": {"domain": [0.5, 1]}}
    )

    # Grey midline visually separates the two halves.
    if orientation == "h":
        fig.add_vline(x=0, line_width=2, line_color="grey")
    else:
        fig.add_hline(y=0, line_width=2, line_color="grey")
    return fig
Creates a butterfly chart based on px.bar. A butterfly chart is a type of bar chart where two sets of bars are displayed back-to-back, often used to compare two sets of data. Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. **kwargs: Keyword arguments to pass into px.bar (e.g. x, y, labels). See https://plotly.com/python-api-reference/generated/plotly.express.bar.html. Returns: go.Figure: Butterfly chart.
butterfly
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def sankey(data_frame: pd.DataFrame, source: str, target: str, value: str, labels: list[str]) -> go.Figure:
    """Creates a Sankey chart based on go.Sankey.

    A Sankey chart is a flow diagram in which arrow width is proportional to the flow rate, used to
    visualize how resources or data move between stages or categories.

    For detailed information on additional parameters and customization, refer to the Plotly documentation:
    https://plotly.com/python/reference/sankey/

    Args:
        data_frame: DataFrame for the chart.
        source: The name of the column in data_frame for source nodes.
        target: The name of the column in data_frame for target nodes.
        value: The name of the column in data_frame for the values representing the flow between nodes.
        labels: A list of labels for the nodes.

    Returns:
        go.Figure: Sankey chart.
    """
    node_style = {"pad": 16, "thickness": 16, "label": labels}
    link_style = {
        "source": data_frame[source],
        "target": data_frame[target],
        "value": data_frame[value],
        "label": labels,
        "color": "rgba(205, 209, 228, 0.4)",
    }
    return go.Figure(
        data=go.Sankey(node=node_style, link=link_style),
        layout={"barmode": "relative"},
    )
Creates a Sankey chart based on go.Sankey. A Sankey chart is a type of flow diagram where the width of the arrows is proportional to the flow rate. It is used to visualize the flow of resources or data between different stages or categories. For detailed information on additional parameters and customization, refer to the Plotly documentation: https://plotly.com/python/reference/sankey/ Args: data_frame: DataFrame for the chart. source: The name of the column in data_frame for source nodes. target: The name of the column in data_frame for target nodes. value: The name of the column in data_frame for the values representing the flow between nodes. labels: A list of labels for the nodes. Returns: go.Figure: Sankey chart.
sankey
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def column_and_line( data_frame: pd.DataFrame, x: Union[str, pd.Series, list[str], list[pd.Series]], y_column: Union[str, pd.Series, list[str], list[pd.Series]], y_line: Union[str, pd.Series, list[str], list[pd.Series]], ) -> go.Figure: """Creates a combined column and line chart based on px.bar and px.line. This function generates a chart with a bar graph for one variable (y-axis 1) and a line graph for another variable (y-axis 2), sharing the same x-axis. The y-axes for the bar and line graphs are synchronized and overlaid. Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. x: Either a name of a column in data_frame, or a pandas Series or array_like object. y_column: Either a name of a column in data_frame, or a pandas Series or array_like object. y_line: Either a name of a column in data_frame, or a pandas Series or array_like object. Returns: go.Figure: Combined column and line chart. """ # We use px.bar and px.line so that we get the plotly express hoverdata, axes titles etc. Bar is used arbitrarily # selected as the "base" plot and then line added on top of it. This means manually incrementing # color_discrete_sequence for the line plot so that the colors are not the same for bar and line. bar = px.bar(data_frame, x=x, y=y_column) fig = make_subplots(figure=bar, specs=[[{"secondary_y": True}]]) line = px.line( data_frame, x=x, y=y_line, markers=True, color_discrete_sequence=fig.layout.template.layout.colorway[len(bar.data) :], ) for trace in line.data: fig.add_trace(trace, secondary_y=True) fig.update_layout(yaxis2={"tickmode": "sync", "overlaying": "y", "title": line.layout.yaxis.title}) return fig
Creates a combined column and line chart based on px.bar and px.line. This function generates a chart with a bar graph for one variable (y-axis 1) and a line graph for another variable (y-axis 2), sharing the same x-axis. The y-axes for the bar and line graphs are synchronized and overlaid. Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. x: Either a name of a column in data_frame, or a pandas Series or array_like object. y_column: Either a name of a column in data_frame, or a pandas Series or array_like object. y_line: Either a name of a column in data_frame, or a pandas Series or array_like object. Returns: go.Figure: Combined column and line chart.
column_and_line
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def categorical_column(data_frame: pd.DataFrame, **kwargs) -> go.Figure:
    """Creates categorical bar chart based on px.bar.

    Args:
        data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/.
        **kwargs: Keyword arguments to pass into px.bar (e.g. x, y, labels).
            See https://plotly.com/python-api-reference/generated/plotly.express.bar.html.

    Returns:
        go.Figure: Categorical column chart.
    """
    figure = px.bar(data_frame, **kwargs)
    # Force a category axis so ticks stay aligned with bars even when the x values
    # are numbers (e.g. years).
    figure.update_xaxes(type="category")
    return figure
Creates categorical bar chart based on px.bar. Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. **kwargs: Keyword arguments to pass into px.bar (e.g. x, y, labels). See https://plotly.com/python-api-reference/generated/plotly.express.bar.html. Returns: go.Figure: Categorical column chart.
categorical_column
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def waterfall(data_frame: pd.DataFrame, x: str, y: str, measure: str) -> go.Figure:
    """Creates a waterfall chart based on go.Waterfall.

    A Waterfall chart visually breaks down the cumulative effect of sequential positive and negative values,
    showing how each value contributes to the total.

    For additional parameters and customization options, see the Plotly documentation:
    https://plotly.com/python/reference/waterfall/

    Args:
        data_frame: DataFrame for the chart.
        x: Column name in data_frame for x-axis values.
        y: Column name in data_frame for y-axis values.
        measure: Column name in data_frame whose values specify the type of each bar, which can be
            "relative", "total", or "absolute".

    Returns:
        go.Figure: Waterfall chart.
    """
    # NOTE(review): `measure` was annotated list[str], but it is indexed as a single column
    # (data_frame[measure]) just like x and y, so a column name (str) appears to be expected —
    # confirm against callers.
    return go.Figure(
        data=go.Waterfall(x=data_frame[x], y=data_frame[y], measure=data_frame[measure]),
        layout={"showlegend": False},
    )
Creates a waterfall chart based on go.Waterfall. A Waterfall chart visually breaks down the cumulative effect of sequential positive and negative values, showing how each value contributes to the total. For additional parameters and customization options, see the Plotly documentation: https://plotly.com/python/reference/waterfall/ Args: data_frame: TDataFrame for the chart. x: Column name in data_frame for x-axis values. y: Column name in data_frame for y-axis values. measure: List specifying the type of each bar, can be "relative", "total", or "absolute". Returns: go.Figure: Waterfall chart.
waterfall
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def radar(data_frame: pd.DataFrame, **kwargs) -> go.Figure:
    """Creates a radar chart based on px.line_polar.

    A radar chart visualizes three or more variables on axes radiating from a shared central point.

    Args:
        data_frame: DataFrame for the chart.
        **kwargs: Keyword arguments to pass into px.line_polar (e.g. r, theta).
            See https://plotly.com/python-api-reference/generated/plotly.express.line_polar.html.

    Returns:
        go.Figure: A Plotly Figure object of the radar chart.
    """
    figure = px.line_polar(data_frame, **kwargs)
    # Close and shade the polygon so each trace reads as a filled area.
    figure.update_traces(fill="toself")
    return figure
Creates a radar chart based on px.line_polar. A radar chart is a type of data visualization in which there are three or more variables represented on axes that originate from the same central point. Args: data_frame: DataFrame for the chart. **kwargs: Keyword arguments to pass into px.line_polar (e.g. r, theta). See https://plotly.com/python-api-reference/generated/plotly.express.line_polar.html. Returns: go.Figure: A Plotly Figure object of the radar chart.
radar
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def dumbbell(data_frame: pd.DataFrame, **kwargs) -> go.Figure:
    """Creates a dumbbell chart based on px.scatter.

    A dumbbell plot is a type of dot plot where the points, displaying different groups, are connected with a
    straight line. They are ideal for illustrating differences or gaps between two points.

    Inspired by: https://community.plotly.com/t/how-to-make-dumbbell-plots-in-plotly-python/47762

    Args:
        data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/.
        **kwargs: Keyword arguments to pass into px.scatter (e.g. x, y, labels).
            See https://plotly.com/python-api-reference/generated/plotly.scatter.html.

    Returns:
        go.Figure: Dumbbell chart.
    """
    fig = px.scatter(data_frame, **kwargs)

    # "h" orientation means the value axis is x; otherwise it is y.
    orientation = fig.data[0].orientation
    x_or_y = "x" if orientation == "h" else "y"
    y_or_x = "y" if orientation == "h" else "x"

    # Add lines between every pair of points.
    # NOTE(review): pairs are taken from fig.data[0] and fig.data[1], so this presumably expects
    # exactly two traces (one per group) — confirm.
    for x_or_y_0, x_or_y_1, y_or_x_0, y_or_x_1 in zip(
        fig.data[0][x_or_y],
        fig.data[1][x_or_y],
        fig.data[0][y_or_x],
        fig.data[1][y_or_x],
    ):
        fig.add_shape(
            **{f"{x_or_y}0": x_or_y_0, f"{x_or_y}1": x_or_y_1, f"{y_or_x}0": y_or_x_0, f"{y_or_x}1": y_or_x_1},
            type="line",
            layer="below",
            line_color="grey",
            line_width=3,
        )

    # Bigger dots so the dumbbell "ends" stand out against the connecting lines.
    fig.update_traces(marker_size=12)

    return fig
Creates a dumbbell chart based on px.scatter. A dumbbell plot is a type of dot plot where the points, displaying different groups, are connected with a straight line. They are ideal for illustrating differences or gaps between two points. Inspired by: https://community.plotly.com/t/how-to-make-dumbbell-plots-in-plotly-python/47762 Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. **kwargs: Keyword arguments to pass into px.scatter (e.g. x, y, labels). See https://plotly.com/python-api-reference/generated/plotly.scatter.html. Returns: go.Figure: Dumbbell chart.
dumbbell
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def diverging_stacked_bar(data_frame: pd.DataFrame, **kwargs) -> go.Figure:
    """Creates a diverging stacked bar chart based on px.bar.

    This type of chart is a variant of the standard stacked bar chart, with bars aligned on a central baseline to
    show both positive and negative values. Each bar is segmented to represent different categories.

    This function is not suitable for diverging stacked bar charts that include a neutral category. The first half of
    bars plotted are assumed to be negative ("Disagree") and the second half are assumed to be positive ("Agree").

    Inspired by: https://community.plotly.com/t/need-help-in-making-diverging-stacked-bar-charts/34023

    Args:
        data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/.
        **kwargs: Keyword arguments to pass into px.bar (e.g. x, y, labels).
            See https://plotly.com/python-api-reference/generated/plotly.express.bar.html.

    Returns:
        go.Figure: Diverging stacked bar chart.
    """
    fig = px.bar(data_frame, **kwargs)

    # Fix legend position according to the order of traces. This ensures that "Strongly disagree" comes before
    # "Disagree".
    for i, trace in enumerate(fig.data):
        trace.update(legendrank=i)

    if "color_discrete_sequence" not in kwargs and "color_discrete_map" not in kwargs:
        # Make a discrete diverging colorscale by sampling the right number of colors.
        # Need to explicitly convert colorscale to list of lists due to plotly bug/inconsistency:
        # https://github.com/plotly/plotly.py/issues/4808
        colorscale = [list(x) for x in fig.layout.template.layout.colorscale.diverging]
        colors = px.colors.sample_colorscale(colorscale, len(fig.data), 0.2, 0.8)
        for trace, color in zip(fig.data, colors):
            trace.update(marker_color=color)

    # Plotly draws traces in order they appear in fig.data, starting from x=0 and then stacking outwards.
    # We need negative traces to be ordered so that "Disagree" comes before "Strongly disagree", so reverse the
    # order of first half of traces.
    mutable_traces = list(fig.data)
    mutable_traces[: len(fig.data) // 2] = reversed(fig.data[: len(fig.data) // 2])
    fig.data = mutable_traces

    # Create new x or y axis with scale reversed (so going from 0 at the midpoint outwards) to do negative bars.
    # "h" orientation means the value axis is x; otherwise it is y.
    orientation = fig.data[0].orientation
    x_or_y = "x" if orientation == "h" else "y"

    # The second (positive) half of traces is moved onto the new axis.
    for trace_idx in range(len(fig.data) // 2, len(fig.data)):
        fig.update_traces({f"{x_or_y}axis": f"{x_or_y}2"}, selector=trace_idx)

    # Add ticksuffix and range limitations on both sides for correct interpretation of diverging stacked bar
    # with percentage data
    fig.update_layout({f"{x_or_y}axis": {"ticksuffix": "%"}})
    fig.update_layout({f"{x_or_y}axis2": fig.layout[f"{x_or_y}axis"]})
    # Left half runs 100 -> 0 into the midpoint, right half runs 0 -> 100 away from it.
    fig.update_layout(
        {
            f"{x_or_y}axis": {"domain": [0, 0.5], "range": [100, 0]},
            f"{x_or_y}axis2": {"domain": [0.5, 1], "range": [0, 100]},
        }
    )

    # Grey midline visually separates the negative and positive halves.
    if orientation == "h":
        fig.add_vline(x=0, line_width=2, line_color="grey")
    else:
        fig.add_hline(y=0, line_width=2, line_color="grey")

    return fig
Creates a diverging stacked bar chart based on px.bar. This type of chart is a variant of the standard stacked bar chart, with bars aligned on a central baseline to show both positive and negative values. Each bar is segmented to represent different categories. This function is not suitable for diverging stacked bar charts that include a neutral category. The first half of bars plotted are assumed to be negative ("Disagree") and the second half are assumed to be positive ("Agree"). Inspired by: https://community.plotly.com/t/need-help-in-making-diverging-stacked-bar-charts/34023 Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. **kwargs: Keyword arguments to pass into px.bar (e.g. x, y, labels). See https://plotly.com/python-api-reference/generated/plotly.express.bar.html. Returns: go.Figure: Diverging stacked bar chart.
diverging_stacked_bar
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def lollipop(data_frame: pd.DataFrame, **kwargs): """Creates a lollipop based on px.scatter. A lollipop chart is a variation of a bar chart where each data point is represented by a line and a dot at the end to mark the value. Inspired by: https://towardsdatascience.com/lollipop-dumbbell-charts-with-plotly-696039d5f85 Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. **kwargs: Keyword arguments to pass into px.scatter (e.g. x, y, labels). See https://plotly.com/python-api-reference/generated/plotly.scatter.html. Returns: go.Figure: Lollipop chart. """ # Plots the dots of the lollipop chart fig = px.scatter(data_frame, **kwargs) # Enables the orientation of the chart to be either horizontal or vertical orientation = fig.data[0].orientation x_or_y = "x" if orientation == "h" else "y" y_or_x = "y" if orientation == "h" else "x" # Plots the lines of the lollipop chart for x_or_y_value, y_or_x_value in zip(fig.data[0][x_or_y], fig.data[0][y_or_x]): fig.add_trace(go.Scatter({x_or_y: [0, x_or_y_value], y_or_x: [y_or_x_value, y_or_x_value], "mode": "lines"})) # Styles the lollipop chart and makes it uni-colored fig.update_traces( marker_size=12, line_width=3, line_color=fig.layout.template.layout.colorway[0], ) fig.update_layout( { "showlegend": False, f"{x_or_y}axis_showgrid": True, f"{y_or_x}axis_showgrid": False, f"{x_or_y}axis_rangemode": "tozero", }, ) return fig
Creates a lollipop based on px.scatter. A lollipop chart is a variation of a bar chart where each data point is represented by a line and a dot at the end to mark the value. Inspired by: https://towardsdatascience.com/lollipop-dumbbell-charts-with-plotly-696039d5f85 Args: data_frame: DataFrame for the chart. Can be long form or wide form. See https://plotly.com/python/wide-form/. **kwargs: Keyword arguments to pass into px.scatter (e.g. x, y, labels). See https://plotly.com/python-api-reference/generated/plotly.scatter.html. Returns: go.Figure: Lollipop chart.
lollipop
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_charts.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_charts.py
Apache-2.0
def build(self):
    """Returns the code clipboard component inside an accordion."""
    # Fence the snippet so dcc.Markdown renders it with syntax highlighting for self.language.
    markdown_code = "\n".join([f"```{self.language}", self.code, "```"])

    # quote() URL-encodes the snippet so it can be embedded in the PyCafe fragment link.
    pycafe_link = dbc.Button(
        [
            "Edit code live on PyCafe",
            html.Span("open_in_new", className="material-symbols-outlined open-in-new"),
        ],
        href=f"https://py.cafe/snippet/vizro/v1#code={quote(self.code)}",
        target="_blank",
        class_name="pycafe-link",
    )

    return html.Div(
        [
            # The live-edit link only makes sense for full Vizro examples, not plain Plotly ones.
            pycafe_link if self.mode == "vizro" else None,
            dcc.Clipboard(target_id=self.id, className="code-clipboard"),
            dcc.Markdown(markdown_code, id=self.id),
        ],
        className="code-clipboard-container",
    )
Returns the code clipboard component inside an accordion.
build
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_components.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_components.py
Apache-2.0
def build(self):
    """Returns a markdown component with an optional classname."""
    markdown_config = {
        "id": self.id,
        "children": self.text,
        "dangerously_allow_html": False,  # never render raw HTML from the text
        "className": self.classname,
        "link_target": "_blank",  # open links in a new tab
    }
    return dcc.Markdown(**markdown_config)
Returns a markdown component with an optional classname.
build
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/custom_components.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/custom_components.py
Apache-2.0
def butterfly_factory(group: str):
    """Reusable function to create the page content for the butterfly chart with a unique ID."""
    # The same page appears under several chart groups, so the group name is baked into the id/path
    # to keep them unique.
    return vm.Page(
        id=f"{group}-butterfly",
        path=f"{group}/butterfly",
        title="Butterfly",
        layout=vm.Grid(grid=PAGE_GRID),
        components=[
            vm.Card(
                text="""
                #### What is a butterfly chart?

                A butterfly chart (also called a tornado chart) is a bar chart for displaying two sets of data series
                side by side.

                &nbsp;

                #### When should I use it?

                Use a butterfly chart when you wish to emphasize the comparison between two data sets sharing the same
                parameters. Sharing this chart with your audience will help them see at a glance how two groups differ
                within the same parameters. You can also **stack** two bars on each side to divide your
                categories.
            """
            ),
            vm.Graph(figure=butterfly.fig),
            # One tab shows the full Vizro dashboard code, the other only the Plotly figure code.
            vm.Tabs(
                tabs=[
                    vm.Container(
                        title="Vizro dashboard",
                        components=[make_code_clipboard_from_py_file("butterfly.py", mode="vizro")],
                    ),
                    vm.Container(
                        title="Plotly figure",
                        components=[make_code_clipboard_from_py_file("butterfly.py", mode="plotly")],
                    ),
                ]
            ),
        ],
    )
Reusable function to create the page content for the butterfly chart with a unique ID.
butterfly_factory
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/pages/_factories.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/pages/_factories.py
Apache-2.0
def connected_scatter_factory(group: str):
    """Reusable function to create the page content for the connected scatter chart with a unique ID."""
    # The same page appears under several chart groups, so the group name is baked into the id/path
    # to keep them unique.
    return vm.Page(
        id=f"{group}-connected-scatter",
        path=f"{group}/connected-scatter",
        title="Connected scatter",
        layout=vm.Grid(grid=PAGE_GRID),
        components=[
            vm.Card(
                text="""
                #### What is a connected scatter chart?

                A connected scatter chart visualizes two variables (x and y) using dots, with lines connecting
                the dots in the order of the data points. One variable is plotted along the x-axis and the other
                along the y-axis, showing both the relationship and a sequence of the data.

                &nbsp;

                #### When should I use it?

                Use connected scatter charts to show the relationship between two variables and the sequence of
                data points. They are ideal for paired numerical data, helping to reveal trends and patterns
                over time or in a specific order. Remember, correlation is not causation, so ensure your
                audience understands this to avoid misinterpretation.
            """
            ),
            vm.Graph(figure=connected_scatter.fig),
            # One tab shows the full Vizro dashboard code, the other only the Plotly figure code.
            vm.Tabs(
                tabs=[
                    vm.Container(
                        title="Vizro dashboard",
                        components=[make_code_clipboard_from_py_file("connected_scatter.py", mode="vizro")],
                    ),
                    vm.Container(
                        title="Plotly figure",
                        components=[make_code_clipboard_from_py_file("connected_scatter.py", mode="plotly")],
                    ),
                ]
            ),
        ],
    )
Reusable function to create the page content for the column chart with a unique ID.
connected_scatter_factory
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/pages/_factories.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/pages/_factories.py
Apache-2.0
def column_and_line_factory(group: str):
    """Reusable function to create the page content for the column+line chart with a unique ID."""
    # The same page appears under several chart groups, so the group name is baked into the id/path
    # to keep them unique.
    return vm.Page(
        id=f"{group}-column-and-line",
        path=f"{group}/column-and-line",
        title="Column and line",
        layout=vm.Grid(grid=PAGE_GRID),
        components=[
            vm.Card(
                text="""
                #### What is a column and line chart?

                A combined column and line chart helps you demonstrate the relationship between an amount
                (displayed in columns) and a trend or rate (displayed as a line running across the columns).

                &nbsp;

                #### When should I use it?

                Use this type of chart when you wish to compare quantities of one item with changes in another
                item. It's ideal for showing patterns over time (e.g., monthly sales and growth rates) but can
                also be used for other types of data comparisons.
            """
            ),
            vm.Graph(figure=column_and_line.fig),
            # One tab shows the full Vizro dashboard code, the other only the Plotly figure code.
            vm.Tabs(
                tabs=[
                    vm.Container(
                        title="Vizro dashboard",
                        components=[make_code_clipboard_from_py_file("column_and_line.py", mode="vizro")],
                    ),
                    vm.Container(
                        title="Plotly figure",
                        components=[make_code_clipboard_from_py_file("column_and_line.py", mode="plotly")],
                    ),
                ]
            ),
        ],
    )
Reusable function to create the page content for the column+line chart with a unique ID.
column_and_line_factory
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/pages/_factories.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/pages/_factories.py
Apache-2.0
def waterfall_factory(group: str):
    """Reusable function to create the page content for the waterfall chart with a unique ID."""
    # The same page appears under several chart groups, so the group name is baked into the id/path
    # to keep them unique.
    return vm.Page(
        id=f"{group}-waterfall",
        path=f"{group}/waterfall",
        title="Waterfall",
        layout=vm.Grid(grid=PAGE_GRID),
        components=[
            vm.Card(
                text="""
                #### What is a waterfall chart?

                A waterfall chart is a bar chart that shows the cumulative effect of sequential positive or
                negative values. It starts with an initial value, displays individual changes as steps, and
                ends with the final total.

                &nbsp;

                #### When should I use it?

                Use a waterfall chart to visualize how individual factors contribute to a total, such as
                changes in revenue or costs by category. It helps you understand the incremental impact of each
                factor, making data analysis and interpretation easier. Ensure all bars and changes are clearly
                labeled, use consistent colors for positive and negative values, and arrange categories
                logically to tell a coherent story.
            """
            ),
            vm.Graph(figure=waterfall.fig),
            # One tab shows the full Vizro dashboard code, the other only the Plotly figure code.
            vm.Tabs(
                tabs=[
                    vm.Container(
                        title="Vizro dashboard",
                        components=[make_code_clipboard_from_py_file("waterfall.py", mode="vizro")],
                    ),
                    vm.Container(
                        title="Plotly figure",
                        components=[make_code_clipboard_from_py_file("waterfall.py", mode="plotly")],
                    ),
                ]
            ),
        ],
    )
Reusable function to create the page content for the column chart with a unique ID.
waterfall_factory
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/pages/_factories.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/pages/_factories.py
Apache-2.0
def lollipop_factory(group: str):
    """Reusable function to create the page content for the lollipop chart with a unique ID."""
    # The same page appears under several chart groups, so the group name is baked into the id/path
    # to keep them unique.
    return vm.Page(
        id=f"{group}-lollipop",
        path=f"{group}/lollipop",
        title="Lollipop",
        layout=vm.Grid(grid=PAGE_GRID),
        components=[
            vm.Card(
                text="""
                #### What is a lollipop chart?

                A lollipop chart is a variation of a bar chart where each data point is represented by a line
                and a dot at the end to mark the value. It functions like a bar chart but offers a cleaner
                visual, especially useful when dealing with a large number of high values, to avoid the clutter
                of tall columns. However, it can be less precise due to the difficulty in judging the exact
                center of the circle.

                &nbsp;

                #### When should I use it?

                Use a lollipop chart to compare values across categories, especially when dealing with many
                high values. It highlights differences and trends clearly without the visual bulk of a bar
                chart. Ensure clarity by limiting categories, using consistent scales, and clearly labeling
                axes. Consider alternatives if precise value representation is crucial.
            """
            ),
            vm.Graph(figure=lollipop.fig),
            # One tab shows the full Vizro dashboard code, the other only the Plotly figure code.
            vm.Tabs(
                tabs=[
                    vm.Container(
                        title="Vizro dashboard",
                        components=[make_code_clipboard_from_py_file("lollipop.py", mode="vizro")],
                    ),
                    vm.Container(
                        title="Plotly figure",
                        components=[make_code_clipboard_from_py_file("lollipop.py", mode="plotly")],
                    ),
                ]
            ),
        ],
    )
Reusable function to create the page content for the lollipop chart with a unique ID.
lollipop_factory
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/pages/_factories.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/pages/_factories.py
Apache-2.0
def _format_and_lint(code_string: str, line_length: int) -> str:
    """Inspired by vizro.models._base._format_and_lint. The only difference is that this does isort too."""
    # Ruff has no public Python API yet (tracking https://github.com/astral-sh/ruff/issues/659;
    # good example: https://github.com/astral-sh/ruff/issues/8401#issuecomment-1788806462), so we
    # chain autoflake -> isort -> black instead. autoflake is needed because isort cannot remove
    # unused imports on its own: https://github.com/PyCQA/isort/issues/1105, and without isort the
    # example code sometimes looks quite strange.
    without_unused_imports = autoflake.fix_code(code_string, remove_all_unused_imports=True)
    with_sorted_imports = isort.code(without_unused_imports)
    # Black has no stable Python API either, so format_str might stop working at some point:
    # https://black.readthedocs.io/en/stable/faq.html#does-black-have-an-api
    return black.format_str(with_sorted_imports, mode=black.Mode(line_length=line_length))
Inspired by vizro.models._base._format_and_lint. The only difference is that this does isort too.
_format_and_lint
python
mckinsey/vizro
vizro-core/examples/visual-vocabulary/pages/_pages_utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/examples/visual-vocabulary/pages/_pages_utils.py
Apache-2.0
def __init__(self, **kwargs):
    """Initializes Dash app, stored in `self.dash`.

    Args:
        **kwargs : Passed through to `Dash.__init__`, e.g. `assets_folder`, `url_base_pathname`. See
            [Dash documentation](https://dash.plotly.com/reference#dash.dash) for possible arguments.

    """
    # Set suppress_callback_exceptions=True for the following reasons:
    # 1. Prevents the following Dash exception when using html.Div as placeholders in build methods:
    # "Property 'cellClicked' was used with component ID '__input_ag_grid_id' in one of the Input
    # items of a callback. This ID is assigned to a dash_html_components.Div component in the layout,
    # which does not support this property."
    # 2. Improves performance by bypassing layout validation.
    self.dash = dash.Dash(
        **kwargs,
        pages_folder="",
        # TODO: Considering removing the call to suppress_callback_exceptions once vm.Table is deprecated and
        # we've confirmed that all initialized pages not included in the Dashboard no longer trigger console errors.
        # See above note for why we might want to keep it though.
        suppress_callback_exceptions=True,
        title="Vizro",
        use_pages=True,
    )

    # When Vizro is used as a framework, we want to include the library and framework resources.
    # Dash serves resources in the order 1. external_stylesheets/scripts; 2. library resources from the
    # ComponentRegistry; 3. resources added by append_css/scripts.
    # Vizro library resources are already present thanks to ComponentRegistry.registry.add("vizro") in
    # __init__.py. However, since Dash serves these before those added below it means that vizro-bootstrap.css would
    # be served *after* Vizro library's figures.css. We always want vizro-bootstrap.css to be served first
    # so that it can be overridden. For pure Dash users this is achieved vizro-bootstrap.css is supplied as an
    # external_stylesheet. We could add vizro-bootstrap.css as an external_stylesheet here but it is awkward
    # because it means providing href="_dash-component-suite/..." or using the external_url. Instead we remove
    # Vizro as a component library and then just serve all the resources again. ValueError is suppressed so that
    # repeated calls to Vizro() don't give an error.
    with suppress(ValueError):
        ComponentRegistry.registry.discard("vizro")

    # vizro-bootstrap.min.css must be first so that it can be overridden, e.g. by bootstrap_overrides.css.
    # After that, all other items are sorted alphabetically.
    for path in sorted(
        VIZRO_ASSETS_PATH.rglob("*.*"), key=lambda file: (file.name != "vizro-bootstrap.min.css", file)
    ):
        if path.suffix == ".css":
            self.dash.css.append_css(_make_resource_spec(path))
        elif path.suffix == ".js":
            self.dash.scripts.append_script(_make_resource_spec(path))
        else:
            # map files and fonts and images. These are treated like scripts since this is how Dash handles them.
            # This adds paths to self.dash.registered_paths so that they can be accessed without throwing an
            # error in dash._validate.validate_js_path.
            self.dash.scripts.append_script(_make_resource_spec(path))

    # Bind the data manager's Flask-Caching cache to the underlying Flask server.
    data_manager.cache.init_app(self.dash.server)
Initializes Dash app, stored in `self.dash`. Args: **kwargs : Passed through to `Dash.__init__`, e.g. `assets_folder`, `url_base_pathname`. See [Dash documentation](https://dash.plotly.com/reference#dash.dash) for possible arguments.
__init__
python
mckinsey/vizro
vizro-core/src/vizro/_vizro.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/_vizro.py
Apache-2.0
def build(self, dashboard: Dashboard):
    """Builds the `dashboard`.

    Args:
        dashboard (Dashboard): [`Dashboard`][vizro.models.Dashboard] object.

    Returns:
        self: Vizro app

    """
    # Set the global chart template to vizro_light or vizro_dark. The choice between these is generally
    # meaningless because chart colors in the two are identical, and everything else gets overridden in the
    # clientside theme selector callback. This global template setting is never undone anywhere: teardown code
    # would probably never be 100% reliable, and Vizro._reset can't do it well either since it's a staticmethod
    # and can't access self.old_theme (short of a global variable). It can't move to run() because it's needed
    # even in deployment. If it ever does need fixing, the best option is likely two separate paths:
    # 1. In deployment (or just outside Jupyter?), set the theme here and never revert it.
    # 2. In other contexts, use a context manager in the run method.
    pio.templates.default = dashboard.theme

    # Model instantiation and pre_build are independent of Dash.
    self._pre_build()

    self.dash.layout = dashboard.build()

    # Add data-bs-theme attribute that is always present, even for pages without theme selector,
    # i.e. the Dash "Loading..." screen.
    theme_name = dashboard.theme.removeprefix("vizro_")
    self.dash.index_string = self.dash.index_string.replace("<html>", f"<html data-bs-theme='{theme_name}'>")

    if dashboard.title:
        # Dash.index uses self.dash.title instead of self.dash.config.title for backwards compatibility.
        self.dash.title = dashboard.title

    return self
Builds the `dashboard`. Args: dashboard (Dashboard): [`Dashboard`][vizro.models.Dashboard] object. Returns: self: Vizro app
build
python
mckinsey/vizro
vizro-core/src/vizro/_vizro.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/_vizro.py
Apache-2.0
def run(self, *args, **kwargs):  # if type annotated, mkdocstring stops seeing the class
    """Runs the dashboard.

    Args:
        *args : Passed through to `dash.run`.
        **kwargs : Passed through to `dash.run`.

    """
    # Freeze both managers so no further data/models can be registered while the app is serving.
    data_manager._frozen_state = True
    model_manager._frozen_state = True

    # SimpleCache is in-process only, so warn when the user asks for multiple worker processes.
    uses_multiple_processes = kwargs.get("processes", 1) > 1
    if uses_multiple_processes and type(data_manager.cache.cache) is SimpleCache:
        warnings.warn(
            "`SimpleCache` is designed to support only single process environments. If you would like to use "
            "multiple processes then you should change to a cache that supports it such as `FileSystemCache` or "
            "`RedisCache`."
        )

    self.dash.run(*args, **kwargs)
Runs the dashboard. Args: *args : Passed through to `dash.run`. **kwargs : Passed through to `dash.run`.
run
python
mckinsey/vizro
vizro-core/src/vizro/_vizro.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/_vizro.py
Apache-2.0
def _pre_build():
    """Runs pre_build method on all models in the model_manager."""
    # A pre_build method can itself add a model (e.g. an Action) to the model manager, so we iterate over
    # snapshots (set(model_manager) / realized iterables) rather than the live manager, or we'd loop through
    # something that changes size. Models created *during* pre-build do not themselves get pre_build run;
    # a second pre_build pass may be added in future.
    #
    # Filters are pre-built first to handle the dependency between Filter and Page pre_build: Page.pre_build
    # checks whether filters are dynamic, which is only decided inside Filter.pre_build. The calculation of
    # data_frame Parameter targets also depends on the filter targets, so filters must come first.
    for filter_model in cast(Iterable[Filter], model_manager._get_models(Filter)):
        filter_model.pre_build()

    for model_id in set(model_manager):
        candidate = model_manager[model_id]
        if isinstance(candidate, Filter) or not hasattr(candidate, "pre_build"):
            continue
        candidate.pre_build()
Runs pre_build method on all models in the model_manager.
_pre_build
python
mckinsey/vizro
vizro-core/src/vizro/_vizro.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/_vizro.py
Apache-2.0
def _reset():
    """Private method that clears all state in the `Vizro` app.

    This deliberately does not clear the data manager cache - see comments in data_manager._clear for explanation.
    """
    data_manager._clear()
    model_manager._clear()
    # Clear Dash's module-level globals, which otherwise persist between app instances in the same process.
    dash._callback.GLOBAL_CALLBACK_LIST = []
    dash._callback.GLOBAL_CALLBACK_MAP = {}
    dash._callback.GLOBAL_INLINE_SCRIPTS = []
    dash.page_registry.clear()
    dash._pages.CONFIG.clear()
    dash._pages.CONFIG.__dict__.clear()
    # To reset state to as if Vizro() hadn't been ran we need to make sure vizro is in the component
    # registry. This is a set so it's not possible to duplicate the entry. This handles the very edge case that
    # probably only occurs in our tests where someone does import vizro; Vizro(); Dash(), which means the Vizro
    # library components are no longer available. This would work correctly with import vizro; Vizro();
    # Vizro.reset(); Dash().
    ComponentRegistry.registry.add("vizro")
Private method that clears all state in the `Vizro` app. This deliberately does not clear the data manager cache - see comments in data_manager._clear for explanation.
_reset
python
mckinsey/vizro
vizro-core/src/vizro/_vizro.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/_vizro.py
Apache-2.0
def outputs(self) -> Union[list[_IdOrIdProperty], dict[str, _IdOrIdProperty]]:  # type: ignore[override]
    """Must be defined by concrete action, even if there's no output.

    This should return a dictionary of the form `{"key": "dropdown.value"}`, where the key corresponds to the key
    in the dictionary returned by the action `function`, and the value `"dropdown.value"` is converted into
    `Output("dropdown", "value", allow_duplicate=True)`. As the return annotation shows, a plain list of such
    `"id.property"` strings is also permitted.
    """
    # There should be no need to support dictionary IDs here. The only possible use is for pattern-matching IDs, but
    # that will probably only be needed for built-in inputs. export_data currently overrides transformed_outputs to
    # supply a dictionary ID but in future will probably change to use a single built-in vizro_download component.
    # See https://github.com/mckinsey/vizro/pull/1054#discussion_r1989405177.
    #
    # We should probably not build in behavior here e.g. to generate outputs automatically from certain reserved
    # arguments since this would only work well for class-based actions and not @capture("action") ones. Instead
    # the code that does make_outputs_from_targets would be put into a reusable function.
    #
    # TODO-AV2 D 4: build in a vizro_download component. At some point after that consider changing export_data to
    # use it, but that's not urgent. See https://github.com/mckinsey/vizro/pull/1054#discussion_r1989405177.
    pass
Must be defined by concrete action, even if there's no output. This should return a dictionary of the form `{"key": "dropdown.value"}`, where the key corresponds to the key in the dictionary returned by the action `function`, and the value `"dropdown.value"` is converted into `Output("dropdown", "value", allow_duplicate=True)`.
outputs
python
mckinsey/vizro
vizro-core/src/vizro/actions/_abstract_action.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_abstract_action.py
Apache-2.0
def _apply_filter_controls(
    data_frame: pd.DataFrame, ctds_filter: list[CallbackTriggerDict], target: ModelID
) -> pd.DataFrame:
    """Applies filters from a vm.Filter model in the controls.

    Args:
        data_frame: unfiltered DataFrame.
        ctds_filter: list of CallbackTriggerDict for filters.
        target: id of targeted Figure.

    Returns:
        filtered DataFrame.
    """
    from vizro.actions._filter_action import _filter

    for ctd in ctds_filter:
        raw_value = ctd["value"]
        # Single selections arrive as scalars; normalize to a list so membership tests are uniform.
        selected_values = raw_value if isinstance(raw_value, list) else [raw_value]
        for action in _get_component_actions(model_manager[ctd["id"]]):
            # TODO-AV2 A 1: simplify this as in
            # https://github.com/mckinsey/vizro/pull/1054/commits/f4c8c5b153f3a71b93c018e9f8c6f1b918ca52f6
            if not isinstance(action, _filter) or target not in action.targets or ALL_OPTION in selected_values:
                continue
            data_frame = data_frame[action.filter_function(data_frame[action.column], selected_values)]

    return data_frame
Applies filters from a vm.Filter model in the controls. Args: data_frame: unfiltered DataFrame. ctds_filter: list of CallbackTriggerDict for filters. target: id of targeted Figure. Returns: filtered DataFrame.
_apply_filter_controls
python
mckinsey/vizro
vizro-core/src/vizro/actions/_actions_utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_actions_utils.py
Apache-2.0
def _apply_filter_interaction(
    data_frame: pd.DataFrame, ctds_filter_interaction: list[dict[str, CallbackTriggerDict]], target: ModelID
) -> pd.DataFrame:
    """Applies filters from a filter_interaction.

    This will be removed in future when filter interactions are implemented using controls.

    Args:
        data_frame: unfiltered DataFrame.
        ctds_filter_interaction: structure containing CallbackTriggerDict for filter interactions.
        target: id of targeted Figure.

    Returns:
        filtered DataFrame.
    """
    for ctd in ctds_filter_interaction:
        # Each entry names the model that was interacted with; that model performs the filtering itself.
        source_model = cast(FigureWithFilterInteractionType, model_manager[ctd["modelID"]["id"]])
        data_frame = source_model._filter_interaction(
            data_frame=data_frame,
            target=target,
            ctd_filter_interaction=ctd,
        )
    return data_frame
Applies filters from a filter_interaction. This will be removed in future when filter interactions are implemented using controls. Args: data_frame: unfiltered DataFrame. ctds_filter_interaction: structure containing CallbackTriggerDict for filter interactions. target: id of targeted Figure. Returns: filtered DataFrame.
_apply_filter_interaction
python
mckinsey/vizro
vizro-core/src/vizro/actions/_actions_utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_actions_utils.py
Apache-2.0
def _get_target_dot_separated_strings(dot_separated_strings: list[str], target: ModelID, data_frame: bool) -> list[str]:
    """Filters list of dot separated strings to get just those relevant for a single target.

    Args:
        dot_separated_strings: list of dot separated strings that can be targeted by a vm.Parameter,
            e.g. ["target_name.data_frame.arg", "target_name.x"]
        target: id of targeted Figure.
        data_frame: whether to return only DataFrame parameters starting "data_frame." or only non-DataFrame
            parameters.

    Returns:
        List of dot separated strings for target.
    """
    prefix = f"{target}."
    # Strip the target prefix from entries addressed at this target, discarding all others.
    stripped = (string.removeprefix(prefix) for string in dot_separated_strings if string.startswith(prefix))
    # Keep "data_frame." entries when data_frame=True, and everything else when data_frame=False.
    return [string for string in stripped if string.startswith("data_frame.") == data_frame]
Filters list of dot separated strings to get just those relevant for a single target. Args: dot_separated_strings: list of dot separated strings that can be targeted by a vm.Parameter, e.g. ["target_name.data_frame.arg", "target_name.x"] target: id of targeted Figure. data_frame: whether to return only DataFrame parameters starting "data_frame." or only non-DataFrame parameters. Returns: List of dot separated strings for target.
_get_target_dot_separated_strings
python
mckinsey/vizro
vizro-core/src/vizro/actions/_actions_utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_actions_utils.py
Apache-2.0
def _get_parametrized_config(
    ctds_parameter: list[CallbackTriggerDict], target: ModelID, data_frame: bool
) -> dict[str, Any]:
    """Convert parameters into a keyword-argument dictionary.

    Args:
        ctds_parameter: list of CallbackTriggerDicts for vm.Parameter.
        target: id of targeted figure.
        data_frame: whether to return only DataFrame parameters starting "data_frame." or only non-DataFrame
            parameters.

    Returns:
        keyword-argument dictionary.

    """
    from vizro.actions._parameter_action import _parameter

    if data_frame:
        # This entry is inserted (but will always be empty) even for static data so that the load/_multi_load calls
        # look identical for dynamic data with no arguments and static data. Note it's not possible to address nested
        # argument of data_frame like data_frame.x.y, just top-level ones like data_frame.x.
        config: dict[str, Any] = {"data_frame": {}}
    else:
        # TODO - avoid calling _captured_callable. Once we have done this we can remove _arguments from
        # CapturedCallable entirely. This might mean not being able to address nested parameters.
        config = deepcopy(cast(FigureType, model_manager[target]).figure._arguments)
        del config["data_frame"]

    for ctd in ctds_parameter:
        # TODO: needs to be refactored so that it is independent of implementation details
        parameter_value = ctd["value"]
        selector = cast(SelectorType, model_manager[ctd["id"]])
        # ALL_OPTION expands to every available option on the selector.
        if hasattr(parameter_value, "__iter__") and ALL_OPTION in parameter_value:  # type: ignore[operator]
            # Even if an option is provided as list[dict], the Dash component only returns a list of values.
            # So we need to ensure that we always return a list only as well to provide consistent types.
            parameter_value = [option["value"] if isinstance(option, dict) else option for option in selector.options]

        # NOTE(review): presumably this maps "none"-like selector sentinels to None — confirm in its definition.
        parameter_value = _validate_selector_value_none(parameter_value)  # type: ignore[arg-type]

        for action in _get_component_actions(selector):
            # TODO-AV2 A 1: simplify this as in
            # https://github.com/mckinsey/vizro/pull/1054/commits/f4c8c5b153f3a71b93c018e9f8c6f1b918ca52f6
            # Potentially this function would move to the filter_interaction action. That will be deprecated so
            # no need to worry too much if it doesn't work well, but we'll need to do something similar for the
            # new interaction functionality anyway.
            if not isinstance(action, _parameter):
                continue
            for dot_separated_string in _get_target_dot_separated_strings(action.targets, target, data_frame):
                config = _update_nested_figure_properties(
                    figure_config=config, dot_separated_string=dot_separated_string, value=parameter_value
                )

    return config
Convert parameters into a keyword-argument dictionary. Args: ctds_parameter: list of CallbackTriggerDicts for vm.Parameter. target: id of targeted figure. data_frame: whether to return only DataFrame parameters starting "data_frame." or only non-DataFrame parameters. Returns: keyword-argument dictionary.
_get_parametrized_config
python
mckinsey/vizro
vizro-core/src/vizro/actions/_actions_utils.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_actions_utils.py
Apache-2.0
def function(self, _controls: _Controls) -> dict[ModelID, Any]:
    """Applies _controls to charts on page once filter is applied.

    Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
    """
    # This is identical to _on_page_load.
    # TODO-AV2 A 1: _controls is not currently used but instead taken out of the Dash context. This
    # will change in future once the structure of _controls has been worked out and we know how to pass ids through.
    # See https://github.com/mckinsey/vizro/pull/880
    controls_ctx = ctx.args_grouping["external"]["_controls"]
    return _get_modified_page_figures(
        ctds_filter=controls_ctx["filters"],
        ctds_parameter=controls_ctx["parameters"],
        ctds_filter_interaction=controls_ctx["filter_interaction"],
        targets=self.targets,
    )
Applies _controls to charts on page once filter is applied. Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
function
python
mckinsey/vizro
vizro-core/src/vizro/actions/_filter_action.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_filter_action.py
Apache-2.0
def _get_triggered_model(self) -> FigureWithFilterInteractionType:  # type: ignore[return]
    """Gets the model that triggers the action with "action_id"."""
    # In future we should have a better way of doing this:
    # - maybe through the model manager
    # - pass trigger into callback as a built-in keyword
    # - maybe need to be able to define inputs property for actions that subclass _AbstractAction
    for chain in cast(Iterable[ActionsChain], model_manager._get_models(ActionsChain)):
        if self not in chain.actions:
            continue
        return model_manager[chain.trigger.component_id]
Gets the model that triggers the action with "action_id".
_get_triggered_model
python
mckinsey/vizro
vizro-core/src/vizro/actions/_filter_interaction.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_filter_interaction.py
Apache-2.0
def function(self, _controls: _Controls) -> dict[ModelID, Any]:
    """Applies _controls to charts on page once the page is opened (or refreshed).

    Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
    """
    # TODO-AV2 A 1: _controls is not currently used but instead taken out of the Dash context. This
    # will change in future once the structure of _controls has been worked out and we know how to pass ids through.
    # See https://github.com/mckinsey/vizro/pull/880
    controls_ctx = ctx.args_grouping["external"]["_controls"]
    return _get_modified_page_figures(
        ctds_filter=controls_ctx["filters"],
        ctds_parameter=controls_ctx["parameters"],
        ctds_filter_interaction=controls_ctx["filter_interaction"],
        targets=self.targets,
    )
Applies _controls to charts on page once the page is opened (or refreshed). Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
function
python
mckinsey/vizro
vizro-core/src/vizro/actions/_filter_interaction.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_filter_interaction.py
Apache-2.0
def function(self, _controls: _Controls) -> dict[ModelID, Any]:
    """Applies controls to charts on page once the page is opened (or refreshed).

    Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
    """
    # TODO-AV2 A 1: _controls is not currently used but instead taken out of the Dash context. This
    # will change in future once the structure of _controls has been worked out and we know how to pass ids through.
    # See https://github.com/mckinsey/vizro/pull/880
    controls_ctx = ctx.args_grouping["external"]["_controls"]
    return _get_modified_page_figures(
        ctds_filter=controls_ctx["filters"],
        ctds_parameter=controls_ctx["parameters"],
        ctds_filter_interaction=controls_ctx["filter_interaction"],
        targets=self.targets,
    )
Applies controls to charts on page once the page is opened (or refreshed). Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
function
python
mckinsey/vizro
vizro-core/src/vizro/actions/_on_page_load.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_on_page_load.py
Apache-2.0
def function(self, _controls: _Controls) -> dict[ModelID, Any]:
    """Applies _controls to charts on page once the page is opened (or refreshed).

    Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
    """
    # This is identical to _on_page_load but with self._target_ids rather than self.targets.
    # TODO-AV2 A 1: _controls is not currently used but instead taken out of the Dash context. This
    # will change in future once the structure of _controls has been worked out and we know how to pass ids through.
    # See https://github.com/mckinsey/vizro/pull/880
    controls_ctx = ctx.args_grouping["external"]["_controls"]
    return _get_modified_page_figures(
        ctds_filter=controls_ctx["filters"],
        ctds_parameter=controls_ctx["parameters"],
        ctds_filter_interaction=controls_ctx["filter_interaction"],
        targets=self._target_ids,
    )
Applies _controls to charts on page once the page is opened (or refreshed). Returns: Dict mapping target chart ids to modified figures e.g. {"my_scatter": Figure(...)}.
function
python
mckinsey/vizro
vizro-core/src/vizro/actions/_parameter_action.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_parameter_action.py
Apache-2.0
def _build_actions_models():
    """Builds a callback for each `Action` model and returns required components for these callbacks.

    Returns:
        List of required components for each `Action` in the `Dashboard` e.g. list[dcc.Download]

    """
    all_actions = cast(Iterable[_BaseAction], model_manager._get_models(_BaseAction))
    built_components = [action.build() for action in all_actions]
    # Hidden container: the components only exist to back the actions' callbacks, not to be seen.
    return html.Div(built_components, id="app_action_models_components_div", hidden=True)
Builds a callback for each `Action` model and returns required components for these callbacks. Returns: List of required components for each `Action` in the `Dashboard` e.g. list[dcc.Download]
_build_actions_models
python
mckinsey/vizro
vizro-core/src/vizro/actions/_action_loop/_action_loop.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_action_loop/_action_loop.py
Apache-2.0
def _build_action_loop_callbacks() -> None: """Creates all required dash callbacks for the action loop.""" # actions_chain and actions are not iterated over multiple times so conversion to list is not technically needed, # but it prevents future bugs and matches _get_action_loop_components. actions_chains: list[ActionsChain] = list(model_manager._get_models(ActionsChain)) actions: list[_BaseAction] = list(model_manager._get_models(_BaseAction)) if not actions_chains: return gateway_inputs: list[Input] = [] for actions_chain in actions_chains: # Recalculating the trigger component id to use the underlying callable object as a trigger component if needed. actions_chain_trigger_component_id = actions_chain.trigger.component_id try: actions_chain_trigger_component = model_manager[actions_chain_trigger_component_id] # Use underlying callable object as a trigger component. if hasattr(actions_chain_trigger_component, "_inner_component_id"): actions_chain_trigger_component_id = actions_chain_trigger_component._inner_component_id # Not all action_chain_trigger_components are included in model_manager e.g. on_page_load_action_trigger except KeyError: pass # Callback that enables gateway callback to work in the multiple page app clientside_callback( ClientsideFunction(namespace="build_action_loop_callbacks", function_name="trigger_to_global_store"), Output({"type": "gateway_input", "trigger_id": actions_chain.id}, "data"), Input( component_id=actions_chain_trigger_component_id, component_property=actions_chain.trigger.component_property, ), State({"type": "gateway_input", "trigger_id": actions_chain.id}, "data"), prevent_initial_call=True, ) gateway_inputs.append( Input( component_id={"type": "gateway_input", "trigger_id": actions_chain.id}, component_property="data", ) ) # Determines the final sequence of actions to be triggered. 
clientside_callback( ClientsideFunction(namespace="build_action_loop_callbacks", function_name="gateway"), output=[Output("remaining_actions", "data")] + [Output({"type": "action_trigger", "action_name": action.id}, "data") for action in actions], inputs=[ State("remaining_actions", "data"), State("trigger_to_actions_chain_mapper", "data"), State("action_trigger_actions_id", "data"), Input("cycle_breaker_div", "n_clicks"), *gateway_inputs, ], prevent_initial_call=True, ) # Callback that triggers the next iteration clientside_callback( ClientsideFunction(namespace="build_action_loop_callbacks", function_name="after_action_cycle_breaker"), Output("cycle_breaker_empty_output_store", "data"), Input("action_finished", "data"), prevent_initial_call=True, )
Creates all required dash callbacks for the action loop.
_build_action_loop_callbacks
python
mckinsey/vizro
vizro-core/src/vizro/actions/_action_loop/_build_action_loop_callbacks.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_action_loop/_build_action_loop_callbacks.py
Apache-2.0
def _get_action_loop_components() -> html.Div: """Gets all required components for the action loop. Returns: List of dcc or html components. """ # actions_chain and actions are iterated over multiple times so must be realized into a list. actions_chains: list[ActionsChain] = list(model_manager._get_models(ActionsChain)) actions: list[_BaseAction] = list(model_manager._get_models(_BaseAction)) if not actions_chains: return html.Div(id="action_loop_components_div") # Fundamental components required for the smooth operation of the action loop mechanism. components = [ dcc.Store(id="action_finished"), dcc.Store(id="remaining_actions", data=[]), html.Div(id="cycle_breaker_div", hidden=True), dcc.Store(id="cycle_breaker_empty_output_store"), ] # Additional component for every ActionChain in the system. # Represents a proxy component between visible UI component and the gateway of the action loop mechanism. # Required to avoid the "Unknown callback Input" issue for multiple page app examples. components.extend( [ dcc.Store( id={"type": "gateway_input", "trigger_id": actions_chain.id}, data=f"{actions_chain.id}", ) for actions_chain in actions_chains ] ) # Additional component for every Action in the system. # This component is injected as the only Input (trigger) inside each Action. # It enables that the action can be triggered only from the action loop mechanism. components.extend([dcc.Store(id={"type": "action_trigger", "action_name": action.id}) for action in actions]) # Additional store with all action_triggers ids. components.append(dcc.Store(id="action_trigger_actions_id", data=[action.id for action in actions])) # Additional store that maps the actions chain trigger id and the list of action ids that should be executed. components.append( dcc.Store( id="trigger_to_actions_chain_mapper", data={ actions_chain.id: [action.id for action in actions_chain.actions] for actions_chain in actions_chains }, ) ) return html.Div(components, id="action_loop_components_div")
Gets all required components for the action loop. Returns: List of dcc or html components.
_get_action_loop_components
python
mckinsey/vizro
vizro-core/src/vizro/actions/_action_loop/_get_action_loop_components.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/actions/_action_loop/_get_action_loop_components.py
Apache-2.0
def kpi_card(
    data_frame: pd.DataFrame,
    value_column: str,
    *,
    value_format: str = "{value}",
    agg_func: str = "sum",
    title: Optional[str] = None,
    icon: Optional[str] = None,
) -> dbc.Card:
    """Creates a styled KPI (Key Performance Indicator) card showing a single aggregated value.

    **Warning:** The format string provided to `value_format` is evaluated, so only supply trusted
    user input to prevent potential security risks.

    Args:
        data_frame: DataFrame containing the data.
        value_column: Column name of the value to be shown.
        value_format: [Valid Python format](https://docs.python.org/3/library/string.html#format-specification-mini-language)
            string applied to the value, where the placeholder `{value}` is `value_column` aggregated by
            `agg_func`. Common examples: "{value}" (raw value), "${value:0.2f}" (currency, two decimals),
            "{value:.0%}" (percentage), "{value:,}" (thousands separator). Defaults to "{value}".
        agg_func: String function name used to aggregate the data, e.g. "sum", "mean" or "median".
            Defaults to "sum". For more information on possible functions, see
            https://stackoverflow.com/questions/65877567/passing-function-names-as-strings-to-pandas-groupby-aggregrate.
        title: KPI title displayed on top of the card. Defaults to the capitalized `value_column`.
        icon: Name of the icon from the [Google Material Icon Library](https://fonts.google.com/icons)
            displayed on the left side of the KPI title. If not provided, no icon is displayed.

    Returns:
        A Dash Bootstrap Components card (`dbc.Card`) containing the formatted KPI value.

    Examples:
        Wrap inside `vm.Figure` to use as a component inside `vm.Page` or `vm.Container`.
        >>> import vizro.models as vm
        >>> from vizro.figures import kpi_card
        >>> vm.Page(title="Page", components=[vm.Figure(figure=kpi_card(...))])
    """
    if not title:
        title = f"{agg_func} {value_column}".title()

    # Aggregate the value column with the requested aggregation function.
    aggregated_value = data_frame[value_column].agg(agg_func)

    card_header = dbc.CardHeader(
        [
            # Dash drops None children, so the icon slot simply vanishes when no icon is given.
            html.P(icon, className="material-symbols-outlined") if icon else None,
            html.H4(title, className="card-kpi-title"),
        ]
    )
    card_body = dbc.CardBody(value_format.format(value=aggregated_value))
    return dbc.Card([card_header, card_body], class_name="card-kpi")
Creates a styled KPI (Key Performance Indicator) card displaying a value. **Warning:** Note that the format string provided to `value_format` is being evaluated, so ensure that only trusted user input is provided to prevent potential security risks. Args: data_frame: DataFrame containing the data. value_column: Column name of the value to be shown. value_format: Format string to be applied to the value. It must be a [valid Python format](https://docs.python.org/3/library/string.html#format-specification-mini-language) string where any of the below placeholders can be used. Defaults to "{value}". - value: `value_column` aggregated by `agg_func`. **Common examples include:** - "{value}": Displays the raw value. - "${value:0.2f}": Formats the value as a currency with two decimal places. - "{value:.0%}": Formats the value as a percentage without decimal places. - "{value:,}": Formats the value with comma as a thousands separator. agg_func: String function name to be used for aggregating the data. Common options include "sum", "mean" or "median". Default is "sum". For more information on possible functions, see https://stackoverflow.com/questions/65877567/passing-function-names-as-strings-to-pandas-groupby-aggregrate. title: KPI title displayed on top of the card. If not provided, it defaults to the capitalized `value_column`. icon: Name of the icon from the [Google Material Icon Library](https://fonts.google.com/icons) to be displayed on the left side of the KPI title. If not provided, no icon is displayed. Returns: A Dash Bootstrap Components card (`dbc.Card`) containing the formatted KPI value. Examples: Wrap inside `vm.Figure` to use as a component inside `vm.Page` or `vm.Container`. >>> import vizro.models as vm >>> from vizro.figures import kpi_card >>> vm.Page(title="Page", components=[vm.Figure(figure=kpi_card(...))])
kpi_card
python
mckinsey/vizro
vizro-core/src/vizro/figures/_kpi_cards.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/figures/_kpi_cards.py
Apache-2.0
def kpi_card_reference(  # noqa: PLR0913
    data_frame: pd.DataFrame,
    value_column: str,
    reference_column: str,
    *,
    value_format: str = "{value}",
    reference_format: str = "{delta_relative:+.1%} vs. reference ({reference})",
    agg_func: str = "sum",
    title: Optional[str] = None,
    icon: Optional[str] = None,
    reverse_color: bool = False,
) -> dbc.Card:
    """Creates a styled KPI (Key Performance Indicator) card comparing a value against a reference.

    **Warning:** The format strings provided to `value_format` and `reference_format` are evaluated,
    so only supply trusted user input to prevent potential security risks.

    Args:
        data_frame: DataFrame containing the data.
        value_column: Column name of the value to be shown.
        reference_column: Column name of the reference value for comparison.
        value_format: [Valid Python format](https://docs.python.org/3/library/string.html#format-specification-mini-language)
            string applied to the value. Available placeholders: `{value}` (`value_column` aggregated by
            `agg_func`), `{reference}` (`reference_column` aggregated by `agg_func`), `{delta}`
            (difference between the two) and `{delta_relative}` (relative difference). Common examples:
            "{value}", "${value:0.2f}", "{value:.0%}", "{value:,}". Defaults to "{value}".
        reference_format: Format string applied to the reference; same placeholders as `value_format`.
            Defaults to "{delta_relative:+.1%} vs. reference ({reference})".
        agg_func: String function name used to aggregate the data, e.g. "sum", "mean" or "median".
            Defaults to "sum". For more information on possible functions, see
            https://stackoverflow.com/questions/65877567/passing-function-names-as-strings-to-pandas-groupby-aggregrate.
        title: KPI title displayed on top of the card. Defaults to the capitalized `value_column`.
        icon: Name of the icon from the [Google Material Icon Library](https://fonts.google.com/icons)
            displayed on the left side of the KPI title. If not provided, no icon is displayed.
        reverse_color: If `False`, a positive delta is colored positively (e.g., blue) and a negative
            delta negatively (e.g., red). If `True`, the colors are inverted. Defaults to `False`.

    Returns:
        A Dash Bootstrap Components card (`dbc.Card`) containing the formatted KPI value and reference.

    Examples:
        Wrap inside `vm.Figure` to use as a component inside `vm.Page` or `vm.Container`.
        >>> import vizro.models as vm
        >>> from vizro.figures import kpi_card_reference
        >>> vm.Page(title="Page", components=[vm.Figure(figure=kpi_card_reference(...))])
    """
    if not title:
        title = f"{agg_func} {value_column}".title()

    value, reference = data_frame[[value_column, reference_column]].agg(agg_func)
    delta = value - reference
    # Guard against division by a zero (or otherwise falsy) reference.
    delta_relative = delta / reference if reference else np.nan

    # Footer color class: positive/negative styling, optionally inverted; none for zero delta.
    if delta > 0:
        footer_class = "color-neg" if reverse_color else "color-pos"
        arrow_icon = "arrow_circle_up"
    elif delta < 0:
        footer_class = "color-pos" if reverse_color else "color-neg"
        arrow_icon = "arrow_circle_down"
    else:
        footer_class = ""
        arrow_icon = "arrow_circle_right"

    format_kwargs = {"value": value, "reference": reference, "delta": delta, "delta_relative": delta_relative}

    card_header = dbc.CardHeader(
        [
            html.P(icon, className="material-symbols-outlined") if icon else None,
            html.H4(title, className="card-kpi-title"),
        ]
    )
    card_body = dbc.CardBody(value_format.format(**format_kwargs))
    card_footer = dbc.CardFooter(
        [
            html.Span(arrow_icon, className="material-symbols-outlined"),
            html.Span(reference_format.format(**format_kwargs)),
        ],
        class_name=footer_class,
    )
    return dbc.Card([card_header, card_body, card_footer], class_name="card-kpi")
Creates a styled KPI (Key Performance Indicator) card displaying a value in comparison to a reference value. **Warning:** Note that the format string provided to `value_format` and `reference_format` is being evaluated, so ensure that only trusted user input is provided to prevent potential security risks. Args: data_frame: DataFrame containing the data. value_column: Column name of the value to be shown. reference_column: Column name of the reference value for comparison. value_format: Format string to be applied to the value. It must be a [valid Python format](https://docs.python.org/3/library/string.html#format-specification-mini-language) string where any of the below placeholders can be used. Defaults to "{value}". - value: `value_column` aggregated by `agg_func`. - reference: `reference_column` aggregated by `agg_func`. - delta: Difference between `value` and `reference`. - delta_relative: Relative difference between `value` and `reference`. **Common examples include:** - "{value}": Displays the raw value. - "${value:0.2f}": Formats the value as a currency with two decimal places. - "{value:.0%}": Formats the value as a percentage without decimal places. - "{value:,}": Formats the value with comma as a thousands separator. reference_format: Format string to be applied to the reference. For more details on possible placeholders, see docstring on `value_format`. Defaults to "{delta_relative:+.1%} vs. reference ({reference})". agg_func: String function name to be used for aggregating the data. Common options include "sum", "mean" or "median". Default is "sum". For more information on possible functions, see https://stackoverflow.com/questions/65877567/passing-function-names-as-strings-to-pandas-groupby-aggregrate. title: KPI title displayed on top of the card. If not provided, it defaults to the capitalized `value_column`. 
icon: Name of the icon from the [Google Material Icon Library](https://fonts.google.com/icons) to be displayed on the left side of the KPI title. If not provided, no icon is displayed. reverse_color: If `False`, a positive delta will be colored positively (e.g., blue) and a negative delta negatively (e.g., red). If `True`, the colors will be inverted: a positive delta will be colored negatively (e.g., red) and a negative delta positively (e.g., blue). Defaults to `False`. Returns: A Dash Bootstrap Components card (`dbc.Card`) containing the formatted KPI value and reference. Examples: Wrap inside `vm.Figure` to use as a component inside `vm.Page` or `vm.Container`. >>> import vizro.models as vm >>> from vizro.figures import kpi_card_reference >>> vm.Page(title="Page", components=[vm.Figure(figure=kpi_card_reference(...))])
kpi_card_reference
python
mckinsey/vizro
vizro-core/src/vizro/figures/_kpi_cards.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/figures/_kpi_cards.py
Apache-2.0
def catalog_from_project(
    project_path: Union[str, Path], env: Optional[str] = None, extra_params: Optional[dict[str, Any]] = None
) -> CatalogProtocol:
    """Return the Kedro Data Catalog associated to a Kedro project.

    Args:
        project_path: Path to the Kedro project root directory.
        env: Kedro configuration environment to be used. Defaults to "local".
        extra_params: Optional dictionary containing extra project parameters for the underlying
            KedroContext. If specified, will update (and therefore take precedence over) the
            parameters retrieved from the project configuration.

    Returns:
        A Kedro Data Catalog.

    Examples:
        >>> from vizro.integrations import kedro as kedro_integration
        >>> catalog = kedro_integration.catalog_from_project("/path/to/kedro/project")
    """
    # Make the Kedro project discoverable before a session can be created for it.
    bootstrap_project(project_path)
    with KedroSession.create(
        project_path=project_path, env=env, save_on_close=False, extra_params=extra_params
    ) as session:
        context = session.load_context()
        return context.catalog
Return the Kedro Data Catalog associated to a Kedro project. Args: project_path: Path to the Kedro project root directory. env: Kedro configuration environment to be used. Defaults to "local". extra_params: Optional dictionary containing extra project parameters for underlying KedroContext. If specified, will update (and therefore take precedence over) the parameters retrieved from the project configuration. Returns: A Kedro Data Catalog. Examples: >>> from vizro.integrations import kedro as kedro_integration >>> catalog = kedro_integration.catalog_from_project("/path/to/kedro/project")
catalog_from_project
python
mckinsey/vizro
vizro-core/src/vizro/integrations/kedro/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/integrations/kedro/_data_manager.py
Apache-2.0
def pipelines_from_project(project_path: Union[str, Path]) -> dict[str, Pipeline]:
    """Return the Kedro Pipelines associated to a Kedro project.

    Args:
        project_path: Path to the Kedro project root directory.

    Returns:
        A dictionary mapping pipeline names to Kedro Pipelines.

    Examples:
        >>> from vizro.integrations import kedro as kedro_integration
        >>> pipelines = kedro_integration.pipelines_from_project("/path/to/kedro/project")
    """
    # bootstrap_project must run before the import below so that kedro.framework.project is
    # configured for this particular project when `pipelines` is resolved.
    bootstrap_project(project_path)
    from kedro.framework.project import pipelines

    return pipelines
Return the Kedro Pipelines associated to a Kedro project. Args: project_path: Path to the Kedro project root directory. Returns: A dictionary mapping pipeline names to Kedro Pipelines. Examples: >>> from vizro.integrations import kedro as kedro_integration >>> pipelines = kedro_integration.pipelines_from_project("/path/to/kedro/project")
pipelines_from_project
python
mckinsey/vizro
vizro-core/src/vizro/integrations/kedro/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/integrations/kedro/_data_manager.py
Apache-2.0
def datasets_from_catalog(
    catalog: CatalogProtocol, *, pipeline: Optional[Pipeline] = None
) -> dict[str, pd_DataFrameCallable]:
    """Return the Kedro Dataset loading functions associated to a Kedro Data Catalog.

    Args:
        catalog: A Kedro Data Catalog, e.g. as returned by `catalog_from_project`.
        pipeline: Optional Kedro pipeline. If specified, the factory-based Kedro datasets it defines are returned.

    Returns:
        A dictionary mapping dataset names to Kedro Dataset loading functions. Only pandas-typed
        datasets are included.

    Examples:
        >>> from vizro.integrations import kedro as kedro_integration
        >>> dataset_loaders = kedro_integration.datasets_from_catalog(catalog)
    """
    # Older Kedro versions are handled by the legacy implementation (presumably because the
    # config_resolver API used below is unavailable there — TODO confirm).
    if parse(version("kedro")) < parse("0.19.9"):
        return _legacy_datasets_from_catalog(catalog)

    # This doesn't include things added to the catalog at run time but that is ok for our purposes.
    config_resolver = catalog.config_resolver
    kedro_datasets = config_resolver.config.copy()

    if pipeline:
        # Go through all dataset names that weren't in catalog and try to resolve them. Those that cannot be
        # resolved give an empty dictionary and are ignored.
        for dataset_name in set(pipeline.datasets()) - set(kedro_datasets):
            if dataset_config := config_resolver.resolve_pattern(dataset_name):
                kedro_datasets[dataset_name] = dataset_config

    def _catalog_release_load(dataset_name: str):
        # release is needed to clear the Kedro load version cache so that the dashboard always fetches the most recent
        # version rather than being stuck on the same version as when the app started.
        catalog.release(dataset_name)
        return catalog.load(dataset_name)

    vizro_data_sources = {}

    for dataset_name, dataset_config in kedro_datasets.items():
        # "type" key always exists because we filtered out patterns that resolve to empty dictionary above.
        if "pandas" in dataset_config["type"]:
            # We need to bind dataset_name=dataset_name early to avoid dataset_name late-binding to the last value in
            # the for loop.
            vizro_data_sources[dataset_name] = lambda dataset_name=dataset_name: _catalog_release_load(dataset_name)

    return vizro_data_sources
Return the Kedro Dataset loading functions associated to a Kedro Data Catalog. Args: catalog: Path to the Kedro project root directory. pipeline: Optional Kedro pipeline. If specified, the factory-based Kedro datasets it defines are returned. Returns: A dictionary mapping dataset names to Kedro Dataset loading functions. Examples: >>> from vizro.integrations import kedro as kedro_integration >>> dataset_loaders = kedro_integration.datasets_from_catalog(catalog)
datasets_from_catalog
python
mckinsey/vizro
vizro-core/src/vizro/integrations/kedro/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/integrations/kedro/_data_manager.py
Apache-2.0
def __setitem__(self, name: DataSourceName, data: Union[pd.DataFrame, pd_DataFrameCallable]):
    """Adds `data` to the `DataManager` with key `name`.

    Args:
        name: Name under which the data source is registered and later looked up.
        data: Either a pandas DataFrame (stored as static data) or a callable returning a pandas
            DataFrame (stored as dynamic data).

    Raises:
        TypeError: If `data` is neither a pandas DataFrame nor a callable.
    """
    if callable(data):
        # __qualname__ is required by flask-caching (even if we specify our own make_name) but
        # not defined for partial functions and just '<lambda>' for lambda functions. Defining __qualname__
        # means it's possible to have non-interfering caches for lambda functions (similarly if we
        # end up using CapturedCallable, or that could instead set its own __qualname__).
        # We handle __name__ the same way even though it's not currently essential to functioning of flask-caching
        # in case they change the underlying implementation to use it.
        # We use partial to effectively make an independent copy of the underlying data function. This means that
        # it's possible to set __qualname__ independently for each data source. This is not essential for
        # functions other than lambda, but it is essential for bound methods, as flask-caching cannot easily
        # independently timeout different instances with different bound methods but the same underlying function
        # data.__func__. If we don't do this then the bound method case needs some uglier hacking to make work
        # correctly - see https://github.com/mckinsey/vizro/blob/abb7eebb230ba7e6cfdf6150dc56b211a78b1cd5/
        # vizro-core/src/vizro/managers/_data_manager.py.
        # Once partial has been used, all dynamic data sources are on equal footing since they're all treated as
        # functions rather than bound methods, e.g. by flask_caching.utils.function_namespace. This makes it much
        # simpler to use flask-caching reliably.
        # Note that for kedro>=0.19.9 we use lambda: catalog.load(dataset_name) rather than dataset.load so the
        # bound method case no longer arises when using kedro integration.
        # It's important the __qualname__ is the same across all workers, so use the data source name rather than
        # e.g. the repr method that includes the id of the instance so would only work in the case that gunicorn is
        # running with --preload.
        # __module__ is also required in flask_caching.utils.function_namespace and not defined for partial
        # functions in some versions of Python.
        # update_wrapper ensures that __module__, __name__, __qualname__, __annotations__ and __doc__ are
        # assigned to the new partial(data) the same as they were in data. This isn't strictly necessary but makes
        # inspecting these functions easier.
        data = functools.update_wrapper(partial(data), data)
        data.__module__ = getattr(data, "__module__", "<nomodule>")
        data.__name__ = ".".join([getattr(data, "__name__", "<unnamed>"), name])
        data.__qualname__ = ".".join([getattr(data, "__qualname__", "<unnamed>"), name])
        self.__data[name] = _DynamicData(data)
    elif isinstance(data, pd.DataFrame):
        self.__data[name] = _StaticData(data)
    else:
        raise TypeError(
            f"Data source {name} must be a pandas DataFrame or function that returns a pandas DataFrame."
        )
Adds `data` to the `DataManager` with key `name`.
__setitem__
python
mckinsey/vizro
vizro-core/src/vizro/managers/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_data_manager.py
Apache-2.0
def __getitem__(self, name: DataSourceName) -> Union[_DynamicData, _StaticData]:
    """Returns the `_DynamicData` or `_StaticData` object associated with `name`.

    Raises:
        KeyError: If no data source was registered under `name`.
    """
    try:
        data_source = self.__data[name]
    except KeyError as missing_data_source:
        # Re-raise with a friendlier message, keeping the original as the cause.
        raise KeyError(f"Data source {name} does not exist.") from missing_data_source
    return data_source
Returns the `_DynamicData` or `_StaticData` object associated with `name`.
__getitem__
python
mckinsey/vizro
vizro-core/src/vizro/managers/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_data_manager.py
Apache-2.0
def _multi_load(self, multi_name_load_kwargs: list[tuple[DataSourceName, dict[str, Any]]]) -> list[pd.DataFrame]:
    """Loads multiple data sources as efficiently as possible.

    De-duplicates the (data source name, load keyword argument dictionary) tuples so that each
    unique request corresponds to a single load() call. In the worst case (no repeated tuples)
    this is equivalent to one load call per tuple. A static data source must use {} as its load
    keyword argument dictionary.

    Args:
        multi_name_load_kwargs: List of (data source name, load keyword argument dictionary).

    Returns:
        Loaded data in the same order as `multi_name_load_kwargs` was supplied.
    """

    # A sorted JSON string gives a hashable, order-insensitive key for each (name, kwargs) pair.
    def _load_key(name, load_kwargs):
        return json.dumps([name, load_kwargs], sort_keys=True)

    # Load each unique request exactly once, in order of first appearance.
    loaded_by_key: dict = {}
    for name, load_kwargs in multi_name_load_kwargs:
        key = _load_key(name, load_kwargs)
        if key not in loaded_by_key:
            loaded_by_key[key] = self[name].load(**load_kwargs)

    # Fan the de-duplicated results back out to the originally requested order.
    return [loaded_by_key[_load_key(name, load_kwargs)] for name, load_kwargs in multi_name_load_kwargs]
Loads multiple data sources as efficiently as possible. Deduplicates a list of (data source name, load keyword argument dictionary) tuples so that each one corresponds to only a single load() call. In the worst case scenario where there are no repeated tuples then performance of this function is identical to doing a load call for each tuple. If a data source is static then load keyword argument dictionary must be {}. Args: multi_name_load_kwargs: List of (data source name, load keyword argument dictionary). Returns: Loaded data in the same order as `multi_name_load_kwargs` was supplied.
_multi_load
python
mckinsey/vizro
vizro-core/src/vizro/managers/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_data_manager.py
Apache-2.0
def _cache_has_app(self) -> bool: """Detects whether self.cache.init_app has been called (as it is in Vizro) to attach a Flask app to the cache. Note that even NullCache needs to have an app attached before it can be "used". The only time the cache would not have an app attached is if the user tries to interact with the cache before Vizro() has been called. """ cache_has_app = hasattr(self.cache, "app") if not cache_has_app and self.cache.config["CACHE_TYPE"] != "NullCache": # Try to prevent anyone from setting data_manager.cache after they've instantiated Vizro(). # No need to emit a warning if the cache is left as NullCache; we only care about this if someone has # explicitly set a cache. # Eventually Vizro should probably have init_app method explicitly to clear this up so the order of # operations is more reliable. Alternatively we could just initialize cache at run time rather than build # time, which is what Flask-Caching is really designed for. This would require an extra step for users # though, since it could not go in Vizro.run() since that is not used in the case of gunicorn. warnings.warn( "Cache does not have Vizro app attached and so is not operational. Make sure you call " "Vizro() after you set data_manager.cache." ) return cache_has_app
Detects whether self.cache.init_app has been called (as it is in Vizro) to attach a Flask app to the cache. Note that even NullCache needs to have an app attached before it can be "used". The only time the cache would not have an app attached is if the user tries to interact with the cache before Vizro() has been called.
_cache_has_app
python
mckinsey/vizro
vizro-core/src/vizro/managers/_data_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_data_manager.py
Apache-2.0
def __iter__(self) -> Generator[ModelID, None, None]:
    """Iterates through all models.

    Note this yields model IDs rather key/value pairs to match the interface for a dictionary.
    """
    # TODO: should this yield models rather than model IDs? Should model_manager be more like set with a special
    # lookup by model ID or more like dictionary?
    for model_id in self.__models:
        yield model_id
Iterates through all models. Note this yields model IDs rather key/value pairs to match the interface for a dictionary.
__iter__
python
mckinsey/vizro
vizro-core/src/vizro/managers/_model_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_model_manager.py
Apache-2.0
def _get_models(
    self,
    model_type: Optional[Union[type[Model], tuple[type[Model], ...], type[FIGURE_MODELS]]] = None,
    root_model: Optional[VizroBaseModel] = None,
) -> Generator[Model, None, None]:
    """Iterates through all models of type `model_type` (including subclasses).

    If `model_type` is specified, return only models matching that type. Otherwise, include all types.
    If `root_model` is specified, return only models that are descendants of the given `root_model`.
    """
    import vizro.models as vm

    # Expand the FIGURE_MODELS sentinel into the concrete figure-like model classes.
    if model_type is FIGURE_MODELS:
        model_type = (vm.Graph, vm.AgGrid, vm.Table, vm.Figure)  # type: ignore[assignment]

    if root_model is None:
        candidates = self.__models.values()
    else:
        candidates = self.__get_model_children(root_model)

    # Snapshot into a list so the collection cannot change size while we yield at runtime.
    for candidate in list(candidates):
        if model_type is None or isinstance(candidate, model_type):
            yield candidate  # type: ignore[misc]
Iterates through all models of type `model_type` (including subclasses). If `model_type` is specified, return only models matching that type. Otherwise, include all types. If `root_model` is specified, return only models that are descendants of the given `root_model`.
_get_models
python
mckinsey/vizro
vizro-core/src/vizro/managers/_model_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_model_manager.py
Apache-2.0
def __get_model_children(self, model: Model) -> Generator[Model, None, None]:
    """Iterates through children of `model`.

    Currently, this method looks only through certain fields (components, tabs, controls, actions,
    selector) and their children, so some children models might be missed.
    """
    from vizro.models import VizroBaseModel

    if isinstance(model, VizroBaseModel):
        yield model
    elif isinstance(model, Mapping):
        # Keys are ignored: Vizro models aren't hashable, so they can never appear as keys.
        for child in model.values():
            yield from self.__get_model_children(child)
    elif isinstance(model, Collection) and not isinstance(model, str):
        # str is excluded because it is a Collection but must not be walked character by character.
        for child in model:
            yield from self.__get_model_children(child)

    # TODO: in future this list should not be maintained manually. Instead we should look through all model
    # children by looking at model.model_fields.
    for field_name in ("components", "tabs", "controls", "actions", "selector"):
        field_value = getattr(model, field_name, None)
        if field_value is not None:
            yield from self.__get_model_children(field_value)
    # TODO: Add navigation, accordions and other page objects. Won't be needed once have made whole model
    # manager work better recursively and have better ways to navigate the hierarchy. In pydantic v2 this would use
    # model_fields. Maybe we'd also use Page (or sometimes Dashboard) as the central model for navigating the
    # hierarchy rather than it being so generic.
Iterates through children of `model`. Currently, this method looks only through certain fields (components, tabs, controls, actions, selector) and their children so might miss some children models.
__get_model_children
python
mckinsey/vizro
vizro-core/src/vizro/managers/_model_manager.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/managers/_model_manager.py
Apache-2.0
def _get_layout_discriminator(layout: Any) -> Optional[str]: """Helper function for callable discriminator used for LayoutType.""" # It is not immediately possible to introduce a discriminated union as a field type without it breaking existing # YAML/dictionary configuration in which `type` is not specified. This function is needed to handle the legacy case. if isinstance(layout, dict): # If type is supplied then use that (like saying discriminator="type"). Otherwise, it's the legacy case where # type is not specified, in which case we want to use vm.Layout, which has type="legacy_layout". try: return layout["type"] except KeyError: warnings.warn( "`layout` without an explicit `type` specified will no longer work in Vizro 0.2.0. To ensure " "future compatibility, specify `type: grid` for your `layout`.", FutureWarning, stacklevel=3, ) return "legacy_layout" # If a model has been specified then this is equivalent to saying discriminator="type". When None is returned, # union_tag_not_found error is raised. return getattr(layout, "type", None)
Helper function for callable discriminator used for LayoutType.
_get_layout_discriminator
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def _get_action_discriminator(action: Any) -> Optional[str]: """Helper function for callable discriminator used for ActionType.""" # It is not immediately possible to introduce a discriminated union as a field type without it breaking existing # YAML/dictionary configuration in which `type` is not specified. This function is needed to handle the legacy case. if isinstance(action, dict): # If type is supplied then use that (like saying discriminator="type"). Otherwise, it's the legacy case where # type is not specified, in which case we want to use vm.Action, which has type="action". try: # TODO-AV2 C 1: Put in deprecation warning. return action["type"] except KeyError: return "action" # If a model has been specified then this is equivalent to saying discriminator="type". When None is returned, # union_tag_not_found error is raised. return getattr(action, "type", None)
Helper function for callable discriminator used for ActionType.
_get_action_discriminator
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def validate_captured_callable(cls, value, info: ValidationInfo):
    """Reusable validator for the `figure` argument of Figure like models."""
    from vizro.actions import export_data, filter_interaction

    # Legacy configurations like vm.Action(function=filter_interaction(...)) and
    # vm.Action(function=export_data(...)) must bypass validation entirely to keep working.
    if isinstance(value, (export_data, filter_interaction)):
        return value

    # TODO[MS]: We may want to double check on the mechanism of how field info is brought to. This seems
    # to get deprecated in V3
    json_schema_extra: JsonSchemaExtraType = cls.model_fields[info.field_name].json_schema_extra
    return CapturedCallable._validate_captured_callable(
        captured_callable_config=value, json_schema_extra=json_schema_extra
    )
Reusable validator for the `figure` argument of Figure like models.
validate_captured_callable
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def __init__(self, function, /, *args, **kwargs): """Creates a new `CapturedCallable` object that will be able to re-run `function`. Partially binds *args and **kwargs to the function call. Raises: ValueError if `function` contains positional-only or variadic positional parameters (*args). """ # It is difficult to get positional-only and variadic positional arguments working at the same time as # variadic keyword arguments. Ideally we would do the __call__ as # self.__function(*bound_arguments.args, **bound_arguments.kwargs) as in the # Python documentation. This would handle positional-only and variadic positional arguments better but makes # it more difficult to handle variadic keyword arguments due to https://bugs.python.org/issue41745. # Hence we abandon bound_arguments.args and bound_arguments.kwargs in favor of just using # self.__function(**bound_arguments.arguments). parameters = inspect.signature(function).parameters invalid_params = { param.name for param in parameters.values() if param.kind in [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.VAR_POSITIONAL] } if invalid_params: raise ValueError( f"Invalid parameter {', '.join(invalid_params)}. CapturedCallable does not accept functions with " f"positional-only or variadic positional parameters (*args)." ) self.__function = function self.__bound_arguments = inspect.signature(function).bind_partial(*args, **kwargs).arguments self.__unbound_arguments = [ param for param in parameters.values() if param.name not in self.__bound_arguments ] # Maintaining the same order here is important. # A function can only ever have one variadic keyword parameter. {""} is just here so that var_keyword_param # is always unpacking a one element set. 
(var_keyword_param,) = { param.name for param in parameters.values() if param.kind == inspect.Parameter.VAR_KEYWORD } or {""} # Since we do __call__ as self.__function(**bound_arguments.arguments), we need to restructure the arguments # a bit to put the kwargs in the right place. # For a function with parameter **kwargs this converts self.__bound_arguments = {"kwargs": {"a": 1}} into # self.__bound_arguments = {"a": 1}. if var_keyword_param in self.__bound_arguments: self.__bound_arguments.update(self.__bound_arguments[var_keyword_param]) del self.__bound_arguments[var_keyword_param] # Used in later validations of the captured callable. self._mode = None self._model_example = None
Creates a new `CapturedCallable` object that will be able to re-run `function`. Partially binds *args and **kwargs to the function call. Raises: ValueError if `function` contains positional-only or variadic positional parameters (*args).
__init__
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def __call__(self, *args, **kwargs): """Run the `function` with the initially bound arguments overridden by `**kwargs`. *args are possible here, but cannot be used to override arguments bound in `__init__` - just to provide additional arguments. You can still override arguments that were originally given as positional using their argument name. """ if args and kwargs: # In theory we could probably lift this restriction, but currently we don't need to and we'd need # to give careful thought on the right way to handle cases where there's ambiguity in the # self.__function call as the same argument is potentially being provided through both *args and **kwargs. raise ValueError("CapturedCallable does not support calling with both positional and keyword arguments.") # In order to avoid any ambiguity in the call to self.__function, we cannot provide use the *args directly. # Instead they must converted to keyword arguments and so we need to match them up with the right keywords. # Since positional-only or variadic positional parameters are not possible (they raise ValueError in __init__) # the only possible type of argument *args could be address is positional-or-keyword. if args: unbound_positional_arguments = [ param.name for param in self.__unbound_arguments if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] if len(args) > len(unbound_positional_arguments): # TypeError to match the standard Python exception raised in this case. raise TypeError( f"CapturedCallable takes {len(unbound_positional_arguments)} " f"positional arguments but {len(args)} were given." ) # No need to handle case that len(args) < len(unbound_positional_arguments), # since this will already raise error in the following function call. return self.__function(**dict(zip(unbound_positional_arguments, args)), **self.__bound_arguments) return self.__function(**{**self.__bound_arguments, **kwargs})
Run the `function` with the initially bound arguments overridden by `**kwargs`. *args are possible here, but cannot be used to override arguments bound in `__init__` - just to provide additional arguments. You can still override arguments that were originally given as positional using their argument name.
__call__
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def _parse_json( cls, captured_callable_config: Union[_SupportsCapturedCallable, CapturedCallable, dict[str, Any]], json_schema_extra: JsonSchemaExtraType, ) -> Union[CapturedCallable, _SupportsCapturedCallable]: """Parses captured_callable_config specification from JSON/YAML. If captured_callable_config is already _SupportCapturedCallable or CapturedCallable then it just passes through untouched. This uses the hydra syntax for _target_ but none of the other bits and we don't actually use hydra to implement it. In future, we might like to switch to using hydra's actual implementation which would allow nested functions (e.g. for transformers?) and to specify the path to a _target_ that lives outside of vizro.plotly_express. See https://hydra.cc/docs/advanced/instantiate_objects/overview/. """ if not isinstance(captured_callable_config, dict): return captured_callable_config # Try to import function given in _target_ from the import_path property of the pydantic field. try: function_name = captured_callable_config.pop("_target_") except KeyError as exc: raise ValueError( "CapturedCallable object must contain the key '_target_' that gives the target function." ) from exc import_path = json_schema_extra["import_path"] try: function = getattr(importlib.import_module(import_path), function_name) except (AttributeError, ModuleNotFoundError) as exc: raise ValueError(f"_target_={function_name} cannot be imported from {import_path}.") from exc # All the other items in figure are the keyword arguments to pass into function. function_kwargs = captured_callable_config # It would seem natural to return cls(function, **function_kwargs) here, but the function is already decorated # with @capture, and so that would return a nested CapturedCallable. return function(**function_kwargs)
Parses captured_callable_config specification from JSON/YAML. If captured_callable_config is already _SupportCapturedCallable or CapturedCallable then it just passes through untouched. This uses the hydra syntax for _target_ but none of the other bits and we don't actually use hydra to implement it. In future, we might like to switch to using hydra's actual implementation which would allow nested functions (e.g. for transformers?) and to specify the path to a _target_ that lives outside of vizro.plotly_express. See https://hydra.cc/docs/advanced/instantiate_objects/overview/.
_parse_json
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def _extract_from_attribute( cls, captured_callable: Union[_SupportsCapturedCallable, CapturedCallable] ) -> CapturedCallable: """Extracts CapturedCallable from _SupportCapturedCallable (e.g. _DashboardReadyFigure). If captured_callable is already CapturedCallable then it just passes through untouched. """ if not isinstance(captured_callable, _SupportsCapturedCallable): return captured_callable return captured_callable._captured_callable
Extracts CapturedCallable from _SupportCapturedCallable (e.g. _DashboardReadyFigure). If captured_callable is already CapturedCallable then it just passes through untouched.
_extract_from_attribute
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def _check_type( cls, captured_callable: CapturedCallable, json_schema_extra: JsonSchemaExtraType ) -> CapturedCallable: """Checks captured_callable is right type and mode.""" from vizro.actions import export_data, filter_interaction # Bypass validation so that legacy {"function": {"_target_": "filter_interaction"}} and # {"function": {"_target_": "export_data"}} work. if isinstance(captured_callable, (export_data, filter_interaction)): return captured_callable expected_mode = json_schema_extra["mode"] import_path = json_schema_extra["import_path"] if not isinstance(captured_callable, CapturedCallable): raise ValueError( f"Invalid CapturedCallable. Supply a function imported from {import_path} or defined with " f"decorator @capture('{expected_mode}')." ) if (mode := captured_callable._mode) != expected_mode: raise ValueError( f"CapturedCallable was defined with @capture('{mode}') rather than @capture('{expected_mode}') and so " "is not compatible with the model." ) return captured_callable
Checks captured_callable is right type and mode.
_check_type
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def __repr_clean__(self): """Alternative __repr__ method with cleaned module paths.""" args = ", ".join(f"{key}={value!r}" for key, value in self._arguments.items()) original_module_path = f"{self._function.__module__}" return f"{_clean_module_string(original_module_path)}{self._function.__name__}({args})"
Alternative __repr__ method with cleaned module paths.
__repr_clean__
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def _pio_templates_default(): """Sets pio.templates.default to "vizro_dark" and then reverts it. This is to ensure that in a Jupyter Notebook captured charts look the same as when they're in the dashboard. When the context manager exits the global theme is reverted just to keep things clean (e.g. if you really wanted to, you could compare a captured vs. non-captured chart in the same Python session). This works even if users have tweaked the templates, so long as pio.templates has been updated correctly and you refer to template by name rather than trying to take from vizro.themes. If pio.templates.default has already been set to vizro_dark or vizro_light then no change is made to allow a user to set these without it being overridden. """ old_default = pio.templates.default template_changed = False # If the user has set pio.templates.default to a vizro theme already, no need to change it. if old_default not in ["vizro_dark", "vizro_light"]: template_changed = True pio.templates.default = "vizro_dark" # Revert the template. This is done in a try/finally so that if the code wrapped inside the context manager (i.e. # plotting functions) raises an exception, pio.templates.default is still reverted. This is not very important # but easy to achieve. try: # This will always be vizro_light or vizro_dark and corresponds to the default theme that has been set. yield pio.templates.default finally: if template_changed: pio.templates.default = old_default
Sets pio.templates.default to "vizro_dark" and then reverts it. This is to ensure that in a Jupyter Notebook captured charts look the same as when they're in the dashboard. When the context manager exits the global theme is reverted just to keep things clean (e.g. if you really wanted to, you could compare a captured vs. non-captured chart in the same Python session). This works even if users have tweaked the templates, so long as pio.templates has been updated correctly and you refer to template by name rather than trying to take from vizro.themes. If pio.templates.default has already been set to vizro_dark or vizro_light then no change is made to allow a user to set these without it being overridden.
_pio_templates_default
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def __call__(self, func, /): """Produces a CapturedCallable or _DashboardReadyFigure. mode="action" and mode="table" give a CapturedCallable, while mode="graph" gives a _DashboardReadyFigure that contains a CapturedCallable. In both cases, the CapturedCallable is based on func and the provided *args and **kwargs. """ if self._mode == "graph": # The more difficult case, where we need to still have a valid plotly figure that renders in a notebook. # Hence we attach the CapturedCallable as a property instead of returning it directly. # TODO: move point of checking that data_frame argument exists earlier on. # TODO: also would be nice to raise errors in CapturedCallable.__init__ at point of function definition # rather than point of calling if possible. @functools.wraps(func) def wrapped(*args, **kwargs) -> _DashboardReadyFigure: if "data_frame" not in inspect.signature(func).parameters: raise ValueError(f"{func.__name__} must have data_frame argument to use capture('graph').") # We need to capture function upfront in order to find value of data_frame argument: since it could be # positional or keyword, this is much more robust than trying to get it out of arg or kwargs ourselves. captured_callable: CapturedCallable = CapturedCallable(func, *args, **kwargs) captured_callable._mode = self._mode captured_callable._model_example = self._model_example try: captured_callable["data_frame"] except KeyError as exc: raise ValueError(f"{func.__name__} must supply a value to data_frame argument.") from exc if isinstance(captured_callable["data_frame"], str): # Enable running e.g. px.scatter("iris") from the Python API. Don't actually run the function # because it won't work as there's no data. This case is not relevant for the JSON/YAML API, # which is handled separately through validation of CapturedCallable. fig = _DashboardReadyFigure() else: # Standard case for px.scatter(df: pd.DataFrame). # Set theme for the figure that gets shown in a Jupyter Notebook. 
This is to ensure that in a # Jupyter Notebook captured charts look the same as when they're in the dashboard. To mimic this, # we first use _pio_templates_default to set the global theme, as is done in the dashboard, and then # do the fig.layout.template update that is achieved by the theme selector. # We don't want to update the captured_callable in the same way, since it's only used inside the # dashboard, at which point the global pio.templates.default is always set anyway according to # the dashboard theme and then updated according to the theme selector. with _pio_templates_default() as default_template: fig = func(*args, **kwargs) # Update the fig.layout.template just to ensure absolute consistency with how the dashboard # works. In a dashboard this is done with the update_graph_theme clientside callback. # The only exception here is the edge case that the user has specified template="vizro_light" or # "vizro_dark" in the plotting function, in which case we don't want to change it. This makes # it easier for a user to try out both themes simultaneously in a notebook. if fig.layout.template not in (pio.templates["vizro_dark"], pio.templates["vizro_light"]): fig.layout.template = default_template fig.__class__ = _DashboardReadyFigure fig._captured_callable = captured_callable return fig return wrapped elif self._mode == "action": # The "normal" case where we just capture the function call. 
@functools.wraps(func) def wrapped(*args, **kwargs) -> CapturedCallable: # Note this is basically the same as partial(func, *args, **kwargs) captured_callable: CapturedCallable = CapturedCallable(func, *args, **kwargs) captured_callable._mode = self._mode captured_callable._model_example = self._model_example return captured_callable return wrapped elif self._mode in ["table", "ag_grid", "figure"]: @functools.wraps(func) def wrapped(*args, **kwargs) -> CapturedCallable: if "data_frame" not in inspect.signature(func).parameters: raise ValueError(f"{func.__name__} must have data_frame argument to use capture('table').") captured_callable: CapturedCallable = CapturedCallable(func, *args, **kwargs) captured_callable._mode = self._mode captured_callable._model_example = self._model_example try: captured_callable["data_frame"] except KeyError as exc: raise ValueError(f"{func.__name__} must supply a value to data_frame argument.") from exc return captured_callable return wrapped raise ValueError( "Valid modes of the capture decorator are @capture('graph'), @capture('action'), @capture('table'), " "@capture('ag_grid') and @capture('figure')." )
Produces a CapturedCallable or _DashboardReadyFigure. mode="action" and mode="table" give a CapturedCallable, while mode="graph" gives a _DashboardReadyFigure that contains a CapturedCallable. In both cases, the CapturedCallable is based on func and the provided *args and **kwargs.
__call__
python
mckinsey/vizro
vizro-core/src/vizro/models/types.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/types.py
Apache-2.0
def add_type(cls, field_name: str, new_type: type[Any]): """Adds a new type to an existing field based on a discriminated union. Args: field_name: Field that new type will be added to new_type: New type to add to discriminated union """ field = cls.model_fields[field_name] old_type = cast(type[Any], field.annotation) new_annotation = ( _add_type_to_union(old_type, new_type) if _is_discriminated_union_via_field_info(field) else _add_type_to_annotated_union_if_found(old_type, new_type, field_name) ) cls.model_fields[field_name] = FieldInfo.merge_field_infos(field, annotation=new_annotation) # We need to resolve all ForwardRefs again e.g. in the case of Page, which requires update_forward_refs in # vizro.models. The vm.__dict__.copy() is inspired by pydantic's own implementation of update_forward_refs and # effectively replaces all ForwardRefs defined in vizro.models. import vizro.models as vm cls.model_rebuild(force=True, _types_namespace=vm.__dict__.copy()) new_type.model_rebuild(force=True, _types_namespace=vm.__dict__.copy())
Adds a new type to an existing field based on a discriminated union. Args: field_name: Field that new type will be added to new_type: New type to add to discriminated union
add_type
python
mckinsey/vizro
vizro-core/src/vizro/models/_base.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_base.py
Apache-2.0
def _to_python( self, extra_imports: Optional[set[str]] = None, extra_callable_defs: Optional[set[str]] = None ) -> str: """Converts a Vizro model to the Python code that would create it. Args: extra_imports: Extra imports to add to the Python code. Provide as a set of complete import strings. extra_callable_defs: Extra callable definitions to add to the Python code. Provide as a set of complete function definitions. Returns: str: Python code to create the Vizro model. Examples: Simple usage example with card model. >>> import vizro.models as vm >>> card = vm.Card(text="Hello, world!") >>> print(card._to_python()) Further options include adding extra imports and callable definitions. These will be included in the returned Python string. >>> print( ... card._to_python( ... extra_imports={"from typing import Optional"}, ... extra_callable_defs={"def test(foo: Optional[str]): return foo"}, ... ) ... ) """ # Imports extra_imports_concat = "\n".join(extra_imports) if extra_imports else None # CapturedCallable definitions - NOTE that order is not guaranteed callable_defs_set = _extract_captured_callable_source() | (extra_callable_defs or set()) callable_defs_concat = "\n".join(callable_defs_set) if callable_defs_set else None # Data Manager data_defs_set = _extract_captured_callable_data_info() data_defs_concat = "\n".join(data_defs_set) if data_defs_set else None # Model code model_dict = self.model_dump(context={"add_name": True}, exclude_unset=True) model_code = "model = " + _dict_to_python(model_dict) # Concatenate and lint code callable_defs_template = ( CALLABLE_TEMPLATE.format(callable_defs=callable_defs_concat) if callable_defs_concat else "" ) data_settings_template = DATA_TEMPLATE.format(data_setting=data_defs_concat) if data_defs_concat else "" unformatted_code = TO_PYTHON_TEMPLATE.format( code=model_code, extra_imports=extra_imports_concat or "", callable_defs_template=callable_defs_template, data_settings_template=data_settings_template, ) try: return 
_format_and_lint(unformatted_code) except Exception: logging.exception("Code formatting failed; returning unformatted code") return unformatted_code
Converts a Vizro model to the Python code that would create it. Args: extra_imports: Extra imports to add to the Python code. Provide as a set of complete import strings. extra_callable_defs: Extra callable definitions to add to the Python code. Provide as a set of complete function definitions. Returns: str: Python code to create the Vizro model. Examples: Simple usage example with card model. >>> import vizro.models as vm >>> card = vm.Card(text="Hello, world!") >>> print(card._to_python()) Further options include adding extra imports and callable definitions. These will be included in the returned Python string. >>> print( ... card._to_python( ... extra_imports={"from typing import Optional"}, ... extra_callable_defs={"def test(foo: Optional[str]): return foo"}, ... ) ... )
_to_python
python
mckinsey/vizro
vizro-core/src/vizro/models/_base.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_base.py
Apache-2.0
def _all_hidden(components: list[Component]): """Returns True if all `components` are either None and/or have hidden=True and/or className contains `d-none`.""" return all( component is None or getattr(component, "hidden", False) or "d-none" in getattr(component, "className", "d-inline") for component in components )
Returns True if all `components` are either None and/or have hidden=True and/or className contains `d-none`.
_all_hidden
python
mckinsey/vizro
vizro-core/src/vizro/models/_dashboard.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_dashboard.py
Apache-2.0
def build(self): """Creates empty flex container to later position components in.""" bs_wrap = "flex-wrap" if self.wrap else "flex-nowrap" component_container = html.Div( [], style={"gap": self.gap}, className=f"d-flex flex-{self.direction} {bs_wrap}", id=self.id, ) return component_container
Creates empty flex container to later position components in.
build
python
mckinsey/vizro
vizro-core/src/vizro/models/_flex.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_flex.py
Apache-2.0
def _convert_to_combined_grid_coord(matrix: ma.MaskedArray) -> ColRowGridLines: """Converts `matrix` coordinates from user `grid` to one combined grid area spanned by component i. Required for validation of grid areas spanned by components. User-provided grid: [[0, 1], [0, 2]] Matrix coordinates for component i=0: [(0, 0), (1, 0)] Grid coordinates for component i=0: ColRowGridLines(col_start=1, col_end=2, row_start=1, row_end=3) Args: matrix: Array that represents the user-provided grid with a mask on the relevant screen component i Returns: ColRowGridLines for combined area spanned by all placements of screen component i """ matrix_coord = [(x, y) for x, row in enumerate(matrix) for y, value in enumerate(row) if ma.is_masked(value)] row_idx, col_idx = zip(*matrix_coord) return ColRowGridLines( col_start=min(col_idx) + 1, col_end=max(col_idx) + 2, row_start=min(row_idx) + 1, row_end=max(row_idx) + 2 )
Converts `matrix` coordinates from user `grid` to one combined grid area spanned by component i. Required for validation of grid areas spanned by components. User-provided grid: [[0, 1], [0, 2]] Matrix coordinates for component i=0: [(0, 0), (1, 0)] Grid coordinates for component i=0: ColRowGridLines(col_start=1, col_end=2, row_start=1, row_end=3) Args: matrix: Array that represents the user-provided grid with a mask on the relevant screen component i Returns: ColRowGridLines for combined area spanned by all placements of screen component i
_convert_to_combined_grid_coord
python
mckinsey/vizro
vizro-core/src/vizro/models/_grid.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_grid.py
Apache-2.0
def _convert_to_single_grid_coord(matrix: ma.MaskedArray) -> list[ColRowGridLines]: """Converts `matrix` coordinates from user `grid` to list of grid areas spanned by each placement of component i. Required for validation of grid areas spanned by spaces, where the combined area does not need to be rectangular. User-provided grid: [[0, 1], [0, 2]] Matrix coordinates for component i=0: [(0, 0), (1, 0)] Grid coordinates for component i=0: [ColRowGridLines(col_start=1, col_end=2, row_start=1, row_end=2), ColRowGridLines(col_start=1, col_end=2, row_start=2, row_end=3)] Args: matrix: Array that represents the user-provided grid with a mask on the relevant screen component i Returns: List of ColRowGridLines for each individual placement of screen component i """ matrix_coord = [(x, y) for x, row in enumerate(matrix) for y, value in enumerate(row) if ma.is_masked(value)] return [ ColRowGridLines(col_start=col_idx + 1, col_end=col_idx + 2, row_start=row_idx + 1, row_end=row_idx + 2) for row_idx, col_idx in matrix_coord ]
Converts `matrix` coordinates from user `grid` to list of grid areas spanned by each placement of component i. Required for validation of grid areas spanned by spaces, where the combined area does not need to be rectangular. User-provided grid: [[0, 1], [0, 2]] Matrix coordinates for component i=0: [(0, 0), (1, 0)] Grid coordinates for component i=0: [ColRowGridLines(col_start=1, col_end=2, row_start=1, row_end=2), ColRowGridLines(col_start=1, col_end=2, row_start=2, row_end=3)] Args: matrix: Array that represents the user-provided grid with a mask on the relevant screen component i Returns: List of ColRowGridLines for each individual placement of screen component i
_convert_to_single_grid_coord
python
mckinsey/vizro
vizro-core/src/vizro/models/_grid.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_grid.py
Apache-2.0
def _do_rectangles_overlap(r1: ColRowGridLines, r2: ColRowGridLines) -> bool: """Checks if rectangles `r1` and `r2` overlap in areas. 1. Computes the min and max of r1 and r2 on both axes. 2. Computes the boundaries of the intersection rectangle (x1=left, x2=right, y1=top, y2=bottom) 3. Checks if the intersection is valid and has a positive non-zero area (x1 < x2 and y1 < y2) See: https://github.com/SFML/SFML/blob/12d81304e63e333174d943ba3ff572e38abd56e0/include/SFML/Graphics/Rect.inl#L109 Args: r1: Tuple containing grid coordinates for screen component i r2: Tuple containing grid coordinates for screen component j Returns: Bool if rectangular grid area spanned by component i overlaps with the area of component j """ x1 = max(min(r1.row_start, r1.row_end), min(r2.row_start, r2.row_end)) y1 = max(min(r1.col_start, r1.col_end), min(r2.col_start, r2.col_end)) x2 = min(max(r1.row_start, r1.row_end), max(r2.row_start, r2.row_end)) y2 = min(max(r1.col_start, r1.col_end), max(r2.col_start, r2.col_end)) return x1 < x2 and y1 < y2
Checks if rectangles `r1` and `r2` overlap in areas. 1. Computes the min and max of r1 and r2 on both axes. 2. Computes the boundaries of the intersection rectangle (x1=left, x2=right, y1=top, y2=bottom) 3. Checks if the intersection is valid and has a positive non-zero area (x1 < x2 and y1 < y2) See: https://github.com/SFML/SFML/blob/12d81304e63e333174d943ba3ff572e38abd56e0/include/SFML/Graphics/Rect.inl#L109 Args: r1: Tuple containing grid coordinates for screen component i r2: Tuple containing grid coordinates for screen component j Returns: Bool if rectangular grid area spanned by component i overlaps with the area of component j
_do_rectangles_overlap
python
mckinsey/vizro
vizro-core/src/vizro/models/_grid.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_grid.py
Apache-2.0
def _validate_grid_areas(grid_areas: list[ColRowGridLines]) -> None: """Validates `grid_areas` spanned by screen components in `Grid`.""" for i, r1 in enumerate(grid_areas): for r2 in grid_areas[i + 1 :]: if _do_rectangles_overlap(r1, r2): raise ValueError("Grid areas must be rectangular and not overlap!")
Validates `grid_areas` spanned by screen components in `Grid`.
_validate_grid_areas
python
mckinsey/vizro
vizro-core/src/vizro/models/_grid.py
https://github.com/mckinsey/vizro/blob/master/vizro-core/src/vizro/models/_grid.py
Apache-2.0