Dataset schema (column name, feature type, observed value range):

    code        string    lengths 66 to 870k
    docstring   string    lengths 19 to 26.7k
    func_name   string    lengths 1 to 138
    language    string    1 distinct value
    repo        string    lengths 7 to 68
    path        string    lengths 5 to 324
    url         string    lengths 46 to 389
    license     string    7 distinct values
def __init__(self, stat="proportion", complementary=False):
    """Initialize the class with its parameters

    Parameters
    ----------
    stat : {{"proportion", "percent", "count"}}
        Distribution statistic to compute.
    complementary : bool
        If True, use the complementary CDF (1 - CDF)

    """
    _check_argument("stat", ["count", "percent", "proportion"], stat)
    self.stat = stat
    self.complementary = complementary
Initialize the class with its parameters

Parameters
----------
stat : {{"proportion", "percent", "count"}}
    Distribution statistic to compute.
complementary : bool
    If True, use the complementary CDF (1 - CDF)
__init__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
def _eval_univariate(self, x, weights):
    """Inner function for ECDF of one variable."""
    sorter = x.argsort()
    x = x[sorter]
    weights = weights[sorter]
    y = weights.cumsum()

    if self.stat in ["percent", "proportion"]:
        y = y / y.max()
    if self.stat == "percent":
        y = y * 100

    x = np.r_[-np.inf, x]
    y = np.r_[0, y]

    if self.complementary:
        y = y.max() - y

    return y, x
Inner function for ECDF of one variable.
_eval_univariate
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
def __call__(self, x1, x2=None, weights=None):
    """Return proportion or count of observations below each sorted datapoint."""
    x1 = np.asarray(x1)
    if weights is None:
        weights = np.ones_like(x1)
    else:
        weights = np.asarray(weights)

    if x2 is None:
        return self._eval_univariate(x1, weights)
    else:
        return self._eval_bivariate(x1, x2, weights)
Return proportion or count of observations below each sorted datapoint.
__call__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
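Taken together, the ECDF methods above can be exercised directly. A minimal usage sketch, assuming seaborn is installed and that the private `seaborn._statistics` module keeps this interface:

import numpy as np
from seaborn._statistics import ECDF  # private module; import path may change

rng = np.random.default_rng(0)
values = rng.normal(size=100)

ecdf = ECDF(stat="proportion")
y, x = ecdf(values)  # note the (y, x) return order from _eval_univariate

# The prepended (0, -inf) pair anchors the curve at the left edge
assert y[0] == 0 and np.isinf(x[0])
assert np.isclose(y[-1], 1.0)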
def __init__(self, estimator, errorbar=None, **boot_kws):
    """
    Data aggregator that produces an estimate and error bar interval.

    Parameters
    ----------
    estimator : callable or string
        Function (or method name) that maps a vector to a scalar.
    errorbar : string, (string, number) tuple, or callable
        Name of errorbar method (either "ci", "pi", "se", or "sd"), or a tuple
        with a method name and a level parameter, or a function that maps from a
        vector to a (min, max) interval, or None to hide errorbar. See the
        :doc:`errorbar tutorial </tutorial/error_bars>` for more information.
    boot_kws
        Additional keywords are passed to bootstrap when error_method is "ci".

    """
    self.estimator = estimator

    method, level = _validate_errorbar_arg(errorbar)
    self.error_method = method
    self.error_level = level

    self.boot_kws = boot_kws
Data aggregator that produces an estimate and error bar interval.

Parameters
----------
estimator : callable or string
    Function (or method name) that maps a vector to a scalar.
errorbar : string, (string, number) tuple, or callable
    Name of errorbar method (either "ci", "pi", "se", or "sd"), or a tuple
    with a method name and a level parameter, or a function that maps from a
    vector to a (min, max) interval, or None to hide errorbar. See the
    :doc:`errorbar tutorial </tutorial/error_bars>` for more information.
boot_kws
    Additional keywords are passed to bootstrap when error_method is "ci".
__init__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
def __call__(self, data, var):
    """Aggregate over `var` column of `data` with estimate and error interval."""
    vals = data[var]
    if callable(self.estimator):
        # You would think we could pass to vals.agg, and yet:
        # https://github.com/mwaskom/seaborn/issues/2943
        estimate = self.estimator(vals)
    else:
        estimate = vals.agg(self.estimator)

    # Options that produce no error bars
    if self.error_method is None:
        err_min = err_max = np.nan
    elif len(data) <= 1:
        err_min = err_max = np.nan

    # Generic errorbars from user-supplied function
    elif callable(self.error_method):
        err_min, err_max = self.error_method(vals)

    # Parametric options
    elif self.error_method == "sd":
        half_interval = vals.std() * self.error_level
        err_min, err_max = estimate - half_interval, estimate + half_interval
    elif self.error_method == "se":
        half_interval = vals.sem() * self.error_level
        err_min, err_max = estimate - half_interval, estimate + half_interval

    # Nonparametric options
    elif self.error_method == "pi":
        err_min, err_max = _percentile_interval(vals, self.error_level)
    elif self.error_method == "ci":
        units = data.get("units", None)
        boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)
        err_min, err_max = _percentile_interval(boots, self.error_level)

    return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
Aggregate over `var` column of `data` with estimate and error interval.
__call__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
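A rough usage sketch for the aggregator defined by the two methods above; the import relies on the same private module, so treat the path as an assumption:

import numpy as np
import pandas as pd
from seaborn._statistics import EstimateAggregator  # private; may change

data = pd.DataFrame({"y": np.random.default_rng(0).normal(10, 2, size=50)})

agg = EstimateAggregator("mean", ("ci", 95), n_boot=500, seed=0)
res = agg(data, "y")  # pd.Series with keys "y", "ymin", "ymax"
print(res["y"], res["ymin"], res["ymax"])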
def __init__(self, estimator, errorbar=None, **boot_kws):
    """
    Data aggregator that produces a weighted estimate and error bar interval.

    Parameters
    ----------
    estimator : string
        Function (or method name) that maps a vector to a scalar. Currently
        supports only "mean".
    errorbar : string or (string, number) tuple
        Name of errorbar method or a tuple with a method name and a level
        parameter. Currently the only supported method is "ci".
    boot_kws
        Additional keywords are passed to bootstrap when error_method is "ci".

    """
    if estimator != "mean":
        # Note that, while other weighted estimators may make sense (e.g. median),
        # I'm not aware of an implementation in our dependencies. We can add one
        # in seaborn later, if there is sufficient interest. For now, limit to mean.
        raise ValueError(f"Weighted estimator must be 'mean', not {estimator!r}.")
    self.estimator = estimator

    method, level = _validate_errorbar_arg(errorbar)
    if method is not None and method != "ci":
        # As with the estimator, weighted 'sd' or 'pi' error bars may make sense.
        # But we'll keep things simple for now and limit to (bootstrap) CI.
        raise ValueError(f"Error bar method must be 'ci', not {method!r}.")
    self.error_method = method
    self.error_level = level

    self.boot_kws = boot_kws
Data aggregator that produces a weighted estimate and error bar interval.

Parameters
----------
estimator : string
    Function (or method name) that maps a vector to a scalar. Currently
    supports only "mean".
errorbar : string or (string, number) tuple
    Name of errorbar method or a tuple with a method name and a level
    parameter. Currently the only supported method is "ci".
boot_kws
    Additional keywords are passed to bootstrap when error_method is "ci".
__init__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
def __call__(self, data, var):
    """Aggregate over `var` column of `data` with estimate and error interval."""
    vals = data[var]
    weights = data["weight"]

    estimate = np.average(vals, weights=weights)

    if self.error_method == "ci" and len(data) > 1:

        def error_func(x, w):
            return np.average(x, weights=w)

        boots = bootstrap(vals, weights, func=error_func, **self.boot_kws)
        err_min, err_max = _percentile_interval(boots, self.error_level)

    else:
        err_min = err_max = np.nan

    return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
Aggregate over `var` column of `data` with estimate and error interval.
__call__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
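The weighted variant expects the weights in a literal "weight" column of the input frame. A hedged sketch under the same private-module assumption:

import pandas as pd
from seaborn._statistics import WeightedAggregator  # private; may change

data = pd.DataFrame({
    "y": [1.0, 2.0, 3.0],
    "weight": [1, 1, 8],  # the heavy weight pulls the estimate toward 3.0
})

agg = WeightedAggregator("mean", ("ci", 95), seed=0)
print(agg(data, "y"))  # weighted mean = 2.7, plus bootstrap CI bounds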
def __init__(self, k_depth, outlier_prop, trust_alpha):
    """
    Compute percentiles of a distribution using various tail stopping rules.

    Parameters
    ----------
    k_depth : "tukey", "proportion", "trustworthy", or "full"
        Stopping rule for choosing tail percentiles to show:

        - tukey: Show a similar number of outliers as in a conventional boxplot.
        - proportion: Show approximately `outlier_prop` outliers.
        - trustworthy: Use `trust_alpha` level for most extreme tail percentile.

    outlier_prop : float
        Parameter for `k_depth="proportion"` setting the expected outlier rate.
    trust_alpha : float
        Parameter for `k_depth="trustworthy"` setting the confidence threshold.

    Notes
    -----
    Based on the proposal in this paper:
    https://vita.had.co.nz/papers/letter-value-plot.pdf

    """
    k_options = ["tukey", "proportion", "trustworthy", "full"]
    if isinstance(k_depth, str):
        _check_argument("k_depth", k_options, k_depth)
    elif not isinstance(k_depth, int):
        err = (
            "The `k_depth` parameter must be either an integer or string "
            f"(one of {k_options}), not {k_depth!r}."
        )
        raise TypeError(err)
    self.k_depth = k_depth

    self.outlier_prop = outlier_prop
    self.trust_alpha = trust_alpha
Compute percentiles of a distribution using various tail stopping rules.

Parameters
----------
k_depth : "tukey", "proportion", "trustworthy", or "full"
    Stopping rule for choosing tail percentiles to show:

    - tukey: Show a similar number of outliers as in a conventional boxplot.
    - proportion: Show approximately `outlier_prop` outliers.
    - trustworthy: Use `trust_alpha` level for most extreme tail percentile.

outlier_prop : float
    Parameter for `k_depth="proportion"` setting the expected outlier rate.
trust_alpha : float
    Parameter for `k_depth="trustworthy"` setting the confidence threshold.

Notes
-----
Based on the proposal in this paper:
https://vita.had.co.nz/papers/letter-value-plot.pdf
__init__
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
def _percentile_interval(data, width):
    """Return a percentile interval from data of a given width."""
    edge = (100 - width) / 2
    percentiles = edge, 100 - edge
    return np.nanpercentile(data, percentiles)
Return a percentile interval from data of a given width.
_percentile_interval
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
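A quick self-contained check of the interval arithmetic; the helper is copied verbatim from the record above, so nothing beyond numpy is assumed:

import numpy as np

def _percentile_interval(data, width):
    """Return a percentile interval from data of a given width."""
    edge = (100 - width) / 2
    percentiles = edge, 100 - edge
    return np.nanpercentile(data, percentiles)

low, high = _percentile_interval(np.arange(1, 101), 95)
print(low, high)  # 3.475 97.525 -- the central 95% of the values 1..100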
def _validate_errorbar_arg(arg):
    """Check type and value of errorbar argument and assign default level."""
    DEFAULT_LEVELS = {
        "ci": 95,
        "pi": 95,
        "se": 1,
        "sd": 1,
    }

    usage = "`errorbar` must be a callable, string, or (string, number) tuple"

    if arg is None:
        return None, None
    elif callable(arg):
        return arg, None
    elif isinstance(arg, str):
        method = arg
        level = DEFAULT_LEVELS.get(method, None)
    else:
        try:
            method, level = arg
        except (ValueError, TypeError) as err:
            raise err.__class__(usage) from err

    _check_argument("errorbar", list(DEFAULT_LEVELS), method)
    if level is not None and not isinstance(level, Number):
        raise TypeError(usage)

    return method, level
Check type and value of errorbar argument and assign default level.
_validate_errorbar_arg
python
mwaskom/seaborn
seaborn/_statistics.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_statistics.py
BSD-3-Clause
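Its behavior is easiest to see from a few direct calls. This is hypothetical usage of the private helper; the expected outputs follow from the code above:

from seaborn._statistics import _validate_errorbar_arg  # private; may change

print(_validate_errorbar_arg(None))       # (None, None): no error bars
print(_validate_errorbar_arg("ci"))       # ('ci', 95): default level filled in
print(_validate_errorbar_arg(("sd", 2)))  # ('sd', 2): explicit level kept
print(_validate_errorbar_arg("se"))       # ('se', 1)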
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_* names.
    """
    import winreg as _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names.
_get_win_folder_from_registry
python
mwaskom/seaborn
seaborn/external/appdirs.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/appdirs.py
BSD-3-Clause
def strip_blank_lines(l):
    "Remove leading and trailing blank lines from a list of lines"
    while l and not l[0].strip():
        del l[0]
    while l and not l[-1].strip():
        del l[-1]
    return l
Remove leading and trailing blank lines from a list of lines
strip_blank_lines
python
mwaskom/seaborn
seaborn/external/docscrape.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/docscrape.py
BSD-3-Clause
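The helper mutates the list in place and also returns it; interior blank lines are preserved. Copied from above for a self-contained demo:

def strip_blank_lines(l):
    "Remove leading and trailing blank lines from a list of lines"
    while l and not l[0].strip():
        del l[0]
    while l and not l[-1].strip():
        del l[-1]
    return l

lines = ["", "  ", "first", "", "last", "\t", ""]
print(strip_blank_lines(lines))  # ['first', '', 'last']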
def __init__(self, data):
    """
    Parameters
    ----------
    data : str
       String with lines separated by '\n'.

    """
    if isinstance(data, list):
        self._str = data
    else:
        self._str = data.split('\n')  # store string as list of lines

    self.reset()
Parameters
----------
data : str
   String with lines separated by '\n'.
__init__
python
mwaskom/seaborn
seaborn/external/docscrape.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/docscrape.py
BSD-3-Clause
def _parse_see_also(self, content):
    """
    func_name : Descriptive text
        continued text
    another_func_name : Descriptive text
    func_name1, func_name2, :meth:`func_name`, func_name3

    """
    items = []

    def parse_item_name(text):
        """Match ':role:`name`' or 'name'."""
        m = self._func_rgx.match(text)
        if not m:
            raise ParseError(f"{text} is not an item name")
        role = m.group('role')
        name = m.group('name') if role else m.group('name2')
        return name, role, m.end()

    rest = []
    for line in content:
        if not line.strip():
            continue

        line_match = self._line_rgx.match(line)
        description = None
        if line_match:
            description = line_match.group('desc')
            if line_match.group('trailing') and description:
                self._error_location(
                    'Unexpected comma or period after function list at index %d of '
                    'line "%s"' % (line_match.end('trailing'), line),
                    error=False)
        if not description and line.startswith(' '):
            rest.append(line.strip())
        elif line_match:
            funcs = []
            text = line_match.group('allfuncs')
            while True:
                if not text.strip():
                    break
                name, role, match_end = parse_item_name(text)
                funcs.append((name, role))
                text = text[match_end:].strip()
                if text and text[0] == ',':
                    text = text[1:].strip()
            rest = list(filter(None, [description]))
            items.append((funcs, rest))
        else:
            raise ParseError(f"{line} is not an item name")
    return items
func_name : Descriptive text
    continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
_parse_see_also
python
mwaskom/seaborn
seaborn/external/docscrape.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/docscrape.py
BSD-3-Clause
def _parse_summary(self):
    """Grab signature (if given) and summary"""
    if self._is_at_section():
        return

    # If several signatures present, take the last one
    while True:
        summary = self._doc.read_to_next_empty_line()
        summary_str = " ".join([s.strip() for s in summary]).strip()
        compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
        if compiled.match(summary_str):
            self['Signature'] = summary_str
            if not self._is_at_section():
                continue
        break

    if summary is not None:
        self['Summary'] = summary

    if not self._is_at_section():
        self['Extended Summary'] = self._read_to_next_section()
Grab signature (if given) and summary
_parse_summary
python
mwaskom/seaborn
seaborn/external/docscrape.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/docscrape.py
BSD-3-Clause
def evaluate(self, points):
    """Evaluate the estimated pdf on a set of points.

    Parameters
    ----------
    points : (# of dimensions, # of points)-array
        Alternatively, a (# of dimensions,) vector can be passed in and
        treated as a single point.

    Returns
    -------
    values : (# of points,)-array
        The values at each point.

    Raises
    ------
    ValueError : if the dimensionality of the input points is different than
                 the dimensionality of the KDE.

    """
    points = atleast_2d(asarray(points))

    d, m = points.shape
    if d != self.d:
        if d == 1 and m == self.d:
            # points was passed in as a row vector
            points = reshape(points, (self.d, 1))
            m = 1
        else:
            msg = f"points have dimension {d}, dataset has dimension {self.d}"
            raise ValueError(msg)

    output_dtype = np.common_type(self.covariance, points)
    result = zeros((m,), dtype=output_dtype)

    whitening = linalg.cholesky(self.inv_cov)
    scaled_dataset = dot(whitening, self.dataset)
    scaled_points = dot(whitening, points)

    if m >= self.n:
        # there are more points than data, so loop over data
        for i in range(self.n):
            diff = scaled_dataset[:, i, newaxis] - scaled_points
            energy = sum(diff * diff, axis=0) / 2.0
            result += self.weights[i]*exp(-energy)
    else:
        # loop over points
        for i in range(m):
            diff = scaled_dataset - scaled_points[:, i, newaxis]
            energy = sum(diff * diff, axis=0) / 2.0
            result[i] = sum(exp(-energy)*self.weights, axis=0)

    result = result / self._norm_factor

    return result
Evaluate the estimated pdf on a set of points.

Parameters
----------
points : (# of dimensions, # of points)-array
    Alternatively, a (# of dimensions,) vector can be passed in and
    treated as a single point.

Returns
-------
values : (# of points,)-array
    The values at each point.

Raises
------
ValueError : if the dimensionality of the input points is different than
             the dimensionality of the KDE.
evaluate
python
mwaskom/seaborn
seaborn/external/kde.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/kde.py
BSD-3-Clause
def set_bandwidth(self, bw_method=None):
    """Compute the estimator bandwidth with given method.

    The new bandwidth calculated after a call to `set_bandwidth` is used
    for subsequent evaluations of the estimated density.

    Parameters
    ----------
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', a scalar constant or a callable. If a
        scalar, this will be used directly as `kde.factor`. If a callable,
        it should take a `gaussian_kde` instance as only parameter and
        return a scalar. If None (default), nothing happens; the current
        `kde.covariance_factor` method is kept.

    Notes
    -----
    .. versionadded:: 0.11

    """
    if bw_method is None:
        pass
    elif bw_method == 'scott':
        self.covariance_factor = self.scotts_factor
    elif bw_method == 'silverman':
        self.covariance_factor = self.silverman_factor
    elif np.isscalar(bw_method) and not isinstance(bw_method, str):
        self._bw_method = 'use constant'
        self.covariance_factor = lambda: bw_method
    elif callable(bw_method):
        self._bw_method = bw_method
        self.covariance_factor = lambda: self._bw_method(self)
    else:
        msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
              "or a callable."
        raise ValueError(msg)

    self._compute_covariance()
Compute the estimator bandwidth with given method.

The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.

Parameters
----------
bw_method : str, scalar or callable, optional
    The method used to calculate the estimator bandwidth. This can be
    'scott', 'silverman', a scalar constant or a callable. If a
    scalar, this will be used directly as `kde.factor`. If a callable,
    it should take a `gaussian_kde` instance as only parameter and
    return a scalar. If None (default), nothing happens; the current
    `kde.covariance_factor` method is kept.

Notes
-----
.. versionadded:: 0.11
set_bandwidth
python
mwaskom/seaborn
seaborn/external/kde.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/kde.py
BSD-3-Clause
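To see the bandwidth rules in action, one can use seaborn's vendored copy of the estimator; treating the `seaborn.external.kde` import path as an assumption about where the vendored module lives:

import numpy as np
from seaborn.external.kde import gaussian_kde  # vendored; path may change

x = np.random.default_rng(0).normal(size=200)
kde = gaussian_kde(x)              # Scott's rule by default
kde.set_bandwidth("silverman")     # switch the factor rule
kde.set_bandwidth(kde.factor / 2)  # or fix a narrower scalar constant
print(kde.evaluate(np.linspace(-3, 3, 5)))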
def _compute_covariance(self):
    """Computes the covariance matrix for each Gaussian kernel using
    covariance_factor().
    """
    self.factor = self.covariance_factor()
    # Cache covariance and inverse covariance of the data
    if not hasattr(self, '_data_inv_cov'):
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                               bias=False,
                                               aweights=self.weights))
        self._data_inv_cov = linalg.inv(self._data_covariance)

    self.covariance = self._data_covariance * self.factor**2
    self.inv_cov = self._data_inv_cov / self.factor**2
    self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
Computes the covariance matrix for each Gaussian kernel using covariance_factor().
_compute_covariance
python
mwaskom/seaborn
seaborn/external/kde.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/kde.py
BSD-3-Clause
def _parse_local_version(local: str) -> Optional[LocalType]:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
_parse_local_version
python
mwaskom/seaborn
seaborn/external/version.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/external/version.py
BSD-3-Clause
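A standalone recreation for illustration; the separator regex mirrors the one used by packaging-style version parsers and is an assumption here:

import re
from typing import Optional, Tuple, Union

_local_version_separators = re.compile(r"[\._-]")  # assumed pattern

def _parse_local_version(local: str) -> Optional[Tuple[Union[int, str], ...]]:
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None

print(_parse_local_version("abc.1.twelve"))  # ('abc', 1, 'twelve')
print(_parse_local_version("ubuntu-20_04"))  # ('ubuntu', 20, 4)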
def __contains__(self, key: str) -> bool:
    """Boolean check on whether a variable is defined in this dataset."""
    if self.frame is None:
        return any(key in df for df in self.frames.values())
    return key in self.frame
Boolean check on whether a variable is defined in this dataset.
__contains__
python
mwaskom/seaborn
seaborn/_core/data.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/data.py
BSD-3-Clause
def join(
    self,
    data: DataSource,
    variables: dict[str, VariableSpec] | None,
) -> PlotData:
    """Add, replace, or drop variables and return as a new dataset."""
    # Inherit the original source of the upstream data by default
    if data is None:
        data = self.source_data

    # TODO allow `data` to be a function (that is called on the source data?)

    if not variables:
        variables = self.source_vars

    # Passing var=None implies that we do not want that variable in this layer
    disinherit = [k for k, v in variables.items() if v is None]

    # Create a new dataset with just the info passed here
    new = PlotData(data, variables)

    # -- Update the inherited DataSource with this new information

    drop_cols = [k for k in self.frame if k in new.frame or k in disinherit]
    parts = [self.frame.drop(columns=drop_cols), new.frame]

    # Because we are combining distinct columns, this is perhaps more
    # naturally thought of as a "merge"/"join". But using concat because
    # some simple testing suggests that it is marginally faster.
    frame = pd.concat(parts, axis=1, sort=False, copy=False)

    names = {k: v for k, v in self.names.items() if k not in disinherit}
    names.update(new.names)

    ids = {k: v for k, v in self.ids.items() if k not in disinherit}
    ids.update(new.ids)

    new.frame = frame
    new.names = names
    new.ids = ids

    # Multiple chained operations should always inherit from the original object
    new.source_data = self.source_data
    new.source_vars = self.source_vars

    return new
Add, replace, or drop variables and return as a new dataset.
join
python
mwaskom/seaborn
seaborn/_core/data.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/data.py
BSD-3-Clause
def _assign_variables(
    self,
    data: DataFrame | Mapping | None,
    variables: dict[str, VariableSpec],
) -> tuple[DataFrame, dict[str, str | None], dict[str, str | int]]:
    """
    Assign values for plot variables given long-form data and/or vector inputs.

    Parameters
    ----------
    data
        Input data where variable names map to vector values.
    variables
        Keys are names of plot variables (x, y, ...) each value is one of:

        - name of a column (or index level, or dictionary entry) in `data`
        - vector in any format that can construct a :class:`pandas.DataFrame`

    Returns
    -------
    frame
        Table mapping seaborn variables (x, y, color, ...) to data vectors.
    names
        Keys are defined seaborn variables; values are names inferred from
        the inputs (or None when no name can be determined).
    ids
        Like the `names` dict, but `None` values are replaced by the `id()`
        of the data object that defined the variable.

    Raises
    ------
    TypeError
        When data source is not a DataFrame or Mapping.
    ValueError
        When variables are strings that don't appear in `data`, or when they are
        non-indexed vector datatypes that have a different length from `data`.

    """
    source_data: Mapping | DataFrame
    frame: DataFrame
    names: dict[str, str | None]
    ids: dict[str, str | int]

    plot_data = {}
    names = {}
    ids = {}

    given_data = data is not None
    if data is None:
        # Data is optional; all variables can be defined as vectors
        # But simplify downstream code by always having a usable source data object
        source_data = {}
    else:
        source_data = data

    # Variables can also be extracted from the index of a DataFrame
    if isinstance(source_data, pd.DataFrame):
        index = source_data.index.to_frame().to_dict("series")
    else:
        index = {}

    for key, val in variables.items():

        # Simply ignore variables with no specification
        if val is None:
            continue

        # Try to treat the argument as a key for the data collection.
        # But be flexible about what can be used as a key.
        # Usually it will be a string, but allow other hashables when
        # taking from the main data object. Allow only strings to reference
        # fields in the index, because otherwise there is too much ambiguity.

        # TODO this will be rendered unnecessary by the following pandas fix:
        # https://github.com/pandas-dev/pandas/pull/41283
        try:
            hash(val)
            val_is_hashable = True
        except TypeError:
            val_is_hashable = False

        val_as_data_key = (
            # See https://github.com/pandas-dev/pandas/pull/41283
            # (isinstance(val, abc.Hashable) and val in source_data)
            (val_is_hashable and val in source_data)
            or (isinstance(val, str) and val in index)
        )

        if val_as_data_key:
            val = cast(ColumnName, val)
            if val in source_data:
                plot_data[key] = source_data[val]
            elif val in index:
                plot_data[key] = index[val]
            names[key] = ids[key] = str(val)

        elif isinstance(val, str):
            # This looks like a column name, but the lookup failed.
            err = f"Could not interpret value `{val}` for `{key}`. "
            if not given_data:
                err += "Value is a string, but `data` was not passed."
            else:
                err += "An entry with this name does not appear in `data`."
            raise ValueError(err)

        else:
            # Otherwise, assume the value somehow represents data

            # Ignore empty data structures
            if isinstance(val, Sized) and len(val) == 0:
                continue

            # If vector has no index, it must match length of data table
            if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):
                if isinstance(val, Sized) and len(data) != len(val):
                    val_cls = val.__class__.__name__
                    err = (
                        f"Length of {val_cls} vectors must match length of `data`"
                        f" when both are used, but `data` has length {len(data)}"
                        f" and the vector passed to `{key}` has length {len(val)}."
                    )
                    raise ValueError(err)

            plot_data[key] = val

            # Try to infer the original name using pandas-like metadata
            if hasattr(val, "name"):
                names[key] = ids[key] = str(val.name)  # type: ignore  # mypy/1424
            else:
                names[key] = None
                ids[key] = id(val)

    # Construct a tidy plot DataFrame. This will convert a number of
    # types automatically, aligning on index in case of pandas objects
    # TODO Note: this fails when variable specs *only* have scalars!
    frame = pd.DataFrame(plot_data)

    return frame, names, ids
Assign values for plot variables given long-form data and/or vector inputs.

Parameters
----------
data
    Input data where variable names map to vector values.
variables
    Keys are names of plot variables (x, y, ...) each value is one of:

    - name of a column (or index level, or dictionary entry) in `data`
    - vector in any format that can construct a :class:`pandas.DataFrame`

Returns
-------
frame
    Table mapping seaborn variables (x, y, color, ...) to data vectors.
names
    Keys are defined seaborn variables; values are names inferred from
    the inputs (or None when no name can be determined).
ids
    Like the `names` dict, but `None` values are replaced by the `id()`
    of the data object that defined the variable.

Raises
------
TypeError
    When data source is not a DataFrame or Mapping.
ValueError
    When variables are strings that don't appear in `data`, or when they are
    non-indexed vector datatypes that have a different length from `data`.
_assign_variables
python
mwaskom/seaborn
seaborn/_core/data.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/data.py
BSD-3-Clause
def handle_data_source(data: object) -> pd.DataFrame | Mapping | None:
    """Convert the data source object to a common union representation."""
    if isinstance(data, pd.DataFrame) or hasattr(data, "__dataframe__"):
        # Check for pd.DataFrame inheritance could be removed once
        # minimal pandas version supports dataframe interchange (1.5.0).
        data = convert_dataframe_to_pandas(data)
    elif data is not None and not isinstance(data, Mapping):
        err = f"Data source must be a DataFrame or Mapping, not {type(data)!r}."
        raise TypeError(err)

    return data
Convert the data source object to a common union representation.
handle_data_source
python
mwaskom/seaborn
seaborn/_core/data.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/data.py
BSD-3-Clause
def convert_dataframe_to_pandas(data: object) -> pd.DataFrame:
    """Use the DataFrame exchange protocol, or fail gracefully."""
    if isinstance(data, pd.DataFrame):
        return data

    if not hasattr(pd.api, "interchange"):
        msg = (
            "Support for non-pandas DataFrame objects requires a version of pandas "
            "that implements the DataFrame interchange protocol. Please upgrade "
            "your pandas version or coerce your data to pandas before passing "
            "it to seaborn."
        )
        raise TypeError(msg)

    if _version_predates(pd, "2.0.2"):
        msg = (
            "DataFrame interchange with pandas<2.0.2 has some known issues. "
            f"You are using pandas {pd.__version__}. "
            "Continuing, but it is recommended to carefully inspect the results "
            "and to consider upgrading."
        )
        warnings.warn(msg, stacklevel=2)

    try:
        # This is going to convert all columns in the input dataframe, even though
        # we may only need one or two of them. It would be more efficient to select
        # the columns that are going to be used in the plot prior to interchange.
        # Solving that in general is a hard problem, especially with the objects
        # interface where variables passed in Plot() may only be referenced later
        # in Plot.add(). But noting here in case this seems to be a bottleneck.
        return pd.api.interchange.from_dataframe(data)
    except Exception as err:
        msg = (
            "Encountered an exception when converting data source "
            "to a pandas DataFrame. See traceback above for details."
        )
        raise RuntimeError(msg) from err
Use the DataFrame exchange protocol, or fail gracefully.
convert_dataframe_to_pandas
python
mwaskom/seaborn
seaborn/_core/data.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/data.py
BSD-3-Clause
def _during(cls, step: str, var: str = "") -> PlotSpecError:
    """
    Initialize the class to report the failure of a specific operation.
    """
    message = []
    if var:
        message.append(f"{step} failed for the `{var}` variable.")
    else:
        message.append(f"{step} failed.")
    message.append("See the traceback above for more information.")
    return cls(" ".join(message))
Initialize the class to report the failure of a specific operation.
_during
python
mwaskom/seaborn
seaborn/_core/exceptions.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/exceptions.py
BSD-3-Clause
def __init__(self, order: list[str] | dict[str, list | None]):
    """
    Initialize the GroupBy from grouping variables and optional level orders.

    Parameters
    ----------
    order
        List of variable names or dict mapping names to desired level orders.
        Level order values can be None to use default ordering rules. The
        variables can include names that are not expected to appear in the
        data; these will be dropped before the groups are defined.

    """
    if not order:
        raise ValueError("GroupBy requires at least one grouping variable")

    if isinstance(order, list):
        order = {k: None for k in order}
    self.order = order
Initialize the GroupBy from grouping variables and optional level orders.

Parameters
----------
order
    List of variable names or dict mapping names to desired level orders.
    Level order values can be None to use default ordering rules. The
    variables can include names that are not expected to appear in the
    data; these will be dropped before the groups are defined.
__init__
python
mwaskom/seaborn
seaborn/_core/groupby.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/groupby.py
BSD-3-Clause
def _get_groups(
    self, data: DataFrame
) -> tuple[str | list[str], Index | MultiIndex]:
    """Return index with Cartesian product of ordered grouping variable levels."""
    levels = {}
    for var, order in self.order.items():
        if var in data:
            if order is None:
                order = categorical_order(data[var])
            levels[var] = order

    grouper: str | list[str]
    groups: Index | MultiIndex
    if not levels:
        grouper = []
        groups = pd.Index([])
    elif len(levels) > 1:
        grouper = list(levels)
        groups = pd.MultiIndex.from_product(levels.values(), names=grouper)
    else:
        grouper, = list(levels)
        groups = pd.Index(levels[grouper], name=grouper)
    return grouper, groups
Return index with Cartesian product of ordered grouping variable levels.
_get_groups
python
mwaskom/seaborn
seaborn/_core/groupby.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/groupby.py
BSD-3-Clause
def _reorder_columns(self, res, data):
    """Reorder result columns to match original order with new columns appended."""
    cols = [c for c in data if c in res]
    cols += [c for c in res if c not in data]
    return res.reindex(columns=pd.Index(cols))
Reorder result columns to match original order with new columns appended.
_reorder_columns
python
mwaskom/seaborn
seaborn/_core/groupby.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/groupby.py
BSD-3-Clause
def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:
    """
    Reduce each group to a single row in the output.

    The output will have a row for each unique combination of the grouping
    variable levels with null values for the aggregated variable(s) where
    those combinations do not appear in the dataset.
    """
    grouper, groups = self._get_groups(data)

    if not grouper:
        # We will need to see whether there are valid usecases that end up here
        raise ValueError("No grouping variables are present in dataframe")

    res = (
        data
        .groupby(grouper, sort=False, observed=False)
        .agg(*args, **kwargs)
        .reindex(groups)
        .reset_index()
        .pipe(self._reorder_columns, data)
    )

    return res
Reduce each group to a single row in the output. The output will have a row for each unique combination of the grouping variable levels with null values for the aggregated variable(s) where those combinations do not appear in the dataset.
agg
python
mwaskom/seaborn
seaborn/_core/groupby.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/groupby.py
BSD-3-Clause
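The payoff of building the index from the Cartesian product of levels is that missing combinations still get a row. A sketch against the private `seaborn._core.groupby` module (import path is an assumption):

import pandas as pd
from seaborn._core.groupby import GroupBy  # private; may change

df = pd.DataFrame({"g": ["a", "a", "b"], "y": [1.0, 3.0, 5.0]})

gb = GroupBy({"g": ["a", "b", "c"]})  # "c" has no rows -> NaN in the output
print(gb.agg(df, {"y": "mean"}))
#    g    y
# 0  a  2.0
# 1  b  5.0
# 2  c  NaN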
def apply(
    self, data: DataFrame, func: Callable[..., DataFrame],
    *args, **kwargs,
) -> DataFrame:
    """Apply a DataFrame -> DataFrame mapping to each group."""
    grouper, groups = self._get_groups(data)

    if not grouper:
        return self._reorder_columns(func(data, *args, **kwargs), data)

    parts = {}
    for key, part_df in data.groupby(grouper, sort=False, observed=False):
        parts[key] = func(part_df, *args, **kwargs)
    stack = []
    for key in groups:
        if key in parts:
            if isinstance(grouper, list):
                # Implies that we had a MultiIndex so key is iterable
                group_ids = dict(zip(grouper, cast(Iterable, key)))
            else:
                group_ids = {grouper: key}
            stack.append(parts[key].assign(**group_ids))

    res = pd.concat(stack, ignore_index=True)
    return self._reorder_columns(res, data)
Apply a DataFrame -> DataFrame mapping to each group.
apply
python
mwaskom/seaborn
seaborn/_core/groupby.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/groupby.py
BSD-3-Clause
def build_plot_signature(cls):
    """
    Decorator function for giving Plot a useful signature.

    Currently this mostly saves us some duplicated typing, but we would like
    eventually to have a way of registering new semantic properties, at which
    point dynamic signature generation would become more important.
    """
    sig = inspect.signature(cls)
    params = [
        inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL),
        inspect.Parameter("data", inspect.Parameter.KEYWORD_ONLY, default=None)
    ]
    params.extend([
        inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)
        for name in PROPERTIES
    ])
    new_sig = sig.replace(parameters=params)
    cls.__signature__ = new_sig

    known_properties = textwrap.fill(
        ", ".join([f"|{p}|" for p in PROPERTIES]),
        width=78, subsequent_indent=" " * 8,
    )

    if cls.__doc__ is not None:  # support python -OO mode
        cls.__doc__ = cls.__doc__.format(known_properties=known_properties)

    return cls
Decorator function for giving Plot a useful signature. Currently this mostly saves us some duplicated typing, but we would like eventually to have a way of registering new semantic properties, at which point dynamic signature generation would become more important.
build_plot_signature
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def update(self, other: dict[str, Any] | None = None, /, **kwds):
    """Update the theme with a dictionary or keyword arguments of rc parameters."""
    if other is not None:
        theme = self._filter_params(other)
    else:
        theme = {}
    theme.update(kwds)
    super().update(theme)
Update the theme with a dictionary or keyword arguments of rc parameters.
update
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def _resolve_positionals(
    self,
    args: tuple[DataSource | VariableSpec, ...],
    data: DataSource,
    variables: dict[str, VariableSpec],
) -> tuple[DataSource, dict[str, VariableSpec]]:
    """Handle positional arguments, which may contain data / x / y."""
    if len(args) > 3:
        err = "Plot() accepts no more than 3 positional arguments (data, x, y)."
        raise TypeError(err)

    if (
        isinstance(args[0], (abc.Mapping, pd.DataFrame))
        or hasattr(args[0], "__dataframe__")
    ):
        if data is not None:
            raise TypeError("`data` given by both name and position.")
        data, args = args[0], args[1:]

    if len(args) == 2:
        x, y = args
    elif len(args) == 1:
        x, y = *args, None
    else:
        x = y = None

    for name, var in zip("yx", (y, x)):
        if var is not None:
            if name in variables:
                raise TypeError(f"`{name}` given by both name and position.")
            # Keep coordinates at the front of the variables dict
            # Cast type because we know this isn't a DataSource at this point
            variables = {name: cast(VariableSpec, var), **variables}

    return data, variables
Handle positional arguments, which may contain data / x / y.
_resolve_positionals
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def _clone(self) -> Plot:
    """Generate a new object with the same information as the current spec."""
    new = Plot()

    # TODO any way to enforce that data does not get mutated?
    new._data = self._data

    new._layers.extend(self._layers)

    new._scales.update(self._scales)
    new._shares.update(self._shares)
    new._limits.update(self._limits)
    new._labels.update(self._labels)
    new._theme.update(self._theme)

    new._facet_spec.update(self._facet_spec)
    new._pair_spec.update(self._pair_spec)

    new._figure_spec.update(self._figure_spec)
    new._subplot_spec.update(self._subplot_spec)
    new._layout_spec.update(self._layout_spec)

    new._target = self._target

    return new
Generate a new object with the same information as the current spec.
_clone
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def on(self, target: Axes | SubFigure | Figure) -> Plot:
    """
    Provide existing Matplotlib figure or axes for drawing the plot.

    When using this method, you will also need to explicitly call a method that
    triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you
    want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`
    first to compile the plot without rendering it.

    Parameters
    ----------
    target : Axes, SubFigure, or Figure
        Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add
        artists without otherwise modifying the figure. Otherwise, subplots will
        be created within the space of the given :class:`matplotlib.figure.Figure`
        or :class:`matplotlib.figure.SubFigure`.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.on.rst

    """
    accepted_types: tuple  # Allow tuple of various length
    accepted_types = (
        mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure
    )
    accepted_types_str = (
        f"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}"
    )
    if not isinstance(target, accepted_types):
        err = (
            f"The `Plot.on` target must be an instance of {accepted_types_str}. "
            f"You passed an instance of {target.__class__} instead."
        )
        raise TypeError(err)

    new = self._clone()
    new._target = target

    return new
Provide existing Matplotlib figure or axes for drawing the plot.

When using this method, you will also need to explicitly call a method that
triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you
want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`
first to compile the plot without rendering it.

Parameters
----------
target : Axes, SubFigure, or Figure
    Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add
    artists without otherwise modifying the figure. Otherwise, subplots will
    be created within the space of the given :class:`matplotlib.figure.Figure`
    or :class:`matplotlib.figure.SubFigure`.

Examples
--------
.. include:: ../docstrings/objects.Plot.on.rst
on
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def add(
    self,
    mark: Mark,
    *transforms: Stat | Move,
    orient: str | None = None,
    legend: bool = True,
    label: str | None = None,
    data: DataSource = None,
    **variables: VariableSpec,
) -> Plot:
    """
    Specify a layer of the visualization in terms of mark and data transform(s).

    This is the main method for specifying how the data should be visualized.
    It can be called multiple times with different arguments to define
    a plot with multiple layers.

    Parameters
    ----------
    mark : :class:`Mark`
        The visual representation of the data to use in this layer.
    transforms : :class:`Stat` or :class:`Move`
        Objects representing transforms to be applied before plotting the data.
        Currently, at most one :class:`Stat` can be used, and it
        must be passed first. This constraint will be relaxed in the future.
    orient : "x", "y", "v", or "h"
        The orientation of the mark, which also affects how transforms are
        computed. Typically corresponds to the axis that defines groups for
        aggregation. The "v" (vertical) and "h" (horizontal) options are synonyms
        for "x" / "y", but may be more intuitive with some marks. When not
        provided, an orientation will be inferred from characteristics of the
        data and scales.
    legend : bool
        Option to suppress the mark/mappings for this layer from the legend.
    label : str
        A label to use for the layer in the legend, independent of any mappings.
    data : DataFrame or dict
        Data source to override the global source provided in the constructor.
    variables : data vectors or identifiers
        Additional layer-specific variables, including variables that will be
        passed directly to the transforms without scaling.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.add.rst

    """
    if not isinstance(mark, Mark):
        msg = f"mark must be a Mark instance, not {type(mark)!r}."
        raise TypeError(msg)

    # TODO This API for transforms was a late decision, and previously Plot.add
    # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.
    # It will take some work to refactor the internals so that Stat and Move are
    # treated identically, and until then we'll need to "unpack" the transforms
    # here and enforce limitations on the order / types.
    stat: Optional[Stat]
    move: Optional[List[Move]]
    error = False
    if not transforms:
        stat, move = None, None
    elif isinstance(transforms[0], Stat):
        stat = transforms[0]
        move = [m for m in transforms[1:] if isinstance(m, Move)]
        error = len(move) != len(transforms) - 1
    else:
        stat = None
        move = [m for m in transforms if isinstance(m, Move)]
        error = len(move) != len(transforms)

    if error:
        msg = " ".join([
            "Transforms must have at most one Stat type (in the first position),",
            "and all others must be a Move type. Given transform type(s):",
            ", ".join(str(type(t).__name__) for t in transforms) + "."
        ])
        raise TypeError(msg)

    new = self._clone()
    new._layers.append({
        "mark": mark,
        "stat": stat,
        "move": move,
        # TODO it doesn't work to supply scalars to variables, but it should
        "vars": variables,
        "source": data,
        "legend": legend,
        "label": label,
        "orient": {"v": "x", "h": "y"}.get(orient, orient),  # type: ignore
    })

    return new
Specify a layer of the visualization in terms of mark and data transform(s).

This is the main method for specifying how the data should be visualized.
It can be called multiple times with different arguments to define
a plot with multiple layers.

Parameters
----------
mark : :class:`Mark`
    The visual representation of the data to use in this layer.
transforms : :class:`Stat` or :class:`Move`
    Objects representing transforms to be applied before plotting the data.
    Currently, at most one :class:`Stat` can be used, and it
    must be passed first. This constraint will be relaxed in the future.
orient : "x", "y", "v", or "h"
    The orientation of the mark, which also affects how transforms are
    computed. Typically corresponds to the axis that defines groups for
    aggregation. The "v" (vertical) and "h" (horizontal) options are synonyms
    for "x" / "y", but may be more intuitive with some marks. When not
    provided, an orientation will be inferred from characteristics of the
    data and scales.
legend : bool
    Option to suppress the mark/mappings for this layer from the legend.
label : str
    A label to use for the layer in the legend, independent of any mappings.
data : DataFrame or dict
    Data source to override the global source provided in the constructor.
variables : data vectors or identifiers
    Additional layer-specific variables, including variables that will be
    passed directly to the transforms without scaling.

Examples
--------
.. include:: ../docstrings/objects.Plot.add.rst
add
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
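A small example of layering with the public objects interface (loading the sample dataset needs network access):

import seaborn.objects as so
from seaborn import load_dataset

tips = load_dataset("tips")

p = (
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .add(so.Line(), so.PolyFit(order=1))  # the Stat is passed first among transforms
)
p.show()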
def pair(
    self,
    x: VariableSpecList = None,
    y: VariableSpecList = None,
    wrap: int | None = None,
    cross: bool = True,
) -> Plot:
    """
    Produce subplots by pairing multiple `x` and/or `y` variables.

    Parameters
    ----------
    x, y : sequence(s) of data vectors or identifiers
        Variables that will define the grid of subplots.
    wrap : int
        When using only `x` or `y`, "wrap" subplots across a two-dimensional
        grid with this many columns (when using `x`) or rows (when using `y`).
    cross : bool
        When False, zip the `x` and `y` lists such that the first subplot gets
        the first pair, the second gets the second pair, etc. Otherwise, create
        a two-dimensional grid from the cartesian product of the lists.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.pair.rst

    """
    # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows
    # This may also be possible by setting `wrap=1`, but is that too unobvious?
    # TODO PairGrid features not currently implemented: diagonals, corner

    pair_spec: PairSpec = {}

    axes = {"x": [] if x is None else x, "y": [] if y is None else y}
    for axis, arg in axes.items():
        if isinstance(arg, (str, int)):
            err = f"You must pass a sequence of variable keys to `{axis}`"
            raise TypeError(err)

    pair_spec["variables"] = {}
    pair_spec["structure"] = {}

    for axis in "xy":
        keys = []
        for i, col in enumerate(axes[axis]):
            key = f"{axis}{i}"
            keys.append(key)
            pair_spec["variables"][key] = col

        if keys:
            pair_spec["structure"][axis] = keys

    if not cross and len(axes["x"]) != len(axes["y"]):
        err = "Lengths of the `x` and `y` lists must match with cross=False"
        raise ValueError(err)

    pair_spec["cross"] = cross
    pair_spec["wrap"] = wrap

    new = self._clone()
    new._pair_spec.update(pair_spec)
    return new
Produce subplots by pairing multiple `x` and/or `y` variables.

Parameters
----------
x, y : sequence(s) of data vectors or identifiers
    Variables that will define the grid of subplots.
wrap : int
    When using only `x` or `y`, "wrap" subplots across a two-dimensional
    grid with this many columns (when using `x`) or rows (when using `y`).
cross : bool
    When False, zip the `x` and `y` lists such that the first subplot gets
    the first pair, the second gets the second pair, etc. Otherwise, create
    a two-dimensional grid from the cartesian product of the lists.

Examples
--------
.. include:: ../docstrings/objects.Plot.pair.rst
pair
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def facet(
    self,
    col: VariableSpec = None,
    row: VariableSpec = None,
    order: OrderSpec | dict[str, OrderSpec] = None,
    wrap: int | None = None,
) -> Plot:
    """
    Produce subplots with conditional subsets of the data.

    Parameters
    ----------
    col, row : data vectors or identifiers
        Variables used to define subsets along the columns and/or rows of the
        grid. Can be references to the global data source passed in the
        constructor.
    order : list of strings, or dict with dimensional keys
        Define the order of the faceting variables.
    wrap : int
        When using only `col` or `row`, wrap subplots across a two-dimensional
        grid with this many subplots on the faceting dimension.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.facet.rst

    """
    variables: dict[str, VariableSpec] = {}
    if col is not None:
        variables["col"] = col
    if row is not None:
        variables["row"] = row

    structure = {}
    if isinstance(order, dict):
        for dim in ["col", "row"]:
            dim_order = order.get(dim)
            if dim_order is not None:
                structure[dim] = list(dim_order)
    elif order is not None:
        if col is not None and row is not None:
            err = " ".join([
                "When faceting on both col= and row=, passing `order` as a list",
                "is ambiguous. Use a dict with 'col' and/or 'row' keys instead.",
            ])
            raise RuntimeError(err)
        elif col is not None:
            structure["col"] = list(order)
        elif row is not None:
            structure["row"] = list(order)

    spec: FacetSpec = {
        "variables": variables,
        "structure": structure,
        "wrap": wrap,
    }

    new = self._clone()
    new._facet_spec.update(spec)

    return new
Produce subplots with conditional subsets of the data.

Parameters
----------
col, row : data vectors or identifiers
    Variables used to define subsets along the columns and/or rows of the
    grid. Can be references to the global data source passed in the
    constructor.
order : list of strings, or dict with dimensional keys
    Define the order of the faceting variables.
wrap : int
    When using only `col` or `row`, wrap subplots across a two-dimensional
    grid with this many subplots on the faceting dimension.

Examples
--------
.. include:: ../docstrings/objects.Plot.facet.rst
facet
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def scale(self, **scales: Scale) -> Plot:
    """
    Specify mappings from data units to visual properties.

    Keywords correspond to variables defined in the plot, including coordinate
    variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).

    A number of "magic" arguments are accepted, including:

    - The name of a transform (e.g., `"log"`, `"sqrt"`)
    - The name of a palette (e.g., `"viridis"`, `"muted"`)
    - A tuple of values, defining the output range (e.g. `(1, 5)`)
    - A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`)
    - A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`)

    For more explicit control, pass a scale spec object such as
    :class:`Continuous` or :class:`Nominal`. Or pass `None` to use an
    "identity" scale, which treats data values as literally encoding visual
    properties.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.scale.rst

    """
    new = self._clone()
    new._scales.update(scales)
    return new
Specify mappings from data units to visual properties.

Keywords correspond to variables defined in the plot, including coordinate
variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).

A number of "magic" arguments are accepted, including:

- The name of a transform (e.g., `"log"`, `"sqrt"`)
- The name of a palette (e.g., `"viridis"`, `"muted"`)
- A tuple of values, defining the output range (e.g. `(1, 5)`)
- A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`)
- A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`)

For more explicit control, pass a scale spec object such as
:class:`Continuous` or :class:`Nominal`. Or pass `None` to use an
"identity" scale, which treats data values as literally encoding visual
properties.

Examples
--------
.. include:: ../docstrings/objects.Plot.scale.rst
scale
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
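A scale example using the public API, pairing a log transform for a coordinate variable with a named palette for a semantic variable:

import seaborn.objects as so
from seaborn import load_dataset

diamonds = load_dataset("diamonds")  # needs network access

(
    so.Plot(diamonds, x="carat", y="price", color="clarity")
    .add(so.Dots())
    .scale(y="log", color="crest")
    .show()
)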
def label(
    self, *,
    title: str | None = None,
    legend: str | None = None,
    **variables: str | Callable[[str], str]
) -> Plot:
    """
    Control the labels and titles for axes, legends, and subplots.

    Additional keywords correspond to variables defined in the plot.
    Values can be one of the following types:

    - string (used literally; pass "" to clear the default label)
    - function (called on the default label)

    For coordinate variables, the value sets the axis label.
    For semantic variables, the value sets the legend title.
    For faceting variables, `title=` modifies the subplot-specific label,
    while `col=` and/or `row=` add a label for the faceting variable.
    When using a single subplot, `title=` sets its title.

    The `legend=` parameter sets the title for the "layer" legend
    (i.e., when using `label` in :meth:`Plot.add`).

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.label.rst

    """
    new = self._clone()
    if title is not None:
        new._labels["title"] = title
    if legend is not None:
        new._labels["legend"] = legend
    new._labels.update(variables)
    return new
Control the labels and titles for axes, legends, and subplots.

Additional keywords correspond to variables defined in the plot.
Values can be one of the following types:

- string (used literally; pass "" to clear the default label)
- function (called on the default label)

For coordinate variables, the value sets the axis label.
For semantic variables, the value sets the legend title.
For faceting variables, `title=` modifies the subplot-specific label,
while `col=` and/or `row=` add a label for the faceting variable.
When using a single subplot, `title=` sets its title.

The `legend=` parameter sets the title for the "layer" legend
(i.e., when using `label` in :meth:`Plot.add`).

Examples
--------
.. include:: ../docstrings/objects.Plot.label.rst
label
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def layout(
    self,
    *,
    size: tuple[float, float] | Default = default,
    engine: str | None | Default = default,
    extent: tuple[float, float, float, float] | Default = default,
) -> Plot:
    """
    Control the figure size and layout.

    .. note::

        Default figure sizes and the API for specifying the figure size are
        subject to change in future "experimental" releases of the objects API.
        The default layout engine may also change.

    Parameters
    ----------
    size : (width, height)
        Size of the resulting figure, in inches. Size is inclusive of legend
        when using pyplot, but not otherwise.
    engine : {{"tight", "constrained", "none"}}
        Name of method for automatically adjusting the layout to remove overlap.
        The default depends on whether :meth:`Plot.on` is used.
    extent : (left, bottom, right, top)
        Boundaries of the plot layout, in fractions of the figure size. Takes
        effect through the layout engine; exact results will vary across
        engines. Note: the extent includes axis decorations when using a layout
        engine, but it is exclusive of them when `engine="none"`.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.layout.rst

    """
    # TODO add an "auto" mode for figsize that roughly scales with the rcParams
    # figsize (so that works), but expands to prevent subplots from being squished
    # Also should we have height=, aspect=, exclusive with figsize? Or working
    # with figsize when only one is defined?

    new = self._clone()

    if size is not default:
        new._figure_spec["figsize"] = size
    if engine is not default:
        new._layout_spec["engine"] = engine
    if extent is not default:
        new._layout_spec["extent"] = extent

    return new
Control the figure size and layout.

.. note::

    Default figure sizes and the API for specifying the figure size are
    subject to change in future "experimental" releases of the objects API.
    The default layout engine may also change.

Parameters
----------
size : (width, height)
    Size of the resulting figure, in inches. Size is inclusive of legend
    when using pyplot, but not otherwise.
engine : {{"tight", "constrained", "none"}}
    Name of method for automatically adjusting the layout to remove overlap.
    The default depends on whether :meth:`Plot.on` is used.
extent : (left, bottom, right, top)
    Boundaries of the plot layout, in fractions of the figure size. Takes
    effect through the layout engine; exact results will vary across
    engines. Note: the extent includes axis decorations when using a layout
    engine, but it is exclusive of them when `engine="none"`.

Examples
--------
.. include:: ../docstrings/objects.Plot.layout.rst
layout
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def theme(self, config: Mapping[str, Any], /) -> Plot:
    """
    Control the appearance of elements in the plot.

    .. note::

        The API for customizing plot appearance is not yet finalized.
        Currently, the only valid argument is a dict of matplotlib rc
        parameters. (This dict must be passed as a positional argument.)

        It is likely that this method will be enhanced in future releases.

    Matplotlib rc parameters are documented on the following page:
    https://matplotlib.org/stable/tutorials/introductory/customizing.html

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.theme.rst

    """
    new = self._clone()
    rc = mpl.RcParams(config)
    new._theme.update(rc)
    return new
Control the appearance of elements in the plot.

.. note::

    The API for customizing plot appearance is not yet finalized.
    Currently, the only valid argument is a dict of matplotlib rc parameters.
    (This dict must be passed as a positional argument.)

    It is likely that this method will be enhanced in future releases.

Matplotlib rc parameters are documented on the following page:
https://matplotlib.org/stable/tutorials/introductory/customizing.html

Examples
--------
.. include:: ../docstrings/objects.Plot.theme.rst
theme
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
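An illustrative call of theme() with a few standard matplotlib rc keys; this snippet is editorial, not from the repo.

import seaborn.objects as so

# The rc mapping must be passed positionally, per the signature above
p = so.Plot().theme({"axes.facecolor": "white", "axes.grid": True, "font.size": 11})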
def save(self, loc, **kwargs) -> Plot:
        """
        Compile the plot and write it to a buffer or file on disk.

        Parameters
        ----------
        loc : str, path, or buffer
            Location on disk to save the figure, or a buffer to write into.
        kwargs
            Other keyword arguments are passed through to
            :meth:`matplotlib.figure.Figure.savefig`.

        """
        # TODO expose important keyword arguments in our signature?
        with theme_context(self._theme_with_defaults()):
            self._plot().save(loc, **kwargs)
        return self
Compile the plot and write it to a buffer or file on disk.

Parameters
----------
loc : str, path, or buffer
    Location on disk to save the figure, or a buffer to write into.
kwargs
    Other keyword arguments are passed through to
    :meth:`matplotlib.figure.Figure.savefig`.
save
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
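A hedged sketch of save() writing both to a path and to a buffer; the filename is arbitrary and the extra keywords shown are ordinary savefig options.

import io
import pandas as pd
import seaborn.objects as so

df = pd.DataFrame({"x": [1, 2, 3], "y": [3, 1, 2]})
p = so.Plot(df, x="x", y="y").add(so.Dot())

p.save("figure.png", bbox_inches="tight")  # extra kwargs pass through to savefig

buf = io.BytesIO()  # a writable buffer works too
p.save(buf, format="png")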
def show(self, **kwargs) -> None:
        """
        Compile the plot and display it by hooking into pyplot.

        Calling this method is not necessary to render a plot in notebook context,
        but it may be in other environments (e.g., in a terminal). After compiling the
        plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).

        Unlike other :class:`Plot` methods, there is no return value. This should be
        the last method you call when specifying a plot.

        """
        # TODO make pyplot configurable at the class level, and when not using,
        # import IPython.display and call on self to populate cell output?

        # Keep an eye on whether matplotlib implements "attaching" an existing
        # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024

        self.plot(pyplot=True).show(**kwargs)
Compile the plot and display it by hooking into pyplot.

Calling this method is not necessary to render a plot in notebook context,
but it may be in other environments (e.g., in a terminal). After compiling the
plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).

Unlike other :class:`Plot` methods, there is no return value. This should be
the last method you call when specifying a plot.
show
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def show(self, **kwargs) -> None:
        """
        Display the plot by hooking into pyplot.

        This method calls :func:`matplotlib.pyplot.show` with any keyword parameters.

        """
        # TODO if we did not create the Plotter with pyplot, is it possible to do this?
        # If not we should clearly raise.
        import matplotlib.pyplot as plt
        with theme_context(self._theme):
            plt.show(**kwargs)
Display the plot by hooking into pyplot.

This method calls :func:`matplotlib.pyplot.show` with any keyword parameters.
show
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def _update_legend_contents(
        self,
        p: Plot,
        mark: Mark,
        data: PlotData,
        scales: dict[str, Scale],
        layer_label: str | None,
    ) -> None:
        """Add legend artists / labels for one layer in the plot."""
        if data.frame.empty and data.frames:
            legend_vars: list[str] = []
            for frame in data.frames.values():
                frame_vars = frame.columns.intersection(list(scales))
                legend_vars.extend(v for v in frame_vars if v not in legend_vars)
        else:
            legend_vars = list(data.frame.columns.intersection(list(scales)))

        # First handle layer legends, which occupy a single entry in legend_contents.
        if layer_label is not None:
            legend_title = str(p._labels.get("legend", ""))
            layer_key = (legend_title, -1)
            artist = mark._legend_artist([], None, {})
            if artist is not None:
                for content in self._legend_contents:
                    if content[0] == layer_key:
                        content[1].append(artist)
                        content[2].append(layer_label)
                        break
                else:
                    self._legend_contents.append((layer_key, [artist], [layer_label]))

        # Then handle the scale legends
        # First pass: Identify the values that will be shown for each variable
        schema: list[tuple[
            tuple[str, str | int], list[str], tuple[list[Any], list[str]]
        ]] = []
        for var in legend_vars:
            var_legend = scales[var]._legend
            if var_legend is not None:
                values, labels = var_legend
                for (_, part_id), part_vars, _ in schema:
                    if data.ids[var] == part_id:
                        # Allow multiple plot semantics to represent same data variable
                        part_vars.append(var)
                        break
                else:
                    title = self._resolve_label(p, var, data.names[var])
                    entry = (title, data.ids[var]), [var], (values, labels)
                    schema.append(entry)

        # Second pass, generate an artist corresponding to each value
        contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []
        for key, variables, (values, labels) in schema:
            artists = []
            for val in values:
                artist = mark._legend_artist(variables, val, scales)
                if artist is not None:
                    artists.append(artist)
            if artists:
                contents.append((key, artists, labels))

        self._legend_contents.extend(contents)
Add legend artists / labels for one layer in the plot.
_update_legend_contents
python
mwaskom/seaborn
seaborn/_core/plot.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/plot.py
BSD-3-Clause
def __init__(self, variable: str | None = None):
        """Initialize the property with the name of the corresponding plot variable."""
        if not variable:
            variable = self.__class__.__name__.lower()
        self.variable = variable
Initialize the property with the name of the corresponding plot variable.
__init__
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def default_scale(self, data: Series) -> Scale:
        """Given data, initialize appropriate scale class."""
        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
        if var_type == "numeric":
            return Continuous()
        elif var_type == "datetime":
            return Temporal()
        elif var_type == "boolean":
            return Boolean()
        else:
            return Nominal()
Given data, initialize appropriate scale class.
default_scale
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def infer_scale(self, arg: Any, data: Series) -> Scale:
        """Given data and a scaling argument, initialize appropriate scale class."""
        # TODO put these somewhere external for validation
        # TODO putting this here won't pick it up if subclasses define infer_scale
        # (e.g. color). How best to handle that? One option is to call super after
        # handling property-specific possibilities (e.g. for color check that the
        # arg is not a valid palette name) but that could get tricky.
        trans_args = ["log", "symlog", "logit", "pow", "sqrt"]
        if isinstance(arg, str):
            if any(arg.startswith(k) for k in trans_args):
                # TODO validate numeric type? That should happen centrally somewhere
                return Continuous(trans=arg)
            else:
                msg = f"Unknown magic arg for {self.variable} scale: '{arg}'."
                raise ValueError(msg)
        else:
            arg_type = type(arg).__name__
            msg = f"Magic arg for {self.variable} scale must be str, not {arg_type}."
            raise TypeError(msg)
Given data and a scaling argument, initialize appropriate scale class.
infer_scale
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
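To illustrate the magic-argument handling above, a small editorial sketch that calls the (private) Property class directly; treat it as illustrative rather than supported API.

import pandas as pd
from seaborn._core.properties import Property

prop = Property("x")
data = pd.Series([1.0, 10.0, 100.0])

# A recognized transform prefix yields a Continuous scale with that transform
scale = prop.infer_scale("log", data)
assert type(scale).__name__ == "Continuous" and scale.trans == "log"

# Anything else is rejected
try:
    prop.infer_scale("banana", data)
except ValueError as err:
    print(err)  # Unknown magic arg for x scale: 'banana'.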
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
        """Return a function that maps from data domain to property range."""
        def identity(x):
            return x
        return identity
Return a function that maps from data domain to property range.
get_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _check_dict_entries(self, levels: list, values: dict) -> None:
        """Input check when values are provided as a dictionary."""
        missing = set(levels) - set(values)
        if missing:
            formatted = ", ".join(map(repr, sorted(missing, key=str)))
            err = f"No entry in {self.variable} dictionary for {formatted}"
            raise ValueError(err)
Input check when values are provided as a dictionary.
_check_dict_entries
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _check_list_length(self, levels: list, values: list) -> list:
        """Input check when values are provided as a list."""
        message = ""
        if len(levels) > len(values):
            message = " ".join([
                f"\nThe {self.variable} list has fewer values ({len(values)})",
                f"than needed ({len(levels)}) and will cycle, which may",
                "produce an uninterpretable plot."
            ])
            values = [x for _, x in zip(levels, itertools.cycle(values))]

        elif len(values) > len(levels):
            message = " ".join([
                f"The {self.variable} list has more values ({len(values)})",
                f"than needed ({len(levels)}), which may not be intended.",
            ])
            values = values[:len(levels)]

        # TODO look into custom PlotSpecWarning with better formatting
        if message:
            warnings.warn(message, UserWarning)

        return values
Input check when values are provided as a list.
_check_list_length
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
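The recycle/truncate behavior above can be reproduced in isolation (editorial sketch):

import itertools

levels = ["a", "b", "c", "d", "e"]
values = ["x", "y"]

# Too few values: recycle them to cover every level (a warning is issued upstream)
recycled = [v for _, v in zip(levels, itertools.cycle(values))]
assert recycled == ["x", "y", "x", "y", "x"]

# Too many values: silently truncate to the number of levels
assert ["x", "y", "z"][:len(["a", "b"])] == ["x", "y"]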
def infer_scale(self, arg: Any, data: Series) -> Scale:
        """Given data and a scaling argument, initialize appropriate scale class."""
        # TODO infer continuous based on log/sqrt etc?
        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
        if var_type == "boolean":
            return Boolean(arg)
        elif isinstance(arg, (list, dict)):
            return Nominal(arg)
        elif var_type == "categorical":
            return Nominal(arg)
        elif var_type == "datetime":
            return Temporal(arg)
        # TODO other variable types
        else:
            return Continuous(arg)
Given data and a scaling argument, initialize appropriate scale class.
infer_scale
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
        """Return a function that maps from data domain to property range."""
        if isinstance(scale, Nominal):
            return self._get_nominal_mapping(scale, data)
        elif isinstance(scale, Boolean):
            return self._get_boolean_mapping(scale, data)

        if scale.values is None:
            vmin, vmax = self._forward(self.default_range)
        elif isinstance(scale.values, tuple) and len(scale.values) == 2:
            vmin, vmax = self._forward(scale.values)
        else:
            if isinstance(scale.values, tuple):
                actual = f"{len(scale.values)}-tuple"
            else:
                actual = str(type(scale.values))
            scale_class = scale.__class__.__name__
            err = " ".join([
                f"Values for {self.variable} variables with {scale_class} scale",
                f"must be 2-tuple; not {actual}.",
            ])
            raise TypeError(err)

        def mapping(x):
            return self._inverse(np.multiply(x, vmax - vmin) + vmin)

        return mapping
Return a function that maps from data domain to property range.
get_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:
        """Identify evenly-spaced values using interval or explicit mapping."""
        levels = categorical_order(data, scale.order)
        values = self._get_values(scale, levels)

        def mapping(x):
            ixs = np.asarray(x, np.intp)
            out = np.full(len(x), np.nan)
            use = np.isfinite(x)
            out[use] = np.take(values, ixs[use])
            return out

        return mapping
Identify evenly-spaced values using interval or explicit mapping.
_get_nominal_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:
        """Identify evenly-spaced values using interval or explicit mapping."""
        values = self._get_values(scale, [True, False])

        def mapping(x):
            out = np.full(len(x), np.nan)
            use = np.isfinite(x)
            out[use] = np.where(x[use], *values)
            return out

        return mapping
Identify evenly-spaced values using interval or explicit mapping.
_get_boolean_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_values(self, scale: Scale, levels: list) -> list:
        """Validate scale.values and identify a value for each level."""
        if isinstance(scale.values, dict):
            self._check_dict_entries(levels, scale.values)
            values = [scale.values[x] for x in levels]
        elif isinstance(scale.values, list):
            values = self._check_list_length(levels, scale.values)
        else:
            if scale.values is None:
                vmin, vmax = self.default_range
            elif isinstance(scale.values, tuple):
                vmin, vmax = scale.values
            else:
                scale_class = scale.__class__.__name__
                err = " ".join([
                    f"Values for {self.variable} variables with {scale_class} scale",
                    f"must be a dict, list or tuple; not {type(scale.values)}",
                ])
                raise TypeError(err)

            vmin, vmax = self._forward([vmin, vmax])
            values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))

        return values
Validate scale.values and identify a value for each level.
_get_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
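A standalone sketch of the default interpolation branch above, assuming identity forward/inverse transforms and a (0.5, 2.0) default range (the actual range depends on the property and rcParams):

import numpy as np

# Three levels map to evenly spaced values, from vmax for the first
# level down to vmin for the last.
vmin, vmax = 0.5, 2.0
levels = ["small", "medium", "large"]
values = list(np.linspace(vmax, vmin, len(levels)))
assert values == [2.0, 1.25, 0.5]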
def default_range(self) -> tuple[float, float]:
        """Min and max values used by default for semantic mapping."""
        base = mpl.rcParams["lines.linewidth"]
        return base * .5, base * 2
Min and max values used by default for semantic mapping.
default_range
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def default_range(self) -> tuple[float, float]:
        """Min and max values used by default for semantic mapping."""
        base = mpl.rcParams["patch.linewidth"]
        return base * .5, base * 2
Min and max values used by default for semantic mapping.
default_range
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def default_range(self) -> tuple[float, float]:
        """Min and max values used by default for semantic mapping."""
        base = mpl.rcParams["font.size"]
        return base * .5, base * 2
Min and max values used by default for semantic mapping.
default_range
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
        """Define mapping as lookup into list of object values."""
        boolean_scale = isinstance(scale, Boolean)
        order = getattr(scale, "order", [True, False] if boolean_scale else None)
        levels = categorical_order(data, order)
        values = self._get_values(scale, levels)

        if boolean_scale:
            values = values[::-1]

        def mapping(x):
            ixs = np.asarray(np.nan_to_num(x), np.intp)
            return [
                values[ix] if np.isfinite(x_i) else self.null_value
                for x_i, ix in zip(x, ixs)
            ]

        return mapping
Define mapping as lookup into list of object values.
get_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_values(self, scale: Scale, levels: list) -> list:
        """Validate scale.values and identify a value for each level."""
        n = len(levels)
        if isinstance(scale.values, dict):
            self._check_dict_entries(levels, scale.values)
            values = [scale.values[x] for x in levels]
        elif isinstance(scale.values, list):
            values = self._check_list_length(levels, scale.values)
        elif scale.values is None:
            values = self._default_values(n)
        else:
            msg = " ".join([
                f"Scale values for a {self.variable} variable must be provided",
                f"in a dict or list; not {type(scale.values)}."
            ])
            raise TypeError(msg)

        values = [self.standardize(x) for x in values]
        return values
Validate scale.values and identify a value for each level.
_get_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _default_values(self, n: int) -> list[MarkerStyle]:
        """Build an arbitrarily long list of unique marker styles.

        Parameters
        ----------
        n : int
            Number of unique marker specs to generate.

        Returns
        -------
        markers : list of string or tuples
            Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
            All markers will be filled.

        """
        # Start with marker specs that are well distinguishable
        markers = [
            "o", "X", (4, 0, 45), "P", (4, 0, 0), (4, 1, 0), "^", (4, 1, 45), "v",
        ]

        # Now generate more from regular polygons of increasing order
        s = 5
        while len(markers) < n:
            a = 360 / (s + 1) / 2
            markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])
            s += 1

        markers = [MarkerStyle(m) for m in markers[:n]]

        return markers
Build an arbitrarily long list of unique marker styles.

Parameters
----------
n : int
    Number of unique marker specs to generate.

Returns
-------
markers : list of string or tuples
    Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
    All markers will be filled.
_default_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
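The generation loop above can be run standalone; an editorial sketch for n = 13:

from matplotlib.markers import MarkerStyle

# Seed specs, then extend with regular polygons / stars of increasing order
markers = ["o", "X", (4, 0, 45), "P", (4, 0, 0), (4, 1, 0), "^", (4, 1, 45), "v"]
s, n = 5, 13
while len(markers) < n:
    a = 360 / (s + 1) / 2
    markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])
    s += 1
styles = [MarkerStyle(m) for m in markers[:n]]
assert len(styles) == 13 and all(m.is_filled() for m in styles)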
def _default_values(self, n: int) -> list[DashPatternWithOffset]:
        """Build an arbitrarily long list of unique dash styles for lines.

        Parameters
        ----------
        n : int
            Number of unique dash specs to generate.

        Returns
        -------
        dashes : list of strings or tuples
            Valid arguments for the ``dashes`` parameter on
            :class:`matplotlib.lines.Line2D`. The first spec is a solid line (``""``),
            the remainder are sequences of long and short dashes.

        """
        # Start with dash specs that are well distinguishable
        dashes: list[str | DashPattern] = [
            "-", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),
        ]

        # Now programmatically build as many as we need
        p = 3
        while len(dashes) < n:

            # Take combinations of long and short dashes
            a = itertools.combinations_with_replacement([3, 1.25], p)
            b = itertools.combinations_with_replacement([4, 1], p)

            # Interleave the combinations, reversing one of the streams
            segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))

            # Now insert the gaps
            for segments in segment_list:
                gap = min(segments)
                spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
                dashes.append(spec)

            p += 1

        return [self._get_dash_pattern(x) for x in dashes]
Build an arbitrarily long list of unique dash styles for lines.

Parameters
----------
n : int
    Number of unique dash specs to generate.

Returns
-------
dashes : list of strings or tuples
    Valid arguments for the ``dashes`` parameter on
    :class:`matplotlib.lines.Line2D`. The first spec is a solid line (``""``),
    the remainder are sequences of long and short dashes.
_default_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:
        """Convert linestyle arguments to dash pattern with offset."""
        # Copied and modified from Matplotlib 3.4
        # go from short hand -> full strings
        ls_mapper = {"-": "solid", "--": "dashed", "-.": "dashdot", ":": "dotted"}
        if isinstance(style, str):
            style = ls_mapper.get(style, style)
            # un-dashed styles
            if style in ["solid", "none", "None"]:
                offset = 0
                dashes = None
            # dashed styles
            elif style in ["dashed", "dashdot", "dotted"]:
                offset = 0
                dashes = tuple(mpl.rcParams[f"lines.{style}_pattern"])
            else:
                options = [*ls_mapper.values(), *ls_mapper.keys()]
                msg = f"Linestyle string must be one of {options}, not {repr(style)}."
                raise ValueError(msg)

        elif isinstance(style, tuple):
            if len(style) > 1 and isinstance(style[1], tuple):
                offset, dashes = style
            elif len(style) > 1 and style[1] is None:
                offset, dashes = style
            else:
                offset = 0
                dashes = style
        else:
            val_type = type(style).__name__
            msg = f"Linestyle must be str or tuple, not {val_type}."
            raise TypeError(msg)

        # Normalize offset to be positive and shorter than the dash cycle
        if dashes is not None:
            try:
                dsum = sum(dashes)
            except TypeError as err:
                msg = f"Invalid dash pattern: {dashes}"
                raise TypeError(msg) from err
            if dsum:
                offset %= dsum

        return offset, dashes
Convert linestyle arguments to dash pattern with offset.
_get_dash_pattern
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
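A few illustrative conversions, calling the (private) staticmethod directly; editorial, not from the repo.

from seaborn._core.properties import LineStyle

assert LineStyle._get_dash_pattern("-") == (0, None)            # solid: no pattern
assert LineStyle._get_dash_pattern((4, 1, 1, 1)) == (0, (4, 1, 1, 1))

# An (offset, dashes) tuple has its offset normalized modulo the cycle length
assert LineStyle._get_dash_pattern((11, (4, 1))) == (1, (4, 1))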
def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:
        """Convert color sequence to RGB(A) array, preserving but not adding alpha."""
        def has_alpha(x):
            return to_rgba(x) != to_rgba(x, 1)

        if isinstance(colors, np.ndarray):
            needs_alpha = colors.shape[1] == 4
        else:
            needs_alpha = any(has_alpha(x) for x in colors)

        if needs_alpha:
            return to_rgba_array(colors)
        else:
            return to_rgba_array(colors)[:, :3]
Convert color sequence to RGB(A) array, preserving but not adding alpha.
_standardize_color_sequence
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
        """Return a function that maps from data domain to color values."""
        # TODO what is best way to do this conditional?
        # Should it be class-based or should classes have behavioral attributes?
        if isinstance(scale, Nominal):
            return self._get_nominal_mapping(scale, data)
        elif isinstance(scale, Boolean):
            return self._get_boolean_mapping(scale, data)

        if scale.values is None:
            # TODO Rethink best default continuous color gradient
            mapping = color_palette("ch:", as_cmap=True)
        elif isinstance(scale.values, tuple):
            # TODO blend_palette will strip alpha, but we should support
            # interpolation on all four channels
            mapping = blend_palette(scale.values, as_cmap=True)
        elif isinstance(scale.values, str):
            # TODO for matplotlib colormaps this will clip extremes, which is
            # different from what using the named colormap directly would do
            # This may or may not be desirable.
            mapping = color_palette(scale.values, as_cmap=True)
        elif callable(scale.values):
            mapping = scale.values
        else:
            scale_class = scale.__class__.__name__
            msg = " ".join([
                f"Scale values for {self.variable} with a {scale_class} mapping",
                f"must be string, tuple, or callable; not {type(scale.values)}."
            ])
            raise TypeError(msg)

        def _mapping(x):
            # Remove alpha channel so it does not override alpha property downstream
            # TODO this will need to be more flexible to support RGBA tuples (see above)
            invalid = ~np.isfinite(x)
            out = mapping(x)[:, :3]
            out[invalid] = np.nan
            return out

        return _mapping
Return a function that maps from data domain to color values.
get_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_values(self, scale: Scale, levels: list) -> ArrayLike:
        """Validate scale.values and identify a value for each level."""
        n = len(levels)
        values = scale.values
        if isinstance(values, dict):
            self._check_dict_entries(levels, values)
            colors = [values[x] for x in levels]
        elif isinstance(values, list):
            colors = self._check_list_length(levels, values)
        elif isinstance(values, tuple):
            colors = blend_palette(values, n)
        elif isinstance(values, str):
            colors = color_palette(values, n)
        elif values is None:
            if n <= len(get_color_cycle()):
                # Use current (global) default palette
                colors = color_palette(n_colors=n)
            else:
                colors = color_palette("husl", n)
        else:
            scale_class = scale.__class__.__name__
            msg = " ".join([
                f"Scale values for {self.variable} with a {scale_class} mapping",
                f"must be string, list, tuple, or dict; not {type(scale.values)}."
            ])
            raise TypeError(msg)

        return self._standardize_color_sequence(colors)
Validate scale.values and identify a value for each level.
_get_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _default_values(self, n: int) -> list:
        """Return a list of n values, alternating True and False."""
        if n > 2:
            msg = " ".join([
                f"The variable assigned to {self.variable} has more than two levels,",
                f"so {self.variable} values will cycle and may be uninterpretable",
            ])
            # TODO fire in a "nice" way (see above)
            warnings.warn(msg, UserWarning)
        return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]
Return a list of n values, alternating True and False.
_default_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
        """Return a function that maps each data value to True or False."""
        boolean_scale = isinstance(scale, Boolean)
        order = getattr(scale, "order", [True, False] if boolean_scale else None)
        levels = categorical_order(data, order)
        values = self._get_values(scale, levels)

        if boolean_scale:
            values = values[::-1]

        def mapping(x):
            ixs = np.asarray(np.nan_to_num(x), np.intp)
            return [
                values[ix] if np.isfinite(x_i) else False
                for x_i, ix in zip(x, ixs)
            ]

        return mapping
Return a function that maps each data value to True or False.
get_mapping
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def _get_values(self, scale: Scale, levels: list) -> list:
        """Validate scale.values and identify a value for each level."""
        if isinstance(scale.values, list):
            values = [bool(x) for x in scale.values]
        elif isinstance(scale.values, dict):
            values = [bool(scale.values[x]) for x in levels]
        elif scale.values is None:
            values = self._default_values(len(levels))
        else:
            msg = " ".join([
                f"Scale values for {self.variable} must be passed in",
                f"a list or dict; not {type(scale.values)}."
            ])
            raise TypeError(msg)

        return values
Validate scale.values and identify a value for each level.
_get_values
python
mwaskom/seaborn
seaborn/_core/properties.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/properties.py
BSD-3-Clause
def variable_type(
    vector: Series,
    boolean_type: Literal["numeric", "categorical", "boolean"] = "numeric",
    strict_boolean: bool = False,
) -> VarType:
    """
    Determine whether a vector contains numeric, categorical, or datetime data.

    This function differs from the pandas typing API in a few ways:

    - Python sequences or object-typed PyData objects are considered numeric if
      all of their entries are numeric.
    - String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
    - There is some flexibility about how to treat binary / boolean data.

    Parameters
    ----------
    vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
        Input data to test.
    boolean_type : 'numeric', 'categorical', or 'boolean'
        Type to use for vectors containing only 0s and 1s (and NAs).
    strict_boolean : bool
        If True, only consider data to be boolean when the dtype is bool or Boolean.

    Returns
    -------
    var_type : 'numeric', 'categorical', or 'datetime'
        Name identifying the type of data in the vector.
    """
    # If a categorical dtype is set, infer categorical
    if isinstance(getattr(vector, 'dtype', None), pd.CategoricalDtype):
        return VarType("categorical")

    # Special-case all-na data, which is always "numeric"
    if pd.isna(vector).all():
        return VarType("numeric")

    # Now drop nulls to simplify further type inference
    vector = vector.dropna()

    # Special-case binary/boolean data, allow caller to determine
    # This triggers a numpy warning when vector has strings/objects
    # https://github.com/numpy/numpy/issues/6784
    # Because we reduce with .all(), we are agnostic about whether the
    # comparison returns a scalar or vector, so we will ignore the warning.
    # It triggers a separate DeprecationWarning when the vector has datetimes:
    # https://github.com/numpy/numpy/issues/13548
    # This is considered a bug by numpy and will likely go away.
    with warnings.catch_warnings():
        warnings.simplefilter(
            action='ignore',
            category=(FutureWarning, DeprecationWarning)  # type: ignore  # mypy bug?
        )
        if strict_boolean:
            if isinstance(vector.dtype, pd.core.dtypes.base.ExtensionDtype):
                boolean_dtypes = ["bool", "boolean"]
            else:
                boolean_dtypes = ["bool"]
            boolean_vector = vector.dtype in boolean_dtypes
        else:
            try:
                boolean_vector = bool(np.isin(vector, [0, 1]).all())
            except TypeError:
                # .isin comparison is not guaranteed to be possible under NumPy
                # casting rules, depending on the (unknown) dtype of 'vector'
                boolean_vector = False
        if boolean_vector:
            return VarType(boolean_type)

    # Defer to positive pandas tests
    if pd.api.types.is_numeric_dtype(vector):
        return VarType("numeric")

    if pd.api.types.is_datetime64_dtype(vector):
        return VarType("datetime")

    # --- If we get to here, we need to check the entries

    # Check for a collection where everything is a number
    def all_numeric(x):
        for x_i in x:
            if not isinstance(x_i, Number):
                return False
        return True

    if all_numeric(vector):
        return VarType("numeric")

    # Check for a collection where everything is a datetime
    def all_datetime(x):
        for x_i in x:
            if not isinstance(x_i, (datetime, np.datetime64)):
                return False
        return True

    if all_datetime(vector):
        return VarType("datetime")

    # Otherwise, our final fallback is to consider things categorical
    return VarType("categorical")
Determine whether a vector contains numeric, categorical, or datetime data.

This function differs from the pandas typing API in a few ways:

- Python sequences or object-typed PyData objects are considered numeric if
  all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
  explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
- There is some flexibility about how to treat binary / boolean data.

Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
    Input data to test.
boolean_type : 'numeric', 'categorical', or 'boolean'
    Type to use for vectors containing only 0s and 1s (and NAs).
strict_boolean : bool
    If True, only consider data to be boolean when the dtype is bool or Boolean.

Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
    Name identifying the type of data in the vector.
variable_type
python
mwaskom/seaborn
seaborn/_core/rules.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/rules.py
BSD-3-Clause
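Illustrative calls (editorial; note the import touches a private module):

import pandas as pd
from seaborn._core.rules import variable_type

assert variable_type(pd.Series([1.5, 2.5])) == "numeric"
assert variable_type(pd.Series(["a", "b"])) == "categorical"
assert variable_type(pd.Series(pd.to_datetime(["2024-01-01", "2024-06-01"]))) == "datetime"

# Binary data are flexible: "numeric" by default, but the caller can decide
assert variable_type(pd.Series([0, 1, 1])) == "numeric"
assert variable_type(pd.Series([0, 1, 1]), boolean_type="boolean") == "boolean"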
def categorical_order(vector: Series, order: list | None = None) -> list:
    """
    Return a list of unique data values using seaborn's ordering rules.

    Parameters
    ----------
    vector : Series
        Vector of "categorical" values
    order : list
        Desired order of category levels to override the order determined
        from the `data` object.

    Returns
    -------
    order : list
        Ordered list of category levels not including null values.

    """
    if order is not None:
        return order

    if vector.dtype.name == "category":
        order = list(vector.cat.categories)
    else:
        order = list(filter(pd.notnull, vector.unique()))
        if variable_type(pd.Series(order)) == "numeric":
            order.sort()

    return order
Return a list of unique data values using seaborn's ordering rules.

Parameters
----------
vector : Series
    Vector of "categorical" values
order : list
    Desired order of category levels to override the order determined
    from the `data` object.

Returns
-------
order : list
    Ordered list of category levels not including null values.
categorical_order
python
mwaskom/seaborn
seaborn/_core/rules.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/rules.py
BSD-3-Clause
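The ordering rules in action (editorial sketch; private module import):

import pandas as pd
from seaborn._core.rules import categorical_order

# Strings keep their order of first appearance
assert categorical_order(pd.Series(["b", "a", "b", "c"])) == ["b", "a", "c"]

# Numbers treated as categories get sorted
assert categorical_order(pd.Series([3, 1, 2, 1])) == [1, 2, 3]

# An explicit order always wins
assert categorical_order(pd.Series(["a", "b"]), order=["b", "a"]) == ["b", "a"]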
def tick(self, locator: Locator | None = None) -> Nominal:
        """
        Configure the selection of ticks for the scale's axis or legend.

        .. note::
            This API is under construction and will be enhanced over time.
            At the moment, it is probably not very useful.

        Parameters
        ----------
        locator : :class:`matplotlib.ticker.Locator` subclass
            Pre-configured matplotlib locator; other parameters will not be used.

        Returns
        -------
        Copy of self with new tick configuration.

        """
        new = copy(self)
        new._tick_params = {"locator": locator}
        return new
Configure the selection of ticks for the scale's axis or legend.

.. note::
    This API is under construction and will be enhanced over time.
    At the moment, it is probably not very useful.

Parameters
----------
locator : :class:`matplotlib.ticker.Locator` subclass
    Pre-configured matplotlib locator; other parameters will not be used.

Returns
-------
Copy of self with new tick configuration.
tick
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
def label(self, formatter: Formatter | None = None) -> Nominal:
        """
        Configure the selection of labels for the scale's axis or legend.

        .. note::
            This API is under construction and will be enhanced over time.
            At the moment, it is probably not very useful.

        Parameters
        ----------
        formatter : :class:`matplotlib.ticker.Formatter` subclass
            Pre-configured matplotlib formatter; other parameters will not be used.

        Returns
        -------
        scale
            Copy of self with new label configuration.

        """
        new = copy(self)
        new._label_params = {"formatter": formatter}
        return new
Configure the selection of labels for the scale's axis or legend.

.. note::
    This API is under construction and will be enhanced over time.
    At the moment, it is probably not very useful.

Parameters
----------
formatter : :class:`matplotlib.ticker.Formatter` subclass
    Pre-configured matplotlib formatter; other parameters will not be used.

Returns
-------
scale
    Copy of self with new label configuration.
label
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
def label(
        self,
        formatter: Formatter | None = None, *,
        like: str | Callable | None = None,
        base: int | None | Default = default,
        unit: str | None = None,
    ) -> Continuous:
        """
        Configure the appearance of tick labels for the scale's axis or legend.

        Parameters
        ----------
        formatter : :class:`matplotlib.ticker.Formatter` subclass
            Pre-configured formatter to use; other parameters will be ignored.
        like : str or callable
            Either a format pattern (e.g., `".2f"`), a format string with fields named
            `x` and/or `pos` (e.g., `"${x:.2f}"`), or a callable with a signature like
            `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the
            tick value and `pos` is passed as the tick index.
        base : number
            Use log formatter (with scientific notation) having this value as the base.
            Set to `None` to override the default formatter with a log transform.
        unit : str or (str, str) tuple
            Use SI prefixes with these units (e.g., with `unit="g"`, a tick value
            of 5000 will appear as `5 kg`). When a tuple, the first element gives the
            separator between the number and unit.

        Returns
        -------
        scale
            Copy of self with new label configuration.

        """
        # Input checks
        if formatter is not None and not isinstance(formatter, Formatter):
            raise TypeError(
                f"Label formatter must be an instance of {Formatter!r}, "
                f"not {type(formatter)!r}"
            )
        if like is not None and not (isinstance(like, str) or callable(like)):
            msg = f"`like` must be a string or callable, not {type(like).__name__}."
            raise TypeError(msg)

        new = copy(self)
        new._label_params = {
            "formatter": formatter,
            "like": like,
            "base": base,
            "unit": unit,
        }
        return new
Configure the appearance of tick labels for the scale's axis or legend.

Parameters
----------
formatter : :class:`matplotlib.ticker.Formatter` subclass
    Pre-configured formatter to use; other parameters will be ignored.
like : str or callable
    Either a format pattern (e.g., `".2f"`), a format string with fields named
    `x` and/or `pos` (e.g., `"${x:.2f}"`), or a callable with a signature like
    `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the
    tick value and `pos` is passed as the tick index.
base : number
    Use log formatter (with scientific notation) having this value as the base.
    Set to `None` to override the default formatter with a log transform.
unit : str or (str, str) tuple
    Use SI prefixes with these units (e.g., with `unit="g"`, a tick value
    of 5000 will appear as `5 kg`). When a tuple, the first element gives the
    separator between the number and unit.

Returns
-------
scale
    Copy of self with new label configuration.
label
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
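Not from the source: a usage sketch wiring label(like=...) into a plot; the data are hypothetical.

import pandas as pd
import seaborn.objects as so

df = pd.DataFrame({"x": [0.1, 0.5, 0.9], "y": [1, 2, 3]})

p = (
    so.Plot(df, x="x", y="y")
    .add(so.Dot())
    # A format string with an `x` field; here the x ticks render as percentages
    .scale(x=so.Continuous().label(like="{x:.0%}"))
)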
def tick(
        self,
        locator: Locator | None = None, *,
        upto: int | None = None,
    ) -> Temporal:
        """
        Configure the selection of ticks for the scale's axis or legend.

        .. note::
            This API is under construction and will be enhanced over time.

        Parameters
        ----------
        locator : :class:`matplotlib.ticker.Locator` subclass
            Pre-configured matplotlib locator; other parameters will not be used.
        upto : int
            Choose "nice" locations for ticks, but do not exceed this number.

        Returns
        -------
        scale
            Copy of self with new tick configuration.

        """
        if locator is not None and not isinstance(locator, Locator):
            err = (
                f"Tick locator must be an instance of {Locator!r}, "
                f"not {type(locator)!r}."
            )
            raise TypeError(err)

        new = copy(self)
        new._tick_params = {"locator": locator, "upto": upto}
        return new
Configure the selection of ticks for the scale's axis or legend.

.. note::
    This API is under construction and will be enhanced over time.

Parameters
----------
locator : :class:`matplotlib.ticker.Locator` subclass
    Pre-configured matplotlib locator; other parameters will not be used.
upto : int
    Choose "nice" locations for ticks, but do not exceed this number.

Returns
-------
scale
    Copy of self with new tick configuration.
tick
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
def label(
        self,
        formatter: Formatter | None = None, *,
        concise: bool = False,
    ) -> Temporal:
        """
        Configure the appearance of tick labels for the scale's axis or legend.

        .. note::
            This API is under construction and will be enhanced over time.

        Parameters
        ----------
        formatter : :class:`matplotlib.ticker.Formatter` subclass
            Pre-configured formatter to use; other parameters will be ignored.
        concise : bool
            If True, use :class:`matplotlib.dates.ConciseDateFormatter` to make
            the tick labels as compact as possible.

        Returns
        -------
        scale
            Copy of self with new label configuration.

        """
        new = copy(self)
        new._label_params = {"formatter": formatter, "concise": concise}
        return new
Configure the appearance of tick labels for the scale's axis or legend.

.. note::
    This API is under construction and will be enhanced over time.

Parameters
----------
formatter : :class:`matplotlib.ticker.Formatter` subclass
    Pre-configured formatter to use; other parameters will be ignored.
concise : bool
    If True, use :class:`matplotlib.dates.ConciseDateFormatter` to make
    the tick labels as compact as possible.

Returns
-------
scale
    Copy of self with new label configuration.
label
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
def update_units(self, x):
        """Pass units to the internal converter, potentially updating its mapping."""
        self.converter = mpl.units.registry.get_converter(x)
        if self.converter is not None:
            self.converter.default_units(x, self)

            info = self.converter.axisinfo(self.units, self)

            if info is None:
                return
            if info.majloc is not None:
                self.set_major_locator(info.majloc)
            if info.majfmt is not None:
                self.set_major_formatter(info.majfmt)

            # This is in matplotlib method; do we need this?
            # self.set_default_intervals()
Pass units to the internal converter, potentially updating its mapping.
update_units
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
def convert_units(self, x):
        """Return a numeric representation of the input data."""
        if np.issubdtype(np.asarray(x).dtype, np.number):
            return x
        elif self.converter is None:
            return x
        return self.converter.convert(x, self.units, self)
Return a numeric representation of the input data.
convert_units
python
mwaskom/seaborn
seaborn/_core/scales.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/scales.py
BSD-3-Clause
def _check_dimension_uniqueness(
        self, facet_spec: FacetSpec, pair_spec: PairSpec
    ) -> None:
        """Reject specs that pair and facet on (or wrap to) same figure dimension."""
        err = None

        facet_vars = facet_spec.get("variables", {})

        if facet_spec.get("wrap") and {"col", "row"} <= set(facet_vars):
            err = "Cannot wrap facets when specifying both `col` and `row`."
        elif (
            pair_spec.get("wrap")
            and pair_spec.get("cross", True)
            and len(pair_spec.get("structure", {}).get("x", [])) > 1
            and len(pair_spec.get("structure", {}).get("y", [])) > 1
        ):
            err = "Cannot wrap subplots when pairing on both `x` and `y`."

        collisions = {"x": ["columns", "rows"], "y": ["rows", "columns"]}
        for pair_axis, (multi_dim, wrap_dim) in collisions.items():
            if pair_axis not in pair_spec.get("structure", {}):
                continue
            elif multi_dim[:3] in facet_vars:
                err = f"Cannot facet the {multi_dim} while pairing on `{pair_axis}`."
            elif wrap_dim[:3] in facet_vars and facet_spec.get("wrap"):
                err = f"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}`."
            elif wrap_dim[:3] in facet_vars and pair_spec.get("wrap"):
                err = f"Cannot wrap the {multi_dim} while faceting the {wrap_dim}."

        if err is not None:
            raise RuntimeError(err)  # TODO what err class? Define PlotSpecError?
Reject specs that pair and facet on (or wrap to) same figure dimension.
_check_dimension_uniqueness
python
mwaskom/seaborn
seaborn/_core/subplots.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/subplots.py
BSD-3-Clause
def _determine_grid_dimensions(
        self, facet_spec: FacetSpec, pair_spec: PairSpec
    ) -> None:
        """Parse faceting and pairing information to define figure structure."""
        self.grid_dimensions: dict[str, list] = {}
        for dim, axis in zip(["col", "row"], ["x", "y"]):

            facet_vars = facet_spec.get("variables", {})
            if dim in facet_vars:
                self.grid_dimensions[dim] = facet_spec["structure"][dim]
            elif axis in pair_spec.get("structure", {}):
                self.grid_dimensions[dim] = [
                    None for _ in pair_spec.get("structure", {})[axis]
                ]
            else:
                self.grid_dimensions[dim] = [None]

            self.subplot_spec[f"n{dim}s"] = len(self.grid_dimensions[dim])

        if not pair_spec.get("cross", True):
            self.subplot_spec["nrows"] = 1

        self.n_subplots = self.subplot_spec["ncols"] * self.subplot_spec["nrows"]
Parse faceting and pairing information to define figure structure.
_determine_grid_dimensions
python
mwaskom/seaborn
seaborn/_core/subplots.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/subplots.py
BSD-3-Clause
def _handle_wrapping(
        self, facet_spec: FacetSpec, pair_spec: PairSpec
    ) -> None:
        """Update figure structure parameters based on facet/pair wrapping."""
        self.wrap = wrap = facet_spec.get("wrap") or pair_spec.get("wrap")
        if not wrap:
            return

        wrap_dim = "row" if self.subplot_spec["nrows"] > 1 else "col"
        flow_dim = {"row": "col", "col": "row"}[wrap_dim]
        n_subplots = self.subplot_spec[f"n{wrap_dim}s"]
        flow = int(np.ceil(n_subplots / wrap))

        if wrap < self.subplot_spec[f"n{wrap_dim}s"]:
            self.subplot_spec[f"n{wrap_dim}s"] = wrap
        self.subplot_spec[f"n{flow_dim}s"] = flow
        self.n_subplots = n_subplots
        self.wrap_dim = wrap_dim
Update figure structure parameters based on facet/pair wrapping.
_handle_wrapping
python
mwaskom/seaborn
seaborn/_core/subplots.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/subplots.py
BSD-3-Clause
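The wrapping arithmetic in isolation (editorial sketch):

import numpy as np

# Wrapping 7 subplots at 3 columns keeps 3 in the wrap dimension and
# flows the remainder into ceil(7 / 3) = 3 rows.
n_subplots, wrap = 7, 3
flow = int(np.ceil(n_subplots / wrap))
assert (min(wrap, n_subplots), flow) == (3, 3)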
def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:
        """Update subplot spec with default or specified axis sharing parameters."""
        axis_to_dim = {"x": "col", "y": "row"}
        key: str
        val: str | bool
        for axis in "xy":
            key = f"share{axis}"
            # Always use user-specified value, if present
            if key not in self.subplot_spec:
                if axis in pair_spec.get("structure", {}):
                    # Paired axes are shared along one dimension by default
                    if self.wrap is None and pair_spec.get("cross", True):
                        val = axis_to_dim[axis]
                    else:
                        val = False
                else:
                    # This will pick up faceted plots, as well as single subplot
                    # figures, where the value doesn't really matter
                    val = True
                self.subplot_spec[key] = val
Update subplot spec with default or specified axis sharing parameters.
_determine_axis_sharing
python
mwaskom/seaborn
seaborn/_core/subplots.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/subplots.py
BSD-3-Clause
def init_figure(
        self,
        pair_spec: PairSpec,
        pyplot: bool = False,
        figure_kws: dict | None = None,
        target: Axes | Figure | SubFigure | None = None,
    ) -> Figure:
        """Initialize matplotlib objects and add seaborn-relevant metadata."""
        # TODO reduce need to pass pair_spec here?

        if figure_kws is None:
            figure_kws = {}

        if isinstance(target, mpl.axes.Axes):

            if max(self.subplot_spec["nrows"], self.subplot_spec["ncols"]) > 1:
                err = " ".join([
                    "Cannot create multiple subplots after calling `Plot.on` with",
                    f"a {mpl.axes.Axes} object.",
                    f"You may want to use a {mpl.figure.SubFigure} instead.",
                ])
                raise RuntimeError(err)

            self._subplot_list = [{
                "ax": target,
                "left": True, "right": True, "top": True, "bottom": True,
                "col": None, "row": None, "x": "x", "y": "y",
            }]
            self._figure = target.figure
            return self._figure

        elif isinstance(target, mpl.figure.SubFigure):
            figure = target.figure
        elif isinstance(target, mpl.figure.Figure):
            figure = target
        else:
            if pyplot:
                figure = plt.figure(**figure_kws)
            else:
                figure = mpl.figure.Figure(**figure_kws)
            target = figure
        self._figure = figure

        axs = target.subplots(**self.subplot_spec, squeeze=False)

        if self.wrap:
            # Remove unused Axes and flatten the rest into a (2D) vector
            axs_flat = axs.ravel({"col": "C", "row": "F"}[self.wrap_dim])
            axs, extra = np.split(axs_flat, [self.n_subplots])
            for ax in extra:
                ax.remove()
            if self.wrap_dim == "col":
                axs = axs[np.newaxis, :]
            else:
                axs = axs[:, np.newaxis]

        # Get i, j coordinates for each Axes object
        # Note that i, j are with respect to faceting/pairing,
        # not the subplot grid itself, (which only matters in the case of wrapping).
        iter_axs: np.ndenumerate | zip
        if not pair_spec.get("cross", True):
            indices = np.arange(self.n_subplots)
            iter_axs = zip(zip(indices, indices), axs.flat)
        else:
            iter_axs = np.ndenumerate(axs)

        self._subplot_list = []
        for (i, j), ax in iter_axs:

            info = {"ax": ax}

            nrows, ncols = self.subplot_spec["nrows"], self.subplot_spec["ncols"]
            if not self.wrap:
                info["left"] = j % ncols == 0
                info["right"] = (j + 1) % ncols == 0
                info["top"] = i == 0
                info["bottom"] = i == nrows - 1
            elif self.wrap_dim == "col":
                info["left"] = j % ncols == 0
                info["right"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)
                info["top"] = j < ncols
                info["bottom"] = j >= (self.n_subplots - ncols)
            elif self.wrap_dim == "row":
                info["left"] = i < nrows
                info["right"] = i >= self.n_subplots - nrows
                info["top"] = i % nrows == 0
                info["bottom"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)

            if not pair_spec.get("cross", True):
                info["top"] = j < ncols
                info["bottom"] = j >= self.n_subplots - ncols

            for dim in ["row", "col"]:
                idx = {"row": i, "col": j}[dim]
                info[dim] = self.grid_dimensions[dim][idx]

            for axis in "xy":

                idx = {"x": j, "y": i}[axis]
                if axis in pair_spec.get("structure", {}):
                    key = f"{axis}{idx}"
                else:
                    key = axis
                info[axis] = key

            self._subplot_list.append(info)

        return figure
Initialize matplotlib objects and add seaborn-relevant metadata.
init_figure
python
mwaskom/seaborn
seaborn/_core/subplots.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_core/subplots.py
BSD-3-Clause
def __init__(
        self,
        val: Any = None,
        depend: str | None = None,
        rc: str | None = None,
        auto: bool = False,
        grouping: bool = True,
    ):
        """
        Property that can be mapped from data or set directly, with flexible defaults.

        Parameters
        ----------
        val : Any
            Use this value as the default.
        depend : str
            Use the value of this feature as the default.
        rc : str
            Use the value of this rcParam as the default.
        auto : bool
            The default value will depend on other parameters at compile time.
        grouping : bool
            If True, use the mapped variable to define groups.

        """
        if depend is not None:
            assert depend in PROPERTIES
        if rc is not None:
            assert rc in mpl.rcParams

        self._val = val
        self._rc = rc
        self._depend = depend
        self._auto = auto
        self._grouping = grouping
Property that can be mapped from data or set directly, with flexible defaults.

Parameters
----------
val : Any
    Use this value as the default.
depend : str
    Use the value of this feature as the default.
rc : str
    Use the value of this rcParam as the default.
auto : bool
    The default value will depend on other parameters at compile time.
grouping : bool
    If True, use the mapped variable to define groups.
__init__
python
mwaskom/seaborn
seaborn/_marks/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_marks/base.py
BSD-3-Clause
def __repr__(self):
        """Nice formatting for when object appears in Mark init signature."""
        if self._val is not None:
            s = f"<{repr(self._val)}>"
        elif self._depend is not None:
            s = f"<depend:{self._depend}>"
        elif self._rc is not None:
            s = f"<rc:{self._rc}>"
        elif self._auto:
            s = "<auto>"
        else:
            s = "<undefined>"
        return s
Nice formatting for when object appears in Mark init signature.
__repr__
python
mwaskom/seaborn
seaborn/_marks/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_marks/base.py
BSD-3-Clause
def default(self) -> Any:
        """Get the default value for this feature, or access the relevant rcParam."""
        if self._val is not None:
            return self._val
        elif self._rc is not None:
            return mpl.rcParams.get(self._rc)
Get the default value for this feature, or access the relevant rcParam.
default
python
mwaskom/seaborn
seaborn/_marks/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_marks/base.py
BSD-3-Clause
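Illustrative behavior of the property above (editorial; private module import):

import matplotlib as mpl
from seaborn._marks.base import Mappable

assert Mappable(2.5).default == 2.5  # a direct value wins
assert Mappable(rc="lines.linewidth").default == mpl.rcParams["lines.linewidth"]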
def _resolve(
        self,
        data: DataFrame | dict[str, Any],
        name: str,
        scales: dict[str, Scale] | None = None,
    ) -> Any:
        """Obtain default, specified, or mapped value for a named feature.

        Parameters
        ----------
        data : DataFrame or dict with scalar values
            Container with data values for features that will be semantically mapped.
        name : string
            Identity of the feature / semantic.
        scales: dict
            Mapping from variable to corresponding scale object.

        Returns
        -------
        value or array of values
            Outer return type depends on whether `data` is a dict (implying that
            we want a single value) or DataFrame (implying that we want an array
            of values with matching length).

        """
        feature = self._mappable_props[name]
        prop = PROPERTIES.get(name, Property(name))
        directly_specified = not isinstance(feature, Mappable)
        return_multiple = isinstance(data, pd.DataFrame)
        return_array = return_multiple and not name.endswith("style")

        # Special case width because it needs to be resolved and added to the dataframe
        # during layer prep (so the Move operations use it properly).
        # TODO how does width *scaling* work, e.g. for violin width by count?
        if name == "width":
            directly_specified = directly_specified and name not in data

        if directly_specified:
            feature = prop.standardize(feature)
            if return_multiple:
                feature = [feature] * len(data)
            if return_array:
                feature = np.array(feature)
            return feature

        if name in data:
            if scales is None or name not in scales:
                # TODO Might this obviate the identity scale? Just don't add a scale?
                feature = data[name]
            else:
                scale = scales[name]
                value = data[name]
                try:
                    feature = scale(value)
                except Exception as err:
                    raise PlotSpecError._during("Scaling operation", name) from err

            if return_array:
                feature = np.asarray(feature)
            return feature

        if feature.depend is not None:
            # TODO add source_func or similar to transform the source value?
            # e.g. set linewidth as a proportion of pointsize?
            return self._resolve(data, feature.depend, scales)

        default = prop.standardize(feature.default)
        if return_multiple:
            default = [default] * len(data)
        if return_array:
            default = np.array(default)
        return default
Obtain default, specified, or mapped value for a named feature.

Parameters
----------
data : DataFrame or dict with scalar values
    Container with data values for features that will be semantically mapped.
name : string
    Identity of the feature / semantic.
scales: dict
    Mapping from variable to corresponding scale object.

Returns
-------
value or array of values
    Outer return type depends on whether `data` is a dict (implying that
    we want a single value) or DataFrame (implying that we want an array
    of values with matching length).
_resolve
python
mwaskom/seaborn
seaborn/_marks/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_marks/base.py
BSD-3-Clause
def resolve_color(
    mark: Mark,
    data: DataFrame | dict,
    prefix: str = "",
    scales: dict[str, Scale] | None = None,
) -> RGBATuple | ndarray:
    """
    Obtain a default, specified, or mapped value for a color feature.

    This method exists separately to support the relationship between a
    color and its corresponding alpha. We want to respect alpha values that
    are passed in specified (or mapped) color values but also make use of a
    separate `alpha` variable, which can be mapped. This approach may also
    be extended to support mapping of specific color channels (i.e.
    luminance, chroma) in the future.

    Parameters
    ----------
    mark :
        Mark with the color property.
    data :
        Container with data values for features that will be semantically mapped.
    prefix :
        Support "color", "fillcolor", etc.

    """
    color = mark._resolve(data, f"{prefix}color", scales)

    if f"{prefix}alpha" in mark._mappable_props:
        alpha = mark._resolve(data, f"{prefix}alpha", scales)
    else:
        alpha = mark._resolve(data, "alpha", scales)

    def visible(x, axis=None):
        """Detect "invisible" colors to set alpha appropriately."""
        # TODO First clause only needed to handle non-rgba arrays,
        # which we are trying to handle upstream
        return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)

    # Second check here catches vectors of strings with identity scale
    # It could probably be handled better upstream. This is a tricky problem
    if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):
        if len(color) == 4:
            return mpl.colors.to_rgba(color)
        alpha = alpha if visible(color) else np.nan
        return mpl.colors.to_rgba(color, alpha)
    else:
        if np.ndim(color) == 2 and color.shape[1] == 4:
            return mpl.colors.to_rgba_array(color)
        alpha = np.where(visible(color, axis=1), alpha, np.nan)
        return mpl.colors.to_rgba_array(color, alpha)

    # TODO should we be implementing fill here too?
    # (i.e. set fillalpha to 0 when fill=False)
Obtain a default, specified, or mapped value for a color feature.

This method exists separately to support the relationship between a
color and its corresponding alpha. We want to respect alpha values that
are passed in specified (or mapped) color values but also make use of a
separate `alpha` variable, which can be mapped. This approach may also
be extended to support mapping of specific color channels (i.e.
luminance, chroma) in the future.

Parameters
----------
mark :
    Mark with the color property.
data :
    Container with data values for features that will be semantically mapped.
prefix :
    Support "color", "fillcolor", etc.
resolve_color
python
mwaskom/seaborn
seaborn/_marks/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_marks/base.py
BSD-3-Clause
def visible(x, axis=None):
        """Detect "invisible" colors to set alpha appropriately."""
        # TODO First clause only needed to handle non-rgba arrays,
        # which we are trying to handle upstream
        return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)
Detect "invisible" colors to set alpha appropriately.
visible
python
mwaskom/seaborn
seaborn/_marks/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_marks/base.py
BSD-3-Clause
def _check_param_one_of(self, param: str, options: Iterable[Any]) -> None:
        """Raise when parameter value is not one of a specified set."""
        value = getattr(self, param)
        if value not in options:
            *most, last = options
            # Join all leading options, then append the last with "or"
            option_str = ", ".join(f"{x!r}" for x in most) + f" or {last!r}"
            err = " ".join([
                f"The `{param}` parameter for `{self.__class__.__name__}` must be",
                f"one of {option_str}; not {value!r}.",
            ])
            raise ValueError(err)
Raise when parameter value is not one of a specified set.
_check_param_one_of
python
mwaskom/seaborn
seaborn/_stats/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/base.py
BSD-3-Clause
def _check_grouping_vars(
        self, param: str, data_vars: list[str], stacklevel: int = 2,
    ) -> None:
        """Warn if vars are named in parameter without being present in the data."""
        param_vars = getattr(self, param)
        undefined = set(param_vars) - set(data_vars)
        if undefined:
            param = f"{self.__class__.__name__}.{param}"
            names = ", ".join(f"{x!r}" for x in undefined)
            msg = f"Undefined variable(s) passed for {param}: {names}."
            warnings.warn(msg, stacklevel=stacklevel)
Warn if vars are named in parameter without being present in the data.
_check_grouping_vars
python
mwaskom/seaborn
seaborn/_stats/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/base.py
BSD-3-Clause
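The core of the check is a simple set difference; with hypothetical inputs:

param_vars = ["x", "hue"]  # named in the parameter
data_vars = ["x", "y"]     # actually present in the data
undefined = set(param_vars) - set(data_vars)
print(undefined)  # {'hue'} -> warns "Undefined variable(s) passed for ...: 'hue'."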
def __call__(
        self,
        data: DataFrame,
        groupby: GroupBy,
        orient: str,
        scales: dict[str, Scale],
    ) -> DataFrame:
        """Apply statistical transform to data subgroups and return combined result."""
        return data
Apply statistical transform to data subgroups and return combined result.
__call__
python
mwaskom/seaborn
seaborn/_stats/base.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/base.py
BSD-3-Clause
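Since the base implementation is an identity transform, subclasses override `__call__` to do real work. A hypothetical minimal subclass (the `Shift` name and its behavior are invented for illustration, not part of seaborn):

from dataclasses import dataclass
import pandas as pd

@dataclass
class Shift:  # hypothetical Stat subclass
    amount: float = 1.0

    def __call__(self, data, groupby, orient, scales) -> pd.DataFrame:
        # Shift values along the orientation axis; groupby/scales unused here
        return data.assign(**{orient: data[orient] + self.amount})

df = pd.DataFrame({"x": [0, 1, 2], "y": [3, 4, 5]})
print(Shift(2.0)(df, None, "x", {}))  # x becomes 2.0, 3.0, 4.0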
def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):
        """Inner function that takes bin parameters as arguments."""
        vals = vals.replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna()

        if binrange is None:
            start, stop = vals.min(), vals.max()
        else:
            start, stop = binrange

        if discrete:
            bin_edges = np.arange(start - .5, stop + 1.5)
        else:
            if binwidth is not None:
                bins = int(round((stop - start) / binwidth))
            bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)

        # TODO warning or cap on too many bins?
        return bin_edges
Inner function that takes bin parameters as arguments.
_define_bin_edges
python
mwaskom/seaborn
seaborn/_stats/counting.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/counting.py
BSD-3-Clause
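The two branches can be traced with plain numpy; the sample values are arbitrary:

import numpy as np
import pandas as pd

vals = pd.Series([0, 1, 1, 2, 4])
start, stop = vals.min(), vals.max()

# discrete=True: unit-width bins centered on each integer value
print(np.arange(start - .5, stop + 1.5))  # [-0.5  0.5  1.5  2.5  3.5  4.5]

# binwidth=1.5: derive a bin count, then let numpy place the edges
bins = int(round((stop - start) / 1.5))
print(np.histogram_bin_edges(vals, bins))  # 3 bins spanning [0, 4]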
def _define_bin_params(self, data, orient, scale_type):
        """Given data, return numpy.histogram parameters to define bins."""
        vals = data[orient]
        weights = data.get("weight", None)

        # TODO We'll want this for ordinal / discrete scales too
        # (Do we need discrete as a parameter or just infer from scale?)
        discrete = self.discrete or scale_type == "nominal"

        bin_edges = self._define_bin_edges(
            vals, weights, self.bins, self.binwidth, self.binrange, discrete,
        )

        if isinstance(self.bins, (str, int)):
            n_bins = len(bin_edges) - 1
            bin_range = bin_edges.min(), bin_edges.max()
            bin_kws = dict(bins=n_bins, range=bin_range)
        else:
            bin_kws = dict(bins=bin_edges)

        return bin_kws
Given data, return numpy.histogram parameters to define bins.
_define_bin_params
python
mwaskom/seaborn
seaborn/_stats/counting.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/counting.py
BSD-3-Clause
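The returned `bin_kws` unpack directly into `numpy.histogram`; the values shown are hypothetical:

import numpy as np

bin_kws = dict(bins=5, range=(0.0, 10.0))  # shape of the dict this returns
counts, edges = np.histogram([1, 2, 2, 7, 9], **bin_kws)
print(counts)  # [1 2 0 1 1]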
def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:
        """Do input checks on grouping parameters."""
        value = getattr(self, param)
        if not (
            isinstance(value, bool)
            or (isinstance(value, list) and all(isinstance(v, str) for v in value))
        ):
            param_name = f"{self.__class__.__name__}.{param}"
            raise TypeError(f"{param_name} must be a boolean or list of strings.")
        self._check_grouping_vars(param, grouping_vars, stacklevel=3)
Do input checks on grouping parameters.
_check_var_list_or_boolean
python
mwaskom/seaborn
seaborn/_stats/density.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/density.py
BSD-3-Clause
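The acceptance condition, extracted and run on hypothetical values:

def valid(value):
    return isinstance(value, bool) or (
        isinstance(value, list) and all(isinstance(v, str) for v in value)
    )

print(valid(True), valid(["hue", "col"]), valid("hue"))  # True True False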
def _get_support(self, data: DataFrame, orient: str) -> ndarray:
        """Define the grid that the KDE will be evaluated on."""
        if self.gridsize is None:
            return data[orient].to_numpy()

        kde = self._fit(data, orient)
        bw = np.sqrt(kde.covariance.squeeze())
        gridmin = data[orient].min() - bw * self.cut
        gridmax = data[orient].max() + bw * self.cut
        return np.linspace(gridmin, gridmax, self.gridsize)
Define the grid that the KDE will be evaluated on.
_get_support
python
mwaskom/seaborn
seaborn/_stats/density.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/density.py
BSD-3-Clause
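The same grid can be built with `scipy.stats.gaussian_kde` directly; `cut` and `gridsize` here are hypothetical parameter values, and `gaussian_kde(x)` stands in for `self._fit(data, orient)`:

import numpy as np
from scipy.stats import gaussian_kde

x = np.random.default_rng(0).normal(size=100)
kde = gaussian_kde(x)  # stands in for self._fit(data, orient)
bw = np.sqrt(kde.covariance.squeeze())

cut, gridsize = 3, 200  # hypothetical parameter values
support = np.linspace(x.min() - bw * cut, x.max() + bw * cut, gridsize)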
def _fit_and_evaluate(
        self, data: DataFrame, orient: str, support: ndarray
    ) -> DataFrame:
        """Transform single group by fitting a KDE and evaluating on a support grid."""
        empty = pd.DataFrame(columns=[orient, "weight", "density"], dtype=float)
        if len(data) < 2:
            return empty
        try:
            kde = self._fit(data, orient)
        except np.linalg.LinAlgError:
            return empty

        if self.cumulative:
            s_0 = support[0]
            density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])
        else:
            density = kde(support)

        weight = data["weight"].sum()
        return pd.DataFrame({orient: support, "weight": weight, "density": density})
Transform single group by fitting a KDE and evaluating on a support grid.
_fit_and_evaluate
python
mwaskom/seaborn
seaborn/_stats/density.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/density.py
BSD-3-Clause
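The cumulative branch integrates the fitted KDE up to each grid point; a sketch with scipy (the variable names are invented):

import numpy as np
from scipy.stats import gaussian_kde

x = np.random.default_rng(0).normal(size=100)
kde = gaussian_kde(x)
support = np.linspace(x.min(), x.max(), 50)

density = kde(support)  # the non-cumulative branch
cumulative = np.array(
    [kde.integrate_box_1d(support[0], s_i) for s_i in support]
)  # rises monotonically toward ~1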
def _transform(
        self, data: DataFrame, orient: str, grouping_vars: list[str]
    ) -> DataFrame:
        """Transform multiple groups by fitting KDEs and evaluating."""
        empty = pd.DataFrame(columns=[*data.columns, "density"], dtype=float)
        if len(data) < 2:
            return empty
        try:
            support = self._get_support(data, orient)
        except np.linalg.LinAlgError:
            return empty

        grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]
        if not grouping_vars:
            return self._fit_and_evaluate(data, orient, support)
        groupby = GroupBy(grouping_vars)
        return groupby.apply(data, self._fit_and_evaluate, orient, support)
Transform multiple groups by fitting KDEs and evaluating.
_transform
python
mwaskom/seaborn
seaborn/_stats/density.py
https://github.com/mwaskom/seaborn/blob/master/seaborn/_stats/density.py
BSD-3-Clause
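A pandas stand-in for the groupwise evaluation (seaborn's internal `GroupBy` is replaced with `DataFrame.groupby`; all names here are illustrative):

import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "x": rng.normal(size=200),
    "hue": rng.choice(["a", "b"], size=200),
})

def fit_and_evaluate(g, support):
    # Fit a KDE per group and evaluate on the shared support grid
    return pd.DataFrame({"x": support, "density": gaussian_kde(g["x"])(support)})

support = np.linspace(df["x"].min(), df["x"].max(), 100)
out = pd.concat(
    fit_and_evaluate(g, support).assign(hue=key)
    for key, g in df.groupby("hue")
)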