Dataset columns: id (int64, values 0 to 190k), prompt (string, lengths 21 to 13.4M), docstring (string, lengths 1 to 12k)
602
import logging import pathlib import pickle import typing as t import numpy as np from deepchecks.vision.utils.test_utils import hash_image from deepchecks.vision.vision_data import VisionData def mnist_generator(shuffle: bool = False, batch_size: int = 64, train: bool = True, n_samples: int = None, model=None) -> t.Generator: """Generate an MNIST dataset. Parameters ---------- batch_size: int, optional how many samples per batch to load train : bool, default : True Train or Test dataset n_samples : int, optional Number of samples to load. shuffle : bool , default : False whether to shuffle the data or not. model : MockModel, optional Model to use for predictions Returns ------- :obj:`t.Generator` """ images, labels = load_mnist_data(train, n_samples=n_samples, shuffle=shuffle) for i in range(0, len(images), batch_size): return_dict = {'images': images[i:(i + batch_size):], 'labels': labels[i:(i + batch_size):]} if model is not None: return_dict.update({'predictions': model(return_dict['images'])}) # deepchecks expects images to be in the range [0, 255] return_dict['images'] = return_dict['images'] * 255.0 yield return_dict def load_model() -> 'MockModel': """Load MNIST model. Returns ------- MnistModel """ path = MODEL_PATH saved_path = MODEL_SAVED_PATH # Different path because tf saves the model in three files, none named as MODEL_PATH def create_model(): """Create a new model.""" return keras.models.Sequential([ keras.layers.Flatten(input_shape=(28, 28, 1)), keras.layers.Dense(128, activation='relu'), keras.layers.Dropout(0.2), keras.layers.Dense(10) ]) def add_softmax(model: keras.models.Sequential): """Add softmax layer to model.""" return keras.Sequential([ model, keras.layers.Softmax() ]) if saved_path.exists(): model = create_model() model.load_weights(path).expect_partial() model = add_softmax(model) return MockModel(model) model = create_model() loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy']) x_train, y_train = load_mnist_data(train=True) model.fit(x_train, y_train, epochs=2) del x_train, y_train model.save_weights(path) # Change output to softmax for probabilities model = add_softmax(model) if not path.parent.exists(): path.parent.mkdir() return MockModel(model) The provided code snippet includes necessary dependencies for implementing the `load_dataset` function. Write a Python function `def load_dataset(train: bool = True, with_predictions: bool = True, batch_size: t.Optional[int] = None, shuffle: bool = False, n_samples: int = None, object_type='VisionData') -> VisionData` to solve the following problem: Return MNIST VisionData, containing prediction produced by a simple fully connected model. Model and data are taken from https://www.tensorflow.org/tutorials/quickstart/beginner. Parameters ---------- train : bool, default : True Train or Test dataset with_predictions : bool, default : True Whether the returned VisonData should contain predictions batch_size: int, optional how many samples per batch to load shuffle : bool , default : False To reshuffled data at every epoch or not. n_samples : int, optional Number of samples to load. Return the first n_samples if shuffle is False otherwise selects n_samples at random. If None, returns all samples. object_type : str, default : 'VisionData' Kept for compatibility with torch datasets. Not used. 
Returns ------- :obj:`deepchecks.vision.VisionData` Here is the function: def load_dataset(train: bool = True, with_predictions: bool = True, batch_size: t.Optional[int] = None, shuffle: bool = False, n_samples: int = None, object_type='VisionData') -> VisionData: """Return MNIST VisionData, containing prediction produced by a simple fully connected model. Model and data are taken from https://www.tensorflow.org/tutorials/quickstart/beginner. Parameters ---------- train : bool, default : True Train or Test dataset with_predictions : bool, default : True Whether the returned VisonData should contain predictions batch_size: int, optional how many samples per batch to load shuffle : bool , default : False To reshuffled data at every epoch or not. n_samples : int, optional Number of samples to load. Return the first n_samples if shuffle is False otherwise selects n_samples at random. If None, returns all samples. object_type : str, default : 'VisionData' Kept for compatibility with torch datasets. Not used. Returns ------- :obj:`deepchecks.vision.VisionData` """ if object_type != 'VisionData': raise ValueError('only VisionData is supported for MNIST dataset') batch_size = batch_size or (64 if train else 1000) if with_predictions: model = load_model() else: model = None return VisionData( batch_loader=mnist_generator(shuffle, batch_size, train, n_samples, model), task_type='classification', dataset_name=f'mnist {"train" if train else "test"}', reshuffle_data=False )
Return MNIST VisionData, containing predictions produced by a simple fully connected model. Model and data are taken from https://www.tensorflow.org/tutorials/quickstart/beginner. Parameters ---------- train : bool, default : True Train or Test dataset with_predictions : bool, default : True Whether the returned VisionData should contain predictions batch_size: int, optional how many samples per batch to load shuffle : bool, default : False Whether to reshuffle the data at every epoch or not. n_samples : int, optional Number of samples to load. Returns the first n_samples if shuffle is False, otherwise selects n_samples at random. If None, returns all samples. object_type : str, default : 'VisionData' Kept for compatibility with torch datasets. Not used. Returns ------- :obj:`deepchecks.vision.VisionData`
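A hedged usage sketch for the row above: the import path is an assumption (the prompt does not name the module), and the suite call assumes the standard deepchecks vision API.

```python
# Assumed import path; the prompt above does not name the module that hosts load_dataset.
from deepchecks.vision.datasets.classification.mnist_tensorflow import load_dataset
from deepchecks.vision.suites import train_test_validation

train_ds = load_dataset(train=True, with_predictions=True, n_samples=5_000)
test_ds = load_dataset(train=False, with_predictions=True, n_samples=1_000)

# Both objects are VisionData instances, so they can be fed to any vision suite or check.
result = train_test_validation().run(train_ds, test_ds)
result.save_as_html('mnist_validation.html')
```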
603
from enum import Enum from collections import defaultdict from typing import Any, Dict, List from deepchecks.core.errors import DeepchecksValueError The provided code snippet includes necessary dependencies for implementing the `calc_vision_properties` function. Write a Python function `def calc_vision_properties(raw_data: List, properties_list: List) -> Dict[str, list]` to solve the following problem: Calculate the image properties for a batch of images. Parameters ---------- raw_data : torch.Tensor Batch of images to transform to image properties. properties_list: List[Dict] , default: None A list of properties to calculate. Returns ------ batch_properties: dict[str, List] A dict of property name, property value per sample. Here is the function: def calc_vision_properties(raw_data: List, properties_list: List) -> Dict[str, list]: """ Calculate the image properties for a batch of images. Parameters ---------- raw_data : torch.Tensor Batch of images to transform to image properties. properties_list: List[Dict] , default: None A list of properties to calculate. Returns ------ batch_properties: dict[str, List] A dict of property name, property value per sample. """ batch_properties = defaultdict(list) for single_property in properties_list: property_list = single_property['method'](raw_data) batch_properties[single_property['name']] = property_list return batch_properties
Calculate the image properties for a batch of images. Parameters ---------- raw_data : torch.Tensor Batch of images to transform to image properties. properties_list: List[Dict] , default: None A list of properties to calculate. Returns ------ batch_properties: dict[str, List] A dict of property name, property value per sample.
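A minimal, self-contained sketch of the expected inputs, assuming `calc_vision_properties` as defined above is in scope; `mean_pixel` is a hypothetical property method.

```python
import numpy as np

def mean_pixel(images):
    # Hypothetical property method: receives the raw batch, returns one value per sample.
    return [float(img.mean()) for img in images]

properties = [{'name': 'Mean Pixel', 'method': mean_pixel, 'output_type': 'numerical'}]
batch = [np.random.randint(0, 255, (28, 28, 3), dtype=np.uint8) for _ in range(4)]

result = calc_vision_properties(batch, properties)
print(result)  # defaultdict with one key: {'Mean Pixel': [v0, v1, v2, v3]}
```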
604
from enum import Enum from collections import defaultdict from typing import Any, Dict, List from deepchecks.core.errors import DeepchecksValueError class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass The provided code snippet includes necessary dependencies for implementing the `validate_properties` function. Write a Python function `def validate_properties(properties: List[Dict[str, Any]])` to solve the following problem: Validate structure of measurements. Here is the function: def validate_properties(properties: List[Dict[str, Any]]): """Validate structure of measurements.""" if not isinstance(properties, list): raise DeepchecksValueError( 'Expected properties to be a list, ' f'instead got {type(properties).__name__}' ) if len(properties) == 0: raise DeepchecksValueError('Properties list can\'t be empty') expected_keys = ('name', 'method', 'output_type') output_types = ('categorical', 'numerical', 'class_id') errors = [] for index, image_property in enumerate(properties): if not isinstance(image_property, dict) or not all(key in image_property for key in expected_keys): errors.append( f'Item #{index}: property must be of type dict, ' f'and include keys {expected_keys}.' ) continue image_property['name'] = property_name = image_property.get('name') or f'#{index}' difference = sorted(set(expected_keys).difference(set(image_property.keys()))) if len(difference) > 0: errors.append( f'Property {property_name}: dictionary must include keys {expected_keys}. ' f'Next keys are missed {difference}' ) continue property_output_type = image_property['output_type'] if property_output_type not in output_types: errors.append( f'Property {property_name}: field "output_type" must be one of {output_types}, ' f'instead got {property_output_type}' ) if len(errors) > 0: errors = '\n+ '.join(errors) raise DeepchecksValueError(f'List of properties contains next problems:\n+ {errors}') return properties
Validate structure of measurements.
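A short behavioral sketch, assuming `validate_properties` and `DeepchecksValueError` from the snippet above are in scope.

```python
good = [{'name': 'Area',
         'method': lambda batch: [img.size for img in batch],
         'output_type': 'numerical'}]
assert validate_properties(good) is good   # a valid structure is returned unchanged

bad = [{'name': 'Area', 'method': lambda batch: batch, 'output_type': 'continuous'}]
try:
    validate_properties(bad)               # 'continuous' is not an allowed output_type
except DeepchecksValueError as err:
    print(err)
```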
605
from typing import List from deepchecks.core.errors import ModelValidationError from deepchecks.vision.utils.vision_properties import PropertiesInputType from deepchecks.vision.vision_data import TaskType from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper class ModelValidationError(DeepchecksBaseError): """Represents unappropriate model instance. Should be used in a situation when a routine (like check instance, utility function, etc) expected and received a dataset instance that did not meet routine requirements. """ pass class PropertiesInputType(Enum): """Enum containing supported task types.""" IMAGES = 'images' PARTIAL_IMAGES = 'partial_images' LABELS = 'labels' PREDICTIONS = 'predictions' class BatchWrapper: """Represents dataset batch returned by the dataloader during iteration.""" def __init__(self, batch: BatchOutputFormat, task_type: TaskType, images_seen_num: int): self._task_type = task_type self._batch = batch self._labels, self._predictions, self._images = None, None, None self._embeddings, self._additional_data, = None, None self._image_identifiers = batch.get('image_identifiers') # if there are no image identifiers, use the number of the image in loading process as identifier if self._image_identifiers is None: self._image_identifiers = np.asarray(range(images_seen_num, images_seen_num + len(self)), dtype='str') self._vision_properties_cache = dict.fromkeys(PropertiesInputType) def _get_relevant_data_for_properties(self, input_type: PropertiesInputType): result = [] if input_type == PropertiesInputType.PARTIAL_IMAGES: for img, bboxes_in_img in zip(self.numpy_images, self.numpy_labels): if bboxes_in_img is None: continue result = result + [crop_image(img, *bbox[1:]) for bbox in bboxes_in_img] elif input_type == PropertiesInputType.IMAGES: result = self.numpy_images elif input_type == PropertiesInputType.LABELS: result = self.numpy_labels elif input_type == PropertiesInputType.PREDICTIONS: result = self.numpy_predictions return result def vision_properties(self, properties_list: Optional[List[Dict]], input_type: PropertiesInputType): """Calculate and cache the properties for the batch according to the property input type. Parameters ---------- properties_list: Optional[List[Dict]] List of properties to calculate. If None, default properties will be calculated. input_type: PropertiesInputType The input type of the properties. Returns ------- Dict[str, Any] Dictionary of the properties name to list of property values per data element. 
""" if self._vision_properties_cache[input_type] is None: self._vision_properties_cache[input_type] = {} keys_in_cache = self._vision_properties_cache[input_type].keys() if properties_list is not None: properties_list = validate_properties(properties_list) requested_properties_names = [prop['name'] for prop in properties_list] properties_to_calc = [p for p in properties_list if p['name'] not in keys_in_cache] if len(properties_to_calc) > 0: data = self._get_relevant_data_for_properties(input_type) self._vision_properties_cache[input_type].update(calc_vision_properties(data, properties_to_calc)) else: if input_type not in [PropertiesInputType.PARTIAL_IMAGES, PropertiesInputType.IMAGES]: # TODO: add support for quick default properties calculation for other input types raise DeepchecksProcessError(f'None was passed to properties calculation for input type {input_type}.') requested_properties_names = [prop['name'] for prop in default_image_properties] if any(x not in keys_in_cache for x in requested_properties_names): data = self._get_relevant_data_for_properties(input_type) self._vision_properties_cache[input_type].update(calc_default_image_properties(data)) return {key: value for key, value in self._vision_properties_cache[input_type].items() if key in requested_properties_names} def original_labels(self): """Return labels for the batch, formatted in deepchecks format.""" if self._labels is None: self._labels = self._batch.get('labels') return self._labels def numpy_labels(self) -> List[Union[np.ndarray, int]]: """Return labels for the batch in numpy format.""" required_dim = 0 if self._task_type == TaskType.CLASSIFICATION else 2 return sequence_to_numpy(self.original_labels, expected_ndim_per_object=required_dim) def original_predictions(self): """Return predictions for the batch, formatted in deepchecks format.""" if self._predictions is None: self._predictions = self._batch.get('predictions') return self._predictions def numpy_predictions(self) -> List[np.ndarray]: """Return predictions for the batch in numpy format.""" if self._task_type == TaskType.CLASSIFICATION: required_dim = 1 elif self._task_type == TaskType.OBJECT_DETECTION: required_dim = 2 elif self._task_type == TaskType.SEMANTIC_SEGMENTATION: required_dim = 3 else: required_dim = None return sequence_to_numpy(self.original_predictions, expected_ndim_per_object=required_dim) def original_images(self): """Return images for the batch, formatted in deepchecks format.""" if self._images is None: self._images = self._batch.get('images') return self._images def numpy_images(self) -> List[Union[np.ndarray]]: """Return images for the batch in numpy format.""" return sequence_to_numpy(self.original_images, 'uint8', 3) def original_embeddings(self): """Return embedding for the batch, formatted in deepchecks format.""" if self._embeddings is None: self._embeddings = self._batch.get('embeddings') return self._embeddings def numpy_embeddings(self) -> List[Union[np.ndarray]]: """Return embedding for the batch in numpy format.""" return sequence_to_numpy(self.original_embeddings, 'float32') def original_additional_data(self): """Return additional data for the batch, formatted in deepchecks format.""" if self._additional_data is None: self._additional_data = self._batch.get('additional_data') return self._additional_data def numpy_additional_data(self): """Return additional data for the batch in numpy format.""" return sequence_to_numpy(self.original_additional_data) def original_image_identifiers(self): """Return image identifiers for the 
batch, formatted in deepchecks format.""" return self._image_identifiers def numpy_image_identifiers(self) -> List[Union[str, int]]: """Return image identifiers for the batch in numpy format.""" return sequence_to_numpy(self.original_image_identifiers, 'str', 0) def __len__(self): """Return length of batch.""" data = self.numpy_images if self.numpy_images is not None else self.numpy_predictions if \ self.numpy_predictions is not None else self.numpy_labels if self.numpy_labels is not None else \ self.numpy_embeddings if self.numpy_embeddings is not None else self.numpy_additional_data return len(data) The provided code snippet includes necessary dependencies for implementing the `calc_properties_for_property_label_correlation` function. Write a Python function `def calc_properties_for_property_label_correlation(task_type: TaskType, batch: BatchWrapper, image_properties: List)` to solve the following problem: Transform the data to the relevant format and calculate the properties on it. Intended for the checks PropertyLabelCorrelation and PropertyLabelCorrelationChange. Here is the function: def calc_properties_for_property_label_correlation(task_type: TaskType, batch: BatchWrapper, image_properties: List): """ Transform the data to the relevant format and calculate the properties on it. Intended for the checks PropertyLabelCorrelation and PropertyLabelCorrelationChange. """ targets = [] if task_type == TaskType.OBJECT_DETECTION: for bboxes_per_image in batch.numpy_labels: if bboxes_per_image is not None and len(bboxes_per_image.shape) == 2: targets = targets + bboxes_per_image[:, 0].tolist() property_type = PropertiesInputType.PARTIAL_IMAGES elif task_type == TaskType.CLASSIFICATION: targets = targets + batch.numpy_labels property_type = PropertiesInputType.IMAGES else: raise ModelValidationError(f'Check is irrelevant for task of type {task_type}') data_for_properties = batch.vision_properties(image_properties, property_type) return data_for_properties, targets
Transform the data to the relevant format and calculate the properties on it. Intended for the checks PropertyLabelCorrelation and PropertyLabelCorrelationChange.
606
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_samples_per_class_classification` function. Write a Python function `def _get_samples_per_class_classification(labels: Union[np.ndarray, List]) -> List[int]` to solve the following problem: Return a list containing the class per image in batch. Here is the function: def _get_samples_per_class_classification(labels: Union[np.ndarray, List]) -> List[int]: """Return a list containing the class per image in batch.""" return labels if isinstance(labels, List) else labels.tolist()
Return a list containing the class per image in batch.
607
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_samples_per_class_object_detection` function. Write a Python function `def _get_samples_per_class_object_detection(labels: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the classes in batch. Here is the function: def _get_samples_per_class_object_detection(labels: List[np.ndarray]) -> List[List[int]]: """Return a list containing the classes in batch.""" return [tensor.reshape((-1, 5))[:, 0].tolist() for tensor in labels]
Return a list containing the classes in batch.
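A small illustration of the label layout this helper assumes: one array per image, each row being [class_id, x, y, w, h].

```python
import numpy as np

labels = [
    np.array([[3, 10, 20, 30, 40],     # bbox of class 3
              [1,  0,  0,  5,  5]], dtype=float),
    np.array([]),                      # image with no bboxes
]
print(_get_samples_per_class_object_detection(labels))  # [[3.0, 1.0], []]
```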
608
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_bbox_area` function. Write a Python function `def _get_bbox_area(labels: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the area of bboxes in batch. Here is the function: def _get_bbox_area(labels: List[np.ndarray]) -> List[List[int]]: """Return a list containing the area of bboxes in batch.""" return [(label.reshape((-1, 5))[:, 4] * label.reshape((-1, 5))[:, 3]).tolist() for label in labels]
Return a list containing the area of bboxes in batch.
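A worked example under the same assumed row layout [class_id, x, y, w, h], where columns 3 and 4 hold width and height.

```python
import numpy as np

labels = [np.array([[0, 5, 5, 10, 20]], dtype=float)]  # one 10x20 bbox
print(_get_bbox_area(labels))  # [[200.0]]
```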
609
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_count_num_bboxes` function. Write a Python function `def _count_num_bboxes(labels: List[np.ndarray]) -> List[int]` to solve the following problem: Return a list containing the number of bboxes in per sample batch. Here is the function: def _count_num_bboxes(labels: List[np.ndarray]) -> List[int]: """Return a list containing the number of bboxes in per sample batch.""" num_bboxes = [label.shape[0] for label in labels] return num_bboxes
Return a list containing the number of bboxes per sample in the batch.
610
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_samples_per_class_semantic_segmentation` function. Write a Python function `def _get_samples_per_class_semantic_segmentation(labels: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the classes in batch. Here is the function: def _get_samples_per_class_semantic_segmentation(labels: List[np.ndarray]) -> List[List[int]]: """Return a list containing the classes in batch.""" return [np.unique(label).tolist() for label in labels]
Return a list containing the classes in batch.
611
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_segment_area` function. Write a Python function `def _get_segment_area(labels: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the area of segments in batch. Here is the function: def _get_segment_area(labels: List[np.ndarray]) -> List[List[int]]: """Return a list containing the area of segments in batch.""" return [np.unique(label, return_counts=True)[1].tolist() for label in labels]
Return a list containing the area of segments in batch.
612
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_count_classes_by_segment_in_image` function. Write a Python function `def _count_classes_by_segment_in_image(labels: List[np.ndarray]) -> List[int]` to solve the following problem: Return a list containing the number of unique classes per image for semantic segmentation. Here is the function: def _count_classes_by_segment_in_image(labels: List[np.ndarray]) -> List[int]: """Return a list containing the number of unique classes per image for semantic segmentation.""" return [np.unique(label).shape[0] for label in labels]
Return a list containing the number of unique classes per image for semantic segmentation.
613
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_predicted_classes_per_image_classification` function. Write a Python function `def _get_predicted_classes_per_image_classification(predictions: List[np.ndarray]) -> List[int]` to solve the following problem: Return a list of the predicted class per image in the batch. Here is the function: def _get_predicted_classes_per_image_classification(predictions: List[np.ndarray]) -> List[int]: """Return a list of the predicted class per image in the batch.""" return np.argmax(predictions, axis=1).tolist()
Return a list of the predicted class per image in the batch.
614
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_predicted_classes_per_image_object_detection` function. Write a Python function `def _get_predicted_classes_per_image_object_detection(predictions: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the classes in batch. Here is the function: def _get_predicted_classes_per_image_object_detection(predictions: List[np.ndarray]) -> List[List[int]]: """Return a list containing the classes in batch.""" return [bboxes_per_image.reshape((-1, 6))[:, -1].tolist() for bboxes_per_image in predictions]
Return a list containing the classes in batch.
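An illustration of the prediction layout this helper assumes: one array per image, each row being [x, y, w, h, confidence, class_id], with only the last column used.

```python
import numpy as np

preds = [np.array([[10, 10, 50, 50, 0.9, 2],
                   [ 0,  0, 20, 20, 0.4, 7]], dtype=float)]
print(_get_predicted_classes_per_image_object_detection(preds))  # [[2.0, 7.0]]
```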
615
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_predicted_bbox_area` function. Write a Python function `def _get_predicted_bbox_area(predictions: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list of the predicted bbox sizes per image in the batch. Here is the function: def _get_predicted_bbox_area(predictions: List[np.ndarray]) -> List[List[int]]: """Return a list of the predicted bbox sizes per image in the batch.""" return [(prediction.reshape((-1, 6))[:, 2] * prediction.reshape((-1, 6))[:, 3]).tolist() for prediction in predictions]
Return a list of the predicted bbox sizes per image in the batch.
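A worked example under the same assumed [x, y, w, h, confidence, class_id] row layout, where columns 2 and 3 hold width and height.

```python
import numpy as np

preds = [np.array([[10, 10, 50, 40, 0.9, 2]], dtype=float)]  # one 50x40 prediction
print(_get_predicted_bbox_area(preds))  # [[2000.0]]
```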
616
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_predicted_classes_per_image_semantic_segmentation` function. Write a Python function `def _get_predicted_classes_per_image_semantic_segmentation(predictions: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the classes in batch. Here is the function: def _get_predicted_classes_per_image_semantic_segmentation(predictions: List[np.ndarray]) -> List[List[int]]: """Return a list containing the classes in batch.""" return [np.unique(pred.argmax(0)).tolist() for pred in predictions]
Return a list containing the classes in batch.
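A sketch of the assumed semantic-segmentation prediction format: a (C, H, W) score array per image, where argmax over axis 0 yields the predicted class mask.

```python
import numpy as np

pred = np.zeros((3, 4, 4))   # 3 classes, 4x4 image
pred[1, :2, :] = 1.0         # top half scored highest for class 1
print(_get_predicted_classes_per_image_semantic_segmentation([pred]))  # [[0, 1]]
```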
617
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_get_segment_pred_area` function. Write a Python function `def _get_segment_pred_area(predictions: List[np.ndarray]) -> List[List[int]]` to solve the following problem: Return a list containing the area of segments in batch. Here is the function: def _get_segment_pred_area(predictions: List[np.ndarray]) -> List[List[int]]: """Return a list containing the area of segments in batch.""" return [np.unique(pred.argmax(0), return_counts=True)[1].tolist() for pred in predictions]
Return a list containing the area of segments in batch.
618
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `_count_pred_classes_by_segment_in_image` function. Write a Python function `def _count_pred_classes_by_segment_in_image(predictions: List[np.ndarray]) -> List[int]` to solve the following problem: Return a list containing the number of unique classes per image for semantic segmentation. Here is the function: def _count_pred_classes_by_segment_in_image(predictions: List[np.ndarray]) -> List[int]: """Return a list containing the number of unique classes per image for semantic segmentation.""" return [np.unique(preds.argmax(0)).shape[0] for preds in predictions]
Return a list containing the number of unique classes per image for semantic segmentation.
619
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `get_column_type` function. Write a Python function `def get_column_type(output_type)` to solve the following problem: Get column type to use in drift functions. Here is the function: def get_column_type(output_type): """Get column type to use in drift functions.""" # TODO smarter mapping based on data? # NOTE/TODO: this function is kept only for backward compatibility, remove it later mapper = { 'continuous': 'numerical', 'discrete': 'categorical', 'class_id': 'categorical', 'numerical': 'numerical', 'categorical': 'categorical', } return mapper[output_type]
Get column type to use in drift functions.
620
from typing import List, Sequence, Union import numpy as np The provided code snippet includes necessary dependencies for implementing the `properties_flatten` function. Write a Python function `def properties_flatten(in_list: Sequence) -> List` to solve the following problem: Flatten a list of lists into a single level list. Here is the function: def properties_flatten(in_list: Sequence) -> List: """Flatten a list of lists into a single level list.""" out = [] for el in in_list: if isinstance(el, Sequence) and not isinstance(el, (str, bytes)): out.extend(el) else: out.append(el) return out
Flatten a list of lists into a single level list.
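A quick illustration of the flattening behavior, assuming `properties_flatten` above is in scope.

```python
print(properties_flatten([1, 2, 3]))          # [1, 2, 3]    already flat
print(properties_flatten([[1, 2], [3], []]))  # [1, 2, 3]    one level flattened
print(properties_flatten(['ab', ['cd']]))     # ['ab', 'cd'] strings are kept whole
```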
621
import io import typing as t from numbers import Number from pathlib import Path import cv2 import numpy as np import PIL.Image as pilimage import PIL.ImageDraw as pildraw import PIL.ImageOps as pilops import plotly.graph_objects as go from PIL import ImageColor, ImageFont from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.html import imagetag from deepchecks.vision.vision_data import TaskType from deepchecks.vision.vision_data.utils import LabelMap from .detection_formatters import convert_bbox def draw_bboxes( image: t.Union[pilimage.Image, np.ndarray], bboxes: np.ndarray, label_map: LabelMap, bbox_notation: t.Optional[str] = None, copy_image: bool = True, border_width: int = 1, color: t.Union[str, t.Dict[np.number, str]] = 'red', ) -> pilimage.Image: """Draw bboxes on the image. Parameters ---------- image : Union[PIL.Image.Image, numpy.ndarray] image to draw on bboxes : numpy.ndarray array of bboxes label_map: LabelMap Map of class id to label bbox_notation copy_image : bool, default True copy image before drawing or not border_width : int, default 1 width of the bbox outline color: Union[str, Dict[number, str]], default "red" color of the bbox outline. It could be a map mapping class id to the color Returns ------- PIL.Image.Image : image instance with drawen bboxes on it """ if bboxes.size == 0: return image image = ensure_image(image, copy=copy_image) draw = pildraw.ImageDraw(image) if len(bboxes.shape) == 1: bboxes = [bboxes] if bbox_notation is not None: bboxes = np.array( [convert_bbox(bbox, notation=bbox_notation, image_width=image.width, image_height=image.height, _strict=False).tolist() for bbox in bboxes]) for bbox in bboxes: clazz, x0, y0, w, h = bbox x1, y1 = x0 + w, y0 + h text = label_map[clazz] if isinstance(color, str): color_to_use = color elif isinstance(color, dict): color_to_use = color[clazz] else: raise TypeError('color must be of type - Union[str, Dict[int, str]]') font = get_font_with_size(text, min(w, image.width // 2)) draw.rectangle(xy=(x0, y0, x1, y1), width=border_width, outline=color_to_use) draw.text(xy=(x0 + 2, y0), text=text, fill='white', font=font, stroke_width=2, stroke_fill='black') return image def draw_masks( image: t.Union[pilimage.Image, np.ndarray], mask: np.ndarray, color: t.Dict[Number, str] = None, copy_image: bool = True, alpha: float = 0.5 ) -> pilimage.Image: """Draw mask on the image. Parameters ---------- image : Union[PIL.Image.Image, numpy.ndarray] image to draw on mask : numpy.ndarray A mask label. Shape of H,W with every value represents the class id at that location. copy_image : bool, default True copy image before drawing or not alpha: float, default 0.5 Transparency of the mask over the image. When 1 the mask is solid and the image below is hidden color: Dict[Number, str] color of the masks. 
A map of class id to the color (either string name or rgb list) Returns ------- PIL.Image.Image : image instance with masks on it """ if mask.ndim != 2: raise ValueError('In order to draw mask it must be in H,W shape') image = np.array(ensure_image(image, copy=copy_image)) image_mask = np.zeros(shape=image.shape) classes = set(np.unique(mask)) if color is None: color = random_color_dict(len(classes)) for class_id in classes: color_to_use = color.get(class_id, 'gray') if isinstance(color_to_use, str): color_to_use = ImageColor.getrgb(color_to_use) if len(color_to_use) != 3: raise ValueError(f'Got invalid color: {color_to_use}') rgb_mask = np.stack((mask == class_id,) * 3, axis=-1) * color_to_use image_mask = image_mask + rgb_mask image[image_mask > 0] = image[image_mask > 0] * (1 - alpha) + image_mask[image_mask > 0] * alpha return pilimage.fromarray(image.astype(np.uint8)) def prepare_thumbnail( image: t.Union[pilimage.Image, np.ndarray], size: t.Optional[t.Tuple[int, int]] = None, copy_image: bool = True, ) -> str: """Prepare html image tag with the provided image. Parameters ---------- image : Union[PIL.Image.Image, numpy.ndarray] image to use size : Optional[Tuple[int, int]], default None size to which image should be rescaled copy_image : bool, default True to rescale the image to the provided size this function uses `PIL.Image.Image.thumbnail` method that modified image instance in-place. If `copy_image` is set to True image will be copied before rescaling. Returns ------- str : html '<img>' tag with embedded image """ if size is not None: image = ensure_image(image, copy=copy_image) # First define the correct size with respect to the original aspect ratio width_factor = size[0] / image.size[0] height_factor = size[1] / image.size[1] # Takes the minimum factor in order for the image to not exceed the size in either width or height factor = min(width_factor, height_factor) size = (int(image.size[0] * factor), int(image.size[1] * factor)) # Resize the image by Image.LANCZOS image = image.resize(size, pilimage.LANCZOS) else: image = ensure_image(image, copy=False) img_bytes = io.BytesIO() image.save(img_bytes, optimize=True, quality=60, format='jpeg') img_bytes.seek(0) tag = imagetag(img_bytes.read()) img_bytes.close() return tag from typing import List class LabelMap(dict): """Smarter dict for label map.""" def __init__(self, seq=None, **kwargs): seq = seq or {} super().__init__(seq, **kwargs) def __getitem__(self, class_id) -> str: """Return the name of the class with the given id.""" try: class_id = int(class_id) except ValueError: pass if class_id in self: return dict.__getitem__(self, class_id) return str(class_id) The provided code snippet includes necessary dependencies for implementing the `draw_image` function. Write a Python function `def draw_image(image: np.ndarray, label, task_type: TaskType, label_map: LabelMap, thumbnail_size: t.Tuple[int, int] = (200, 200), draw_label: bool = True) -> str` to solve the following problem: Return an image to show as output of the display. Parameters ---------- image : np.ndarray The image to draw, must be a [H, W, C] 3D numpy array. label : 2-dim labels tensor for the image to draw on top of the image, shape depends on task type. task_type : TaskType The task type associated with the label. label_map: LabelMap Map of class id to label thumbnail_size: t.Tuple[int,int] The required size of the image for display. draw_label : bool, default: True Whether to draw the label on the image or not. 
Returns ------- str The image in the provided thumbnail size with the label drawn on top of it for relevant tasks as html. Here is the function: def draw_image(image: np.ndarray, label, task_type: TaskType, label_map: LabelMap, thumbnail_size: t.Tuple[int, int] = (200, 200), draw_label: bool = True) -> str: """Return an image to show as output of the display. Parameters ---------- image : np.ndarray The image to draw, must be a [H, W, C] 3D numpy array. label : 2-dim labels tensor for the image to draw on top of the image, shape depends on task type. task_type : TaskType The task type associated with the label. label_map: LabelMap Map of class id to label thumbnail_size: t.Tuple[int,int] The required size of the image for display. draw_label : bool, default: True Whether to draw the label on the image or not. Returns ------- str The image in the provided thumbnail size with the label drawn on top of it for relevant tasks as html. """ if label is not None and image is not None and draw_label: if task_type == TaskType.OBJECT_DETECTION: image = draw_bboxes(image, np.asarray(label), copy_image=False, border_width=5, label_map=label_map) elif task_type == TaskType.SEMANTIC_SEGMENTATION: image = draw_masks(image, label, copy_image=False) if image is not None: return prepare_thumbnail(image=image, size=thumbnail_size, copy_image=False) else: return 'Image unavailable'
Return an image to show as output of the display. Parameters ---------- image : np.ndarray The image to draw, must be a [H, W, C] 3D numpy array. label : 2-dim labels tensor for the image to draw on top of the image, shape depends on task type. task_type : TaskType The task type associated with the label. label_map: LabelMap Map of class id to label thumbnail_size: t.Tuple[int,int] The required size of the image for display. draw_label : bool, default: True Whether to draw the label on the image or not. Returns ------- str The image in the provided thumbnail size with the label drawn on top of it for relevant tasks as html.
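A hedged usage sketch: it assumes the full deepchecks module is available (including helpers such as `ensure_image` and `get_font_with_size` that `draw_bboxes` references but that are not shown above), and builds one bbox label in the [class_id, x, y, w, h] format.

```python
import numpy as np

image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
label = np.array([[0, 10, 10, 40, 30]])  # one bbox of class 0: x=10, y=10, w=40, h=30
html_tag = draw_image(image, label, TaskType.OBJECT_DETECTION,
                      LabelMap({0: 'cat'}), thumbnail_size=(200, 200))
# html_tag is an '<img>' string that can be embedded in a check's HTML display
```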
622
import io import typing as t from numbers import Number from pathlib import Path import cv2 import numpy as np import PIL.Image as pilimage import PIL.ImageDraw as pildraw import PIL.ImageOps as pilops import plotly.graph_objects as go from PIL import ImageColor, ImageFont from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.html import imagetag from deepchecks.vision.vision_data import TaskType from deepchecks.vision.vision_data.utils import LabelMap from .detection_formatters import convert_bbox class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass The provided code snippet includes necessary dependencies for implementing the `numpy_grayscale_to_heatmap_figure` function. Write a Python function `def numpy_grayscale_to_heatmap_figure(data: np.ndarray)` to solve the following problem: Create heatmap graph object from given numpy array data. Here is the function: def numpy_grayscale_to_heatmap_figure(data: np.ndarray): """Create heatmap graph object from given numpy array data.""" dimension = data.shape[2] if dimension == 3: data = cv2.cvtColor(data, cv2.COLOR_RGB2GRAY) elif dimension != 1: raise DeepchecksValueError(f'Don\'t know to plot images with {dimension} dimensions') return go.Heatmap(z=data.squeeze(), hoverinfo='skip', coloraxis='coloraxis')
Create heatmap graph object from given numpy array data.
623
import io import typing as t from numbers import Number from pathlib import Path import cv2 import numpy as np import PIL.Image as pilimage import PIL.ImageDraw as pildraw import PIL.ImageOps as pilops import plotly.graph_objects as go from PIL import ImageColor, ImageFont from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.html import imagetag from deepchecks.vision.vision_data import TaskType from deepchecks.vision.vision_data.utils import LabelMap from .detection_formatters import convert_bbox The provided code snippet includes necessary dependencies for implementing the `apply_heatmap_image_properties` function. Write a Python function `def apply_heatmap_image_properties(fig)` to solve the following problem: For heatmap and grayscale images, need to add those properties which on Image exists automatically. Here is the function: def apply_heatmap_image_properties(fig): """For heatmap and grayscale images, need to add those properties which on Image exists automatically.""" fig.update_yaxes(autorange='reversed', constrain='domain') fig.update_xaxes(constrain='domain')
For heatmap and grayscale images, add the axis properties that exist automatically on Image plots.
624
import io import typing as t from numbers import Number from pathlib import Path import cv2 import numpy as np import PIL.Image as pilimage import PIL.ImageDraw as pildraw import PIL.ImageOps as pilops import plotly.graph_objects as go from PIL import ImageColor, ImageFont from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.html import imagetag from deepchecks.vision.vision_data import TaskType from deepchecks.vision.vision_data.utils import LabelMap from .detection_formatters import convert_bbox The provided code snippet includes necessary dependencies for implementing the `crop_image` function. Write a Python function `def crop_image(img: np.ndarray, x, y, w, h) -> np.ndarray` to solve the following problem: Return the cropped numpy array image by x, y, w, h coordinates (top left corner, width and height. Here is the function: def crop_image(img: np.ndarray, x, y, w, h) -> np.ndarray: """Return the cropped numpy array image by x, y, w, h coordinates (top left corner, width and height.""" # Convert x, y, w, h to integers if not integers already: x, y, w, h = [round(n) for n in [x, y, w, h]] # Make sure w, h don't extend the bounding box outside of image dimensions: h = min(h, img.shape[0] - y - 1) w = min(w, img.shape[1] - x - 1) return img[y:y + h, x:x + w]
Return the cropped numpy array image by x, y, w, h coordinates (top left corner, width and height).
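A worked example, assuming `crop_image` above is in scope.

```python
import numpy as np

img = np.arange(10 * 10 * 3).reshape(10, 10, 3)
crop = crop_image(img, x=2, y=3, w=4, h=5)
print(crop.shape)  # (5, 4, 3): h rows by w columns, channels preserved
```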
625
from collections import Counter from typing import Iterable, List, Sequence, Tuple, Union import numpy as np from PIL.Image import Image from deepchecks.vision.vision_data.utils import is_torch_object def verify_bbox_format_notation(notation: str) -> Tuple[bool, List[str]]: """Verify and tokenize bbox format notation. Parameters ---------- notation : str format notation to verify and to tokenize Returns ------- Tuple[ bool, List[Literal['label', 'score', 'width', 'height', 'xmin', 'ymin', 'xmax', 'ymax', 'xcenter', 'ycenter']] ] first item indicates whether coordinates are normalized or not, second represents format of the bbox """ tokens = [] are_coordinates_normalized = False current = notation = notation.strip().lower() current_pos = 0 while current: if current.startswith('l'): tokens.append('l') current = current[1:] current_pos = current_pos + 1 elif current.startswith('s'): tokens.append('s') current = current[1:] current_pos = current_pos + 1 elif current.startswith('wh'): tokens.append('wh') current = current[2:] current_pos = current_pos + 2 elif current.startswith('xy'): tokens.append('xy') current = current[2:] current_pos = current_pos + 2 elif current.startswith('cxcy'): tokens.append('cxcy') current = current[4:] current_pos = current_pos + 4 elif current.startswith('n') and current_pos == 0: are_coordinates_normalized = True current = current[1:] current_pos = current_pos + 1 elif current.startswith('n') and (current_pos + 1) == len(notation): are_coordinates_normalized = True current_pos = current_pos + 1 break else: raise ValueError( f'Wrong bbox format notation - {notation}. ' f'Incorrect or unknown sequence of charecters starting from position {current_pos} ' f'(sequence: ...{notation[current_pos:]}' ) received_combination = Counter(tokens) allowed_combinations = [ {'l': 1, 'xy': 2}, {'l': 1, 'xy': 1, 'wh': 1}, {'l': 1, 'cxcy': 1, 'wh': 1} ] # All allowed combinations are also allowed with or without score to support both label and prediction allowed_combinations += [{**c, 's': 1} for c in allowed_combinations] if sum(c == received_combination for c in allowed_combinations) != 1: raise ValueError( f'Incorrect bbox format notation - {notation}.\n' 'Only next combinations of elements are allowed:\n' '+ lxyxy (label, upper-left corner, bottom-right corner)\n' '+ lxywh (label, upper-left corner, bbox width and height)\n' '+ lcxcywh (label, bbox center, bbox width and height)\n' '+ lcxcywhn (label, normalized bbox center, bbox width and height)\n\n' '' 'Note:\n' '- notation elements (l, xy, cxcy, wh) can be placed in any order ' 'but only above combinations of elements are allowed\n' '- "n" at the begining or at the ned of the notation indicates ' 'normalized coordinates\n' ) normalized_tokens = [] for t in tokens: if t == 'l': normalized_tokens.append('label') elif t == 's': normalized_tokens.append('score') elif t == 'wh': normalized_tokens.extend(('width', 'height')) elif t == 'cxcy': normalized_tokens.extend(('xcenter', 'ycenter')) elif t == 'xy': if 'xmin' not in normalized_tokens and 'ymin' not in normalized_tokens: normalized_tokens.extend(('xmin', 'ymin')) else: normalized_tokens.extend(('xmax', 'ymax')) else: raise RuntimeError('Internal Error! 
Unreachable part of code reached') return are_coordinates_normalized, normalized_tokens _BatchOfSamples = Iterable[ Tuple[ Union[Image, np.ndarray], # image Sequence[Sequence[Union[int, float]]] # bboxes ] ] def _convert_bbox( bbox: Sequence[Union[int, float]], notation_tokens: List[str], image_width: Union[int, float, None] = None, image_height: Union[int, float, None] = None ) -> np.ndarray: assert \ (image_width is not None and image_height is not None) \ or (image_width is None and image_height is None) data = dict(zip(notation_tokens, bbox)) if 'xcenter' in data and 'ycenter' in data: if image_width is not None and image_height is not None: xcenter, ycenter = data['xcenter'] * image_width, data['ycenter'] * image_height else: xcenter, ycenter = data['xcenter'], data['ycenter'] return np.asarray([ data['label'], xcenter - (data['width'] / 2), ycenter - (data['height'] / 2), data['width'], data['height'], ]) elif 'height' in data and 'width' in data: if image_width is not None and image_height is not None: xmin, ymin = data['xmin'] * image_width, data['ymin'] * image_height else: xmin, ymin = data['xmin'], data['ymin'] return np.asarray([ data['label'], xmin, ymin, data['width'], data['height'], ]) else: if image_width is not None and image_height is not None: xmin, ymin = data['xmin'] * image_width, data['ymin'] * image_height xmax, ymax = data['xmax'] * image_width, data['ymax'] * image_height else: xmin, ymin = data['xmin'], data['ymin'] xmax, ymax = data['xmax'], data['ymax'] return np.asarray([ data['label'], xmin, ymin, xmax - xmin, ymax - ymin, ]) def is_torch_object(data_object) -> bool: """Check if data_object is a torch object without failing if torch isn't installed.""" return 'torch' in str(type(data_object)) The provided code snippet includes necessary dependencies for implementing the `convert_batch_of_bboxes` function. Write a Python function `def convert_batch_of_bboxes( batch: _BatchOfSamples, notation: str ) -> List[np.ndarray]` to solve the following problem: Convert batch of bboxes to the required format. Parameters ---------- batch : iterable of tuple like object with two items - image, list of bboxes batch of images and bboxes corresponding to them notation : str bboxes format notation Returns ------- List[np.ndarray] list of transformed bboxes Here is the function: def convert_batch_of_bboxes( batch: _BatchOfSamples, notation: str ) -> List[np.ndarray]: """Convert batch of bboxes to the required format. 
Parameters ---------- batch : iterable of tuple like object with two items - image, list of bboxes batch of images and bboxes corresponding to them notation : str bboxes format notation Returns ------- List[np.ndarray] list of transformed bboxes """ are_coordinates_normalized, notation_tokens = verify_bbox_format_notation(notation) output = [] for image, bboxes in batch: if len(bboxes) == 0: # image does not have bboxes output.append(np.asarray([])) continue if are_coordinates_normalized is False: image_height = None image_width = None elif isinstance(image, Image): image_height, image_width = image.height, image.width elif is_torch_object(image) or isinstance(image, np.ndarray): image_height, image_width, *_ = image.shape else: raise TypeError( 'Do not know how to take dimension sizes of ' f'object of type - {type(image)}' ) r = [] for bbox in bboxes: if len(bbox) < 5: raise ValueError('incorrect bbox') # TODO: better message else: r.append(_convert_bbox( bbox, notation_tokens, image_width=image_width, image_height=image_height, )) output.append(np.stack(r, axis=0)) return output
Convert batch of bboxes to the required format. Parameters ---------- batch : iterable of tuple like object with two items - image, list of bboxes batch of images and bboxes corresponding to them notation : str bboxes format notation Returns ------- List[np.ndarray] list of transformed bboxes
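A sketch of converting a batch given in 'lxyxy' notation (label, upper-left corner, bottom-right corner) into the [label, xmin, ymin, width, height] arrays returned above; the image and bbox values are made up.

```python
import numpy as np

image = np.zeros((100, 200, 3), dtype=np.uint8)   # height 100, width 200
bboxes = [[1, 20, 30, 60, 90]]                    # class 1, corners (20, 30) and (60, 90)
out = convert_batch_of_bboxes([(image, bboxes)], notation='lxyxy')
print(out[0])  # [[ 1. 20. 30. 40. 60.]]
```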
626
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _sizes(batch: List[np.ndarray]): """Return list of tuples of image height and width.""" return [get_size(img) for img in batch] The provided code snippet includes necessary dependencies for implementing the `aspect_ratio` function. Write a Python function `def aspect_ratio(batch: List[np.ndarray]) -> List[float]` to solve the following problem: Return list of floats of image height to width ratio. Here is the function: def aspect_ratio(batch: List[np.ndarray]) -> List[float]: """Return list of floats of image height to width ratio.""" return [x[0] / x[1] for x in _sizes(batch)]
Return list of floats of image height to width ratio.
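A quick illustration, assuming `aspect_ratio` plus the `_sizes`/`get_size` helpers (the latter shown in a neighboring row) are in scope.

```python
import numpy as np

batch = [np.zeros((100, 200, 3)), np.zeros((50, 50, 3))]
print(aspect_ratio(batch))  # [0.5, 1.0]  (height divided by width)
```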
627
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def get_size(img) -> Tuple[int, int]: """Get size of image as (height, width) tuple.""" return img.shape[0], img.shape[1] The provided code snippet includes necessary dependencies for implementing the `area` function. Write a Python function `def area(batch: List[np.ndarray]) -> List[int]` to solve the following problem: Return list of integers of image areas (height multiplied by width). Here is the function: def area(batch: List[np.ndarray]) -> List[int]: """Return list of integers of image areas (height multiplied by width).""" return [np.prod(get_size(img)) for img in batch]
Return list of integers of image areas (height multiplied by width).
628
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _is_grayscale(img): return get_dimension(img) == 1 The provided code snippet includes necessary dependencies for implementing the `brightness` function. Write a Python function `def brightness(batch: List[np.ndarray]) -> List[float]` to solve the following problem: Calculate brightness on each image in the batch. Here is the function: def brightness(batch: List[np.ndarray]) -> List[float]: """Calculate brightness on each image in the batch.""" return [img.mean() if _is_grayscale(img) else rgb2gray(img).mean() for img in batch]
Calculate brightness on each image in the batch.
629
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _is_grayscale(img): return get_dimension(img) == 1 The provided code snippet includes necessary dependencies for implementing the `rms_contrast` function. Write a Python function `def rms_contrast(batch: List[np.array]) -> List[float]` to solve the following problem: Return RMS contrast of image. Here is the function: def rms_contrast(batch: List[np.array]) -> List[float]: """Return RMS contrast of image.""" return [img.std() if _is_grayscale(img) else rgb2gray(img).std() for img in batch]
Return RMS contrast of image.
630
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _rgb_relative_intensity_mean(batch: List[np.ndarray]) -> List[Tuple[float, float, float]]: """Calculate normalized mean for each channel (rgb) in image. The normalized mean of each channel is calculated by first normalizing the image's pixels (meaning, each color is normalized to its relevant intensity, by dividing the color intensity by the other colors). Then, the mean for each image channel is calculated. Parameters ---------- batch: List[np.ndarray] A list of arrays, each arrays represents an image in the required deepchecks format. Returns ------- List[np.ndarray]: List of 3-dimensional arrays, each dimension is the normalized mean of the color channel. An array is returned for each image. """ return [_normalize_pixelwise(img).mean(axis=(1, 2)) if not _is_grayscale(img) else (None, None, None) for img in batch] The provided code snippet includes necessary dependencies for implementing the `mean_red_relative_intensity` function. Write a Python function `def mean_red_relative_intensity(batch: List[np.ndarray]) -> List[float]` to solve the following problem: Return the mean of the red channel relative intensity. Here is the function: def mean_red_relative_intensity(batch: List[np.ndarray]) -> List[float]: """Return the mean of the red channel relative intensity.""" return [x[0] for x in _rgb_relative_intensity_mean(batch)]
Return the mean of the red channel relative intensity.
631
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _rgb_relative_intensity_mean(batch: List[np.ndarray]) -> List[Tuple[float, float, float]]: """Calculate normalized mean for each channel (rgb) in image. The normalized mean of each channel is calculated by first normalizing the image's pixels (meaning, each color is normalized to its relevant intensity, by dividing the color intensity by the other colors). Then, the mean for each image channel is calculated. Parameters ---------- batch: List[np.ndarray] A list of arrays, each arrays represents an image in the required deepchecks format. Returns ------- List[np.ndarray]: List of 3-dimensional arrays, each dimension is the normalized mean of the color channel. An array is returned for each image. """ return [_normalize_pixelwise(img).mean(axis=(1, 2)) if not _is_grayscale(img) else (None, None, None) for img in batch] The provided code snippet includes necessary dependencies for implementing the `mean_green_relative_intensity` function. Write a Python function `def mean_green_relative_intensity(batch: List[np.ndarray]) -> List[float]` to solve the following problem: Return the mean of the green channel relative intensity. Here is the function: def mean_green_relative_intensity(batch: List[np.ndarray]) -> List[float]: """Return the mean of the green channel relative intensity.""" return [x[1] for x in _rgb_relative_intensity_mean(batch)]
Return the mean of the green channel relative intensity.
632
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _rgb_relative_intensity_mean(batch: List[np.ndarray]) -> List[Tuple[float, float, float]]: """Calculate normalized mean for each channel (rgb) in image. The normalized mean of each channel is calculated by first normalizing the image's pixels (meaning, each color is normalized to its relevant intensity, by dividing the color intensity by the other colors). Then, the mean for each image channel is calculated. Parameters ---------- batch: List[np.ndarray] A list of arrays, each arrays represents an image in the required deepchecks format. Returns ------- List[np.ndarray]: List of 3-dimensional arrays, each dimension is the normalized mean of the color channel. An array is returned for each image. """ return [_normalize_pixelwise(img).mean(axis=(1, 2)) if not _is_grayscale(img) else (None, None, None) for img in batch] The provided code snippet includes necessary dependencies for implementing the `mean_blue_relative_intensity` function. Write a Python function `def mean_blue_relative_intensity(batch: List[np.ndarray]) -> List[float]` to solve the following problem: Return the mean of the blue channel relative intensity. Here is the function: def mean_blue_relative_intensity(batch: List[np.ndarray]) -> List[float]: """Return the mean of the blue channel relative intensity.""" return [x[2] for x in _rgb_relative_intensity_mean(batch)]
Return the mean of the blue channel relative intensity.
633
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _is_grayscale(img): return get_dimension(img) == 1 The provided code snippet includes necessary dependencies for implementing the `texture_level` function. Write a Python function `def texture_level(batch: List[np.ndarray]) -> List[float]` to solve the following problem: Calculate the sharpness of each image in the batch. Here is the function: def texture_level(batch: List[np.ndarray]) -> List[float]: """Calculate the sharpness of each image in the batch.""" return [Laplacian(img, CV_64F).var() if _is_grayscale(img) else Laplacian(rgb2gray(img), CV_64F).var() for img in batch]
Calculate the sharpness of each image in the batch.
634
from typing import Dict, List, Tuple import numpy as np from cv2 import CV_64F, Laplacian from skimage.color import rgb2gray def _sizes_array(batch: List[np.ndarray]): """Return an array of height and width per image (Nx2).""" return np.array(_sizes(batch)) def _rgb_relative_intensity_mean_array(batch: List[np.ndarray]) -> np.ndarray: """Return the _rgb_relative_intensity_mean result as array.""" return np.array(_rgb_relative_intensity_mean(batch)) def _is_grayscale(img): return get_dimension(img) == 1 def sample_pixels(image: np.ndarray, n_pixels: int): """Sample the image to improve runtime, expected image format H,W,C.""" flat_image = image.reshape((-1, image.shape[-1])) if flat_image.shape[0] > n_pixels: pixel_idxs = np.random.choice(flat_image.shape[0], n_pixels) else: pixel_idxs = np.arange(flat_image.shape[0]) sampled_image = flat_image[pixel_idxs, np.newaxis, :] return sampled_image The provided code snippet includes necessary dependencies for implementing the `calc_default_image_properties` function. Write a Python function `def calc_default_image_properties(batch: List[np.ndarray], sample_n_pixels: int = 10000) -> Dict[str, list]` to solve the following problem: Speed up the calculation for the default image properties by sharing common actions. Here is the function: def calc_default_image_properties(batch: List[np.ndarray], sample_n_pixels: int = 10000) -> Dict[str, list]: """Speed up the calculation for the default image properties by sharing common actions.""" if len(batch) == 0: return {} results_dict = {} sizes_array = _sizes_array(batch) results_dict['Aspect Ratio'] = list(sizes_array[:, 0] / sizes_array[:, 1]) results_dict['Area'] = list(sizes_array[:, 0] * sizes_array[:, 1]) sampled_images = [sample_pixels(img, sample_n_pixels) for img in batch] grayscale_images = [img if _is_grayscale(img) else rgb2gray(img)*255 for img in sampled_images] results_dict['Brightness'] = [image.mean() for image in grayscale_images] results_dict['RMS Contrast'] = [image.std() for image in grayscale_images] rgb_intensities = _rgb_relative_intensity_mean_array(sampled_images) results_dict['Mean Red Relative Intensity'] = rgb_intensities[:, 0].tolist() results_dict['Mean Green Relative Intensity'] = rgb_intensities[:, 1].tolist() results_dict['Mean Blue Relative Intensity'] = rgb_intensities[:, 2].tolist() return results_dict
Speed up the calculation for the default image properties by sharing common actions.
635
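A brief, hedged usage sketch: it assumes the snippet above and its unshown helpers (_sizes, _normalize_pixelwise, _is_grayscale) are in scope, and feeds in synthetic images only to show the shape of the output.

import numpy as np

rng = np.random.default_rng(seed=0)
batch = [rng.integers(0, 256, size=(48, 64, 3), dtype=np.uint8) for _ in range(8)]

props = calc_default_image_properties(batch, sample_n_pixels=1_000)
for name, values in props.items():
    print(name, len(values))  # each default property holds one value per image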
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_notebook` function. Write a Python function `def is_notebook() -> bool` to solve the following problem: Check if we're in an interactive context (Notebook, GUI support) or terminal-based. Returns ------- bool True if we are in a notebook context, False otherwise Here is the function: def is_notebook() -> bool: """Check if we're in an interactive context (Notebook, GUI support) or terminal-based. Returns ------- bool True if we are in a notebook context, False otherwise """ try: shell = get_ipython() return hasattr(shell, 'config') except NameError: return False # Probably standard Python interpreter
Check if we're in an interactive context (Notebook, GUI support) or terminal-based. Returns ------- bool True if we are in a notebook context, False otherwise
636
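A small, hedged sketch of how such a check might be used, for example to pick a plotly renderer; the renderer names are standard plotly options, not anything mandated by the snippet above.

import plotly.io as pio

# Render inline when running inside a notebook, otherwise open plots in the browser.
pio.renderers.default = 'notebook' if is_notebook() else 'browser'
print(f'Notebook context detected: {is_notebook()}')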
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_sphinx` function. Write a Python function `def is_sphinx() -> bool` to solve the following problem: Check if we're in a sphinx gallery env. Returns ------- bool True if we are in a sphinx gallery context, False otherwise Here is the function: def is_sphinx() -> bool: """Check if we're in a sphinx gallery env. Returns ------- bool True if we are in a sphinx gallery context, False otherwise """ return pio.renderers.default.startswith('sphinx_gallery')
Check if we're in a sphinx gallery env. Returns ------- bool True if we are in a sphinx gallery context, False otherwise
637
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_terminal_interactive_shell` function. Write a Python function `def is_terminal_interactive_shell() -> bool` to solve the following problem: Check whether we are in a terminal interactive shell or not. Here is the function: def is_terminal_interactive_shell() -> bool: """Check whether we are in a terminal interactive shell or not.""" return isinstance(get_ipython(), TerminalInteractiveShell)
Check whether we are in a terminal interactive shell or not.
638
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_headless` function. Write a Python function `def is_headless() -> bool` to solve the following problem: Check if the system can support GUI. Returns ------- bool True if we cannot support GUI, False otherwise Here is the function: def is_headless() -> bool: """Check if the system can support GUI. Returns ------- bool True if we cannot support GUI, False otherwise """ # pylint: disable=import-outside-toplevel try: import Tkinter as tk except ImportError: try: import tkinter as tk except ImportError: return True try: root = tk.Tk() except tk.TclError: return True root.destroy() return False
Check if the system can support GUI. Returns ------- bool True if we cannot support GUI, False otherwise
639
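A hedged usage sketch; matplotlib appears here only as an illustrative consumer of the check and is not required by the snippet above.

import matplotlib

# On a machine without a display server, switch to a non-interactive backend
# so that downstream plotting code does not fail when it tries to open a window.
if is_headless():
    matplotlib.use('Agg')
print(f'Headless environment: {is_headless()}')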
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_colab_env` function. Write a Python function `def is_colab_env() -> bool` to solve the following problem: Check if we are in the google colab environment. Here is the function: def is_colab_env() -> bool: """Check if we are in the google colab environment.""" return 'google.colab' in str(get_ipython())
Check if we are in the google colab environment.
640
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_kaggle_env` function. Write a Python function `def is_kaggle_env() -> bool` to solve the following problem: Check if we are in the kaggle environment. Here is the function: def is_kaggle_env() -> bool: """Check if we are in the kaggle environment.""" return os.environ.get('KAGGLE_KERNEL_RUN_TYPE') is not None
Check if we are in the kaggle environment.
641
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_databricks_env` function. Write a Python function `def is_databricks_env() -> bool` to solve the following problem: Check if we are in the databricks environment. Here is the function: def is_databricks_env() -> bool: """Check if we are in the databricks environment.""" return 'DATABRICKS_RUNTIME_VERSION' in os.environ
Check if we are in the databricks environment.
642
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity The provided code snippet includes necessary dependencies for implementing the `is_sagemaker_env` function. Write a Python function `def is_sagemaker_env() -> bool` to solve the following problem: Check if we are in the AWS Sagemaker environment. Here is the function: def is_sagemaker_env() -> bool: """Check if we are in the AWS Sagemaker environment.""" return 'AWS_PATH' in os.environ
Check if we are in the AWS Sagemaker environment.
643
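A combined, hedged sketch covering the environment-detection helpers in the records above (is_colab_env, is_kaggle_env, is_databricks_env, is_sagemaker_env); it assumes those functions are in scope.

# Report which managed notebook environment, if any, the code is running in.
detected = {
    'Google Colab': is_colab_env(),
    'Kaggle': is_kaggle_env(),
    'Databricks': is_databricks_env(),
    'AWS SageMaker': is_sagemaker_env(),
}
matches = [name for name, found in detected.items() if found]
print('Detected environment:', matches[0] if matches else 'none of the managed platforms')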
import logging import os import time import typing as t from functools import lru_cache import plotly.io as pio import tqdm from ipykernel.zmqshell import ZMQInteractiveShell from IPython import get_ipython from IPython.display import display from IPython.terminal.interactiveshell import TerminalInteractiveShell from tqdm.notebook import tqdm as tqdm_notebook from deepchecks.utils.logger import get_verbosity def is_zmq_interactive_shell() -> bool: """Check whether we are in a web-based interactive shell or not.""" return isinstance(get_ipython(), ZMQInteractiveShell) class HtmlProgressBar: """Progress bar implementation that uses html <progress> tag.""" STYLE = """ <style> progress { -webkit-appearance: none; border: none; border-radius: 3px; width: 300px; height: 20px; vertical-align: middle; margin-right: 10px; background-color: aliceblue; } progress::-webkit-progress-bar { border-radius: 3px; background-color: aliceblue; } progress::-webkit-progress-value { background-color: #9d60fb; } progress::-moz-progress-bar { background-color: #9d60fb; } </style> """ def __init__( self, title: str, unit: str, iterable: t.Iterable[t.Any], total: int, metadata: t.Optional[t.Mapping[str, t.Any]] = None, display_immediately: bool = False, disable: bool = False, ): self._title = title self._unit = unit self._iterable = iterable self._total = total self._seconds_passed = 0 self._inital_metadata = dict(metadata) if metadata else {} self._metadata = self._inital_metadata.copy() self._progress_bar = None self._current_item_index = 0 display({'text/html': self.STYLE}, raw=True) self._display_handler = display({'text/html': ''}, raw=True, display_id=True) self._disable = disable self._reuse_counter = 0 if disable is False and display_immediately is True: self.refresh() def __iter__(self): """Iterate over iterable.""" if self._disable is True: try: for it in self._iterable: yield it finally: self._reuse_counter += 1 return if self._reuse_counter > 0: self._seconds_passed = 0 self._current_item_index = 0 self._progress_bar = None self._metadata = self._inital_metadata self.clean() started_at = time.time() try: self.refresh() for i, it in enumerate(self._iterable, start=1): yield it self._current_item_index = i self._seconds_passed = int(time.time() - started_at) self.refresh() finally: self._reuse_counter += 1 self.close() def refresh(self): """Refresh progress bar.""" self.progress_bar = self.create_progress_bar( title=self._title, item=self._current_item_index, total=self._total, seconds_passed=self._seconds_passed, metadata=self._metadata ) self._display_handler.update( {'text/html': self.progress_bar}, raw=True ) def close(self): """Close progress bar.""" self._display_handler.update({'text/html': ''}, raw=True) def clean(self): """Clean display cell.""" self._display_handler.update({'text/html': ''}, raw=True) def set_postfix(self, data: t.Mapping[str, t.Any], refresh: bool = True): """Set postfix.""" self.update_metadata(data, refresh) def reset_metadata(self, data: t.Mapping[str, t.Any], refresh: bool = True): """Reset metadata.""" self._metadata = dict(data) if refresh is True: self.refresh() def update_metadata(self, data: t.Mapping[str, t.Any], refresh: bool = True): """Update metadata.""" self._metadata.update(data) if refresh is True: self.refresh() def create_label( cls, item: int, total: int, seconds_passed: int, metadata: t.Optional[t.Mapping[str, t.Any]] = None ): """Create progress bar label.""" minutes = seconds_passed // 60 seconds = seconds_passed - (minutes * 60) minutes = f'0{minutes}' if 
minutes < 10 else str(minutes) seconds = f'0{seconds}' if seconds < 10 else str(seconds) if metadata: metadata_string = ', '.join(f'{k}={str(v)}' for k, v in metadata.items()) metadata_string = f', {metadata_string}' else: metadata_string = '' return f'{item}/{total} [Time: {minutes}:{seconds}{metadata_string}]' def create_progress_bar( cls, title: str, item: int, total: int, seconds_passed: int, metadata: t.Optional[t.Mapping[str, t.Any]] = None ) -> str: """Create progress bar.""" return f""" <div> <label> {title}:<br/> <progress value='{item}' max='{total}' class='deepchecks' > </progress> </label> <span>{cls.create_label(item, total, seconds_passed, metadata)}</span> </div> """ from typing import List def get_verbosity() -> int: """Return the deepchecks logger verbosity level. Same as doing logging.getLogger('deepchecks').getEffectiveLevel(). """ return _logger.getEffectiveLevel() The provided code snippet includes necessary dependencies for implementing the `create_progress_bar` function. Write a Python function `def create_progress_bar( name: str, unit: str, total: t.Optional[int] = None, iterable: t.Optional[t.Sequence[t.Any]] = None, ) -> t.Union[ tqdm_notebook, HtmlProgressBar, tqdm.tqdm ]` to solve the following problem: Create a progress bar instance. Here is the function: def create_progress_bar( name: str, unit: str, total: t.Optional[int] = None, iterable: t.Optional[t.Sequence[t.Any]] = None, ) -> t.Union[ tqdm_notebook, HtmlProgressBar, tqdm.tqdm ]: """Create a progress bar instance.""" if iterable is not None: iterlen = len(iterable) elif total is not None: iterlen = total else: raise ValueError( 'at least one of the parameters iterable | total must be not None' ) is_disabled = get_verbosity() >= logging.WARNING if is_zmq_interactive_shell(): return HtmlProgressBar( title=name, unit=unit, total=iterlen, iterable=iterable or range(iterlen), display_immediately=True, disable=is_disabled ) else: barlen = iterlen if iterlen > 5 else 5 rbar = ' {n_fmt}/{total_fmt} [Time: {elapsed}{postfix}]' bar_format = f'{{desc}}:\n|{{bar:{barlen}}}|{rbar}' return tqdm.tqdm( iterable=iterable, total=total, desc=name, unit=f' {unit}', leave=False, bar_format=bar_format, disable=is_disabled, )
Create a progress bar instance.
644
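A hedged usage sketch for create_progress_bar, assuming the snippet above (including HtmlProgressBar, is_zmq_interactive_shell and the deepchecks logger behind get_verbosity) is available; the per-item sleep stands in for real work.

import time

items = list(range(20))

# In a Jupyter kernel this yields an HtmlProgressBar; in a plain terminal it falls back to tqdm.
progress = create_progress_bar(name='Processing batches', unit='batch', iterable=items)
for _ in progress:
    time.sleep(0.05)  # placeholder for per-batch work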
import math from collections import Counter from typing import List, Union import numpy as np import pandas as pd from scipy.stats import entropy from deepchecks.utils.distribution.preprocessing import value_frequency def theil_u_correlation(x: Union[List, np.ndarray, pd.Series], y: Union[List, np.ndarray, pd.Series]) -> float: """ Calculate the Theil's U correlation of y to x. Theil's U is an asymmetric measure ranges [0,1] based on entropy which answers the question: how well does variable y explains variable x? For more information see https://en.wikipedia.org/wiki/Uncertainty_coefficient Parameters ---------- x: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls y: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls Returns ------- float Representing the Theil U correlation between y and x """ s_xy = conditional_entropy(x, y) values_probabilities = value_frequency(x) s_x = entropy(values_probabilities) if s_x == 0: return 1 else: return (s_x - s_xy) / s_x def value_frequency(x: Union[List, np.ndarray, pd.Series]) -> List[float]: """ Calculate the value frequency of x. Parameters: ----------- x: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values. Returns: -------- List[float] Representing the value frequency of x. """ x_values_counter = Counter(x) total_occurrences = len(x) values_probabilities = list(map(lambda n: n / total_occurrences, x_values_counter.values())) return values_probabilities The provided code snippet includes necessary dependencies for implementing the `symmetric_theil_u_correlation` function. Write a Python function `def symmetric_theil_u_correlation(x: Union[List, np.ndarray, pd.Series], y: Union[List, np.ndarray, pd.Series]) -> float` to solve the following problem: Calculate the symmetric Theil's U correlation of y to x. Parameters ---------- x: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls y: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls Returns ------- float Representing the symmetric Theil U correlation between y and x Here is the function: def symmetric_theil_u_correlation(x: Union[List, np.ndarray, pd.Series], y: Union[List, np.ndarray, pd.Series]) -> \ float: """ Calculate the symmetric Theil's U correlation of y to x. Parameters ---------- x: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls y: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls Returns ------- float Representing the symmetric Theil U correlation between y and x """ h_x = entropy(value_frequency(x)) h_y = entropy(value_frequency(y)) u_xy = theil_u_correlation(x, y) u_yx = theil_u_correlation(y, x) # pylint: disable=arguments-out-of-order u_sym = (h_x * u_xy + h_y * u_yx) / (h_x + h_y) return u_sym
Calculate the symmetric Theil's U correlation of y to x. Parameters ---------- x: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls y: Union[List, np.ndarray, pd.Series] A sequence of a categorical variable values without nulls Returns ------- float Representing the symmetric Theil U correlation between y and x
645
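A hedged usage sketch, assuming the snippet above and the unshown conditional_entropy helper are in scope; the two toy categorical variables are illustrative only.

# Two categorical variables with a clear association.
fruit = ['apple', 'apple', 'banana', 'banana', 'banana', 'cherry']
color = ['red', 'red', 'yellow', 'yellow', 'yellow', 'red']

score = symmetric_theil_u_correlation(fruit, color)
print(f'Symmetric Theil U: {score:.3f}')  # value in [0, 1]; higher means stronger association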
import math from collections import Counter from typing import List, Union import numpy as np import pandas as pd from scipy.stats import entropy from deepchecks.utils.distribution.preprocessing import value_frequency The provided code snippet includes necessary dependencies for implementing the `correlation_ratio` function. Write a Python function `def correlation_ratio(categorical_data: Union[List, np.ndarray, pd.Series], numerical_data: Union[List, np.ndarray, pd.Series], ignore_mask: Union[List[bool], np.ndarray] = None) -> float` to solve the following problem: Calculate the correlation ratio of numerical_variable to categorical_variable. Correlation ratio is a symmetric grouping based method that describe the level of correlation between a numeric variable and a categorical variable. returns a value in [0,1]. For more information see https://en.wikipedia.org/wiki/Correlation_ratio Parameters ---------- categorical_data: Union[List, np.ndarray, pd.Series] A sequence of categorical values encoded as class indices without nulls except possibly at ignored elements numerical_data: Union[List, np.ndarray, pd.Series] A sequence of numerical values without nulls except possibly at ignored elements ignore_mask: Union[List[bool], np.ndarray[bool]] default: None A sequence of boolean values indicating which elements to ignore. If None, includes all indexes. Returns ------- float Representing the correlation ratio between the variables. Here is the function: def correlation_ratio(categorical_data: Union[List, np.ndarray, pd.Series], numerical_data: Union[List, np.ndarray, pd.Series], ignore_mask: Union[List[bool], np.ndarray] = None) -> float: """ Calculate the correlation ratio of numerical_variable to categorical_variable. Correlation ratio is a symmetric grouping based method that describe the level of correlation between a numeric variable and a categorical variable. returns a value in [0,1]. For more information see https://en.wikipedia.org/wiki/Correlation_ratio Parameters ---------- categorical_data: Union[List, np.ndarray, pd.Series] A sequence of categorical values encoded as class indices without nulls except possibly at ignored elements numerical_data: Union[List, np.ndarray, pd.Series] A sequence of numerical values without nulls except possibly at ignored elements ignore_mask: Union[List[bool], np.ndarray[bool]] default: None A sequence of boolean values indicating which elements to ignore. If None, includes all indexes. Returns ------- float Representing the correlation ratio between the variables. """ if ignore_mask: numerical_data = numerical_data[~np.asarray(ignore_mask)] categorical_data = categorical_data[~np.asarray(ignore_mask)] cat_num = int(np.max(categorical_data) + 1) y_avg_array = np.zeros(cat_num) n_array = np.zeros(cat_num) for i in range(cat_num): cat_measures = numerical_data[categorical_data == i] n_array[i] = cat_measures.shape[0] y_avg_array[i] = np.average(cat_measures.astype(float)) # Cast to float to avoid error in python 3.6 y_total_avg = np.sum(np.multiply(y_avg_array, n_array)) / np.sum(n_array) numerator = np.sum(np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2))) denominator = np.sum(np.power(np.subtract(numerical_data, y_total_avg), 2)) if denominator == 0: eta = 0 else: eta = np.sqrt(numerator / denominator) return eta
Calculate the correlation ratio of numerical_variable to categorical_variable. Correlation ratio is a symmetric grouping-based method that describes the level of correlation between a numeric variable and a categorical variable, and returns a value in [0,1]. For more information see https://en.wikipedia.org/wiki/Correlation_ratio Parameters ---------- categorical_data: Union[List, np.ndarray, pd.Series] A sequence of categorical values encoded as class indices without nulls except possibly at ignored elements numerical_data: Union[List, np.ndarray, pd.Series] A sequence of numerical values without nulls except possibly at ignored elements ignore_mask: Union[List[bool], np.ndarray[bool]], default: None A sequence of boolean values indicating which elements to ignore. If None, includes all indexes. Returns ------- float Representing the correlation ratio between the variables.
646
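A minimal usage sketch; with the function above in scope it needs only numpy, and the toy data is chosen so the categories explain the numeric values almost perfectly.

import numpy as np

# Categories encoded as class indices starting at 0; numeric values cluster per category.
categories = np.array([0, 0, 1, 1, 2, 2])
values = np.array([1.0, 1.1, 3.0, 3.2, 5.1, 4.9])

eta = correlation_ratio(categories, values)
print(f'Correlation ratio: {eta:.3f}')  # close to 1.0 for this well-separated toy data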
import contextlib import typing as t WANDB_INSTALLATION_CMD = 'pip install wandb' from typing import List The provided code snippet includes necessary dependencies for implementing the `wandb_run` function. Write a Python function `def wandb_run( project: t.Optional[str] = None, **kwargs ) -> t.Iterator[t.Any]` to solve the following problem: Create new one or use existing wandb run instance. Parameters ---------- project : Optional[str], default None project name **kwargs : additional parameters that will be passed to the 'wandb.init' Returns ------- Iterator[wandb.sdk.wandb_run.Run] Here is the function: def wandb_run( project: t.Optional[str] = None, **kwargs ) -> t.Iterator[t.Any]: """Create new one or use existing wandb run instance. Parameters ---------- project : Optional[str], default None project name **kwargs : additional parameters that will be passed to the 'wandb.init' Returns ------- Iterator[wandb.sdk.wandb_run.Run] """ try: import wandb except ImportError as error: raise ImportError( '"wandb_run" requires the wandb python package. ' f'To get it, run - {WANDB_INSTALLATION_CMD}.' ) from error else: if wandb.run is not None: yield wandb.run else: kwargs = {'project': project or 'deepchecks', **kwargs} with t.cast(t.ContextManager, wandb.init(**kwargs)) as run: wandb.run._label(repo='Deepchecks') # pylint: disable=protected-access yield run
Create new one or use existing wandb run instance. Parameters ---------- project : Optional[str], default None project name **kwargs : additional parameters that will be passed to the 'wandb.init' Returns ------- Iterator[wandb.sdk.wandb_run.Run]
647
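A hedged usage sketch: it assumes the generator above is wrapped with contextlib.contextmanager (as the contextlib import suggests), that the wandb package is installed and authenticated, and that the project name and logged metrics below are placeholders.

# Log a couple of placeholder metrics inside a new or existing wandb run.
with wandb_run(project='drift-monitoring-demo') as run:
    run.log({'accuracy': 0.93, 'drift_score': 0.07})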
import copy import warnings from collections import Counter from typing import List, Tuple, Union import numpy as np import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from sklearn.impute import SimpleImputer from sklearn.preprocessing import MinMaxScaler from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder from deepchecks.utils.typing import Hashable The provided code snippet includes necessary dependencies for implementing the `convert_multi_label_to_multi_class` function. Write a Python function `def convert_multi_label_to_multi_class(predictions: np.ndarray, model_classes: List[str]) -> np.ndarray` to solve the following problem: Convert multi-label predictions to multi class format like predictions. Here is the function: def convert_multi_label_to_multi_class(predictions: np.ndarray, model_classes: List[str]) -> np.ndarray: """Convert multi-label predictions to multi class format like predictions.""" predictions = np.asarray(predictions) samples_per_class = np.nansum(np.where(predictions is None, np.nan, predictions), axis=0) # Ignoring nan values all_predictions = [[cls] * int(num_samples) for cls, num_samples in zip(model_classes, samples_per_class)] return np.asarray([item for sublist in all_predictions for item in sublist]).reshape((-1, 1))
Convert multi-label predictions into predictions in a multi-class-like format.
648
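A minimal usage sketch; with the function above in scope it needs only numpy, and the prediction matrix below is synthetic.

import numpy as np

# A 4-sample, 3-class multi-label prediction matrix (1 = class predicted for that sample).
predictions = np.array([
    [1, 0, 1],
    [0, 1, 0],
    [1, 1, 0],
    [0, 0, 1],
])
model_classes = ['cat', 'dog', 'fish']

# Each class name is repeated once per positive prediction, producing a multi-class-style column vector.
print(convert_multi_label_to_multi_class(predictions, model_classes))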
from numbers import Number from typing import Dict, Optional, Tuple, Union import numpy as np import pandas as pd from plotly.graph_objs import Figure from plotly.subplots import make_subplots from scipy.stats import chi2_contingency, wasserstein_distance from deepchecks.core import ConditionCategory, ConditionResult from deepchecks.core.errors import DeepchecksValueError, NotEnoughSamplesError from deepchecks.utils.dict_funcs import get_dict_entry_by_value from deepchecks.utils.distribution.plot import (CategoriesSortingKind, drift_score_bar_traces, feature_distribution_traces) from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins from deepchecks.utils.plot import DEFAULT_DATASET_NAMES from deepchecks.utils.strings import format_number def cramers_v(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series], balance_classes: bool = False, min_category_size_ratio: float = 0, max_num_categories: int = None, sort_by: str = 'dist1', from_freqs: bool = False) -> float: """Calculate the Cramer's V statistic. For more on Cramer's V, see https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V Uses the Cramer's V bias correction, see http://stats.lse.ac.uk/bergsma/pdf/cramerV3.pdf Function is for categorical data only. Parameters ---------- dist1 : Union[np.ndarray, pd.Series] array of numerical values. dist2 : Union[np.ndarray, pd.Series] array of numerical values to compare dist1 to. balance_classes : bool, default False whether to balance the classes of the distributions. Use this in case of extremely unbalanced classes. min_category_size_ratio: float, default 0.01 minimum size ratio for categories. Categories with size ratio lower than this number are binned into an "Other" category. max_num_categories: int, default: None max number of allowed categories. If there are more categories than this number, categories are ordered by magnitude and all the smaller categories are binned into an "Other" category. If max_num_categories=None, there is no limit. > Note that if this parameter is used, the ordering of categories (and by extension, the decision which categories are kept by name and which are binned to the "Other" category) is done by default according to the values of dist1, which is treated as the "expected" distribution. This behavior can be changed by using the sort_by parameter. sort_by: str, default: 'dist1' Specify how categories should be sorted, affecting which categories will get into the "Other" category. Possible values: - 'dist1': Sort by the largest dist1 categories. - 'dist2': Sort by the largest dist2 categories. - 'difference': Sort by the largest difference between categories. > Note that this parameter has no effect if max_num_categories = None or there are not enough unique categories. from_freqs: bool, default: False Whether the data is already in the form of frequencies. Returns ------- float the bias-corrected Cramer's V value of the 2 distributions. 
""" # If balance_classes is True, min_category_size_ratio should not affect results: min_category_size_ratio = min_category_size_ratio if balance_classes is False else 0 if from_freqs: dist1_counts, dist2_counts = dist1, dist2 else: dist1_counts, dist2_counts, cat_list = preprocess_2_cat_cols_to_same_bins(dist1, dist2, min_category_size_ratio, max_num_categories, sort_by) if len(cat_list) == 1: # If the distributions have the same single value return 0 if balance_classes is True: dist1_counts, dist2_counts = rebalance_distributions(dist1_counts, dist2_counts) else: dist1_counts, dist2_counts = _balance_sizes_downsizing(dist1_counts, dist2_counts) contingency_matrix = pd.DataFrame([dist1_counts, dist2_counts], dtype=int) # filter all columns that have all 0 values contingency_matrix = contingency_matrix.loc[:, (contingency_matrix != 0).any(axis=0)] # Based on https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V# bias correction method # noqa: SC100 chi2 = chi2_contingency(contingency_matrix)[0] n = contingency_matrix.sum().sum() phi2 = chi2 / n r, k = contingency_matrix.shape phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1)) rcorr = r - ((r - 1) ** 2) / (n - 1) kcorr = k - ((k - 1) ** 2) / (n - 1) return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1))) def psi(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series], min_category_size_ratio: float = 0, max_num_categories: int = None, sort_by: str = 'dist1', from_freqs: bool = False) -> float: """ Calculate the PSI (Population Stability Index). See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf Parameters ---------- dist1 : Union[np.ndarray, pd.Series] array of numerical values. dist2 : Union[np.ndarray, pd.Series] array of numerical values to compare dist1 to. min_category_size_ratio: float, default 0.01 minimum size ratio for categories. Categories with size ratio lower than this number are binned into an "Other" category. max_num_categories: int, default: None max number of allowed categories. If there are more categories than this number, categories are ordered by magnitude and all the smaller categories are binned into an "Other" category. If max_num_categories=None, there is no limit. > Note that if this parameter is used, the ordering of categories (and by extension, the decision which categories are kept by name and which are binned to the "Other" category) is done by default according to the values of dist1, which is treated as the "expected" distribution. This behavior can be changed by using the sort_by parameter. sort_by: str, default: 'dist1' Specify how categories should be sorted, affecting which categories will get into the "Other" category. Possible values: - 'dist1': Sort by the largest dist1 categories. - 'dist2': Sort by the largest dist2 categories. - 'difference': Sort by the largest difference between categories. > Note that this parameter has no effect if max_num_categories = None or there are not enough unique categories. from_freqs: bool, default: False Whether the data is already in the form of frequencies. 
Returns ------- psi The PSI score """ if from_freqs: expected_counts, actual_counts = dist1, dist2 else: expected_counts, actual_counts, _ = preprocess_2_cat_cols_to_same_bins(dist1, dist2, min_category_size_ratio, max_num_categories, sort_by) size_expected, size_actual = sum(expected_counts), sum(actual_counts) psi_value = 0 for i in range(len(expected_counts)): # In order for the value not to diverge, we cap our min percentage value e_perc = max(expected_counts[i] / size_expected, PSI_MIN_PERCENTAGE) a_perc = max(actual_counts[i] / size_actual, PSI_MIN_PERCENTAGE) value = (e_perc - a_perc) * np.log(e_perc / a_perc) psi_value += value return psi_value def kolmogorov_smirnov(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]) -> float: """ Perform the two-sample Kolmogorov-Smirnov test for goodness of fit. This test compares the underlying continuous distributions F(x) and G(x) of two independent samples. This function is based on the ks_2samp function from scipy.stats, but it only calculates the test statistic. This is useful for large datasets, where the p-value is not needed. Also, this function assumes the alternative hypothesis is two-sided (F(x)!= G(x)). Parameters ---------- dist1, dist2 : array_like, 1-Dimensional Two arrays of sample observations assumed to be drawn from a continuous distribution, sample sizes can be different. Returns ------- statistic : float KS statistic. License ---------- This is a modified version of the ks_2samp function from scipy.stats. The original license is as follows: Copyright (c) 2001-2002 Enthought, Inc. 2003-2023, SciPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" if np.ma.is_masked(dist1): dist1 = dist1.compressed() if np.ma.is_masked(dist2): dist2 = dist2.compressed() dist1 = np.sort(dist1) dist2 = np.sort(dist2) n1 = dist1.shape[0] n2 = dist2.shape[0] if min(n1, n2) == 0: raise ValueError('Data must not be empty') data_all = np.concatenate([dist1, dist2]) # using searchsorted solves equal data problem cdf1 = np.searchsorted(dist1, data_all, side='right') / n1 cdf2 = np.searchsorted(dist2, data_all, side='right') / n2 cddiffs = np.abs(cdf1 - cdf2) return np.max(cddiffs) def earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series], margin_quantile_filter: float) -> float: """ Calculate the Earth Movers Distance (Wasserstein distance). See https://en.wikipedia.org/wiki/Wasserstein_metric Function is for numerical data only. Parameters ---------- dist1: Union[np.ndarray, pd.Series] array of numerical values. dist2: Union[np.ndarray, pd.Series] array of numerical values to compare dist1 to. margin_quantile_filter: float float in range [0,0.5), representing which margins (high and low quantiles) of the distribution will be filtered out of the EMD calculation. This is done in order for extreme values not to affect the calculation disproportionally. This filter is applied to both distributions, in both margins. Returns ------- Any the Wasserstein distance between the two distributions. Raises ------- DeepchecksValueError if the value of margin_quantile_filter is not in range [0, 0.5) """ if not isinstance(margin_quantile_filter, Number) or margin_quantile_filter < 0 or margin_quantile_filter >= 0.5: raise DeepchecksValueError( f'margin_quantile_filter expected a value in range [0, 0.5), instead got {margin_quantile_filter}') if margin_quantile_filter != 0: dist1 = filter_margins_by_quantile(dist1, margin_quantile_filter) dist2 = filter_margins_by_quantile(dist2, margin_quantile_filter) val_max = np.max([np.max(dist1), np.max(dist2)]) val_min = np.min([np.min(dist1), np.min(dist2)]) if val_max == val_min: return 0 # Scale the distribution between 0 and 1: dist1 = (dist1 - val_min) / (val_max - val_min) dist2 = (dist2 - val_min) / (val_max - val_min) return wasserstein_distance(dist1, dist2) class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass class NotEnoughSamplesError(DeepchecksBaseError): """Represents a failure in calculation due to insufficient amount of samples.""" pass def drift_score_bar_traces(drift_score: float, bar_max: float = None) -> Tuple[List[go.Bar], Dict, Dict]: """Create a traffic light bar traces for drift score. Parameters ---------- drift_score : float Drift score bar_max : float , default: None Maximum value for the bar Returns ------- Tuple[List[go.Bar], Dict, Dict] list of plotly bar traces. 
""" traffic_light_colors = [((0, 0.1), '#01B8AA'), ((0.1, 0.2), '#F2C80F'), ((0.2, 0.3), '#FE9666'), ((0.3, 1), '#FD625E') ] bars = [] for range_tuple, color in traffic_light_colors: if drift_score < range_tuple[0]: break bars.append(go.Bar( x=[min(drift_score, range_tuple[1]) - range_tuple[0]], y=['Drift Score'], orientation='h', marker=dict( color=color, ), offsetgroup=0, base=range_tuple[0], showlegend=False )) bar_stop = max(0.4, drift_score + 0.1) if bar_max: bar_stop = min(bar_stop, bar_max) xaxis = dict( showgrid=False, gridcolor='black', linecolor='black', range=[0, bar_stop], dtick=0.05, fixedrange=True ) yaxis = dict( showgrid=False, showline=False, showticklabels=False, zeroline=False, color='black', autorange=True, rangemode='normal', fixedrange=True ) return bars, xaxis, yaxis CategoriesSortingKind = L['train_largest', 'test_largest', 'largest_difference'] def feature_distribution_traces( train_column: Union[np.ndarray, pd.Series], test_column: Union[np.ndarray, pd.Series], column_name: str, is_categorical: bool = False, max_num_categories: int = 10, show_categories_by: CategoriesSortingKind = 'largest_difference', quantile_cut: float = 0.02, dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES ) -> Tuple[List[BaseTraceType], Dict, Dict]: """Create traces for comparison between train and test column. Parameters ---------- train_column Train data used to trace distribution. test_column Test data used to trace distribution. column_name The name of the column values on the x axis. is_categorical : bool , default: False State if column is categorical. max_num_categories : int , default: 10 Maximum number of categories to show in plot (default: 10). show_categories_by: str, default: 'largest_difference' Specify which categories to show for categorical features' graphs, as the number of shown categories is limited by max_num_categories_for_display. Possible values: - 'train_largest': Show the largest train categories. - 'test_largest': Show the largest test categories. - 'largest_difference': Show the largest difference between categories. quantile_cut : float , default: 0.02 In which quantile to cut the edges of the plot. dataset_names: tuple, default: DEFAULT_DATASET_NAMES The names to show in the display for the first and second datasets. Returns ------- List[Union[go.Bar, go.Scatter]] list of plotly traces. Dict layout of x axis Dict layout of y axis """ if is_categorical: traces, y_layout = _create_distribution_bar_graphs(train_column, test_column, max_num_categories, show_categories_by, dataset_names=dataset_names) # NOTE: # the range, in this case, is needed to fix a problem with # too wide bars when there are only one or two of them`s on # the plot, plus it also centralizes them`s on the plot # The min value of the range (range(min. 
max)) is bigger because # otherwise bars will not be centralized on the plot, they will # appear on the left part of the plot (that is probably because of zero) range_max = max_num_categories if len(set(train_column).union(test_column)) > max_num_categories \ else len(set(train_column).union(test_column)) xaxis_layout = dict(type='category', range=(-3, range_max + 2)) return traces, xaxis_layout, y_layout else: train_uniques, train_uniques_counts = np.unique(train_column, return_counts=True) test_uniques, test_uniques_counts = np.unique(test_column, return_counts=True) x_range = ( min(train_column.min(), test_column.min()), max(train_column.max(), test_column.max()) ) x_width = x_range[1] - x_range[0] # If there are less than 20 total unique values, draw bar graph train_test_uniques = np.unique(np.concatenate([train_uniques, test_uniques])) if train_test_uniques.size < MAX_NUMERICAL_UNIQUE_FOR_BARS: traces, y_layout = _create_distribution_bar_graphs(train_column, test_column, 20, show_categories_by, dataset_names=dataset_names) x_range = (x_range[0] - x_width * 0.2, x_range[1] + x_width * 0.2) xaxis_layout = dict(ticks='outside', tickmode='array', tickvals=train_test_uniques, range=x_range) return traces, xaxis_layout, y_layout x_range_to_show = ( min(np.quantile(train_column, quantile_cut), np.quantile(test_column, quantile_cut)), max(np.quantile(train_column, 1 - quantile_cut), np.quantile(test_column, 1 - quantile_cut)) ) # Heuristically take points on x-axis to show on the plot # The intuition is the graph will look "smooth" wherever we will zoom it # Also takes mean and median values in order to plot it later accurately mean_train_column = np.mean(train_column) mean_test_column = np.mean(test_column) median_train_column = np.median(train_column) median_test_column = np.median(test_column) xs = sorted(np.concatenate(( np.linspace(x_range[0], x_range[1], 50), np.quantile(train_column, q=np.arange(0.02, 1, 0.02)), np.quantile(test_column, q=np.arange(0.02, 1, 0.02)), [mean_train_column, mean_test_column, median_train_column, median_test_column] ))) train_density = get_density(train_column, xs) test_density = get_density(test_column, xs) bars_width = (x_range_to_show[1] - x_range_to_show[0]) / 100 traces: List[go.BaseTraceType] = [] if train_uniques.size <= MAX_NUMERICAL_UNIQUES_FOR_SINGLE_DIST_BARS: traces.append(go.Bar( x=train_uniques, y=_create_bars_data_for_mixed_kde_plot(train_uniques_counts, np.max(test_density)), width=[bars_width] * train_uniques.size, marker=dict(color=colors[DEFAULT_DATASET_NAMES[0]]), name=dataset_names[0] + ' Dataset', )) else: traces.extend(_create_distribution_scatter_plot(xs, train_density, mean_train_column, median_train_column, is_train=True, dataset_names=dataset_names)) if test_uniques.size <= MAX_NUMERICAL_UNIQUES_FOR_SINGLE_DIST_BARS: traces.append(go.Bar( x=test_uniques, y=_create_bars_data_for_mixed_kde_plot(test_uniques_counts, np.max(train_density)), width=[bars_width] * test_uniques.size, marker=dict( color=colors[DEFAULT_DATASET_NAMES[1]] ), name=dataset_names[1] + ' Dataset', )) else: traces.extend(_create_distribution_scatter_plot(xs, test_density, mean_test_column, median_test_column, is_train=False, dataset_names=dataset_names)) xaxis_layout = dict(fixedrange=False, range=x_range_to_show, title=column_name) yaxis_layout = dict(title='Probability Density', fixedrange=True) return traces, xaxis_layout, yaxis_layout DEFAULT_DATASET_NAMES = ('Train', 'Test') The provided code snippet includes necessary dependencies for implementing the 
`calc_drift_and_plot` function. Write a Python function `def calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, value_name: str, column_type: str, plot_title: Optional[str] = None, margin_quantile_filter: float = 0.025, max_num_categories_for_drift: Optional[int] = None, min_category_size_ratio: float = 0.01, max_num_categories_for_display: int = 10, show_categories_by: CategoriesSortingKind = 'largest_difference', numerical_drift_method: str = 'KS', categorical_drift_method: str = 'cramers_v', balance_classes: bool = False, ignore_na: bool = True, min_samples: int = 10, raise_min_samples_error: bool = False, with_display: bool = True, dataset_names: Tuple[str, str] = DEFAULT_DATASET_NAMES ) -> Tuple[float, str, Optional[Figure]]` to solve the following problem: Calculate drift score per column. Parameters ---------- train_column: pd.Series column from train dataset test_column: pd.Series same column from test dataset value_name: str title of the x axis, if plot_title is None then also the title of the whole plot. column_type: str type of column (either "numerical" or "categorical") plot_title: str or None if None use value_name as title otherwise use this. margin_quantile_filter: float, default: 0.025 float in range [0,0.5), representing which margins (high and low quantiles) of the distribution will be filtered out of the EMD calculation. This is done in order for extreme values not to affect the calculation disproportionally. This filter is applied to both distributions, in both margins. min_category_size_ratio: float, default 0.01 minimum size ratio for categories. Categories with size ratio lower than this number are binned into an "Other" category. max_num_categories_for_drift: int, default: None Max number of allowed categories. If there are more, they are binned into an "Other" category. max_num_categories_for_display: int, default: 10 Max number of categories to show in plot. show_categories_by: str, default: 'largest_difference' Specify which categories to show for categorical features' graphs, as the number of shown categories is limited by max_num_categories_for_display. Possible values: - 'train_largest': Show the largest train categories. - 'test_largest': Show the largest test categories. - 'largest_difference': Show the largest difference between categories. numerical_drift_method: str, default: "KS" decides which method to use on numerical variables. Possible values are: "EMD" for Earth Mover's Distance (EMD), "KS" for Kolmogorov-Smirnov (KS). categorical_drift_method: str, default: "cramers_v" decides which method to use on categorical variables. Possible values are: "cramers_v" for Cramer's V, "PSI" for Population Stability Index (PSI). balance_classes: bool, default: False If True, all categories will have an equal weight in the Cramer's V score. This is useful when the categorical variable is highly imbalanced, and we want to be alerted on changes in proportion to the category size, and not only to the entire dataset. Must have categorical_drift_method = "cramers_v". ignore_na: bool, default True For categorical columns only. If True, ignores nones for categorical drift. If False, considers none as a separate category. For numerical columns we always ignore nones. min_samples : int , default: 10 Minimum number of samples required to calculate the drift score. 
If any of the distributions have less than min_samples, the function will either raise an error or return an invalid output (depends on ``raise_min_sample_error``) raise_min_samples_error : bool , default: False Determines whether to raise an error if the number of samples is less than min_samples. If False, returns the output 'not_enough_samples', None, None. with_display: bool, default: True flag that determines if function will calculate display. dataset_names: tuple, default: DEFAULT_DATASET_NAMES The names to show in the display for the first and second datasets. Returns ------- Tuple[float, str, Callable] - drift score of the difference between the two columns' distributions - method name - graph comparing the two distributions (density for numerical, stack bar for categorical) Here is the function: def calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, value_name: str, column_type: str, plot_title: Optional[str] = None, margin_quantile_filter: float = 0.025, max_num_categories_for_drift: Optional[int] = None, min_category_size_ratio: float = 0.01, max_num_categories_for_display: int = 10, show_categories_by: CategoriesSortingKind = 'largest_difference', numerical_drift_method: str = 'KS', categorical_drift_method: str = 'cramers_v', balance_classes: bool = False, ignore_na: bool = True, min_samples: int = 10, raise_min_samples_error: bool = False, with_display: bool = True, dataset_names: Tuple[str, str] = DEFAULT_DATASET_NAMES ) -> Tuple[float, str, Optional[Figure]]: """ Calculate drift score per column. Parameters ---------- train_column: pd.Series column from train dataset test_column: pd.Series same column from test dataset value_name: str title of the x axis, if plot_title is None then also the title of the whole plot. column_type: str type of column (either "numerical" or "categorical") plot_title: str or None if None use value_name as title otherwise use this. margin_quantile_filter: float, default: 0.025 float in range [0,0.5), representing which margins (high and low quantiles) of the distribution will be filtered out of the EMD calculation. This is done in order for extreme values not to affect the calculation disproportionally. This filter is applied to both distributions, in both margins. min_category_size_ratio: float, default 0.01 minimum size ratio for categories. Categories with size ratio lower than this number are binned into an "Other" category. max_num_categories_for_drift: int, default: None Max number of allowed categories. If there are more, they are binned into an "Other" category. max_num_categories_for_display: int, default: 10 Max number of categories to show in plot. show_categories_by: str, default: 'largest_difference' Specify which categories to show for categorical features' graphs, as the number of shown categories is limited by max_num_categories_for_display. Possible values: - 'train_largest': Show the largest train categories. - 'test_largest': Show the largest test categories. - 'largest_difference': Show the largest difference between categories. numerical_drift_method: str, default: "KS" decides which method to use on numerical variables. Possible values are: "EMD" for Earth Mover's Distance (EMD), "KS" for Kolmogorov-Smirnov (KS). categorical_drift_method: str, default: "cramers_v" decides which method to use on categorical variables. Possible values are: "cramers_v" for Cramer's V, "PSI" for Population Stability Index (PSI). 
balance_classes: bool, default: False If True, all categories will have an equal weight in the Cramer's V score. This is useful when the categorical variable is highly imbalanced, and we want to be alerted on changes in proportion to the category size, and not only to the entire dataset. Must have categorical_drift_method = "cramers_v". ignore_na: bool, default True For categorical columns only. If True, ignores nones for categorical drift. If False, considers none as a separate category. For numerical columns we always ignore nones. min_samples : int , default: 10 Minimum number of samples required to calculate the drift score. If any of the distributions have less than min_samples, the function will either raise an error or return an invalid output (depends on ``raise_min_sample_error``) raise_min_samples_error : bool , default: False Determines whether to raise an error if the number of samples is less than min_samples. If False, returns the output 'not_enough_samples', None, None. with_display: bool, default: True flag that determines if function will calculate display. dataset_names: tuple, default: DEFAULT_DATASET_NAMES The names to show in the display for the first and second datasets. Returns ------- Tuple[float, str, Callable] - drift score of the difference between the two columns' distributions - method name - graph comparing the two distributions (density for numerical, stack bar for categorical) """ if min_category_size_ratio < 0 or min_category_size_ratio > 1: raise DeepchecksValueError( f'min_category_size_ratio expected a value in range [0, 1], instead got {min_category_size_ratio}.') if column_type == 'categorical' and ignore_na is False: train_dist = np.array(train_column.values).reshape(-1) test_dist = np.array(test_column.values).reshape(-1) else: train_dist = np.array(train_column.dropna().values).reshape(-1) test_dist = np.array(test_column.dropna().values).reshape(-1) if len(train_dist) < min_samples or len(test_dist) < min_samples: if raise_min_samples_error is True: raise NotEnoughSamplesError( f'Not enough samples to calculate drift score. Minimum {min_samples} samples required. ' 'Note that for numerical labels, None values do not count as samples.' 'Use the \'min_samples\' parameter to change this requirement.' ) else: return 'not_enough_samples', None, None if column_type == 'numerical': train_dist = train_dist.astype('float') test_dist = test_dist.astype('float') if numerical_drift_method.lower() == 'emd': scorer_name = 'Earth Mover\'s Distance' score = earth_movers_distance(dist1=train_dist, dist2=test_dist, margin_quantile_filter=margin_quantile_filter) elif numerical_drift_method.lower() in ['ks', 'kolmogorov-smirnov']: scorer_name = 'Kolmogorov-Smirnov' score = kolmogorov_smirnov(dist1=train_dist, dist2=test_dist) else: raise DeepchecksValueError('Expected numerical_drift_method to be one ' f'of ["EMD", "KS"], received: {numerical_drift_method}') if not with_display: return score, scorer_name, None bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score) dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, value_name, dataset_names=dataset_names) elif column_type == 'categorical': if balance_classes is True and categorical_drift_method.lower() not in ['cramer_v', 'cramers_v']: raise DeepchecksValueError( 'balance_classes is only supported for Cramer\'s V. 
please set balance_classes=False ' 'or use \'cramers_v\' as categorical_drift_method') sort_by = 'difference' if show_categories_by == 'largest_difference' else \ ('dist1' if show_categories_by == 'train_largest' else 'dist2') if categorical_drift_method.lower() in ['cramer_v', 'cramers_v']: scorer_name = 'Cramer\'s V' score = cramers_v(dist1=train_dist, dist2=test_dist, balance_classes=balance_classes, min_category_size_ratio=min_category_size_ratio, max_num_categories=max_num_categories_for_drift, sort_by=sort_by) elif categorical_drift_method.lower() == 'psi': scorer_name = 'PSI' score = psi(dist1=train_dist, dist2=test_dist, min_category_size_ratio=min_category_size_ratio, max_num_categories=max_num_categories_for_drift, sort_by=sort_by) else: raise DeepchecksValueError('Expected categorical_drift_method to be one ' f'of ["cramers_v", "PSI"], received: {categorical_drift_method}') if not with_display: return score, scorer_name, None bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1) dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces( train_dist, test_dist, value_name, is_categorical=True, max_num_categories=max_num_categories_for_display, show_categories_by=show_categories_by, dataset_names=dataset_names) else: # Should never reach here raise DeepchecksValueError(f'Unsupported column type for drift: {column_type}') fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False, row_heights=[0.1, 0.9], subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot']) fig.add_traces(bar_traces, rows=1, cols=1) fig.update_xaxes(bar_x_axis, row=1, col=1) fig.update_yaxes(bar_y_axis, row=1, col=1) fig.add_traces(dist_traces, rows=2, cols=1) fig.update_xaxes(dist_x_axis, row=2, col=1) if balance_classes is True: dist_y_axis['title'] += ' (Log Scale)' fig.update_yaxes(dist_y_axis, row=2, col=1, type='log') else: fig.update_yaxes(dist_y_axis, row=2, col=1) fig.update_layout( legend=dict( title='Legend', yanchor='top', y=0.6), height=400, title=dict(text=plot_title or value_name, x=0.5, xanchor='center'), bargroupgap=0) return score, scorer_name, fig
Calculate drift score per column. Parameters ---------- train_column: pd.Series column from train dataset test_column: pd.Series same column from test dataset value_name: str title of the x axis, if plot_title is None then also the title of the whole plot. column_type: str type of column (either "numerical" or "categorical") plot_title: str or None if None use value_name as title otherwise use this. margin_quantile_filter: float, default: 0.025 float in range [0,0.5), representing which margins (high and low quantiles) of the distribution will be filtered out of the EMD calculation. This is done in order for extreme values not to affect the calculation disproportionally. This filter is applied to both distributions, in both margins. min_category_size_ratio: float, default 0.01 minimum size ratio for categories. Categories with size ratio lower than this number are binned into an "Other" category. max_num_categories_for_drift: int, default: None Max number of allowed categories. If there are more, they are binned into an "Other" category. max_num_categories_for_display: int, default: 10 Max number of categories to show in plot. show_categories_by: str, default: 'largest_difference' Specify which categories to show for categorical features' graphs, as the number of shown categories is limited by max_num_categories_for_display. Possible values: - 'train_largest': Show the largest train categories. - 'test_largest': Show the largest test categories. - 'largest_difference': Show the largest difference between categories. numerical_drift_method: str, default: "KS" decides which method to use on numerical variables. Possible values are: "EMD" for Earth Mover's Distance (EMD), "KS" for Kolmogorov-Smirnov (KS). categorical_drift_method: str, default: "cramers_v" decides which method to use on categorical variables. Possible values are: "cramers_v" for Cramer's V, "PSI" for Population Stability Index (PSI). balance_classes: bool, default: False If True, all categories will have an equal weight in the Cramer's V score. This is useful when the categorical variable is highly imbalanced, and we want to be alerted on changes in proportion to the category size, and not only to the entire dataset. Must have categorical_drift_method = "cramers_v". ignore_na: bool, default True For categorical columns only. If True, ignores nones for categorical drift. If False, considers none as a separate category. For numerical columns we always ignore nones. min_samples : int , default: 10 Minimum number of samples required to calculate the drift score. If any of the distributions have less than min_samples, the function will either raise an error or return an invalid output (depends on ``raise_min_sample_error``) raise_min_samples_error : bool , default: False Determines whether to raise an error if the number of samples is less than min_samples. If False, returns the output 'not_enough_samples', None, None. with_display: bool, default: True flag that determines if function will calculate display. dataset_names: tuple, default: DEFAULT_DATASET_NAMES The names to show in the display for the first and second datasets. Returns ------- Tuple[float, str, Callable] - drift score of the difference between the two columns' distributions - method name - graph comparing the two distributions (density for numerical, stack bar for categorical)
649
from numbers import Number from typing import Dict, Optional, Tuple, Union import numpy as np import pandas as pd from plotly.graph_objs import Figure from plotly.subplots import make_subplots from scipy.stats import chi2_contingency, wasserstein_distance from deepchecks.core import ConditionCategory, ConditionResult from deepchecks.core.errors import DeepchecksValueError, NotEnoughSamplesError from deepchecks.utils.dict_funcs import get_dict_entry_by_value from deepchecks.utils.distribution.plot import (CategoriesSortingKind, drift_score_bar_traces, feature_distribution_traces) from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins from deepchecks.utils.plot import DEFAULT_DATASET_NAMES from deepchecks.utils.strings import format_number SUPPORTED_CATEGORICAL_METHODS = ['Cramer\'s V', 'PSI'] SUPPORTED_NUMERIC_METHODS = ['Earth Mover\'s Distance', 'Kolmogorov-Smirnov'] def get_drift_method(result_dict: Dict): """Return which drift scoring methods were in use. Parameters ---------- result_dict : Dict the result dict of the drift check. Returns ------- Tuple(str, str) the categorical scoring method and then the numeric scoring method. """ result_df = pd.DataFrame(result_dict).T cat_mthod_arr = result_df[result_df['Method'].isin(SUPPORTED_CATEGORICAL_METHODS)]['Method'] cat_method = cat_mthod_arr.iloc[0] if len(cat_mthod_arr) else None num_mthod_arr = result_df[result_df['Method'].isin(SUPPORTED_NUMERIC_METHODS)]['Method'] num_method = num_mthod_arr.iloc[0] if len(num_mthod_arr) else None return cat_method, num_method def get_dict_entry_by_value(x: dict, value_select_fn=max): """Get from dictionary the entry with value that returned from value_select_fn. Returns ------- Tuple: key, value """ if not x: return None, None value = value_select_fn(x.values()) index = list(x.values()).index(value) return list(x.keys())[index], value def format_number(x, floating_point: int = 2) -> str: """Format number for elegant display. Parameters ---------- x Number to be displayed floating_point : int , default: 2 Number of floating points to display Returns ------- str String of beautified number """ def add_commas(x): return f'{x:,}' # yes this actually formats the number 1000 to "1,000" if np.isnan(x): return 'nan' # 0 is lost in the next if case, so we have it here as a special use-case if x == 0: return '0' # If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3. if abs(x) < 10 ** (-floating_point): return f'{Decimal(x):.{floating_point}E}' # If x is an integer, or if x when rounded is an integer (e.g. 1.999999), then return as integer: if round(x) == round(x, floating_point): return add_commas(round(x)) # If not, return as a float, but don't print unnecessary zeros at end: else: ret_x = round(x, floating_point) return add_commas(ret_x).rstrip('0') The provided code snippet includes necessary dependencies for implementing the `drift_condition` function. Write a Python function `def drift_condition(max_allowed_categorical_score: float, max_allowed_numeric_score: float, subject_single: str, subject_multi: str, allowed_num_subjects_exceeding_threshold: int = 0)` to solve the following problem: Create a condition function to be used in drift check's conditions. 
Parameters ---------- max_allowed_categorical_score: float Max value allowed for categorical drift max_allowed_numeric_score: float Max value allowed for numerical drift subject_single: str String that represents the subject being tested as single (feature, column, property) subject_multi: str String that represents the subject being tested as multiple (features, columns, properties) allowed_num_subjects_exceeding_threshold: int, default: 0 Determines the number of properties with drift score above threshold needed to fail the condition. Here is the function: def drift_condition(max_allowed_categorical_score: float, max_allowed_numeric_score: float, subject_single: str, subject_multi: str, allowed_num_subjects_exceeding_threshold: int = 0): """Create a condition function to be used in drift check's conditions. Parameters ---------- max_allowed_categorical_score: float Max value allowed for categorical drift max_allowed_numeric_score: float Max value allowed for numerical drift subject_single: str String that represents the subject being tested as single (feature, column, property) subject_multi: str String that represents the subject being tested as multiple (features, columns, properties) allowed_num_subjects_exceeding_threshold: int, default: 0 Determines the number of properties with drift score above threshold needed to fail the condition. """ def condition(result: dict): cat_method, num_method = get_drift_method(result) cat_drift_props = {prop: d['Drift score'] for prop, d in result.items() if d['Method'] in SUPPORTED_CATEGORICAL_METHODS} not_passing_categorical_props = {props: format_number(d) for props, d in cat_drift_props.items() if d >= max_allowed_categorical_score} num_drift_props = {prop: d['Drift score'] for prop, d in result.items() if d['Method'] in SUPPORTED_NUMERIC_METHODS} not_passing_numeric_props = {prop: format_number(d) for prop, d in num_drift_props.items() if d >= max_allowed_numeric_score} num_failed = len(not_passing_categorical_props) + len(not_passing_numeric_props) if num_failed > allowed_num_subjects_exceeding_threshold: details = f'Failed for {num_failed} out of {len(result)} {subject_multi}.' if not_passing_categorical_props: details += f'\nFound {len(not_passing_categorical_props)} categorical {subject_multi} with ' \ f'{cat_method} above threshold: {not_passing_categorical_props}' if not_passing_numeric_props: details += f'\nFound {len(not_passing_numeric_props)} numeric {subject_multi} with {num_method} above' \ f' threshold: {not_passing_numeric_props}' return ConditionResult(ConditionCategory.FAIL, details) else: details = f'Passed for {len(result) - num_failed} {subject_multi} out of {len(result)} {subject_multi}.' if cat_drift_props: prop, score = get_dict_entry_by_value(cat_drift_props) details += f'\nFound {subject_single} "{prop}" has the highest categorical drift score: ' \ f'{format_number(score)}' if num_drift_props: prop, score = get_dict_entry_by_value(num_drift_props) details += f'\nFound {subject_single} "{prop}" has the highest numerical drift score: ' \ f'{format_number(score)}' return ConditionResult(ConditionCategory.PASS, details) return condition
Create a condition function to be used in drift check's conditions. Parameters ---------- max_allowed_categorical_score: float Max value allowed for categorical drift max_allowed_numeric_score: float Max value allowed for numerical drift subject_single: str String that represents the subject being tested as single (feature, column, property) subject_multi: str String that represents the subject being tested as multiple (features, columns, properties) allowed_num_subjects_exceeding_threshold: int, default: 0 Determines the number of properties with drift score above threshold needed to fail the condition.
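A minimal usage sketch for the condition factory above, assuming `drift_condition` and the deepchecks helpers shown with it (`get_drift_method`, `format_number`, `ConditionResult`, ...) are in scope; the feature names and scores below are invented for illustration.

condition = drift_condition(max_allowed_categorical_score=0.2, max_allowed_numeric_score=0.2,
                            subject_single='feature', subject_multi='features')

# A result dict in the shape the condition expects: per feature, a drift score and the method used.
result = {
    'gender': {'Drift score': 0.05, 'Method': "Cramer's V"},
    'age': {'Drift score': 0.31, 'Method': 'Kolmogorov-Smirnov'},
}

condition_result = condition(result)  # fails, since 'age' exceeds the 0.2 numeric threshold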
650
The provided code snippet includes necessary dependencies for implementing the `sort_dict` function. Write a Python function `def sort_dict(x: dict, reverse=True)` to solve the following problem: Sort dictionary by values. Returns ------- Dict: sorted dictionary Here is the function: def sort_dict(x: dict, reverse=True): """Sort dictionary by values. Returns ------- Dict: sorted dictionary """ return dict(sorted(x.items(), key=lambda item: item[1], reverse=reverse))
Sort dictionary by values. Returns ------- Dict: sorted dictionary
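A tiny illustration of the helper above (values invented):

scores = {'age': 0.12, 'income': 0.45, 'city': 0.03}
sort_dict(scores)                 # {'income': 0.45, 'age': 0.12, 'city': 0.03}
sort_dict(scores, reverse=False)  # {'city': 0.03, 'age': 0.12, 'income': 0.45}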
651
from typing import List import numpy as np import pandas as pd import plotly.graph_objects as go from sklearn.metrics import confusion_matrix from deepchecks import ConditionCategory, ConditionResult from deepchecks.core import CheckResult from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.strings import format_number_if_not_nan, format_percent def create_confusion_matrix_figure(confusion_matrix_data: np.ndarray, classes_names: List[str], normalize_display: bool): """Create a confusion matrix figure. Parameters ---------- confusion_matrix_data: np.ndarray 2D array containing the confusion matrix. classes_names: List[str] the names of the classes to display as the axis. normalize_display: bool if True will also show normalized values by the true values. Returns ------- plotly Figure object confusion matrix figure """ confusion_matrix_norm = confusion_matrix_data.astype('float') / \ (confusion_matrix_data.sum(axis=1)[:, np.newaxis] + np.finfo(float).eps) * 100 if normalize_display: z = np.vectorize(format_number_if_not_nan)(confusion_matrix_norm) else: z = confusion_matrix_data accuracy_array = np.diag(confusion_matrix_norm).round(decimals=2) display = [] display_msg = f'The overall accuracy of your model is: {round(np.sum(accuracy_array)/len(accuracy_array), 2)}%.' if min(accuracy_array) < 100: display_msg += f'<br>Best accuracy achieved on samples with <b>{classes_names[np.argmax(accuracy_array)]}' \ f'</b> label ({np.max(accuracy_array)}%).' display_msg += f'<br>Worst accuracy achieved on samples with <b>{classes_names[np.argmin(accuracy_array)]}' \ f'</b> label ({np.min(accuracy_array)}%).' display.append(display_msg) total_samples = np.nansum(confusion_matrix_data) percent_data_each_row = np.round(confusion_matrix_norm, decimals=2) percent_data_each_cell = np.round(np.divide(np.nan_to_num(confusion_matrix_data, nan=0.0), total_samples) * 100, decimals=2) percent_data_each_col = (confusion_matrix_data.astype('float') / (confusion_matrix_data.sum(axis=0)[:, np.newaxis] + np.finfo(float).eps) * 100).round(decimals=2) custom_hoverdata = np.dstack((percent_data_each_cell, percent_data_each_row, percent_data_each_col)) fig = go.Figure(data=go.Heatmap(x=classes_names, y=classes_names, z=z, customdata=custom_hoverdata, xgap=1, ygap=1, text=confusion_matrix_data, texttemplate='%{text}', hovertemplate='% out of all data: <b>%{customdata[0]}%</b><br>% out ' 'of row: <b>%{customdata[1]}%</b><br>% out of column: ' '<b>%{customdata[2]}%</b><extra></extra>', showscale=False)) fig.update_layout(title='Confusion Matrix (# Samples)', title_x=0.5) fig.update_layout(height=600) fig.update_xaxes(title='Predicted Value', type='category', scaleanchor='y', constrain='domain') fig.update_yaxes(title='True Value', type='category', constrain='domain', autorange='reversed') display.append(fig) return display The provided code snippet includes necessary dependencies for implementing the `run_confusion_matrix_check` function. Write a Python function `def run_confusion_matrix_check(y_pred: np.ndarray, y_true: np.ndarray, with_display=True, normalize_display=True) -> CheckResult` to solve the following problem: Calculate confusion matrix based on predictions and true label values. 
Here is the function: def run_confusion_matrix_check(y_pred: np.ndarray, y_true: np.ndarray, with_display=True, normalize_display=True) -> CheckResult: """Calculate confusion matrix based on predictions and true label values.""" total_classes = sorted([str(x) for x in set(y_pred).union(set(y_true))]) result = confusion_matrix(y_true, y_pred) if with_display: displays = create_confusion_matrix_figure(result, total_classes, normalize_display) else: displays = None # For accessing the class names from the condition result = pd.DataFrame(result, index=total_classes, columns=total_classes) return CheckResult(result, display=displays)
Calculate confusion matrix based on predictions and true label values.
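A short usage sketch, assuming the function above and its deepchecks dependencies are importable; the labels are dummy values.

import numpy as np

y_true = np.array([0, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 0, 0, 1, 1])

# with_display=False skips figure creation and only returns the confusion matrix value
check_result = run_confusion_matrix_check(y_pred, y_true, with_display=False)
# check_result.value is a DataFrame indexed by the class names ('0', '1')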
652
from typing import List import numpy as np import pandas as pd import plotly.graph_objects as go from sklearn.metrics import confusion_matrix from deepchecks import ConditionCategory, ConditionResult from deepchecks.core import CheckResult from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.strings import format_number_if_not_nan, format_percent class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4, add_positive_prefix: bool = False) -> str: """Format percent for elegant display. Parameters ---------- ratio : float Ratio to be displayed as percent floating_point: int , default: 2 Number of floating points to display scientific_notation_threshold: int, default: 4 Max number of floating points for which to show number as float. If number of floating points is larger than this parameter, scientific notation (e.g. "10E-5%") will be shown. add_positive_prefix: bool, default: False add plus sign before positive percentages (minus sign is always added for negative percentages). Returns ------- str String of ratio as percent """ result: str if ratio < 0: ratio = -ratio prefix = '-' else: prefix = '+' if add_positive_prefix and ratio != 0 else '' if int(ratio) == ratio: result = f'{int(ratio) * 100}%' elif ratio > 1: result = truncate_zero_percent(ratio, floating_point) elif ratio < 10**(-(2+floating_point)): if ratio > 10**(-(2+scientific_notation_threshold)): result = truncate_zero_percent(ratio, scientific_notation_threshold) else: result = f'{Decimal(ratio * 100):.{floating_point}E}%' elif ratio > (1-10**(-(2+floating_point))): if floating_point > 0: result = f'99.{"".join(["9"]*floating_point)}%' else: result = '99%' else: result = truncate_zero_percent(ratio, floating_point) return prefix + result The provided code snippet includes necessary dependencies for implementing the `misclassified_samples_lower_than_condition` function. Write a Python function `def misclassified_samples_lower_than_condition(value: pd.DataFrame, misclassified_samples_threshold: float) -> ConditionResult` to solve the following problem: Condition function that checks if the misclassified samples in the confusion matrix is below threshold. Parameters ---------- value: pd.DataFrame Dataframe containing the confusion matrix misclassified_samples_threshold: float Ratio of samples to be used for comparison in the condition (Value should be between 0 - 1 inclusive) Raises ------ DeepchecksValueError if the value of `misclassified_samples_threshold` parameter is not between 0 - 1 inclusive. Returns ------- ConditionResult - ConditionCategory.PASS, if all the misclassified samples in the confusion matrix are less than `misclassified_samples_threshold` ratio - ConditionCategory.FAIL, if the misclassified samples in the confusion matrix are more than the `misclassified_samples_threshold` ratio Here is the function: def misclassified_samples_lower_than_condition(value: pd.DataFrame, misclassified_samples_threshold: float) -> ConditionResult: """Condition function that checks if the misclassified samples in the confusion matrix is below threshold. 
Parameters ---------- value: pd.DataFrame Dataframe containing the confusion matrix misclassified_samples_threshold: float Ratio of samples to be used for comparison in the condition (Value should be between 0 - 1 inclusive) Raises ------ DeepchecksValueError if the value of `misclassified_samples_threshold` parameter is not between 0 - 1 inclusive. Returns ------- ConditionResult - ConditionCategory.PASS, if all the misclassified samples in the confusion matrix are less than `misclassified_samples_threshold` ratio - ConditionCategory.FAIL, if the misclassified samples in the confusion matrix are more than the `misclassified_samples_threshold` ratio """ if misclassified_samples_threshold < 0 or misclassified_samples_threshold > 1: raise DeepchecksValueError( 'Condition requires the parameter "misclassified_samples_threshold" ' f'to be between 0 and 1 inclusive but got {misclassified_samples_threshold}' ) # Getting the class names from the confusion matrix class_names = value.columns # Converting the confusion matrix to a numpy array for numeric indexing value = value.to_numpy() # Computing the total number of samples from the confusion matrix total_samples = np.sum(value) # Number of threshold samples based on the 'misclassified_samples_threshold' parameter thresh_samples = round(np.ceil(misclassified_samples_threshold * total_samples)) # m is the number of rows in the confusion matrix and # n is the number of columns in the confusion matrix m, n = value.shape[0], value.shape[1] # Variables to keep track of the misclassified cells above 'thresh_samples' n_cells_above_thresh = 0 max_misclassified_cell_idx = (0, 1) # Looping over the confusion matrix and checking only the misclassified cells for i in range(m): for j in range(n): # omitting the principal axis of the confusion matrix if i != j: n_samples = value[i][j] if n_samples > thresh_samples: n_cells_above_thresh += 1 x, y = max_misclassified_cell_idx max_misclassified_samples = value[x][y] if n_samples > max_misclassified_samples: max_misclassified_cell_idx = (i, j) # There are misclassified cells in the confusion matrix with samples more than 'thresh_samples' if n_cells_above_thresh > 0: x, y = max_misclassified_cell_idx max_misclassified_samples = value[x][y] max_misclassified_samples_ratio = max_misclassified_samples / total_samples details = f'Detected {n_cells_above_thresh} misclassified confusion matrix cell(s) each one ' \ f'containing more than {format_percent(misclassified_samples_threshold)} of the data. ' \ f'Largest misclassified cell ({format_percent(max_misclassified_samples_ratio)} of the data) ' \ f'is samples with a true value of "{class_names[x]}" and a predicted value of "{class_names[y]}".' return ConditionResult(ConditionCategory.FAIL, details) # No cell has more than 'thresh_samples' misclassified samples details = 'All misclassified confusion matrix cells contain less than ' \ f'{format_percent(misclassified_samples_threshold)} of the data.' return ConditionResult(ConditionCategory.PASS, details)
Condition function that checks if the misclassified samples in the confusion matrix is below threshold. Parameters ---------- value: pd.DataFrame Dataframe containing the confusion matrix misclassified_samples_threshold: float Ratio of samples to be used for comparison in the condition (Value should be between 0 - 1 inclusive) Raises ------ DeepchecksValueError if the value of `misclassified_samples_threshold` parameter is not between 0 - 1 inclusive. Returns ------- ConditionResult - ConditionCategory.PASS, if all the misclassified samples in the confusion matrix are less than `misclassified_samples_threshold` ratio - ConditionCategory.FAIL, if the misclassified samples in the confusion matrix are more than the `misclassified_samples_threshold` ratio
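A hedged usage sketch for the condition above; the confusion matrix values are invented.

import pandas as pd

# Confusion matrix as produced by the check: true classes on the index, predictions on the columns.
cm = pd.DataFrame([[50, 2], [3, 45]], index=['cat', 'dog'], columns=['cat', 'dog'])

misclassified_samples_lower_than_condition(cm, misclassified_samples_threshold=0.1)   # passes
misclassified_samples_lower_than_condition(cm, misclassified_samples_threshold=0.01)  # fails on the 3-sample cell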
653
import gc import torch.cuda The provided code snippet includes necessary dependencies for implementing the `empty_gpu` function. Write a Python function `def empty_gpu(device)` to solve the following problem: Empty GPU or MPS memory and run garbage collector. Here is the function: def empty_gpu(device): """Empty GPU or MPS memory and run garbage collector.""" gc.collect() device = str(device) if 'cuda' in device.lower(): torch.cuda.empty_cache() elif 'mps' in device.lower(): try: from torch import mps # pylint: disable=import-outside-toplevel mps.empty_cache() except Exception: # pylint: disable=broad-except pass
Empty GPU or MPS memory and run garbage collector.
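A minimal sketch of when this helper would typically be called (the device strings are examples):

# After finishing with a model on GPU, release cached memory before the next run.
empty_gpu('cuda:0')   # frees the CUDA cache on CUDA machines
empty_gpu('cpu')      # only runs the garbage collector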
654
import typing as t import jsonpickle from deepchecks.core.check_result import BaseCheckResult from deepchecks.core.suite import SuiteResult from typing import List class BaseCheckResult: """Generic class for any check output, contains some basic functions.""" check: Optional['BaseCheck'] header: Optional[str] run_time: Optional[int] = 0 def from_json(json_dict: Union[str, Dict]) -> 'BaseCheckResult': """Convert a json object that was returned from CheckResult.to_json or CheckFailure.to_json. Parameters ---------- json_dict: Union[str, Dict] Json data Returns ------- BaseCheckResult A check output object. """ from deepchecks.core.check_json import CheckFailureJson, CheckResultJson if isinstance(json_dict, str): json_dict = jsonpickle.loads(json_dict) check_type = cast(dict, json_dict)['type'] if check_type == 'CheckFailure': return CheckFailureJson(json_dict) elif check_type == 'CheckResult': return CheckResultJson(json_dict) else: raise ValueError( 'Excpected json object to be one of [CheckFailure, CheckResult] ' f'but recievied: {check_type}' ) def get_header(self) -> str: """Return header for display. if header was defined return it, else extract name of check class.""" return self.header or self.check.name() def get_metadata(self, with_doc_link: bool = False) -> Dict: """Return the related check metadata.""" return {'header': self.get_header(), **self.check.metadata(with_doc_link=with_doc_link)} def get_check_id(self, unique_id: str = '') -> str: """Return check id (used for href).""" header = self.get_header().replace(' ', '') return f'{header}_{unique_id}' class SuiteResult(DisplayableResult): """Contain the results of a suite run. Parameters ---------- name: str results: List[BaseCheckResult] extra_info: Optional[List[str]] """ name: str extra_info: List[str] results: List['check_types.BaseCheckResult'] def __init__( self, name: str, results: List['check_types.BaseCheckResult'], extra_info: Optional[List[str]] = None, ): """Initialize suite result.""" self.name = name self.results = sort_check_results(results) self.extra_info = extra_info or [] # NOTE: # we collect results indexes in order to facilitate results # filtering and selection via the `select_results` method # # Examples: # >> # >> sr.select_result(sr.results_with_conditions | sr.results_with_display) # >> sr.select_results(sr.results_without_conditions & sr.results_with_display) self.results_with_conditions: Set[int] = set() self.results_without_conditions: Set[int] = set() self.results_with_display: Set[int] = set() self.results_without_display: Set[int] = set() self.failures: Set[int] = set() for index, result in enumerate(self.results): if isinstance(result, check_types.CheckFailure): self.failures.add(index) elif isinstance(result, check_types.CheckResult): has_conditions = result.have_conditions() has_display = result.have_display() if has_conditions: self.results_with_conditions.add(index) else: self.results_without_conditions.add(index) if has_display: self.results_with_display.add(index) else: self.results_without_display.add(index) else: raise TypeError(f'Unknown type of result - {type(result).__name__}') def select_results(self, idx: Set[int] = None, names: Set[str] = None) -> List[Union[ 'check_types.CheckResult', 'check_types.CheckFailure' ]]: """Select results either by indexes or result header names. Parameters ---------- idx : Set[int], default None The list of indexes to filter the check results from the results list. If names is None, then this parameter is required. 
names : Set[str], default None The list of names denoting the header of the check results. If idx is None, this parameter is required. Both idx and names cannot be passed. Returns ------- List[Union['check_types.CheckResult', 'check_types.CheckFailure']] : A list of check results filtered either by the indexes or by their names. """ if idx is None and names is None: raise DeepchecksNotSupportedError('Either idx or names should be passed') if idx and names: raise DeepchecksNotSupportedError('Only one of idx or names should be passed') if names: names = [name.lower().replace('_', ' ').strip() for name in names] output = [result for name in names for result in self.results if result.get_header().lower() == name] else: output = [result for index, result in enumerate(self.results) if index in idx] return output def __repr__(self): """Return default __repr__ function uses value.""" return self.name def _repr_html_( self, unique_id: Optional[str] = None, requirejs: bool = False, ) -> str: """Return html representation of check result.""" return widget_to_html_string( self.to_widget(unique_id=unique_id or get_random_string(n=25)), title=self.name, requirejs=requirejs ) def _repr_json_(self): return SuiteResultJsonSerializer(self).serialize() def _repr_mimebundle_(self, **kwargs): return { 'text/html': self._repr_html_(), 'application/json': self._repr_json_() } def widget_serializer(self) -> SuiteResultWidgetSerializer: """Return WidgetSerializer instance.""" return SuiteResultWidgetSerializer(self) def ipython_serializer(self) -> SuiteResultIPythonSerializer: """Return IPythonSerializer instance.""" return SuiteResultIPythonSerializer(self) def html_serializer(self) -> SuiteResultHtmlSerializer: """Return HtmlSerializer instance.""" return SuiteResultHtmlSerializer(self) def show( self, as_widget: bool = True, unique_id: Optional[str] = None, **kwargs ) -> Optional[HTMLFormatter]: """Display result. Parameters ---------- as_widget : bool whether to display result with help of ipywidgets or not unique_id : Optional[str], default None unique identifier of the result output **kwrgs : other key-value arguments will be passed to the `Serializer.serialize` method Returns ------- Optional[HTMLFormatter] : when used by sphinx-gallery """ return super().show( as_widget, unique_id or get_random_string(n=25), **kwargs ) def show_not_interactive( self, unique_id: Optional[str] = None, **kwargs ): """Display the not interactive version of result output. In this case, ipywidgets will not be used and plotly figures will be transformed into png images. Parameters ---------- unique_id : Optional[str], default None unique identifier of the result output **kwrgs : other key-value arguments will be passed to the `Serializer.serialize` method """ return super().show_not_interactive( unique_id or get_random_string(n=25), **kwargs ) def save_as_html( self, file: Union[str, io.TextIOWrapper, None] = None, as_widget: bool = True, requirejs: bool = True, unique_id: Optional[str] = None, connected: bool = False, **kwargs ): """Save output as html file. Parameters ---------- file : filename or file-like object The file to write the HTML output to. 
If None writes to output.html as_widget : bool, default True whether to use ipywidgets or not requirejs: bool , default: True whether to include requirejs library into output HTML or not unique_id : Optional[str], default None unique identifier of the result output connected: bool , default False indicates whether internet connection is available or not, if 'True' then CDN urls will be used to load javascript otherwise javascript libraries will be injected directly into HTML output. Set to 'False' to make results viewing possible when the internet connection is not available. Returns ------- Optional[str] : name of newly create file """ return save_as_html( file=file, serializer=self.widget_serializer if as_widget else self.html_serializer, connected=connected, # next kwargs will be passed to the serializer.serialize method requirejs=requirejs, output_id=unique_id or get_random_string(n=25), ) def save_as_cml_markdown( self, file: str = None, platform: str = 'github', attach_html_report: bool = True, ): """Save a result to a markdown file to use with [CML](https://cml.dev). The rendered markdown will include only the conditions summary, with the full html results attached. Parameters ---------- file : filename or file-like object The file to write the HTML output to. If None writes to report.md platform: str , default: 'github' Target Git platform to ensure pretty formatting and nothing funky. Options currently include 'github' or 'gitlab'. attach_html_report: bool , default True Whether to attach the full html report with plots, making it available for download. This will save a [suite_name].html file in the same directory as the markdown report. Returns ------- Optional[str] : name of newly create file. """ if file is None: file = './report.md' elif isinstance(file, str): pass elif isinstance(file, io.TextIOWrapper): raise NotImplementedError( 'io.TextIOWrapper is not yet supported for save_as_cml_markdown.' ) def format_conditions_table(): conditions_table = SuiteResultHtmlSerializer(self).prepare_conditions_table() # conditions_table = self.html_serializer.prepare_conditions_table() soup = BeautifulSoup(conditions_table, features='html.parser') soup.h2.extract() # remove 'Conditions Table' redundant heading soup.style.extract() # these are not rendered anyway summary = soup.new_tag('summary') summary.string = self.name soup.table.insert_before(summary) soup = BeautifulSoup( f'\n<details>{str(soup)}</details>\n', features='html.parser' ) return soup soup = format_conditions_table() if not attach_html_report: with open(file, 'w', encoding='utf-8') as handle: handle.write(soup.prettify()) else: # save full html report path = pathlib.Path(file) html_file = str( pathlib.Path(file).parent .resolve() .joinpath(path.stem+'.html') ) self.save_as_html(html_file) # build string containing html report as an attachment # (hyperlink syntax gets processed as an attachment by cml) if platform == 'gitlab': soup.summary.string = f'![{soup.summary.string}]({html_file})' soup = soup.prettify() elif platform == 'github': soup = ( soup.prettify() + f'\n> 📎 ![Full {self.name} Report]({html_file})\n' ) else: error_message = 'Only \'github\' and \'gitlab\' are supported right now.' error_message += '\nThough one of these formats ' error_message += 'might work for your target Git platform!' 
raise ValueError(error_message) with open(file, 'w', encoding='utf-8') as file_handle: file_handle.write(soup) def to_widget( self, unique_id: Optional[str] = None, **kwargs ) -> Widget: """Return SuiteResult as a ipywidgets.Widget instance. Parameters ---------- unique_id : Optional[str], default None unique identifier of the result output Returns ------- Widget """ output_id = unique_id or get_random_string(n=25) return SuiteResultWidgetSerializer(self).serialize(output_id=output_id) def to_json(self, with_display: bool = True, **kwargs): """Return check result as json. Parameters ---------- with_display : bool, default True whether to include serialized `SuiteResult.display` items into the output or not Returns ------- str """ return jsonpickle.dumps( SuiteResultJsonSerializer(self).serialize(with_display=with_display), unpicklable=False ) def to_wandb( self, dedicated_run: Optional[bool] = None, **kwargs ): """Send suite result to wandb. Parameters ---------- dedicated_run : bool whether to create a separate wandb run or not (deprecated parameter, does not have any effect anymore) kwargs: Keyword arguments to pass to wandb.init. Default project name is deepchecks. Default config is the suite name. """ # NOTE: # Wandb is not a default dependency # user should install it manually therefore we are # doing import within method to prevent premature ImportError # TODO: # Previous implementation used ProgressBar to show serialization progress from deepchecks.core.serialization.suite_result.wandb import SuiteResultSerializer as WandbSerializer if dedicated_run is not None: warnings.warn( '"dedicated_run" parameter is deprecated and does not have effect anymore. ' 'It will be remove in next versions.' ) wandb_kwargs = {'config': {'name': self.name}} wandb_kwargs.update(**kwargs) with wandb_run(**wandb_kwargs) as run: run.log(WandbSerializer(self).serialize()) def get_not_ran_checks(self) -> List['check_types.CheckFailure']: """Get all the check results which did not run (unable to run due to missing parameters, exception, etc). Returns ------- List[CheckFailure] All the check failures in the suite. """ return cast(List[check_types.CheckFailure], self.select_results(self.failures)) def get_not_passed_checks(self, fail_if_warning=True) -> List['check_types.CheckResult']: """Get all the check results that have not passing condition. This does not include checks that failed to run. Parameters ---------- fail_if_warning: bool, Default: True Whether conditions should fail on status of warning Returns ------- List[CheckResult] All the check results in the suite that have failing conditions. """ results = cast( List[check_types.CheckResult], self.select_results(self.results_with_conditions) ) return [ r for r in results if not r.passed_conditions(fail_if_warning) ] def get_passed_checks(self, fail_if_warning=True) -> List['check_types.CheckResult']: """Get all the check results that have passing condition. This does not include checks that failed to run. Parameters ---------- fail_if_warning: bool, Default: True Whether conditions should fail on status of warning Returns ------- List[CheckResult] All the check results in the suite that have failing conditions. """ results = cast( List[check_types.CheckResult], self.select_results(self.results_with_conditions) ) return [ r for r in results if r.passed_conditions(fail_if_warning) ] def passed(self, fail_if_warning: bool = True, fail_if_check_not_run: bool = False) -> bool: """Return whether this suite result has passed. 
Pass value is derived from condition results of all individual\ checks, and may consider checks that didn't run. Parameters ---------- fail_if_warning: bool, Default: True Whether conditions should fail on status of warning fail_if_check_not_run: bool, Default: False Whether checks that didn't run (missing parameters, exception, etc) should fail the suite result. Returns ------- bool """ not_run_pass = len(self.get_not_ran_checks()) == 0 if fail_if_check_not_run else True conditions_pass = len(self.get_not_passed_checks(fail_if_warning)) == 0 return conditions_pass and not_run_pass def from_json(cls, json_res: str): """Convert a json object that was returned from SuiteResult.to_json. Parameters ---------- json_data: Union[str, Dict] Json data Returns ------- SuiteResult A suite result object. """ json_dict = jsonpickle.loads(json_res) name = json_dict['name'] results = [] for res in json_dict['results']: results.append(check_types.BaseCheckResult.from_json(res)) return SuiteResult(name, results) The provided code snippet includes necessary dependencies for implementing the `from_json` function. Write a Python function `def from_json(json_dict: t.Union[str, t.Dict]) -> t.Union[BaseCheckResult, SuiteResult]` to solve the following problem: Convert a json object that was returned from one of our classes to_json. Parameters ---------- json_data: Union[str, Dict] Json data Returns ------- Union[BaseCheckResult, SuiteResult] A check output or a suite result object. Here is the function: def from_json(json_dict: t.Union[str, t.Dict]) -> t.Union[BaseCheckResult, SuiteResult]: """Convert a json object that was returned from one of our classes to_json. Parameters ---------- json_data: Union[str, Dict] Json data Returns ------- Union[BaseCheckResult, SuiteResult] A check output or a suite result object. """ if isinstance(json_dict, str): json_dict = jsonpickle.loads(json_dict) json_type = json_dict['type'] if 'Check' in json_type: return BaseCheckResult.from_json(json_dict) if json_type == 'SuiteResult': return SuiteResult.from_json(json_dict) raise ValueError('Excpected json object to be one of ' '[CheckFailure, CheckResult, SuiteResult] but recievied: ' + json_type)
Convert a json object that was returned from one of our classes to_json. Parameters ---------- json_dict: Union[str, Dict] Json data Returns ------- Union[BaseCheckResult, SuiteResult] A check output or a suite result object.
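A hedged round-trip sketch, assuming `some_result` is a CheckResult previously produced by a deepchecks check run:

serialized = some_result.to_json()   # JSON string produced by deepchecks
restored = from_json(serialized)     # dispatched to BaseCheckResult.from_json for the 'CheckResult' type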
655
import typing as t T = t.TypeVar("T") from typing import List The provided code snippet includes necessary dependencies for implementing the `to_ordional_enumeration` function. Write a Python function `def to_ordional_enumeration(data: t.List[T]) -> t.Dict[T, int]` to solve the following problem: Enumerate each unique item. Here is the function: def to_ordional_enumeration(data: t.List[T]) -> t.Dict[T, int]: """Enumerate each unique item.""" counter = 0 enum = {} for it in data: if it not in enum: enum[it] = counter counter += 1 return enum
Enumerate each unique item.
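For example (using the function name exactly as defined above):

to_ordional_enumeration(['dog', 'cat', 'dog', 'bird'])
# {'dog': 0, 'cat': 1, 'bird': 2}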
656
from functools import lru_cache from inspect import Signature, signature from typing import Any, Callable, Dict def extract_signature(obj: Callable[..., Any]) -> Signature: """Extract signature object from a callable instance. Getting a callable signature is a heavy and not cheap op therefore we are caching it. """ return signature(obj) The provided code snippet includes necessary dependencies for implementing the `initvars` function. Write a Python function `def initvars( obj: object, include_defaults: bool = False, include_kwargs: bool = False, include_properties: bool = False, ) -> Dict[Any, Any]` to solve the following problem: Return object __dict__ variables that was passed throw constructor (__init__ method). Parameters ---------- obj : object include_defaults : bool, default False wherether to include vars with default value or not Returns ------- Dict[Any, Any] subset of the obj __dict__ Here is the function: def initvars( obj: object, include_defaults: bool = False, include_kwargs: bool = False, include_properties: bool = False, ) -> Dict[Any, Any]: """Return object __dict__ variables that was passed throw constructor (__init__ method). Parameters ---------- obj : object include_defaults : bool, default False wherether to include vars with default value or not Returns ------- Dict[Any, Any] subset of the obj __dict__ """ assert hasattr(obj, '__init__') state = {k: v for k, v in obj.__dict__.items() if not k.startswith('_')} signature = extract_signature(obj.__init__) # pylint: disable=redefined-outer-name bind = signature.bind(**state) if include_defaults is True: bind.apply_defaults() arguments = bind.arguments else: arguments = { k: v for k, v in bind.arguments.items() if signature.parameters[k].default != v } if not include_kwargs: arguments.pop('kwargs', None) if not include_properties: arguments.pop('properties_list', None) arguments.pop('image_properties', None) arguments.pop('label_properties', None) arguments.pop('prediction_properties', None) return arguments
Return object __dict__ variables that were passed through the constructor (__init__ method). Parameters ---------- obj : object include_defaults : bool, default False whether to include vars with default value or not Returns ------- Dict[Any, Any] subset of the obj __dict__
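A small illustration, assuming `initvars` and its `extract_signature` helper are in scope; `Dummy` is a made-up class.

class Dummy:
    def __init__(self, threshold, n_bins=10):
        self.threshold = threshold
        self.n_bins = n_bins

initvars(Dummy(0.5))                          # {'threshold': 0.5} - arguments left at their defaults are dropped
initvars(Dummy(0.5), include_defaults=True)   # {'threshold': 0.5, 'n_bins': 10}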
657
from typing import Sequence, Tuple, Union import numpy as np from deepchecks.core.errors import DeepchecksValueError EPS = 0.001 class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass The provided code snippet includes necessary dependencies for implementing the `iqr_outliers_range` function. Write a Python function `def iqr_outliers_range(data: np.ndarray, iqr_range: Tuple[int, int], scale: float, sharp_drop_ratio: float = 0.9) -> Tuple[float, float]` to solve the following problem: Calculate outliers range on the data given using IQR. Parameters ---------- data: np.ndarray Data to calculate outliers range for. iqr_range: Tuple[int, int] Two percentiles which define the IQR range scale: float The scale to multiply the IQR range for the outliers' detection. When the percentiles values are the same (When many samples have the same value), the scale will be modified based on the closest element to the percentiles values and the `sharp_drop_ratio` parameter. sharp_drop_ratio: float, default : 0.9 A threshold for the sharp drop outliers detection. When more than `sharp_drop_ratio` of the data contain the same value the rest will be considered as outliers. Also used to normalize the scale in case the percentiles values are the same. Returns ------- Tuple[float, float] Tuple of lower limit and upper limit of outliers range Here is the function: def iqr_outliers_range(data: np.ndarray, iqr_range: Tuple[int, int], scale: float, sharp_drop_ratio: float = 0.9) -> Tuple[float, float]: """Calculate outliers range on the data given using IQR. Parameters ---------- data: np.ndarray Data to calculate outliers range for. iqr_range: Tuple[int, int] Two percentiles which define the IQR range scale: float The scale to multiply the IQR range for the outliers' detection. When the percentiles values are the same (When many samples have the same value), the scale will be modified based on the closest element to the percentiles values and the `sharp_drop_ratio` parameter. sharp_drop_ratio: float, default : 0.9 A threshold for the sharp drop outliers detection. When more than `sharp_drop_ratio` of the data contain the same value the rest will be considered as outliers. Also used to normalize the scale in case the percentiles values are the same. Returns ------- Tuple[float, float] Tuple of lower limit and upper limit of outliers range """ if len(iqr_range) != 2 or any((x < 0 or x > 100 for x in iqr_range)) or all(x < 1 for x in iqr_range): raise DeepchecksValueError('IQR range must contain two numbers between 0 to 100') if scale < 1: raise DeepchecksValueError('IQR scale must be greater than 1') q1, q3 = np.percentile(data, sorted(iqr_range)) if q1 == q3: common_percent_in_total = np.sum(data == q1) / len(data) if common_percent_in_total > sharp_drop_ratio: return q1 - EPS, q1 + EPS else: closest_dist_to_common = min(np.abs(data[data != q1] - q1)) # modify the scale to be proportional to the percent of samples that have the same value # when many samples have the same value, the scale will be closer to sharp_drop_ratio scale = sharp_drop_ratio + ((scale - 1) * (1 - common_percent_in_total)) return q1 - (closest_dist_to_common * scale), q1 + (closest_dist_to_common * scale) else: iqr = q3 - q1 return q1 - scale * iqr, q3 + scale * iqr
Calculate outliers range on the data given using IQR. Parameters ---------- data: np.ndarray Data to calculate outliers range for. iqr_range: Tuple[int, int] Two percentiles which define the IQR range scale: float The scale to multiply the IQR range for the outliers' detection. When the percentiles values are the same (When many samples have the same value), the scale will be modified based on the closest element to the percentiles values and the `sharp_drop_ratio` parameter. sharp_drop_ratio: float, default : 0.9 A threshold for the sharp drop outliers detection. When more than `sharp_drop_ratio` of the data contain the same value the rest will be considered as outliers. Also used to normalize the scale in case the percentiles values are the same. Returns ------- Tuple[float, float] Tuple of lower limit and upper limit of outliers range
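A quick numeric sketch (sample data invented): with the classic 25/75 percentiles and scale 1.5 this reproduces the usual Tukey fences.

import numpy as np

data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100], dtype=float)
lower, upper = iqr_outliers_range(data, iqr_range=(25, 75), scale=1.5)
# Values outside [lower, upper] - here only the single 100 - would be flagged as outliers.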
658
from typing import Sequence, Tuple, Union import numpy as np from deepchecks.core.errors import DeepchecksValueError EPS = 0.001 class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass The provided code snippet includes necessary dependencies for implementing the `sharp_drop_outliers_range` function. Write a Python function `def sharp_drop_outliers_range(data_percents: Sequence, sharp_drop_ratio: float = 0.9, max_outlier_percentage: float = 0.05) -> Union[float, None]` to solve the following problem: Calculate outliers range on the data given using sharp drop. Parameters ---------- data_percents : np.ndarray Counts of data to calculate outliers range for. The data is assumed to be sorted from the most common to the least common. sharp_drop_ratio : float , default 0.9 The sharp drop threshold to use for the outliers detection. max_outlier_percentage : float , default 0.05 The maximum percentage of data that can be considered as "outliers". Here is the function: def sharp_drop_outliers_range(data_percents: Sequence, sharp_drop_ratio: float = 0.9, max_outlier_percentage: float = 0.05) -> Union[float, None]: """Calculate outliers range on the data given using sharp drop. Parameters ---------- data_percents : np.ndarray Counts of data to calculate outliers range for. The data is assumed to be sorted from the most common to the least common. sharp_drop_ratio : float , default 0.9 The sharp drop threshold to use for the outliers detection. max_outlier_percentage : float , default 0.05 The maximum percentage of data that can be considered as "outliers". """ if not 1 - EPS < sum(data_percents) < 1 + EPS: raise DeepchecksValueError('Data percents must sum to 1') for i in range(len(data_percents) - 1): if sum(data_percents[:i+1]) < 1 - max_outlier_percentage: continue if 1 - (data_percents[i + 1] / data_percents[i]) >= sharp_drop_ratio: return data_percents[i + 1] else: return None
Calculate outliers range on the data given using sharp drop. Parameters ---------- data_percents : np.ndarray Counts of data to calculate outliers range for. The data is assumed to be sorted from the most common to the least common. sharp_drop_ratio : float , default 0.9 The sharp drop threshold to use for the outliers detection. max_outlier_percentage : float , default 0.05 The maximum percentage of data that can be considered as "outliers".
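A small sketch with invented category frequencies (they must sum to 1 and be sorted from most to least common):

category_shares = [0.70, 0.28, 0.01, 0.01]
cutoff = sharp_drop_outliers_range(category_shares)
# cutoff == 0.01 here - the first category share after the sharp drop;
# the function returns None when no sufficiently sharp drop is found in the tail.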
659
import numpy as np def create_proba_result(predictions, classes): def prediction_to_proba(y_pred): proba = np.zeros(len(classes)) proba[classes.index(y_pred)] = 1 return proba return np.apply_along_axis(prediction_to_proba, axis=1, arr=predictions.reshape(-1, 1))
null
660
import base64 from deepchecks.utils.logger import get_logger import sys from io import StringIO import pkg_resources The provided code snippet includes necessary dependencies for implementing the `imagetag` function. Write a Python function `def imagetag(img: bytes) -> str` to solve the following problem: Return html image tag with embedded image. Here is the function: def imagetag(img: bytes) -> str: """Return html image tag with embedded image.""" png = base64.b64encode(img).decode('ascii') return f'<img src="data:image/png;base64,{png}"/>'
Return html image tag with embedded image.
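For example, assuming `plot.png` is an image file on disk (hypothetical path):

with open('plot.png', 'rb') as f:
    tag = imagetag(f.read())
# tag looks like '<img src="data:image/png;base64,iVBORw0..."/>' and can be embedded in an HTML report.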
661
import base64 from deepchecks.utils.logger import get_logger import sys from io import StringIO import pkg_resources def get_logger() -> logging.Logger: """Retutn the deepchecks logger.""" return _logger The provided code snippet includes necessary dependencies for implementing the `display_in_gui` function. Write a Python function `def display_in_gui(result)` to solve the following problem: Display suite result or check result in a new python gui window. Here is the function: def display_in_gui(result): """Display suite result or check result in a new python gui window.""" try: required = {'pyqt5', 'pyqtwebengine'} # List of all packages installed (key is always in all small case!) installed = {pkg.key for pkg in list(pkg_resources.working_set)} missing = required - installed if missing: get_logger().warning('Missing packages in order to display result in GUI. either run "pip install %s"' ' or use "result.save_as_html()" to save result', {' '.join(missing)}) return from PyQt5.QtWebEngineWidgets import QWebEngineView # pylint: disable=import-outside-toplevel from PyQt5.QtWidgets import QApplication # pylint: disable=import-outside-toplevel app = QApplication(sys.argv) web = QWebEngineView() web.setWindowTitle('deepchecks') web.setGeometry(0, 0, 1200, 1200) html_out = StringIO() result.save_as_html(html_out) web.setHtml(html_out.getvalue()) web.show() sys.exit(app.exec_()) except Exception: # pylint: disable=broad-except get_logger().error('Unable to show result, run in an interactive environment' ' or use "result.save_as_html()" to save result')
Display suite result or check result in a new python gui window.
662
import textwrap import typing as t from functools import wraps from deepchecks.utils.logger import get_logger INDENT = ' ' from typing import List def indent( text: t.Optional[str], indents: int = 1, prefix: bool = False ) -> str: if not text or not isinstance(text, str): return '' identation = ''.join((INDENT for _ in range(indents))) jointext = ''.join(('\n', identation)) output = jointext.join(text.split('\n')) return output if prefix is False else f'{identation}{output}'
null
663
import textwrap import typing as t from functools import wraps from deepchecks.utils.logger import get_logger F = t.TypeVar('F', bound=t.Callable[..., t.Any]) from typing import List def get_logger() -> logging.Logger: """Retutn the deepchecks logger.""" return _logger The provided code snippet includes necessary dependencies for implementing the `deprecate_kwarg` function. Write a Python function `def deprecate_kwarg( old_name: str, new_name: t.Optional[str] = None, ) -> t.Callable[[F], F]` to solve the following problem: Decorate a function with deprecated kwargs. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : Optional[str], default None Name of preferred argument in function. Here is the function: def deprecate_kwarg( old_name: str, new_name: t.Optional[str] = None, ) -> t.Callable[[F], F]: """Decorate a function with deprecated kwargs. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : Optional[str], default None Name of preferred argument in function. """ def _deprecate_kwarg(func: F) -> F: @wraps(func) def wrapper(*args, **kwargs) -> t.Callable[..., t.Any]: if old_name in kwargs and new_name in kwargs: raise TypeError( f'Can only specify {repr(old_name)} ' f'or {repr(new_name)}, not both' ) elif old_name in kwargs and new_name is None: get_logger().warning( 'the %s keyword is deprecated and ' 'will be removed in a future version. Please take ' 'steps to stop the use of %s', repr(old_name), repr(old_name) ) elif old_name in kwargs and new_name is not None: get_logger().warning( 'the %s keyword is deprecated, ' 'use %s instead', repr(old_name), repr(new_name) ) kwargs[new_name] = kwargs.pop(old_name) return func(*args, **kwargs) return t.cast(F, wrapper) return _deprecate_kwarg
Decorate a function with deprecated kwargs. Parameters ---------- old_name : str Name of the deprecated keyword argument in the function new_name : Optional[str], default None Name of the preferred keyword argument in the function.
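A sketch of how the decorator above is meant to be applied (the function and keyword names are invented), assuming `deprecate_kwarg` and deepchecks' `get_logger` are in scope.

@deprecate_kwarg('n_jobs', new_name='n_workers')
def run_checks(n_workers=1):
    return n_workers

run_checks(n_jobs=4)      # logs a deprecation warning, forwards the value to n_workers, returns 4
run_checks(n_workers=4)   # no warning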
664
import textwrap import typing as t from functools import wraps from deepchecks.utils.logger import get_logger from typing import List def get_routine_name(it: t.Any) -> str: if hasattr(it, '__qualname__'): return it.__qualname__ elif callable(it) or isinstance(it, type): return it.__name__ else: return type(it).__name__
null
665
from typing import Hashable, List import numpy as np import pandas as pd from deepchecks.utils.array_math import fast_sum_by_row def calculate_distance(vec1: np.array, vec2: np.array, range_per_feature: np.array) -> float: """Calculate distance between two vectors using Gower's method. Parameters ---------- vec1 : np.array First vector. vec2 : np.array Second vector. range_per_feature : np.array Range of each numeric feature or -1 for categorical. Returns ------- float representing Gower's distance between the two vectors. """ sum_dist = 0 num_features = 0 for col_index in range(len(vec1)): if range_per_feature[col_index] == -1: # categorical feature if pd.isnull(vec1[col_index]) and pd.isnull(vec2[col_index]): sum_dist += 0 elif (pd.isnull(vec1[col_index]) or pd.isnull(vec2[col_index])) or vec1[col_index] != vec2[col_index]: sum_dist += 1 num_features += 1 else: # numeric feature if pd.isnull(vec1[col_index]) or pd.isnull(vec2[col_index]): continue sum_dist += np.abs(vec1[col_index] - vec2[col_index]) / range_per_feature[col_index] num_features += 1 if num_features == 0: return np.nan return sum_dist / num_features The provided code snippet includes necessary dependencies for implementing the `gower_matrix` function. Write a Python function `def gower_matrix(data: np.ndarray, cat_features: np.array) -> np.ndarray` to solve the following problem: Calculate distance matrix for a dataset using Gower's method. Gowers distance is a measurement for distance between two samples. It returns the average of their distances per feature. For numeric features it calculates the absolute distance divide by the range of the feature. For categorical features it is an indicator whether the values are the same. See https://www.jstor.org/stable/2528823 for further details. In addition, it can deal with missing values. Note that this method is expensive in memory and requires keeping in memory a matrix of size data*data. Parameters ---------- data: numpy.ndarray Dataset matrix. cat_features: numpy.array Boolean array of representing which of the columns are categorical features. Returns ------- numpy.ndarray representing the distance matrix. Here is the function: def gower_matrix(data: np.ndarray, cat_features: np.array) -> np.ndarray: """ Calculate distance matrix for a dataset using Gower's method. Gowers distance is a measurement for distance between two samples. It returns the average of their distances per feature. For numeric features it calculates the absolute distance divide by the range of the feature. For categorical features it is an indicator whether the values are the same. See https://www.jstor.org/stable/2528823 for further details. In addition, it can deal with missing values. Note that this method is expensive in memory and requires keeping in memory a matrix of size data*data. Parameters ---------- data: numpy.ndarray Dataset matrix. cat_features: numpy.array Boolean array of representing which of the columns are categorical features. Returns ------- numpy.ndarray representing the distance matrix. """ if not isinstance(data, np.ndarray): data = np.asarray(data) feature_ranges = np.ones(data.shape[1]) * -1 feature_ranges[~cat_features] = np.nanmax(data[:, ~cat_features], axis=0) - np.nanmin(data[:, ~cat_features], axis=0) result = np.zeros((data.shape[0], data.shape[0])) for i in range(data.shape[0]): for j in range(i, data.shape[0]): value = calculate_distance(data[i, :], data[j, :], feature_ranges) result[i, j] = value result[j, i] = value return result
Calculate distance matrix for a dataset using Gower's method. Gower's distance is a measure of the distance between two samples. It returns the average of their distances per feature. For numeric features it calculates the absolute distance divided by the range of the feature. For categorical features it is an indicator of whether the values are the same. See https://www.jstor.org/stable/2528823 for further details. In addition, it can deal with missing values. Note that this method is expensive in memory and requires keeping in memory a matrix of size data*data. Parameters ---------- data: numpy.ndarray Dataset matrix. cat_features: numpy.array Boolean array representing which of the columns are categorical features. Returns ------- numpy.ndarray representing the distance matrix.
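A tiny numeric example (data invented), mixing one numeric and one categorical column and assuming `gower_matrix` and its `calculate_distance` helper are in scope.

import numpy as np

# Columns: [numeric 'age', categorical 'color' encoded as integers]
data = np.array([[10.0, 0],
                 [20.0, 0],
                 [30.0, 1]])
cat_features = np.array([False, True])

dist = gower_matrix(data, cat_features)
# dist[0, 1] == 0.25: numeric distance 10/20 = 0.5, categorical distance 0, averaged over the 2 features.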
666
from typing import Hashable, List import numpy as np import pandas as pd from deepchecks.utils.array_math import fast_sum_by_row def _calculate_distances_to_sample(categorical_sample: np.ndarray, numeric_sample: np.ndarray, cat_data: np.ndarray, numeric_data: np.ndarray, numeric_feature_ranges: np.ndarray, num_features: int): """ Calculate Gower's distance between a single sample to the rest of the samples in the dataset. Parameters ---------- categorical_sample The categorical features part of the sample to compare to the rest of the samples. numeric_sample The numeric features part of the sample to compare to the rest of the samples. cat_data The categorical features part of the dataset(after preprocessing). numeric_data The numeric features part of the dataset(after preprocessing). numeric_feature_ranges The range sizes of each numerical feature. num_features The total number of features in the dataset. Returns ------- numpy.ndarray The distances to the rest of the samples. """ numeric_feat_dist_to_sample = numeric_data - numeric_sample np.abs(numeric_feat_dist_to_sample, out=numeric_feat_dist_to_sample) # if a numeric feature value is null for one of the two samples, the distance over it is ignored null_dist_locations = np.logical_or(numeric_feat_dist_to_sample == np.inf, numeric_feat_dist_to_sample == np.nan) null_numeric_features_per_sample = fast_sum_by_row(null_dist_locations) numeric_feat_dist_to_sample[null_dist_locations] = 0 numeric_feat_dist_to_sample = numeric_feat_dist_to_sample.astype('float64') np.divide(numeric_feat_dist_to_sample, numeric_feature_ranges, out=numeric_feat_dist_to_sample) cat_feature_dist_to_sample = (cat_data - categorical_sample) != 0 dist_to_sample = fast_sum_by_row(cat_feature_dist_to_sample) + fast_sum_by_row(numeric_feat_dist_to_sample) return dist_to_sample / (-null_numeric_features_per_sample + num_features) # can have inf values class Hashable(Protocol): """Trait for any hashable type that also defines comparison operators.""" def __hash__(self) -> int: # noqa: D105 ... def __le__(self, __value) -> bool: # noqa: D105 ... def __lt__(self, __value) -> bool: # noqa: D105 ... def __ge__(self, __value) -> bool: # noqa: D105 ... def __gt__(self, __value) -> bool: # noqa: D105 ... def __eq__(self, __value) -> bool: # noqa: D105 ... The provided code snippet includes necessary dependencies for implementing the `calculate_nearest_neighbors_distances` function. Write a Python function `def calculate_nearest_neighbors_distances(data: pd.DataFrame, cat_cols: List[Hashable], numeric_cols: List[Hashable], num_neighbors: int, samples_to_calc_neighbors_for: pd.DataFrame = None)` to solve the following problem: Calculate distance matrix for a dataset using Gower's method. Gowers distance is a measurement for distance between two samples. It returns the average of their distances per feature. For numeric features it calculates the absolute distance divide by the range of the feature. For categorical features it is an indicator whether the values are the same. See https://www.jstor.org/stable/2528823 for further details. This method minimizes memory usage by saving in memory and returning only the closest neighbors of each sample. In addition, it can deal with missing values. Parameters ---------- data: pd.DataFrame DataFrame including all cat_cols: List[Hashable] List of categorical columns in the data. numeric_cols: List[Hashable] List of numerical columns in the data. num_neighbors: int Number of neighbors to return. 
For example, for n=2 for each sample returns the distances to the two closest samples in the dataset. samples_to_calc_neighbors_for: pd.DataFrame, default None Samples for which to calculate nearest neighbors. If None, calculates for all given samples in data. These samples do not have to exist in data, but must share all relevant features. Returns ------- numpy.ndarray representing the distance matrix to the nearest neighbors. numpy.ndarray representing the indexes of the nearest neighbors. Here is the function: def calculate_nearest_neighbors_distances(data: pd.DataFrame, cat_cols: List[Hashable], numeric_cols: List[Hashable], num_neighbors: int, samples_to_calc_neighbors_for: pd.DataFrame = None): """ Calculate distance matrix for a dataset using Gower's method. Gowers distance is a measurement for distance between two samples. It returns the average of their distances per feature. For numeric features it calculates the absolute distance divide by the range of the feature. For categorical features it is an indicator whether the values are the same. See https://www.jstor.org/stable/2528823 for further details. This method minimizes memory usage by saving in memory and returning only the closest neighbors of each sample. In addition, it can deal with missing values. Parameters ---------- data: pd.DataFrame DataFrame including all cat_cols: List[Hashable] List of categorical columns in the data. numeric_cols: List[Hashable] List of numerical columns in the data. num_neighbors: int Number of neighbors to return. For example, for n=2 for each sample returns the distances to the two closest samples in the dataset. samples_to_calc_neighbors_for: pd.DataFrame, default None Samples for which to calculate nearest neighbors. If None, calculates for all given samples in data. These samples do not have to exist in data, but must share all relevant features. Returns ------- numpy.ndarray representing the distance matrix to the nearest neighbors. numpy.ndarray representing the indexes of the nearest neighbors. 
""" num_samples = data.shape[0] if samples_to_calc_neighbors_for is not None: data = pd.concat([data, samples_to_calc_neighbors_for]) num_indices_to_calc = samples_to_calc_neighbors_for.shape[0] else: num_indices_to_calc = data.shape[0] cat_data = data[cat_cols] numeric_data = data[numeric_cols] num_features = len(cat_cols + numeric_cols) distances, indexes = np.zeros((num_indices_to_calc, num_neighbors)), np.zeros((num_indices_to_calc, num_neighbors)) # handle categorical - transform to an ordinal numpy array cat_data = np.asarray(cat_data.apply(lambda x: pd.factorize(x)[0])) if not cat_data.empty else np.asarray(cat_data) # handle numerical - calculate ranges per feature and fill numerical nan to minus np.inf numeric_data = np.asarray(numeric_data.fillna(value=np.nan).astype('float64')) numeric_feature_ranges = np.nanmax(numeric_data, axis=0) - np.nanmin(numeric_data, axis=0) numeric_feature_ranges = np.where(numeric_feature_ranges == 0, 1, numeric_feature_ranges) numeric_data = np.nan_to_num(numeric_data, nan=np.inf) # do not warn on operations that include usage of math involving inf original_error_state = np.geterr()['invalid'] np.seterr(invalid='ignore') if samples_to_calc_neighbors_for is not None: numeric_samples_to_calc_neighbors_for = numeric_data[num_samples:] cat_samples_to_calc_neighbors_for = cat_data[num_samples:] numeric_data = numeric_data[:num_samples] cat_data = cat_data[:num_samples] else: numeric_samples_to_calc_neighbors_for = numeric_data cat_samples_to_calc_neighbors_for = cat_data for i in range(num_indices_to_calc): # TODO: parallelize this loop numeric_sample_i = numeric_samples_to_calc_neighbors_for[i, :] cat_sample_i = cat_samples_to_calc_neighbors_for[i, :] dist_to_sample_i = _calculate_distances_to_sample( categorical_sample=cat_sample_i, numeric_sample=numeric_sample_i, cat_data=cat_data, numeric_data=numeric_data, numeric_feature_ranges=numeric_feature_ranges, num_features=num_features ) # sort to find the closest samples (including self) min_dist_indexes = np.argpartition(dist_to_sample_i, num_neighbors)[:num_neighbors] min_dist_indexes_ordered = sorted(min_dist_indexes, key=lambda x, arr=dist_to_sample_i: arr[x], reverse=False) indexes[i, :] = min_dist_indexes_ordered distances[i, :] = dist_to_sample_i[min_dist_indexes_ordered] np.seterr(invalid=original_error_state) return np.nan_to_num(distances, nan=np.nan, posinf=np.nan, neginf=np.nan), indexes
Calculate distance matrix for a dataset using Gower's method. Gower's distance is a measure of distance between two samples. It returns the average of their distances per feature. For numeric features it calculates the absolute distance divided by the range of the feature. For categorical features it is an indicator of whether the values are the same. See https://www.jstor.org/stable/2528823 for further details. This method minimizes memory usage by saving in memory and returning only the closest neighbors of each sample. In addition, it can deal with missing values. Parameters ---------- data: pd.DataFrame DataFrame including all relevant columns. cat_cols: List[Hashable] List of categorical columns in the data. numeric_cols: List[Hashable] List of numerical columns in the data. num_neighbors: int Number of neighbors to return. For example, for num_neighbors=2 the function returns, for each sample, the distances to the two closest samples in the dataset. samples_to_calc_neighbors_for: pd.DataFrame, default None Samples for which to calculate nearest neighbors. If None, calculates for all given samples in data. These samples do not have to exist in data, but must share all relevant features. Returns ------- numpy.ndarray representing the distance matrix to the nearest neighbors. numpy.ndarray representing the indexes of the nearest neighbors.
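A toy usage sketch of the function defined above. The data values and column names are made up, and it assumes deepchecks is installed so the snippet's own imports (e.g. fast_sum_by_row) resolve and calculate_nearest_neighbors_distances is in scope:

import pandas as pd

df = pd.DataFrame({'height': [1.6, 1.7, 1.9, 1.5], 'color': ['r', 'r', 'b', 'b']})
distances, indexes = calculate_nearest_neighbors_distances(
    data=df, cat_cols=['color'], numeric_cols=['height'], num_neighbors=2)
# Both outputs have shape (4, 2): two nearest neighbors (including self) per sample.
print(distances.shape, indexes.shape)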
667
import typing as t import numpy as np import pandas as pd from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_numeric_dtype from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.type_inference import infer_categorical_features from deepchecks.utils.typing import Hashable from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence def default_fill_na_series(col: pd.Series, is_cat_column: t.Optional[bool] = None) -> t.Optional[pd.Series]: """Fill NaN values based on column type if possible otherwise returns None.""" if is_cat_column and 'None' not in col.astype('object').dropna().unique(): return col.astype('object').fillna('None') elif is_numeric_dtype(col): return col.astype('float64').fillna(np.nan) common_values_list = col.mode() if isinstance(common_values_list, pd.Series) and len(common_values_list) > 0: return col.fillna(common_values_list[0]) return None from typing import List def infer_categorical_features( df: pd.DataFrame, max_categorical_ratio: float = 0.01, max_categories: int = None, columns: t.Optional[t.List[Hashable]] = None, ) -> t.List[Hashable]: """Infers which features are categorical by checking types and number of unique values. Parameters ---------- df : pd.DataFrame dataframe for which to infer categorical features max_categorical_ratio : float , default: 0.01 max_categories : int , default: None columns : t.Optional[t.List[Hashable]] , default: None Returns ------- List[Hashable] list of categorical features """ categorical_dtypes = df.select_dtypes(include='category') if len(categorical_dtypes.columns) > 0: return list(categorical_dtypes.columns) if columns is not None: dataframe_columns = ensure_hashable_or_mutable_sequence(columns) else: dataframe_columns = df.columns if max_categories is None: return [ column for column in dataframe_columns if is_categorical( t.cast(pd.Series, df[column]), max_categorical_ratio)] else: return [ column for column in dataframe_columns if is_categorical( t.cast(pd.Series, df[column]), max_categorical_ratio, max_categories, max_categories, max_categories)] The provided code snippet includes necessary dependencies for implementing the `default_fill_na_per_column_type` function. Write a Python function `def default_fill_na_per_column_type(df: pd.DataFrame, cat_features: t.Optional[t.Union[pd.Series, t.List]]) -> pd.DataFrame` to solve the following problem: Fill NaN values per column type. Here is the function: def default_fill_na_per_column_type(df: pd.DataFrame, cat_features: t.Optional[t.Union[pd.Series, t.List]]) \ -> pd.DataFrame: """Fill NaN values per column type.""" pd.set_option('mode.chained_assignment', None) if cat_features is None: cat_features = infer_categorical_features(df) result = {} for col_name in df.columns: modified_col = default_fill_na_series(df[col_name], col_name in cat_features) if modified_col is not None: result[col_name] = modified_col return pd.DataFrame(result, index=df.index)
Fill NaN values per column type.
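A toy illustration of the per-type filling policy described above (categorical NaNs become the string 'None', numeric columns are cast to float64 and keep NaN); column names and values are made up:

import numpy as np
import pandas as pd

df = pd.DataFrame({'city': ['TLV', None, 'NYC'], 'age': [30, None, 25]})

city_filled = df['city'].astype('object').fillna('None')  # categorical: fill with 'None'
age_filled = df['age'].astype('float64').fillna(np.nan)   # numeric: keep NaN as float
print(pd.DataFrame({'city': city_filled, 'age': age_filled}, index=df.index))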
668
import typing as t import numpy as np import pandas as pd from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_numeric_dtype from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.type_inference import infer_categorical_features from deepchecks.utils.typing import Hashable from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence The provided code snippet includes necessary dependencies for implementing the `floatify_series` function. Write a Python function `def floatify_series(ser: pd.Series)` to solve the following problem: Return a series that if the type is int converted to float. Parameters ---------- ser : pd.Series series to convert Raises ------ pd.Series the converted series Here is the function: def floatify_series(ser: pd.Series): """Return a series that if the type is int converted to float. Parameters ---------- ser : pd.Series series to convert Raises ------ pd.Series the converted series """ if is_integer_dtype(ser): ser = ser.astype(float) return ser
Return the series converted to float if its dtype is int, otherwise return it unchanged. Parameters ---------- ser : pd.Series series to convert Returns ------- pd.Series the converted series
669
import typing as t import numpy as np import pandas as pd from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_numeric_dtype from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.type_inference import infer_categorical_features from deepchecks.utils.typing import Hashable from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence from typing import List The provided code snippet includes necessary dependencies for implementing the `generalized_corrwith` function. Write a Python function `def generalized_corrwith(x1: pd.DataFrame, x2: pd.DataFrame, method: t.Callable)` to solve the following problem: Compute pairwise correlation. Pairwise correlation is computed between columns of one DataFrame with columns of another DataFrame. Pandas' method corrwith only applies when both dataframes have the same column names, this generalized method applies to any two Dataframes with the same number of rows, regardless of the column names. Parameters ---------- x1: DataFrame Left data frame to compute correlations. x2: Dataframe Right data frame to compute correlations. method: Callable Method of correlation. callable with input two 1d ndarrays and returning a float. Returns ------- DataFrame Pairwise correlations, the index matches the columns of x1 and the columns match the columns of x2. Here is the function: def generalized_corrwith(x1: pd.DataFrame, x2: pd.DataFrame, method: t.Callable): """ Compute pairwise correlation. Pairwise correlation is computed between columns of one DataFrame with columns of another DataFrame. Pandas' method corrwith only applies when both dataframes have the same column names, this generalized method applies to any two Dataframes with the same number of rows, regardless of the column names. Parameters ---------- x1: DataFrame Left data frame to compute correlations. x2: Dataframe Right data frame to compute correlations. method: Callable Method of correlation. callable with input two 1d ndarrays and returning a float. Returns ------- DataFrame Pairwise correlations, the index matches the columns of x1 and the columns match the columns of x2. """ corr_results = x2.apply(lambda col: x1.corrwith(col, method=method)) return corr_results
Compute pairwise correlation. Pairwise correlation is computed between the columns of one DataFrame and the columns of another DataFrame. Pandas' corrwith method only applies when both DataFrames have the same column names; this generalized method applies to any two DataFrames with the same number of rows, regardless of the column names. Parameters ---------- x1: DataFrame Left data frame to compute correlations. x2: DataFrame Right data frame to compute correlations. method: Callable Method of correlation: a callable taking two 1-d ndarrays as input and returning a float. Returns ------- DataFrame Pairwise correlations; the index matches the columns of x1 and the columns match the columns of x2.
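A small usage sketch of the same apply/corrwith pattern with a plain callable (toy data; assumes a pandas version where corrwith accepts a callable, as the docstring above describes):

import numpy as np
import pandas as pd

x1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]})
x2 = pd.DataFrame({'c': [1, 2, 3, 5], 'd': [2, 2, 3, 3]})

def pearson(u, v):
    # callable with input two 1d ndarrays and returning a float
    return float(np.corrcoef(u, v)[0, 1])

result = x2.apply(lambda col: x1.corrwith(col, method=pearson))
print(result)  # index: columns of x1, columns: columns of x2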
670
import typing as t import numpy as np import pandas as pd from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_numeric_dtype from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.type_inference import infer_categorical_features from deepchecks.utils.typing import Hashable from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence The provided code snippet includes necessary dependencies for implementing the `is_float_column` function. Write a Python function `def is_float_column(col: pd.Series) -> bool` to solve the following problem: Check if a column must be a float - meaning does it contain fractions. Parameters ---------- col : pd.Series The column to check. Returns ------- bool True if the column is float, False otherwise. Here is the function: def is_float_column(col: pd.Series) -> bool: """Check if a column must be a float - meaning does it contain fractions. Parameters ---------- col : pd.Series The column to check. Returns ------- bool True if the column is float, False otherwise. """ if not is_float_dtype(col): return False return (col.round() != col).any()
Check if a column must be a float - meaning whether it contains fractional values. Parameters ---------- col : pd.Series The column to check. Returns ------- bool True if the column contains fractions and must be float, False otherwise.
671
import typing as t import numpy as np import pandas as pd from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_numeric_dtype from deepchecks.core.errors import DeepchecksValueError from deepchecks.utils.type_inference import infer_categorical_features from deepchecks.utils.typing import Hashable from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence The provided code snippet includes necessary dependencies for implementing the `cast_categorical_to_object_dtype` function. Write a Python function `def cast_categorical_to_object_dtype(df: pd.DataFrame) -> pd.DataFrame` to solve the following problem: Cast categorical columns to the object dtype. Here is the function: def cast_categorical_to_object_dtype(df: pd.DataFrame) -> pd.DataFrame: """Cast categorical columns to the object dtype.""" # NOTE: # pandas have bug with groupby on category dtypes, # so until it fixed, change dtypes manually categorical_columns = df.dtypes[df.dtypes == 'category'].index.tolist() if categorical_columns: df = df.astype({c: 'object' for c in categorical_columns}) return df
Cast categorical columns to the object dtype.
672
from typing import Union from sklearn.pipeline import Pipeline from deepchecks.utils.typing import BasicModel class BasicModel(Protocol): """Traits of a model that are necessary for deepchecks.""" def predict(self, X) -> List[Hashable]: """Predict on given X.""" ... The provided code snippet includes necessary dependencies for implementing the `get_model_of_pipeline` function. Write a Python function `def get_model_of_pipeline(model: Union[Pipeline, BasicModel])` to solve the following problem: Return the model of a given Pipeline or itself if a BaseEstimator is given. Parameters ---------- model : Union[Pipeline, BasicModel] a Pipeline or a BasicModel Returns ------- Union[Pipeline, BasicModel] the inner BaseEstimator of the Pipeline or itself Here is the function: def get_model_of_pipeline(model: Union[Pipeline, BasicModel]): """Return the model of a given Pipeline or itself if a BaseEstimator is given. Parameters ---------- model : Union[Pipeline, BasicModel] a Pipeline or a BasicModel Returns ------- Union[Pipeline, BasicModel] the inner BaseEstimator of the Pipeline or itself """ if isinstance(model, Pipeline): # get model type from last step in pipeline return model.steps[-1][1] return model
Return the model of a given Pipeline or itself if a BaseEstimator is given. Parameters ---------- model : Union[Pipeline, BasicModel] a Pipeline or a BasicModel Returns ------- Union[Pipeline, BasicModel] the inner BaseEstimator of the Pipeline or itself
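A short usage sketch of the "last step holds the estimator" convention (assumes scikit-learn is installed; the pipeline steps are made up):

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
# The last step of the pipeline holds the actual estimator.
print(pipe.steps[-1][1])  # LogisticRegression()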
673
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable def generate_check_docs_link(check): """Create from check object a link to its example page in the docs.""" if not isinstance(check, core.BaseCheck): return '' module_path = type(check).__module__ # NOTE: # it is better to import deepchecks.tabular.checks, deepchecks.vision.checks # to be sure that those packages actually exists and we are using right names, # but we do not know with what set of extra dependencies deepchecks was # installed, therefore we do not want to cause ImportError. # Refer to the setup.py for more understanding if not ( module_path.startswith('deepchecks.tabular.checks') or module_path.startswith('deepchecks.vision.checks') or module_path.startswith('deepchecks.nlp.checks') ): # not builtin check, cannot generate link to the docs return '' link_postfix = '.html?utm_source=display_output&utm_medium=referral&utm_campaign=check_link' # compare check full name and link to the notebook to # understand how link is formatted: # # - deepchecks.tabular.checks.integrity.StringMismatchComparison # - https://docs.deepchecks.com/{version}/tabular/auto_checks/integrity/plot_string_mismatch_comparison.html # noqa: E501 # pylint: disable=line-too-long # Remove 'deepchecks' from the start and 'checks' from the middle _, subpackage, _, module, file = type(check).__module__.split('.') return f'{get_docs_link()}{subpackage}/auto_checks/{module}/plot_{file}{link_postfix}' The provided code snippet includes necessary dependencies for implementing the `get_docs_summary` function. Write a Python function `def get_docs_summary(obj, with_doc_link: bool = True)` to solve the following problem: Return the docs summary if available. Parameters ---------- obj an object with_doc_link : bool , default: True if to add doc link Returns ------- str the object summary. Here is the function: def get_docs_summary(obj, with_doc_link: bool = True): """Return the docs summary if available. Parameters ---------- obj an object with_doc_link : bool , default: True if to add doc link Returns ------- str the object summary. """ if hasattr(obj.__class__, '__doc__'): docs = obj.__class__.__doc__ or '' # Take first non-whitespace line. summary = next((s for s in docs.split('\n') if not re.match('^\\s*$', s)), '') if with_doc_link: link = generate_check_docs_link(obj) summary += f' <a href="{link}" target="_blank">Read More...</a>' return summary return ''
Return the docs summary if available. Parameters ---------- obj an object with_doc_link : bool , default: True if to add doc link Returns ------- str the object summary.
674
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable def widget_to_html( widget: Widget, html_out: t.Union[str, io.TextIOWrapper], title: str = '', requirejs: bool = True, connected: bool = True, full_html: bool = True ): """Save widget as html file. Parameters ---------- widget: Widget The widget to save as html. html_out: filename or file-like object The file to write the HTML output to. title: str , default: None The title of the html file. requirejs: bool , default: True If to save with all javascript dependencies. connected : bool, default True whether to use CDN or not full_html: bool, default True whether to return full html page or not """ state = dependency_state(widget) data = embed_data(views=[widget], drop_defaults=True, state=state) snippet = snippet_template.format( load='', # will be added below json_data=escape_script(json.dumps(data['manager_state'], default=json_encoder)), widget_views='\n'.join( widget_view_template.format(view_spec=escape_script(json.dumps(view_spec, default=json_encoder))) for view_spec in data['view_specs'] ) ) template = suite_template(full_html=full_html) html = template.replace('$Title', title).replace('$WidgetSnippet', snippet) # if connected is True widgets js library will load jupyterlab-plotly by itself jupyterlab_plotly_lib = jupyterlab_plotly_script(False) if connected is False else '' requirejs_lib = requirejs_script(connected) if requirejs else '' widgetsjs_lib = widgets_script(connected, amd_module=requirejs) tags = f'{requirejs_lib}{jupyterlab_plotly_lib}{widgetsjs_lib}' html = html.replace('$WidgetJavascript', tags) if isinstance(html_out, str): with open(html_out, 'w', encoding='utf-8') as f: f.write(html) elif isinstance(html_out, (io.TextIOBase, io.TextIOWrapper)): html_out.write(html) else: name = type(html_out).__name__ raise TypeError(f'Unsupported type of "html_out" parameter - {name}') The provided code snippet includes necessary dependencies for implementing the `widget_to_html_string` function. Write a Python function `def widget_to_html_string( widget: Widget, title: str = '', requirejs: bool = True, connected: bool = True, full_html: bool = True, ) -> str` to solve the following problem: Transform widget into html string. Parameters ---------- widget: Widget The widget to save as html. title: str The title of the html file. requirejs: bool , default: True If to save with all javascript dependencies connected : bool, default True whether to use CDN or not full_html: bool, default True whether to return full html page or not Returns ------- str Here is the function: def widget_to_html_string( widget: Widget, title: str = '', requirejs: bool = True, connected: bool = True, full_html: bool = True, ) -> str: """Transform widget into html string. Parameters ---------- widget: Widget The widget to save as html. title: str The title of the html file. 
requirejs: bool , default: True If to save with all javascript dependencies connected : bool, default True whether to use CDN or not full_html: bool, default True whether to return full html page or not Returns ------- str """ buffer = io.StringIO() widget_to_html( widget=widget, html_out=buffer, title=title, requirejs=requirejs, connected=connected, full_html=full_html ) buffer.seek(0) return buffer.getvalue()
Transform widget into html string. Parameters ---------- widget: Widget The widget to save as html. title: str The title of the html file. requirejs: bool , default: True If to save with all javascript dependencies connected : bool, default True whether to use CDN or not full_html: bool, default True whether to return full html page or not Returns ------- str
675
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable The provided code snippet includes necessary dependencies for implementing the `is_string_column` function. Write a Python function `def is_string_column(column: pd.Series) -> bool` to solve the following problem: Determine whether a pandas series is string type. Here is the function: def is_string_column(column: pd.Series) -> bool: """Determine whether a pandas series is string type.""" if is_numeric_dtype(column): return False try: pd.to_numeric(column) return False except ValueError: return True # Non string objects like pd.Timestamp results in TypeError except TypeError: return False
Determine whether a pandas series is string type.
676
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable def split_camel_case(string: str) -> str: """Split string where there are capital letters and enter space instead. Parameters ---------- string : str string to change """ return ' '.join(re.findall('[A-Z][^A-Z]*', string)) The provided code snippet includes necessary dependencies for implementing the `to_snake_case` function. Write a Python function `def to_snake_case(value: str) -> str` to solve the following problem: Transform camel case indentifier into snake case. Parameters ---------- value : str string to transform Returns ------- str transformed value Here is the function: def to_snake_case(value: str) -> str: """Transform camel case indentifier into snake case. Parameters ---------- value : str string to transform Returns ------- str transformed value """ return split_camel_case(value).strip().replace(' ', '_')
Transform a camel case identifier into snake case. Parameters ---------- value : str string to transform Returns ------- str transformed value
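A self-contained sketch of the split_camel_case + to_snake_case logic shown above (the helper name here is hypothetical):

import re

def to_snake_case_sketch(value):
    # Split on capital letters, join with spaces, then replace spaces with underscores.
    return ' '.join(re.findall('[A-Z][^A-Z]*', value)).strip().replace(' ', '_')

print(to_snake_case_sketch('FeatureDrift'))  # Feature_Drift (capitalization is preserved)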
677
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable def string_baseform(string: str, allow_empty_result: bool = False) -> str: """Normalize the string input to a uniform form. If input is a string containing alphanumeric characters or if allow_empty_result is set to True, removes all non-alphanumeric characters and convert characters to lower form. Parameters ---------- allow_empty_result : bool , default : False bool indicating whether to return empty result if no alphanumeric characters are present or the original input string : str string to remove special characters from Returns ------- str original input if condition is not met or lower form alphanumeric characters of input. """ if not isinstance(string, str): return string lower_alphanumeric_form = string.translate(DEL_MAP).lower() if len(lower_alphanumeric_form) > 0 or allow_empty_result: return lower_alphanumeric_form else: return string from typing import List The provided code snippet includes necessary dependencies for implementing the `get_base_form_to_variants_dict` function. Write a Python function `def get_base_form_to_variants_dict(uniques: t.Iterable[str]) -> t.Dict[str, t.Set[str]]` to solve the following problem: Create dict of base-form of the uniques to their values. function gets a set of strings, and returns a dictionary of shape Dict[str, Set] the key being the "base_form" (a clean version of the string), and the value being a set of all existing original values. This is done using the StringCategory class. Here is the function: def get_base_form_to_variants_dict(uniques: t.Iterable[str]) -> t.Dict[str, t.Set[str]]: """Create dict of base-form of the uniques to their values. function gets a set of strings, and returns a dictionary of shape Dict[str, Set] the key being the "base_form" (a clean version of the string), and the value being a set of all existing original values. This is done using the StringCategory class. """ base_form_to_variants = defaultdict(set) for item in uniques: base_form_to_variants[string_baseform(item)].add(item) return base_form_to_variants
Create dict of base-form of the uniques to their values. The function gets a set of strings and returns a dictionary of shape Dict[str, Set]: the key is the "base_form" (a clean version of the string) and the value is a set of all existing original values. The base form is computed with string_baseform.
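A toy sketch of the grouping described above; the base form here is approximated by dropping non-alphanumeric characters and lowercasing (the real string_baseform uses a translation table):

from collections import defaultdict

values = ['Deep!', 'deep', ' deep ', 'checks']
base_form_to_variants = defaultdict(set)
for item in values:
    base = ''.join(ch for ch in item if ch.isalnum()).lower()
    base_form_to_variants[base].add(item)

print(dict(base_form_to_variants))  # 'deep' maps to {'Deep!', 'deep', ' deep '}, 'checks' to {'checks'}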
678
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable def str_min_find(s: str, substr_list: t.Iterable[str]) -> t.Tuple[int, str]: """ Find the minimal first occurence of a substring in a string, and return both the index and substring. Parameters ---------- s : str The string in which we look for substrings substr_list : t.Iterable[str] list of substrings to find Returns ------- min_find : int index of minimal first occurence of substring min_substr : str the substring that occures in said index """ min_find = -1 min_substr = '' for substr in substr_list: first_find = s.find(substr) if first_find != -1 and (first_find < min_find or min_find == -1): min_find = first_find min_substr = substr return min_find, min_substr from typing import List The provided code snippet includes necessary dependencies for implementing the `split_and_keep` function. Write a Python function `def split_and_keep(s: str, separators: t.Union[str, t.Iterable[str]]) -> t.List[str]` to solve the following problem: Split string by another substring into a list. Like str.split(), but keeps the separator occurrences in the list. Parameters ---------- s : str the string to split separators : t.Union[str, t.Iterable[str]] the substring to split by Returns ------- t.List[str] list of substrings, including the separator occurrences in string Here is the function: def split_and_keep(s: str, separators: t.Union[str, t.Iterable[str]]) -> t.List[str]: """ Split string by another substring into a list. Like str.split(), but keeps the separator occurrences in the list. Parameters ---------- s : str the string to split separators : t.Union[str, t.Iterable[str]] the substring to split by Returns ------- t.List[str] list of substrings, including the separator occurrences in string """ if isinstance(separators, str): separators = [separators] split_s = [] while len(s) != 0: i, substr = str_min_find(s=s, substr_list=separators) if i == 0: split_s.append(substr) s = s[len(substr):] elif i == -1: split_s.append(s) break else: pre, _ = s.split(substr, 1) split_s.append(pre) s = s[len(pre):] return split_s
Split string by another substring into a list. Like str.split(), but keeps the separator occurrences in the list. Parameters ---------- s : str the string to split separators : t.Union[str, t.Iterable[str]] the substring to split by Returns ------- t.List[str] list of substrings, including the separator occurrences in string
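For comparison, the standard library gives the same "split but keep separators" behavior when the separators can be written as a regex capturing group (a sketch, not the function above):

import re

print(re.split('([,;])', 'a,b;c'))  # ['a', ',', 'b', ';', 'c']
# split_and_keep('a,b;c', [',', ';']) from the snippet above returns the same list.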
679
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable from typing import List The provided code snippet includes necessary dependencies for implementing the `split_by_order` function. Write a Python function `def split_by_order(s: str, separators: t.Iterable[str], keep: bool = True) -> t.List[str]` to solve the following problem: Split string by a list of substrings, each used once as a separator. Parameters ---------- s : str the string to split separators : t.Iterable[str] list of substrings to split by keep : bool , default: True whether to keep the separators in list as well. Default is True. Returns ------- t.List[str] list of substrings Here is the function: def split_by_order(s: str, separators: t.Iterable[str], keep: bool = True) -> t.List[str]: """ Split string by a list of substrings, each used once as a separator. Parameters ---------- s : str the string to split separators : t.Iterable[str] list of substrings to split by keep : bool , default: True whether to keep the separators in list as well. Default is True. Returns ------- t.List[str] list of substrings """ split_s = [] separators = list(copy(separators)) while len(s) != 0: if len(separators) > 0: sep = separators[0] if s.find(sep) == 0: if keep is True: split_s.append(sep) s = s[len(sep):] separators.pop(0) else: pre, _ = s.split(sep, 1) split_s.append(pre) s = s[len(pre):] else: split_s.append(s) break return split_s
Split string by a list of substrings, each used once as a separator. Parameters ---------- s : str the string to split separators : t.Iterable[str] list of substrings to split by keep : bool , default: True whether to keep the separators in list as well. Default is True. Returns ------- t.List[str] list of substrings
680
import io import itertools import json import os import random import re import sys import typing as t from collections import defaultdict from copy import copy from datetime import datetime from decimal import Decimal from string import ascii_uppercase, digits import numpy as np import pandas as pd from ipywidgets import Widget from ipywidgets.embed import dependency_state, embed_data, escape_script, snippet_template, widget_view_template from packaging.version import Version from pandas.core.dtypes.common import is_numeric_dtype import deepchecks from deepchecks import core from deepchecks.core.resources import jupyterlab_plotly_script, requirejs_script, suite_template, widgets_script from deepchecks.utils.typing import Hashable from typing import List The provided code snippet includes necessary dependencies for implementing the `format_datetime` function. Write a Python function `def format_datetime( value: t.Union[int, float, datetime], ) -> str` to solve the following problem: Format datetime object or timestamp value. Parameters ---------- value : Union[datetime, int, float] datetime (timestamp) to format Returns ------- str string representation of the provided value Raises ------ ValueError if unexpected value type was passed to the function Here is the function: def format_datetime( value: t.Union[int, float, datetime], ) -> str: """Format datetime object or timestamp value. Parameters ---------- value : Union[datetime, int, float] datetime (timestamp) to format Returns ------- str string representation of the provided value Raises ------ ValueError if unexpected value type was passed to the function """ if isinstance(value, datetime): datetime_value = value elif isinstance(value, (int, float)): datetime_value = datetime.fromtimestamp(value) else: raise ValueError(f'Unsupported value type - {type(value).__name__}') if datetime_value.hour == 0 and datetime_value.minute == 0 and datetime_value.second == 0: return datetime_value.strftime('%Y-%m-%d') elif (datetime_value.hour != 0 or datetime_value.minute != 0) and datetime_value.second == 0: return datetime_value.strftime('%Y-%m-%d %H:%M') else: return datetime_value.strftime('%Y-%m-%d %H:%M:%S')
Format datetime object or timestamp value. Parameters ---------- value : Union[datetime, int, float] datetime (timestamp) to format Returns ------- str string representation of the provided value Raises ------ ValueError if unexpected value type was passed to the function
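A small illustration of the formatting branches described above, using strftime directly on made-up datetimes:

from datetime import datetime

print(datetime(2023, 5, 1).strftime('%Y-%m-%d'))                # 2023-05-01 (midnight: date only)
print(datetime(2023, 5, 1, 14, 30).strftime('%Y-%m-%d %H:%M'))  # 2023-05-01 14:30 (no seconds)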
681
import typing as t from deepchecks import __version__ links = { 'default': { 'supported-metrics-by-string': 'https://docs.deepchecks.com/stable/general/guides/metrics_guide.html#list-of-supported-strings', # pylint: disable=line-too-long # noqa 'supported-prediction-format': 'https://docs.deepchecks.com/stable/tabular/usage_guides/supported_models.html#supported-tasks-and-predictions-format', # pylint: disable=line-too-long # noqa 'supported-predictions-format-nlp': 'https://docs.deepchecks.com/stable/nlp/usage_guides/supported_tasks.html#supported-labels-and-predictions-format', # pylint: disable=line-too-long # noqa }, # '0.0.1': {}, # noqa # '0.0.2': {}, # noqa } from typing import List __version__ = version('deepchecks') The provided code snippet includes necessary dependencies for implementing the `doclink` function. Write a Python function `def doclink( name: str, default_link: t.Optional[str] = None, template: t.Optional[str] = None, package_version: str = __version__ ) -> str` to solve the following problem: Get documentation link. Parameters ---------- name: str the name of the required link as appears in the links' dictionary. default_link: t.Optional[str], default: None default like to use if no link corresponding to name was found. template: t.Optional[str], default: None a string template in which to incorporate the link. package_version: str which version of the docs to use Returns ------- str The template text incorporated with the relevant link Here is the function: def doclink( name: str, default_link: t.Optional[str] = None, template: t.Optional[str] = None, package_version: str = __version__ ) -> str: """Get documentation link. Parameters ---------- name: str the name of the required link as appears in the links' dictionary. default_link: t.Optional[str], default: None default like to use if no link corresponding to name was found. template: t.Optional[str], default: None a string template in which to incorporate the link. package_version: str which version of the docs to use Returns ------- str The template text incorporated with the relevant link """ index = ( links[package_version] if package_version in links else (links.get('default') or {}) ) link = index.get(name) or default_link if link is None: return '' return ( link if template is None else template.format(link=link) )
Get documentation link. Parameters ---------- name: str the name of the required link as it appears in the links dictionary. default_link: t.Optional[str], default: None default link to use if no link corresponding to name was found. template: t.Optional[str], default: None a string template in which to incorporate the link. package_version: str which version of the docs to use Returns ------- str The template text incorporated with the relevant link
682
from typing import List, Optional import numpy as np import pandas as pd from deepchecks.core.errors import DeepchecksValueError The provided code snippet includes necessary dependencies for implementing the `calculate_neg_mse_per_sample` function. Write a Python function `def calculate_neg_mse_per_sample(labels, predictions, index=None) -> pd.Series` to solve the following problem: Calculate negative mean squared error per sample. Here is the function: def calculate_neg_mse_per_sample(labels, predictions, index=None) -> pd.Series: """Calculate negative mean squared error per sample.""" if index is None and isinstance(labels, pd.Series): index = labels.index return pd.Series([-(y - y_pred) ** 2 for y, y_pred in zip(labels, predictions)], index=index)
Calculate negative mean squared error per sample.
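A tiny worked example of the per-sample computation above (labels and predictions are made up):

import pandas as pd

labels = pd.Series([3.0, 5.0, 2.0])
predictions = [2.5, 6.0, 4.0]
neg_se = pd.Series([-(y - y_pred) ** 2 for y, y_pred in zip(labels, predictions)],
                   index=labels.index)
print(neg_se.tolist())  # [-0.25, -1.0, -4.0]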
683
from typing import List, Optional import numpy as np import pandas as pd from deepchecks.core.errors import DeepchecksValueError class DeepchecksValueError(DeepchecksBaseError): """Exception class that represent a fault parameter was passed to Deepchecks.""" pass The provided code snippet includes necessary dependencies for implementing the `calculate_neg_cross_entropy_per_sample` function. Write a Python function `def calculate_neg_cross_entropy_per_sample(labels, probas: np.ndarray, model_classes: Optional[List] = None, index=None, is_multilabel: bool = False, eps=1e-15) -> pd.Series` to solve the following problem: Calculate negative cross entropy per sample. Here is the function: def calculate_neg_cross_entropy_per_sample(labels, probas: np.ndarray, model_classes: Optional[List] = None, index=None, is_multilabel: bool = False, eps=1e-15) -> pd.Series: """Calculate negative cross entropy per sample.""" if not is_multilabel: if index is None and isinstance(labels, pd.Series): index = labels.index # transform categorical labels into integers if model_classes is not None: if any(x not in model_classes for x in labels): raise DeepchecksValueError( f'Label observed values {sorted(np.unique(labels))} contain values ' f'that are not found in the model classes: {model_classes}.') if probas.shape[1] != len(model_classes): raise DeepchecksValueError( f'Predicted probabilities shape {probas.shape} does not match the number of classes found in' f' the labels: {model_classes}.') labels = pd.Series(labels).apply(list(model_classes).index) num_samples, num_classes = probas.shape one_hot_labels = np.zeros((num_samples, num_classes)) one_hot_labels[list(np.arange(num_samples)), list(labels)] = 1 else: one_hot_labels = labels return pd.Series(np.sum(one_hot_labels * np.log(probas + eps), axis=1), index=index)
Calculate negative cross entropy per sample.
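A minimal single-label sketch of the same computation (two classes, toy probabilities); the result for each sample is the log of the probability assigned to its true class:

import numpy as np
import pandas as pd

probas = np.array([[0.9, 0.1], [0.2, 0.8]])
labels = pd.Series([0, 1])
one_hot = np.eye(2)[labels]          # one-hot encode the integer labels
eps = 1e-15
neg_ce = pd.Series(np.sum(one_hot * np.log(probas + eps), axis=1), index=labels.index)
print(neg_ce.round(3).tolist())      # [-0.105, -0.223]  (log(0.9), log(0.8))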
684
import matplotlib.pyplot as plt import numpy as np from matplotlib.cm import ScalarMappable from matplotlib.colors import LinearSegmentedColormap def shifted_color_map(cmap, start=0, midpoint=0.5, stop=1.0, name: str = 'shiftedcmap', transparent_from: float = None): """Offset the "center" of a colormap. Parameters ---------- cmap The matplotlib colormap to be altered start , default: 0 Offset from lowest point in the colormap's range. Should be between0.0 and 1.0. midpoint , default: 0.5 The new center of the colormap. Defaults to 0.5 (no shift). Should be between 0.0 and 1.0. In general, this should be 1 - vmax/(vmax + abs(vmin)) For example if your data range from -15.0 to +5.0 and you want the center of the colormap at 0.0, `midpoint` should be set to 1 - 5/(5 + 15)) or 0.75 stop , default: 1.0 Offset from highest point in the colormap's range. Should be between0.0 and 1.0. name: str , default: shiftedcmap transparent_from : float , default: None The point between start and stop where the colors will start being transparent. """ if transparent_from is None: transparent_from = stop cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []} # regular index to compute the colors reg_index = np.linspace(start, stop, 257) # shifted index to match the data shift_index = np.hstack( [np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True)]) for ri, si in zip(reg_index, shift_index): r, g, b, a = cmap(ri) cdict['red'].append((si, r, r)) cdict['green'].append((si, g, g)) cdict['blue'].append((si, b, b)) if transparent_from / midpoint < si: cdict['alpha'].append((si, 0.3, 0.3)) else: cdict['alpha'].append((si, a, a)) newcmap = LinearSegmentedColormap(name, cdict) plt.register_cmap(cmap=newcmap) return newcmap The provided code snippet includes necessary dependencies for implementing the `create_colorbar_barchart_for_check` function. Write a Python function `def create_colorbar_barchart_for_check(x: np.ndarray, y: np.ndarray, ylabel: str = 'Result', xlabel: str = 'Features', color_map: str = 'RdYlGn_r', start: float = 0, stop: float = 1.0, tick_steps: float = 0.1, color_label: str = 'Color', color_shift_midpoint: float = 0.5, check_name: str = '')` to solve the following problem: Output a colorbar barchart using matplotlib. Parameters ---------- x: np.ndarray array containing x axis data. y: np.ndarray array containing y axis data. ylabel: str , default: Result Name of y axis xlabel : str , default: Features Name of x axis color_map : str , default: RdYlGn_r color_map name. See https://matplotlib.org/stable/tutorials/colors/colormaps.html for more details start : float , default: 0 start of y axis ticks stop : float , default: 1.0 end of y axis ticks tick_steps : float , default: 0.1 step to y axis ticks color_shift_midpoint : float , default: 0.5 midpoint of color map check_name : str , default: '' name of the check that called this function Here is the function: def create_colorbar_barchart_for_check(x: np.ndarray, y: np.ndarray, ylabel: str = 'Result', xlabel: str = 'Features', color_map: str = 'RdYlGn_r', start: float = 0, stop: float = 1.0, tick_steps: float = 0.1, color_label: str = 'Color', color_shift_midpoint: float = 0.5, check_name: str = ''): """Output a colorbar barchart using matplotlib. Parameters ---------- x: np.ndarray array containing x axis data. y: np.ndarray array containing y axis data. ylabel: str , default: Result Name of y axis xlabel : str , default: Features Name of x axis color_map : str , default: RdYlGn_r color_map name. 
See https://matplotlib.org/stable/tutorials/colors/colormaps.html for more details start : float , default: 0 start of y axis ticks stop : float , default: 1.0 end of y axis ticks tick_steps : float , default: 0.1 step to y axis ticks color_shift_midpoint : float , default: 0.5 midpoint of color map check_name : str , default: '' name of the check that called this function """ fig, ax = plt.subplots(figsize=(15, 4)) # pylint: disable=unused-variable try: my_cmap = plt.cm.get_cmap(color_map + check_name) except ValueError: my_cmap = plt.cm.get_cmap(color_map) my_cmap = shifted_color_map(my_cmap, start=start, midpoint=color_shift_midpoint, stop=stop, name=color_map + check_name) cmap_colors = my_cmap(list(y)) _ = ax.bar(x, y, color=cmap_colors) # pylint: disable=unused-variable sm = ScalarMappable(cmap=my_cmap, norm=plt.Normalize(start, stop)) sm.set_array([]) cbar = plt.colorbar(sm) cbar.set_label(color_label, rotation=270, labelpad=25) plt.yticks(np.arange(start, stop, tick_steps)) plt.ylabel(ylabel) plt.xlabel(xlabel)
Output a colorbar barchart using matplotlib. Parameters ---------- x: np.ndarray array containing x axis data. y: np.ndarray array containing y axis data. ylabel: str , default: Result Name of y axis xlabel : str , default: Features Name of x axis color_map : str , default: RdYlGn_r color_map name. See https://matplotlib.org/stable/tutorials/colors/colormaps.html for more details start : float , default: 0 start of y axis ticks stop : float , default: 1.0 end of y axis ticks tick_steps : float , default: 0.1 step to y axis ticks color_shift_midpoint : float , default: 0.5 midpoint of color map check_name : str , default: '' name of the check that called this function
685
import matplotlib.pyplot as plt import numpy as np from matplotlib.cm import ScalarMappable from matplotlib.colors import LinearSegmentedColormap The provided code snippet includes necessary dependencies for implementing the `hex_to_rgba` function. Write a Python function `def hex_to_rgba(h, alpha)` to solve the following problem: Convert color value in hex format to rgba format with alpha transparency. Here is the function: def hex_to_rgba(h, alpha): """Convert color value in hex format to rgba format with alpha transparency.""" return 'rgba' + str(tuple([int(h.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4)] + [alpha]))
Convert color value in hex format to rgba format with alpha transparency.
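A quick worked example of the conversion (the hex value is arbitrary):

h, alpha = '#00008b', 0.4
rgba = 'rgba' + str(tuple([int(h.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4)] + [alpha]))
print(rgba)  # rgba(0, 0, 139, 0.4)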
686
import numpy as np The provided code snippet includes necessary dependencies for implementing the `round_sig` function. Write a Python function `def round_sig(x: float, sig: int = 2)` to solve the following problem: Round a number to a given number of significant digits. Here is the function: def round_sig(x: float, sig: int = 2): """Round a number to a given number of significant digits.""" return round(x, sig-int(np.floor(np.log10(abs(x))))-1)
Round a number to a given number of significant digits.
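A worked example of the rounding formula above (the value is chosen arbitrarily):

import numpy as np

x, sig = 0.004567, 2
# floor(log10(|x|)) = -3, so the number is rounded to 2 - (-3) - 1 = 4 decimal places.
print(round(x, sig - int(np.floor(np.log10(abs(x)))) - 1))  # 0.0046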
687
from collections import defaultdict from copy import deepcopy from typing import Callable, List, Optional, Tuple, Union import numpy as np import pandas as pd from sklearn.tree import _tree from deepchecks.tabular.dataset import Dataset from deepchecks.utils.strings import format_number from deepchecks.utils.typing import Hashable class DeepchecksFilter: """Contains a filter function which works on a dataframe and a label describing the filter. Parameters ---------- filter_functions : List[Callable], default: None List of functions that receive a DataFrame and return a filter on it. If None, no filter is applied label : str, default = '' name of the filter """ def __init__(self, filter_functions: List[Callable] = None, label: str = ''): if not filter_functions: self.filter_functions = [] else: self.filter_functions = filter_functions self.label = label def filter(self, dataframe: pd.DataFrame, label_col: Optional[pd.Series] = None) -> \ Union[Tuple[pd.DataFrame, pd.Series], pd.DataFrame]: """Run the filter on given dataframe. Return rows in data frame satisfying the filter properties.""" if label_col is not None: dataframe['temp_label_col'] = label_col for func in self.filter_functions: dataframe = dataframe.loc[func(dataframe)] if label_col is not None: return dataframe.drop(columns=['temp_label_col']), dataframe['temp_label_col'] else: return dataframe The provided code snippet includes necessary dependencies for implementing the `intersect_two_filters` function. Write a Python function `def intersect_two_filters(filter1: DeepchecksFilter, filter2: DeepchecksFilter) -> DeepchecksFilter` to solve the following problem: Merge two DeepChecksFilters into one, an intersection of both filters. Here is the function: def intersect_two_filters(filter1: DeepchecksFilter, filter2: DeepchecksFilter) -> DeepchecksFilter: """Merge two DeepChecksFilters into one, an intersection of both filters.""" return DeepchecksFilter(filter1.filter_functions + filter2.filter_functions)
Merge two DeepChecksFilters into one, an intersection of both filters.
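A toy sketch of what "intersection" means here: applying both lists of filter functions in sequence on the same frame (the data and thresholds are made up):

import pandas as pd

df = pd.DataFrame({'age': [20, 35, 50], 'income': [1000, 3000, 5000]})
filter_functions = [lambda d: d['age'] > 25, lambda d: d['income'] < 4000]
for func in filter_functions:
    df = df.loc[func(df)]
print(df)  # only the row with age 35 and income 3000 remains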
688
from collections import defaultdict from copy import deepcopy from typing import Callable, List, Optional, Tuple, Union import numpy as np import pandas as pd from sklearn.tree import _tree from deepchecks.tabular.dataset import Dataset from deepchecks.utils.strings import format_number from deepchecks.utils.typing import Hashable def numeric_segmentation_edges(column: pd.Series, max_segments: int) -> np.ndarray: """Split given series into values which are used to create quantiles segments. Tries to create `max_segments + 1` values (since segment is a range, so 2 values needed to create segment) but in case some quantiles have the same value they will be filtered, and the result will have less than max_segments + 1 values. """ percentile_values = np.array([min(column), max(column)]) attempt_max_segments = max_segments prev_percentile_values = deepcopy(percentile_values) while len(percentile_values) < max_segments + 1: prev_percentile_values = deepcopy(percentile_values) percentile_values = pd.unique( np.nanpercentile(column.to_numpy(), np.linspace(0, 100, attempt_max_segments + 1)) ) if len(percentile_values) == len(prev_percentile_values): break attempt_max_segments *= 2 if len(percentile_values) > max_segments + 1: percentile_values = prev_percentile_values return percentile_values The provided code snippet includes necessary dependencies for implementing the `partition_numeric_feature_around_segment` function. Write a Python function `def partition_numeric_feature_around_segment(column: pd.Series, segment: List[float], max_additional_segments: int = 4) -> np.ndarray` to solve the following problem: Split given series into segments containing specified segment. Tries to create segments as balanced as possible in size. Parameters ---------- column : pd.Series Series to be partitioned. segment : List[float] Segment to be included in the partition. max_additional_segments : int, default = 4 Maximum number of segments to be returned (not including the original segment). Here is the function: def partition_numeric_feature_around_segment(column: pd.Series, segment: List[float], max_additional_segments: int = 4) -> np.ndarray: """Split given series into segments containing specified segment. Tries to create segments as balanced as possible in size. Parameters ---------- column : pd.Series Series to be partitioned. segment : List[float] Segment to be included in the partition. max_additional_segments : int, default = 4 Maximum number of segments to be returned (not including the original segment). 
""" data_below_segment, data_above_segment = column[column <= segment[0]], column[column > segment[1]] if len(data_below_segment) + len(data_above_segment) == 0: return np.array([np.nanmin(column), np.nanmax(column)]) ratio = np.divide(len(data_below_segment), len(data_below_segment) + len(data_above_segment)) if len(data_below_segment) == 0: segments_below = np.array([np.nanmin(column)]) elif data_below_segment.nunique() == 1: segments_below = np.array([np.nanmin(column), segment[0]]) else: segments_below = numeric_segmentation_edges(data_below_segment, round(max_additional_segments * ratio)) segments_below = np.append(np.delete(segments_below, len(segments_below) - 1), segment[0]) if len(data_above_segment) == 0: segments_above = np.array([np.nanmax(column)]) elif data_above_segment.nunique() == 1: segments_above = np.array([segment[1], np.nanmax(column)]) else: segments_above = numeric_segmentation_edges(data_above_segment, round(max_additional_segments * (1 - ratio))) segments_above = np.append(segment[1], np.delete(segments_above, 0)) return np.unique(np.concatenate([segments_below, segments_above], axis=None))
Split given series into segments containing specified segment. Tries to create segments as balanced as possible in size. Parameters ---------- column : pd.Series Series to be partitioned. segment : List[float] Segment to be included in the partition. max_additional_segments : int, default = 4 Maximum number of segments to be returned (not including the original segment).
689
from collections import defaultdict from copy import deepcopy from typing import Callable, List, Optional, Tuple, Union import numpy as np import pandas as pd from sklearn.tree import _tree from deepchecks.tabular.dataset import Dataset from deepchecks.utils.strings import format_number from deepchecks.utils.typing import Hashable class DeepchecksFilter: """Contains a filter function which works on a dataframe and a label describing the filter. Parameters ---------- filter_functions : List[Callable], default: None List of functions that receive a DataFrame and return a filter on it. If None, no filter is applied label : str, default = '' name of the filter """ def __init__(self, filter_functions: List[Callable] = None, label: str = ''): if not filter_functions: self.filter_functions = [] else: self.filter_functions = filter_functions self.label = label def filter(self, dataframe: pd.DataFrame, label_col: Optional[pd.Series] = None) -> \ Union[Tuple[pd.DataFrame, pd.Series], pd.DataFrame]: """Run the filter on given dataframe. Return rows in data frame satisfying the filter properties.""" if label_col is not None: dataframe['temp_label_col'] = label_col for func in self.filter_functions: dataframe = dataframe.loc[func(dataframe)] if label_col is not None: return dataframe.drop(columns=['temp_label_col']), dataframe['temp_label_col'] else: return dataframe def numeric_segmentation_edges(column: pd.Series, max_segments: int) -> np.ndarray: """Split given series into values which are used to create quantiles segments. Tries to create `max_segments + 1` values (since segment is a range, so 2 values needed to create segment) but in case some quantiles have the same value they will be filtered, and the result will have less than max_segments + 1 values. """ percentile_values = np.array([min(column), max(column)]) attempt_max_segments = max_segments prev_percentile_values = deepcopy(percentile_values) while len(percentile_values) < max_segments + 1: prev_percentile_values = deepcopy(percentile_values) percentile_values = pd.unique( np.nanpercentile(column.to_numpy(), np.linspace(0, 100, attempt_max_segments + 1)) ) if len(percentile_values) == len(prev_percentile_values): break attempt_max_segments *= 2 if len(percentile_values) > max_segments + 1: percentile_values = prev_percentile_values return percentile_values def largest_category_index_up_to_ratio(histogram, max_segments, max_cat_proportions): """Decide which categorical values are big enough to display individually. First check how many of the biggest categories needed in order to occupy `max_cat_proportions`% of the data. If the number is less than max_segments than return it, else return max_segments or number of unique values. """ total_values = sum(histogram.values) first_less_then_max_cat_proportions_idx = np.argwhere( histogram.values.cumsum() >= total_values * max_cat_proportions )[0][0] # Get index of last value in histogram to show return min(max_segments, histogram.size, first_less_then_max_cat_proportions_idx + 1) class Dataset: """ Dataset wraps pandas DataFrame together with ML related metadata. The Dataset class is containing additional data and methods intended for easily accessing metadata relevant for the training or validating of an ML models. Parameters ---------- df : Any An object that can be casted to a pandas DataFrame - containing data relevant for the training or validating of a ML models. 
label : t.Union[Hashable, pd.Series, pd.DataFrame, np.ndarray] , default: None label column provided either as a string with the name of an existing column in the DataFrame or a label object including the label data (pandas Series/DataFrame or a numpy array) that will be concatenated to the data in the DataFrame. in case of label data the following logic is applied to set the label name: - Series: takes the series name or 'target' if name is empty - DataFrame: expect single column in the dataframe and use its name - numpy: use 'target' features : t.Optional[t.Sequence[Hashable]] , default: None List of names for the feature columns in the DataFrame. cat_features : t.Optional[t.Sequence[Hashable]] , default: None List of names for the categorical features in the DataFrame. In order to disable categorical. features inference, pass cat_features=[] index_name : t.Optional[Hashable] , default: None Name of the index column in the dataframe. If set_index_from_dataframe_index is True and index_name is not None, index will be created from the dataframe index level with the given name. If index levels have no names, an int must be used to select the appropriate level by order. set_index_from_dataframe_index : bool , default: False If set to true, index will be created from the dataframe index instead of dataframe columns (default). If index_name is None, first level of the index will be used in case of a multilevel index. datetime_name : t.Optional[Hashable] , default: None Name of the datetime column in the dataframe. If set_datetime_from_dataframe_index is True and datetime_name is not None, date will be created from the dataframe index level with the given name. If index levels have no names, an int must be used to select the appropriate level by order. set_datetime_from_dataframe_index : bool , default: False If set to true, date will be created from the dataframe index instead of dataframe columns (default). If datetime_name is None, first level of the index will be used in case of a multilevel index. convert_datetime : bool , default: True If set to true, date will be converted to datetime using pandas.to_datetime. datetime_args : t.Optional[t.Dict] , default: None pandas.to_datetime args used for conversion of the datetime column. (look at https://pandas.pydata.org/docs/reference/api/pandas.to_datetime.html for more documentation) max_categorical_ratio : float , default: 0.01 The max ratio of unique values in a column in order for it to be inferred as a categorical feature. max_categories : int , default: None The maximum number of categories in a column in order for it to be inferred as a categorical feature. if None, uses is_categorical default inference mechanism. label_type : str , default: None Used to determine the task type. If None, inferred when running a check based on label column and model. Possible values are: 'multiclass', 'binary' and 'regression'. 
""" _features: t.List[Hashable] _label_name: t.Optional[Hashable] _index_name: t.Optional[Hashable] _set_index_from_dataframe_index: t.Optional[bool] _datetime_name: t.Optional[Hashable] _set_datetime_from_dataframe_index: t.Optional[bool] _convert_datetime: t.Optional[bool] _datetime_column: t.Optional[pd.Series] _cat_features: t.List[Hashable] _data: pd.DataFrame _max_categorical_ratio: float _max_categories: int _label_type: t.Optional[TaskType] def __init__( self, df: t.Any, label: t.Union[Hashable, pd.Series, pd.DataFrame, np.ndarray] = None, features: t.Optional[t.Sequence[Hashable]] = None, cat_features: t.Optional[t.Sequence[Hashable]] = None, index_name: t.Optional[Hashable] = None, set_index_from_dataframe_index: bool = False, datetime_name: t.Optional[Hashable] = None, set_datetime_from_dataframe_index: bool = False, convert_datetime: bool = True, datetime_args: t.Optional[t.Dict] = None, max_categorical_ratio: float = 0.01, max_categories: int = None, label_type: str = None, dataset_name: t.Optional[str] = None, label_classes=None ): if len(df) == 0: raise DeepchecksValueError('Can\'t create a Dataset object with an empty dataframe') self._data = pd.DataFrame(df).copy() # Checking for duplicate columns duplicated_columns = [key for key, value in Counter(self._data.columns).items() if value > 1] if len(duplicated_columns) >= 1: raise DeepchecksValueError( f"Data has {len(duplicated_columns)} duplicate columns. " "Change the duplicate column names or remove them from the data. " f"Duplicate column names: {duplicated_columns}") # Validations if label is None: self._label_name = None elif isinstance(label, (pd.Series, pd.DataFrame, np.ndarray)): if isinstance(label, pd.DataFrame): if label.shape[1] != 1: raise DeepchecksValueError('Provide label as a Series or a DataFrame with a single column.') label = label.iloc[:, 0] elif isinstance(label, np.ndarray): if len(label.shape) > 2: raise DeepchecksValueError('Label must be either column vector or row vector') elif len(label.shape) == 2: if all(x != 1 for x in label.shape): raise DeepchecksValueError('Label must be either column vector or row vector') label = np.squeeze(label) label = pd.Series(label) if label.shape[0] != self._data.shape[0]: raise DeepchecksValueError('Number of samples of label and data must be equal') pd.testing.assert_index_equal(self._data.index, label.index) self._label_name = DEFAULT_LABEL_NAME if label.name is None or label.name == 0 else label.name if self._label_name in self._data.columns: raise DeepchecksValueError(f'Data has column with name "{self._label_name}", change label column name ' f'or provide the column label name as str') self._data[self._label_name] = label elif isinstance(label, Hashable): if label not in self._data.columns: raise DeepchecksValueError(f'label column {label} not found in dataset columns') self._label_name = label else: raise DeepchecksValueError(f'Unsupported type for label: {type(label).__name__}') # Assert that the requested index can be found if not set_index_from_dataframe_index: if index_name is not None and index_name not in self._data.columns: error_message = f'Index column {index_name} not found in dataset columns.' if index_name == 'index': error_message += ' If you attempted to use the dataframe index, set ' \ 'set_index_from_dataframe_index to True instead.' 
raise DeepchecksValueError(error_message) else: if index_name is not None: if isinstance(index_name, str): if index_name not in self._data.index.names: raise DeepchecksValueError(f'Index {index_name} not found in dataframe index level names.') elif isinstance(index_name, int): if index_name > (len(self._data.index.names) - 1): raise DeepchecksValueError(f'Dataframe index has less levels than {index_name + 1}.') else: raise DeepchecksValueError(f'When set_index_from_dataframe_index is True index_name can be None,' f' int or str, but found {type(index_name)}') # Assert that the requested datetime can be found if not set_datetime_from_dataframe_index: if datetime_name is not None and datetime_name not in self._data.columns: error_message = f'Datetime column {datetime_name} not found in dataset columns.' if datetime_name == 'date': error_message += ' If you attempted to use the dataframe index, ' \ 'set set_datetime_from_dataframe_index to True instead.' raise DeepchecksValueError(error_message) else: if datetime_name is not None: if isinstance(datetime_name, str): if datetime_name not in self._data.index.names: raise DeepchecksValueError( f'Datetime {datetime_name} not found in dataframe index level names.' ) elif isinstance(datetime_name, int): if datetime_name > (len(self._data.index.names) - 1): raise DeepchecksValueError(f'Dataframe index has less levels than {datetime_name + 1}.') else: raise DeepchecksValueError(f'When set_index_from_dataframe_index is True index_name can be None,' f' int or str, but found {type(index_name)}') self._datetime_column = self.get_datetime_column_from_index(datetime_name) if features is not None: difference = set(features) - set(self._data.columns) if len(difference) > 0: raise DeepchecksValueError('Features must be names of columns in dataframe. 
' f'Features {difference} have not been ' 'found in input dataframe.') self._features = list(features) else: self._features = [x for x in self._data.columns if x not in {self._label_name, index_name if not set_index_from_dataframe_index else None, datetime_name if not set_datetime_from_dataframe_index else None}] if len(set(self._data.index)) != len(self._data.index): if set_index_from_dataframe_index: raise DeepchecksValueError('Selected index column has duplicate values.') else: self._data['original_df_index'] = self._data.index self._data.index = range(len(self._data.index)) warnings.warn('Dataframe index has duplicate indexes, setting index to [0,1..,n-1].') self._index_name = index_name self._set_index_from_dataframe_index = set_index_from_dataframe_index self._convert_datetime = convert_datetime self._datetime_name = datetime_name self._set_datetime_from_dataframe_index = set_datetime_from_dataframe_index self._datetime_args = datetime_args or {} self._max_categorical_ratio = max_categorical_ratio self._max_categories = max_categories if isinstance(dataset_name, str) or (dataset_name is None): self.name = dataset_name else: raise DeepchecksValueError('The dataset_name parameter accepts a string or None.') if self._label_name in self.features: raise DeepchecksValueError(f'label column {self._label_name} can not be a feature column') if self._datetime_name in self.features: raise DeepchecksValueError(f'datetime column {self._datetime_name} can not be a feature column') if self._index_name in self.features: raise DeepchecksValueError(f'index column {self._index_name} can not be a feature column') if cat_features is not None: if set(cat_features).intersection(set(self._features)) != set(cat_features): raise DeepchecksValueError('Categorical features must be a subset of features. 
' f'Categorical features {set(cat_features) - set(self._features)} ' 'have not been found in feature list.') self._cat_features = list(cat_features) else: self._cat_features = self._infer_categorical_features( self._data, max_categorical_ratio=max_categorical_ratio, max_categories=max_categories, columns=self._features ) if ((self._datetime_name is not None) or self._set_datetime_from_dataframe_index) and convert_datetime: if self._set_datetime_from_dataframe_index: self._datetime_column = pd.to_datetime(self._datetime_column, **self._datetime_args) else: self._data[self._datetime_name] = pd.to_datetime(self._data[self._datetime_name], **self._datetime_args) if label_type in ['classification_label', 'regression_label']: warnings.warn(f'{label_type} value for label type is deprecated, allowed task types are multiclass,' f' binary and regression.', DeprecationWarning, stacklevel=2) self._label_type = TaskType.REGRESSION if label_type == 'regression_label' else TaskType.MULTICLASS elif label_type in [task.value for task in TaskType]: self._label_type = TaskType(label_type) elif label_type is not None: raise DeepchecksValueError(f'allowed value for label type are {[task.value for task in TaskType]},' f' received {label_type}.') else: self._label_type = None if label_classes is not None: warnings.warn('label_classes parameter is deprecated, use model_classes parameter on a check run function ' 'instead.', DeprecationWarning, stacklevel=2) unassigned_cols = [col for col in self._features if col not in self._cat_features] self._numerical_features = infer_numerical_features(self._data[unassigned_cols]) def from_numpy( cls: t.Type[TDataset], *args: np.ndarray, columns: t.Sequence[Hashable] = None, label_name: t.Hashable = None, **kwargs ) -> TDataset: """Create Dataset instance from numpy arrays. Parameters ---------- *args: np.ndarray Numpy array of data columns, and second optional numpy array of labels. columns : t.Sequence[Hashable] , default: None names for the columns. If none provided, the names that will be automatically assigned to the columns will be: 1 - n (where n - number of columns) label_name : t.Hashable , default: None labels column name. If none is provided, the name 'target' will be used. **kwargs : Dict additional arguments that will be passed to the main Dataset constructor. Returns ------- Dataset instance of the Dataset Raises ------ DeepchecksValueError if receives zero or more than two numpy arrays. if columns (args[0]) is not two dimensional numpy array. if labels (args[1]) is not one dimensional numpy array. if features array or labels array is empty. Examples -------- >>> import numpy >>> from deepchecks.tabular import Dataset >>> features = numpy.array([[0.25, 0.3, 0.3], ... [0.14, 0.75, 0.3], ... [0.23, 0.39, 0.1]]) >>> labels = numpy.array([0.1, 0.1, 0.7]) >>> dataset = Dataset.from_numpy(features, labels) Creating dataset only from features array. >>> dataset = Dataset.from_numpy(features) Passing additional arguments to the main Dataset constructor >>> dataset = Dataset.from_numpy(features, labels, max_categorical_ratio=0.5) Specifying features and label columns names. >>> dataset = Dataset.from_numpy( ... features, labels, ... columns=['sensor-1', 'sensor-2', 'sensor-3'], ... label_name='labels' ... ) """ if len(args) == 0 or len(args) > 2: raise DeepchecksValueError( "'from_numpy' constructor expecting to receive two numpy arrays (or at least one)." "First array must contains the columns and second the labels." 
) columns_array = args[0] columns_error_message = ( "'from_numpy' constructor expecting columns (args[0]) " "to be not empty two dimensional array." ) if len(columns_array.shape) != 2: raise DeepchecksValueError(columns_error_message) if columns_array.shape[0] == 0 or columns_array.shape[1] == 0: raise DeepchecksValueError(columns_error_message) if columns is not None and len(columns) != columns_array.shape[1]: raise DeepchecksValueError( f'{columns_array.shape[1]} columns were provided ' f'but only {len(columns)} name(s) for them`s.' ) elif columns is None: columns = [str(index) for index in range(1, columns_array.shape[1] + 1)] if len(args) == 1: labels_array = None else: labels_array = args[1] if len(labels_array.shape) != 1 or labels_array.shape[0] == 0: raise DeepchecksValueError( "'from_numpy' constructor expecting labels (args[1]) " "to be not empty one dimensional array." ) labels_array = pd.Series(labels_array) if label_name: labels_array = labels_array.rename(label_name) return cls( df=pd.DataFrame(data=columns_array, columns=columns), label=labels_array, **kwargs ) def data(self) -> pd.DataFrame: """Return the data of dataset.""" return self._data def copy(self: TDataset, new_data: pd.DataFrame) -> TDataset: """Create a copy of this Dataset with new data. Parameters ---------- new_data (DataFrame): new data from which new dataset will be created Returns ------- Dataset new dataset instance """ # Filter out if columns were dropped features = [feat for feat in self._features if feat in new_data.columns] cat_features = [feat for feat in self.cat_features if feat in new_data.columns] label_name = self._label_name if self._label_name in new_data.columns else None label_type = None if self._label_type is None else self._label_type.value index = self._index_name if self._index_name in new_data.columns else None date = self._datetime_name if self._datetime_name in new_data.columns else None cls = type(self) return cls(new_data, features=features, cat_features=cat_features, label=label_name, index_name=index, set_index_from_dataframe_index=self._set_index_from_dataframe_index, datetime_name=date, set_datetime_from_dataframe_index=self._set_datetime_from_dataframe_index, convert_datetime=self._convert_datetime, max_categorical_ratio=self._max_categorical_ratio, max_categories=self._max_categories, label_type=label_type, dataset_name=self.name) def sample(self: TDataset, n_samples: t.Optional[int] = None, replace: bool = False, random_state: t.Optional[int] = None) -> TDataset: """Create a copy of the dataset object, with the internal dataframe being a sample of the original dataframe. Parameters ---------- n_samples : t.Optional[int] Number of samples to draw. replace : bool, default: False Whether to sample with replacement. random_state : t.Optional[int] , default None Random state. Returns ------- Dataset instance of the Dataset with sampled internal dataframe. """ if n_samples is None: return self n_samples = min(n_samples, len(self.data)) return self.copy(self.data.sample(n_samples, replace=replace, random_state=random_state)) def drop_na_labels(self) -> TDataset: """Create a copy of the dataset object without samples with missing labels.""" if not self.has_label(): return self return self.copy(self.data[self.label_col.notna()]) def n_samples(self) -> int: """Return number of samples in dataframe. Returns ------- int Number of samples in dataframe """ return self.data.shape[0] def label_type(self) -> t.Optional[TaskType]: """Return the label type. 
Returns ------- t.Optional[TaskType] Label type """ return self._label_type def train_test_split(self: TDataset, train_size: t.Union[int, float, None] = None, test_size: t.Union[int, float] = 0.25, random_state: int = 42, shuffle: bool = True, stratify: t.Union[t.List, pd.Series, np.ndarray, bool] = False ) -> t.Tuple[TDataset, TDataset]: """Split dataset into random train and test datasets. Parameters ---------- train_size : t.Union[int, float, None] , default: None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. test_size : t.Union[int, float] , default: 0.25 If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. random_state : int , default: 42 The random state to use for shuffling. shuffle : bool , default: True Whether to shuffle the data before splitting. stratify : t.Union[t.List, pd.Series, np.ndarray, bool] , default: False If True, data is split in a stratified fashion, using the class labels. If array-like, data is split in a stratified fashion, using this as class labels. Returns ------- Dataset Dataset containing train split data. Dataset Dataset containing test split data. """ if isinstance(stratify, bool): stratify = self.label_col if stratify else None train_df, test_df = train_test_split(self._data, test_size=test_size, train_size=train_size, random_state=random_state, shuffle=shuffle, stratify=stratify) return self.copy(train_df), self.copy(test_df) def _infer_categorical_features( df: pd.DataFrame, max_categorical_ratio: float, max_categories: int = None, columns: t.Optional[t.List[Hashable]] = None, ) -> t.List[Hashable]: """Infers which features are categorical by checking types and number of unique values. Parameters ---------- df: pd.DataFrame max_categorical_ratio: float max_categories: int , default: None columns: t.Optional[t.List[Hashable]] , default: None Returns ------- t.List[Hashable] Out of the list of feature names, returns list of categorical features """ categorical_columns = infer_categorical_features( df, max_categorical_ratio=max_categorical_ratio, max_categories=max_categories, columns=columns ) message = ('It is recommended to initialize Dataset with categorical features by doing ' '"Dataset(df, cat_features=categorical_list)". No categorical features were passed, therefore ' 'heuristically inferring categorical features in the data. ' f'{len(categorical_columns)} categorical features were inferred.') if len(categorical_columns) > 0: columns_to_print = categorical_columns[:7] message += ': ' + ', '.join(list(map(str, columns_to_print))) if len(categorical_columns) > len(columns_to_print): message += '... For full list use dataset.cat_features' get_logger().warning(message) return categorical_columns def is_categorical(self, col_name: Hashable) -> bool: """Check if a column is considered a category column in the dataset object. Parameters ---------- col_name : Hashable The name of the column in the dataframe Returns ------- bool If is categorical according to input numbers """ return col_name in self._cat_features def index_name(self) -> t.Optional[Hashable]: """If index column exists, return its name. 
Returns ------- t.Optional[Hashable] index name """ return self._index_name def index_col(self) -> t.Optional[pd.Series]: """Return index column. Index can be a named column or DataFrame index. Returns ------- t.Optional[pd.Series] If index column exists, returns a pandas Series of the index column. """ if self._set_index_from_dataframe_index is True: index_name = self.data.index.name or 'index' if self._index_name is None: return pd.Series(self.data.index.get_level_values(0), name=index_name, index=self.data.index) elif isinstance(self._index_name, (str, int)): return pd.Series(self.data.index.get_level_values(self._index_name), name=index_name, index=self.data.index) else: raise DeepchecksValueError(f'Don\'t know to handle index_name of type {type(self._index_name)}') elif self._index_name is not None: return self.data[self._index_name] else: # No meaningful index to use: Index column not configured, and _set_index_from_dataframe_index is False return def datetime_name(self) -> t.Optional[Hashable]: """If datetime column exists, return its name. Returns ------- t.Optional[Hashable] datetime name """ return self._datetime_name def get_datetime_column_from_index(self, datetime_name): """Retrieve the datetime info from the index if _set_datetime_from_dataframe_index is True.""" index_name = self.data.index.name or 'datetime' if datetime_name is None: return pd.Series(self.data.index.get_level_values(0), name=index_name, index=self.data.index) elif isinstance(datetime_name, (str, int)): return pd.Series(self.data.index.get_level_values(datetime_name), name=index_name, index=self.data.index) def datetime_col(self) -> t.Optional[pd.Series]: """Return datetime column if exists. Returns ------- t.Optional[pd.Series] Series of the datetime column """ if self._set_datetime_from_dataframe_index is True and self._datetime_column is not None: return self._datetime_column elif self._datetime_name is not None: return self.data[self._datetime_name] else: # No meaningful Datetime to use: # Datetime column not configured, and _set_datetime_from_dataframe_index is False return def label_name(self) -> Hashable: """If label column exists, return its name. Otherwise, throw an exception. Returns ------- Hashable: Label name """ if not self._label_name: raise DeepchecksNotSupportedError( 'Dataset does not contain a label column', html=f'Dataset does not contain a label column. see {_get_dataset_docs_tag()}' ) return self._label_name def features(self) -> t.List[Hashable]: """Return list of feature names. Returns ------- t.List[Hashable] List of feature names. """ return list(self._features) def features_columns(self) -> pd.DataFrame: """Return DataFrame containing only the features defined in the dataset, if features are empty raise error. Returns ------- pd.DataFrame """ self.assert_features() return self.data[self.features] def label_col(self) -> pd.Series: """Return Series of the label defined in the dataset, if label is not defined raise error. Returns ------- pd.Series """ return self.data[self.label_name] def cat_features(self) -> t.List[Hashable]: """Return list of categorical feature names. Returns ------- t.List[Hashable] List of categorical feature names. """ return list(self._cat_features) def numerical_features(self) -> t.List[Hashable]: """Return list of numerical feature names. Returns ------- t.List[Hashable] List of numerical feature names. """ return list(self._numerical_features) def classes_in_label_col(self) -> t.Tuple[str, ...]: """Return the classes from label column in sorted list. 
if no label column defined, return empty list. Returns ------- t.Tuple[str, ...] Sorted classes """ if self.has_label(): return tuple(sorted(self.data[self.label_name].dropna().unique())) else: return tuple() def columns_info(self) -> t.Dict[Hashable, str]: """Return the role and logical type of each column. Returns ------- t.Dict[Hashable, str] Directory of a column and its role """ columns = {} for column in self.data.columns: if column == self._index_name: value = 'index' elif column == self._datetime_name: value = 'date' elif column == self._label_name: value = 'label' elif column in self._features: if column in self.cat_features: value = 'categorical feature' elif column in self.numerical_features: value = 'numerical feature' else: value = 'other feature' else: value = 'other' columns[column] = value return columns def has_label(self) -> bool: """Return True if label column exists. Returns ------- bool True if label column exists. """ return self._label_name is not None def assert_features(self): """Check if features are defined (not empty) and if not raise error. Raises ------ DeepchecksNotSupportedError """ if not self.features: raise DeepchecksNotSupportedError( 'Dataset does not contain any feature columns', html=f'Dataset does not contain any feature columns. see {_get_dataset_docs_tag()}' ) def assert_datetime(self): """Check if datetime is defined and if not raise error. Raises ------ DeepchecksNotSupportedError """ if not (self._set_datetime_from_dataframe_index or self._datetime_name): raise DatasetValidationError( 'Dataset does not contain a datetime', html=f'Dataset does not contain a datetime. see {_get_dataset_docs_tag()}' ) def assert_index(self): """Check if index is defined and if not raise error. Raises ------ DeepchecksNotSupportedError """ if not (self._set_index_from_dataframe_index or self._index_name): raise DatasetValidationError( 'Dataset does not contain an index', html=f'Dataset does not contain an index. see {_get_dataset_docs_tag()}' ) def select( self: TDataset, columns: t.Union[Hashable, t.List[Hashable], None] = None, ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None, keep_label: bool = False ) -> TDataset: """Filter dataset columns by given params. Parameters ---------- columns : Union[Hashable, List[Hashable], None] Column names to keep. ignore_columns : Union[Hashable, List[Hashable], None] Column names to drop. Returns ------- TDataset horizontally filtered dataset Raises ------ DeepchecksValueError In case one of columns given don't exists raise error """ if ( keep_label and isinstance(columns, list) and self.label_name not in columns ): columns = columns[:] columns.append(self.label_name) new_data = select_from_dataframe(self._data, columns, ignore_columns) if new_data.equals(self.data): return self else: return self.copy(new_data) def cast_to_dataset(cls, obj: t.Any) -> 'Dataset': """Verify Dataset or transform to Dataset. Function verifies that provided value is a non-empty instance of Dataset, otherwise raises an exception, but if the 'cast' flag is set to True it will also try to transform provided value to the Dataset instance. Parameters ---------- obj value to verify Raises ------ DeepchecksValueError if the provided value is not a Dataset instance; if the provided value cannot be transformed into Dataset instance; """ if isinstance(obj, pd.DataFrame): get_logger().warning( 'Received a "pandas.DataFrame" instance. 
It is recommended to pass a "deepchecks.tabular.Dataset" ' 'instance by initializing it with the data and metadata, ' 'for example by doing "Dataset(dataframe, label=label, cat_features=cat_features)"' ) obj = Dataset(obj) elif not isinstance(obj, Dataset): raise DeepchecksValueError( f'non-empty instance of Dataset or DataFrame was expected, instead got {type(obj).__name__}' ) return obj.copy(obj.data) def datasets_share_features(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same features. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same features, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" # TODO: should not we also check features dtypes? features_names = set(datasets[0].features) for ds in datasets[1:]: if features_names != set(ds.features): return False return True def datasets_share_categorical_features(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same categorical features. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same categorical features, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" # TODO: should not we also check features dtypes? first = set(datasets[0].cat_features) for ds in datasets[1:]: features = set(ds.cat_features) if first != features: return False return True def datasets_share_label(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same label column. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same categorical features, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" # TODO: should not we also check label dtypes? label_name = datasets[0].label_name for ds in datasets[1:]: if ds.label_name != label_name: return False return True def datasets_share_index(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same index column. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same index column, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" first_ds = datasets[0] for ds in datasets[1:]: if (ds._index_name != first_ds._index_name or ds._set_index_from_dataframe_index != first_ds._set_index_from_dataframe_index): return False return True def datasets_share_date(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same date column. 
Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same date column, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" first_ds = datasets[0] for ds in datasets[1:]: if (ds._datetime_name != first_ds._datetime_name or ds._set_datetime_from_dataframe_index != first_ds._set_datetime_from_dataframe_index): return False return True def _dataset_description(self) -> pd.DataFrame: data = self.data features = self.features categorical_features = self.cat_features numerical_features = self.numerical_features label_column = t.cast(pd.Series, data[self.label_name]) if self.has_label() else None index_column = self.index_col datetime_column = self.datetime_col label_name = None index_name = None datetime_name = None dataset_columns_info = [] if index_column is not None: index_name = index_column.name dataset_columns_info.append([ index_name, infer_dtype(index_column, skipna=True), 'Index', 'set from dataframe index' if self._set_index_from_dataframe_index is True else '' ]) if datetime_column is not None: datetime_name = datetime_column.name dataset_columns_info.append([ datetime_name, infer_dtype(datetime_column, skipna=True), 'Datetime', 'set from DataFrame index' if self._set_datetime_from_dataframe_index is True else '' ]) if label_column is not None: label_name = label_column.name dataset_columns_info.append([ label_name, infer_dtype(label_column, skipna=True), '' if self.label_type is None else self.label_type.value.capitalize() + " LABEL", '' ]) all_columns = pd.Series(features + list(self.data.columns)).unique() for feature_name in t.cast(t.Iterable[str], all_columns): if feature_name in (index_name, datetime_name, label_name): continue feature_dtype = infer_dtype(data[feature_name], skipna=True) if feature_name in categorical_features: kind = 'Categorical Feature' elif feature_name in numerical_features: kind = 'Numerical Feature' elif feature_name in features: kind = 'Other Feature' else: kind = 'Dataset Column' dataset_columns_info.append([feature_name, feature_dtype, kind, '']) return pd.DataFrame( data=dataset_columns_info, columns=['Column', 'DType', 'Kind', 'Additional Info'], ) def __repr__( self, max_cols: int = 8, max_rows: int = 10, fmt: DatasetReprFmt = 'string' ) -> str: """Represent a dataset instance.""" info = self._dataset_description() columns = list(info[info['Additional Info'] == '']['Column']) data = self.data.loc[:, columns] # Sorting horizontally kwargs = dict(max_cols=max_cols, col_space=15) if fmt == 'string': features_info = info.to_string(max_rows=50, **kwargs) data_to_show = data.to_string(show_dimensions=True, max_rows=max_rows, **kwargs) title_template = '{:-^40}\n\n' return ''.join(( title_template.format(' Dataset Description '), f'{features_info}\n\n\n', title_template.format(' Dataset Content '), f'{data_to_show}\n\n', )) elif fmt == 'html': features_info = info.to_html(notebook=True, max_rows=50, **kwargs) data_to_show = data.to_html(notebook=True, max_rows=max_rows, **kwargs) return ''.join([ '<h4><b>Dataset Description</b></h4>', features_info, '<h4><b>Dataset Content</b></h4>', data_to_show ]) else: raise ValueError( '"fmt" parameter supports only next values [string, html]' ) def _ipython_display_(self): display_html(HTML(self.__repr__(fmt='html'))) def __len__(self) -> int: """Return number of samples in the member dataframe. 
Returns ------- int """ return self.n_samples def len_when_sampled(self, n_samples: int): """Return number of samples in the sampled dataframe this dataset is sampled with n_samples samples.""" return min(len(self), n_samples) def is_sampled(self, n_samples: int): """Return True if the dataset number of samples will decrease when sampled with n_samples samples.""" if n_samples is None: return False return len(self) > n_samples def format_number(x, floating_point: int = 2) -> str: """Format number for elegant display. Parameters ---------- x Number to be displayed floating_point : int , default: 2 Number of floating points to display Returns ------- str String of beautified number """ def add_commas(x): return f'{x:,}' # yes this actually formats the number 1000 to "1,000" if np.isnan(x): return 'nan' # 0 is lost in the next if case, so we have it here as a special use-case if x == 0: return '0' # If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3. if abs(x) < 10 ** (-floating_point): return f'{Decimal(x):.{floating_point}E}' # If x is an integer, or if x when rounded is an integer (e.g. 1.999999), then return as integer: if round(x) == round(x, floating_point): return add_commas(round(x)) # If not, return as a float, but don't print unnecessary zeros at end: else: ret_x = round(x, floating_point) return add_commas(ret_x).rstrip('0') class Hashable(Protocol): """Trait for any hashable type that also defines comparison operators.""" def __hash__(self) -> int: # noqa: D105 ... def __le__(self, __value) -> bool: # noqa: D105 ... def __lt__(self, __value) -> bool: # noqa: D105 ... def __ge__(self, __value) -> bool: # noqa: D105 ... def __gt__(self, __value) -> bool: # noqa: D105 ... def __eq__(self, __value) -> bool: # noqa: D105 ... The provided code snippet includes necessary dependencies for implementing the `partition_column` function. Write a Python function `def partition_column( dataset: Dataset, column_name: Hashable, max_segments: int = 10, max_cat_proportions: float = 0.9, ) -> List[DeepchecksFilter]` to solve the following problem: Split column into segments. For categorical we'll have a max of max_segments + 1, for the 'Others'. We take the largest categories which cumulative percent in data is equal/larger than `max_cat_proportions`. the rest will go to 'Others' even if less than max_segments. For numerical we split into maximum number of `max_segments` quantiles. if some of the quantiles are duplicates then we merge them into the same segment range (so not all ranges necessarily will have same size). Parameters ---------- dataset : Dataset column_name : Hashable column to partition. max_segments : int, default: 10 maximum number of segments to split into. max_cat_proportions : float , default: 0.9 (for categorical) ratio to aggregate largest values to show. Returns ------- List[DeepchecksFilter] Here is the function: def partition_column( dataset: Dataset, column_name: Hashable, max_segments: int = 10, max_cat_proportions: float = 0.9, ) -> List[DeepchecksFilter]: """Split column into segments. For categorical we'll have a max of max_segments + 1, for the 'Others'. We take the largest categories which cumulative percent in data is equal/larger than `max_cat_proportions`. the rest will go to 'Others' even if less than max_segments. For numerical we split into maximum number of `max_segments` quantiles. 
if some of the quantiles are duplicates then we merge them into the same segment range (so not all ranges necessarily will have same size). Parameters ---------- dataset : Dataset column_name : Hashable column to partition. max_segments : int, default: 10 maximum number of segments to split into. max_cat_proportions : float , default: 0.9 (for categorical) ratio to aggregate largest values to show. Returns ------- List[DeepchecksFilter] """ column = dataset.data[column_name] if column_name in dataset.numerical_features: percentile_values = numeric_segmentation_edges(column, max_segments) # If for some reason only single value in the column (and column not categorical) we will get single item if len(percentile_values) == 1: f = lambda df, val=percentile_values[0]: (df[column_name] == val) label = str(percentile_values[0]) return [DeepchecksFilter([f], label)] filters = [] for start, end in zip(percentile_values[:-1], percentile_values[1:]): # In case of the last range, the end is closed. if end == percentile_values[-1]: f = lambda df, a=start, b=end: (df[column_name] >= a) & (df[column_name] <= b) label = f'[{format_number(start)} - {format_number(end)}]' else: f = lambda df, a=start, b=end: (df[column_name] >= a) & (df[column_name] < b) label = f'[{format_number(start)} - {format_number(end)})' filters.append(DeepchecksFilter([f], label)) return filters elif column_name in dataset.cat_features: # Get sorted histogram cat_hist_dict = column.value_counts() # Get index of last value in histogram to show n_large_cats = largest_category_index_up_to_ratio(cat_hist_dict, max_segments, max_cat_proportions) filters = [] for i in range(n_large_cats): f = lambda df, val=cat_hist_dict.index[i]: df[column_name] == val filters.append(DeepchecksFilter([f], str(cat_hist_dict.index[i]))) if len(cat_hist_dict) > n_large_cats: f = lambda df, values=cat_hist_dict.index[:n_large_cats]: ~df[column_name].isin(values) filters.append(DeepchecksFilter([f], 'Others')) return filters
Split column into segments. For categorical columns we'll have at most max_segments + 1 segments, the extra one being 'Others'. We take the largest categories until their cumulative proportion of the data is equal to or larger than `max_cat_proportions`; the rest go to 'Others' even if there are fewer than max_segments categories. For numerical columns we split into at most `max_segments` quantiles. If some of the quantiles are duplicates, they are merged into the same segment range (so not all ranges will necessarily have the same size). Parameters ---------- dataset : Dataset column_name : Hashable column to partition. max_segments : int, default: 10 maximum number of segments to split into. max_cat_proportions : float , default: 0.9 (for categorical) ratio of the data to be covered by the largest categories shown individually. Returns ------- List[DeepchecksFilter]
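For illustration, a minimal usage sketch of partition_column, assuming the Dataset class and partition_column defined in the prompt above are in scope; the toy columns 'age', 'city' and 'target' are invented for the example:

import numpy as np
import pandas as pd

# Toy data: one numerical column, one categorical column and a label.
rng = np.random.default_rng(0)
df = pd.DataFrame({
    'age': rng.integers(18, 80, size=500),
    'city': rng.choice(['NY', 'LA', 'SF', 'Austin'], size=500, p=[0.5, 0.3, 0.15, 0.05]),
    'target': rng.integers(0, 2, size=500),
})
dataset = Dataset(df, label='target', cat_features=['city'])

# Numerical column: quantile-based range filters.
for seg_filter in partition_column(dataset, 'age', max_segments=4):
    print(seg_filter.label, len(seg_filter.filter(df)))

# Categorical column: one filter per large category, plus 'Others' if needed.
for seg_filter in partition_column(dataset, 'city', max_segments=3):
    print(seg_filter.label, len(seg_filter.filter(df)))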
690
from collections import defaultdict from copy import deepcopy from typing import Callable, List, Optional, Tuple, Union import numpy as np import pandas as pd from sklearn.tree import _tree from deepchecks.tabular.dataset import Dataset from deepchecks.utils.strings import format_number from deepchecks.utils.typing import Hashable class DeepchecksBaseFilter(DeepchecksFilter): """Extend DeepchecksFilter class for feature range based filters. Parameters ---------- filters: dict, default: None A dictionary in containing feature names as keys and the filtering range as value. filter_functions : List[Callable], default: None List of functions that receive a DataFrame and return a filter on it. If None, no filter is applied label : str, default = '' Name of the filter """ def __init__(self, filters: dict = None, filter_functions: List[Callable] = None, label: str = ''): if filters is None: filters = defaultdict() self.filters = filters super().__init__(filter_functions, label) def add_filter(self, feature_name: str, threshold: float, greater_then: bool = True): """Add a filter by intersecting it with existing filter.""" if greater_then: filter_func = [lambda df, a=threshold: df[feature_name] > a] if feature_name in self.filters.keys(): original_range = self.filters[feature_name] self.filters[feature_name] = [max(threshold, original_range[0]), original_range[1]] else: self.filters[feature_name] = [threshold, np.inf] else: filter_func = [lambda df, a=threshold: df[feature_name] <= a] if feature_name in self.filters.keys(): original_range = self.filters[feature_name] self.filters[feature_name] = [original_range[0], min(threshold, original_range[1])] else: self.filters[feature_name] = [np.NINF, threshold] self.filter_functions += filter_func return self def copy(self): """Return a copy of the object.""" return DeepchecksBaseFilter(self.filters.copy(), self.filter_functions.copy(), self.label) The provided code snippet includes necessary dependencies for implementing the `convert_tree_leaves_into_filters` function. Write a Python function `def convert_tree_leaves_into_filters(tree, feature_names: List[str]) -> List[DeepchecksBaseFilter]` to solve the following problem: Extract the leaves from a sklearn tree and covert them into DeepchecksBaseFilter. The function goes over the tree from root to leaf and concatenates (by intersecting) the relevant filters along the way. The function returns a list in which each element is a DeepchecksFilter representing the path between the root to a different leaf. Parameters ---------- tree : A sklearn tree. The tree_ property of a sklearn decision tree. feature_names : List[str] The feature names for elements within the tree. Normally it is the column names of the data frame the tree was trained on. Returns ------- List[DeepchecksFilter]: A list of filters describing the leaves of the tree. Here is the function: def convert_tree_leaves_into_filters(tree, feature_names: List[str]) -> List[DeepchecksBaseFilter]: """Extract the leaves from a sklearn tree and covert them into DeepchecksBaseFilter. The function goes over the tree from root to leaf and concatenates (by intersecting) the relevant filters along the way. The function returns a list in which each element is a DeepchecksFilter representing the path between the root to a different leaf. Parameters ---------- tree : A sklearn tree. The tree_ property of a sklearn decision tree. feature_names : List[str] The feature names for elements within the tree. 
Normally it is the column names of the data frame the tree was trained on. Returns ------- List[DeepchecksFilter]: A list of filters describing the leaves of the tree. """ node_to_feature = [feature_names[feature_idx] if feature_idx != _tree.TREE_UNDEFINED else None for feature_idx in tree.feature] def recurse(node_idx: int, filter_of_node: DeepchecksBaseFilter): if tree.feature[node_idx] != _tree.TREE_UNDEFINED: left_node_filter = filter_of_node.copy().add_filter(node_to_feature[node_idx], tree.threshold[node_idx], greater_then=False) right_node_filter = filter_of_node.copy().add_filter(node_to_feature[node_idx], tree.threshold[node_idx]) return recurse(tree.children_left[node_idx], left_node_filter) + \ recurse(tree.children_right[node_idx], right_node_filter) else: return [filter_of_node] filters_to_leaves = recurse(0, DeepchecksBaseFilter()) return filters_to_leaves
Extract the leaves from a sklearn tree and convert them into DeepchecksBaseFilter objects. The function goes over the tree from root to leaf and concatenates (by intersecting) the relevant filters along the way. It returns a list in which each element is a DeepchecksFilter representing the path from the root to a different leaf. Parameters ---------- tree : A sklearn tree. The tree_ property of a sklearn decision tree. feature_names : List[str] The feature names for elements within the tree. Normally these are the column names of the data frame the tree was trained on. Returns ------- List[DeepchecksFilter]: A list of filters describing the leaves of the tree.
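For illustration, a short sketch that fits a shallow scikit-learn tree on invented data and converts its leaves into filters, assuming DeepchecksBaseFilter and convert_tree_leaves_into_filters from the prompt above are in scope:

import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor

# Invented regression data with two features.
rng = np.random.default_rng(0)
X = pd.DataFrame({'f1': rng.normal(size=200), 'f2': rng.uniform(size=200)})
y = 2 * X['f1'] + rng.normal(scale=0.1, size=200)

tree_model = DecisionTreeRegressor(max_depth=2, random_state=0).fit(X, y)

# tree_model.tree_ is the low-level sklearn tree object the function expects.
leaf_filters = convert_tree_leaves_into_filters(tree_model.tree_, list(X.columns))

# Each filter holds the feature ranges leading to one leaf; applying it
# returns the rows of X that end up in that leaf.
for leaf_filter in leaf_filters:
    print(leaf_filter.filters, len(leaf_filter.filter(X)))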
691
from typing import Any, Callable, Dict, Hashable, List, Optional, Tuple import numpy as np import pandas as pd import plotly.express as px from category_encoders import TargetEncoder from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestRegressor from sklearn.impute import SimpleImputer from sklearn.metrics import r2_score from sklearn.pipeline import Pipeline from sklearn.tree import DecisionTreeRegressor from deepchecks import tabular from deepchecks.core.errors import DeepchecksProcessError from deepchecks.tabular import Dataset from deepchecks.tabular.utils.feature_importance import _calculate_feature_importance from deepchecks.tabular.utils.task_type import TaskType from deepchecks.utils.plot import colors from deepchecks.utils.strings import format_number, format_percent def create_error_regression_model(numeric_features, cat_features, random_state=42) -> Tuple[Pipeline, List[Hashable]]: """Create regression model to calculate error.""" numeric_transformer = SimpleImputer() categorical_transformer = Pipeline( steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())] ) preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, cat_features), ] ) return Pipeline(steps=[ ('preprocessing', preprocessor), ('model', RandomForestRegressor(max_depth=4, n_jobs=-1, random_state=random_state)) ]), numeric_features + cat_features class DeepchecksProcessError(DeepchecksBaseError): """Exception class that represents an issue with a process.""" pass def _calculate_feature_importance( model: t.Any, dataset: t.Union['tabular.Dataset', pd.DataFrame], model_classes, observed_classes, task_type, force_permutation: bool = False, permutation_kwargs: t.Dict[str, t.Any] = None, ) -> t.Tuple[pd.Series, str]: """Calculate features effect on the label. Parameters ---------- model : t.Any a fitted model dataset : t.Union['tabular.Dataset', pd.DataFrame] dataset used to fit the model model_classes possible classes output for model. None for regression tasks. observed_classes Observed classes in the data. None for regression tasks. task_type The task type of the model. force_permutation : bool, default: False force permutation importance calculation permutation_kwargs : t.Dict[str, t.Any] , default: None kwargs for permutation importance calculation Returns ------- Tuple[Series, str]: first item - feature importance normalized to 0-1 indexed by feature names, second item - type of feature importance calculation (types: `permutation_importance`, `feature_importances_`, `coef_`) Raises ------ NotFittedError Call 'fit' with appropriate arguments before using this estimator. DeepchecksValueError if model validation failed. if it was not possible to calculate features importance. NumberOfFeaturesLimitError if the number of features limit were exceeded. """ if task_type == TaskType.REGRESSION: model_classes = None observed_classes = None permutation_kwargs = permutation_kwargs or {} permutation_kwargs['random_state'] = permutation_kwargs.get('random_state', 42) validate_model(dataset, model) permutation_failure = None calc_type = None importance = None if force_permutation: if isinstance(dataset, pd.DataFrame): raise errors.DeepchecksValueError('Cannot calculate permutation feature importance on a pandas Dataframe. 
' 'In order to force permutation feature importance, please use the Dataset' ' object.') else: importance = _calc_permutation_importance(model, dataset, model_classes, observed_classes, task_type, **permutation_kwargs) calc_type = 'permutation_importance' # If there was no force permutation, or if it failed while trying to calculate importance, # we don't take built-in importance in pipelines because the pipeline is changing the features # (for example one-hot encoding) which leads to the inner model features # being different from the original dataset features if importance is None and not isinstance(model, Pipeline): # Get the actual model in case of pipeline importance, calc_type = _built_in_importance(model, dataset) # If found importance and was force permutation failure before, show warning if importance is not None and permutation_failure: get_logger().warning(permutation_failure) # If there was no permutation failure and no importance on the model, using permutation anyway if importance is None and permutation_failure is None and isinstance(dataset, tabular.Dataset): if not permutation_kwargs.get('skip_messages', False): if isinstance(model, Pipeline): pre_text = 'Cannot use model\'s built-in feature importance on a Scikit-learn Pipeline,' else: pre_text = 'Could not find built-in feature importance on the model,' get_logger().warning('%s using permutation feature importance calculation instead', pre_text) importance = _calc_permutation_importance(model, dataset, model_classes, observed_classes, task_type, **permutation_kwargs) calc_type = 'permutation_importance' # If after all importance is still none raise error if importance is None: # FIXME: better message raise errors.DeepchecksValueError('Was not able to calculate features importance') return importance.fillna(0), calc_type class TaskType(Enum): """Enum containing supported task types.""" REGRESSION = 'regression' BINARY = 'binary' MULTICLASS = 'multiclass' def format_number(x, floating_point: int = 2) -> str: """Format number for elegant display. Parameters ---------- x Number to be displayed floating_point : int , default: 2 Number of floating points to display Returns ------- str String of beautified number """ def add_commas(x): return f'{x:,}' # yes this actually formats the number 1000 to "1,000" if np.isnan(x): return 'nan' # 0 is lost in the next if case, so we have it here as a special use-case if x == 0: return '0' # If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3. if abs(x) < 10 ** (-floating_point): return f'{Decimal(x):.{floating_point}E}' # If x is an integer, or if x when rounded is an integer (e.g. 1.999999), then return as integer: if round(x) == round(x, floating_point): return add_commas(round(x)) # If not, return as a float, but don't print unnecessary zeros at end: else: ret_x = round(x, floating_point) return add_commas(ret_x).rstrip('0') The provided code snippet includes necessary dependencies for implementing the `model_error_contribution` function. Write a Python function `def model_error_contribution(train_dataset: pd.DataFrame, train_scores: pd.Series, test_dataset: pd.DataFrame, test_scores: pd.Series, numeric_features: List, categorical_features: List, min_error_model_score=0.5, random_state=42) -> Tuple[pd.Series, pd.Series]` to solve the following problem: Calculate features contributing to model error. 
Here is the function: def model_error_contribution(train_dataset: pd.DataFrame, train_scores: pd.Series, test_dataset: pd.DataFrame, test_scores: pd.Series, numeric_features: List, categorical_features: List, min_error_model_score=0.5, random_state=42) -> Tuple[pd.Series, pd.Series]: """Calculate features contributing to model error.""" # Create and fit model to predict the per sample error error_model, new_feature_order = create_error_regression_model(numeric_features, categorical_features, random_state=random_state) error_model.fit(train_dataset, y=train_scores) # Check if fitted model is good enough error_model_predicted = error_model.predict(test_dataset) error_model_score = r2_score(test_scores, error_model_predicted) # This check should be ignored if no information gained from the error model (low r2_score) if error_model_score < min_error_model_score: raise DeepchecksProcessError(f'Unable to train meaningful error model ' f'(r^2 score: {format_number(error_model_score)})') error_fi, _ = _calculate_feature_importance(error_model, Dataset(test_dataset, test_scores, cat_features=categorical_features), model_classes=None, observed_classes=None, task_type=TaskType.REGRESSION, permutation_kwargs={'random_state': random_state, 'skip_messages': True}) error_fi.index = new_feature_order error_fi.sort_values(ascending=False, inplace=True) return error_fi, error_model_predicted
Calculate features contributing to model error.
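For illustration, a hedged sketch of calling model_error_contribution on synthetic per-sample error scores, assuming the function above plus deepchecks and its dependencies (scikit-learn, category_encoders) are available; the data and the dependence of the scores on 'x1' are invented so that the internal error model can pass the min_error_model_score threshold:

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
n = 1000
features = pd.DataFrame({
    'x1': rng.normal(size=n),
    'x2': rng.normal(size=n),
    'cat': rng.choice(['a', 'b'], size=n),
})
# Synthetic per-sample error scores that deliberately depend on 'x1'.
scores = pd.Series(np.abs(features['x1']) + rng.normal(scale=0.05, size=n))

train_X, test_X = features.iloc[:700], features.iloc[700:]
train_s, test_s = scores.iloc[:700], scores.iloc[700:]

error_fi, error_pred = model_error_contribution(
    train_X, train_s, test_X, test_s,
    numeric_features=['x1', 'x2'], categorical_features=['cat'],
)
# error_fi: permutation importance of each feature for explaining the error,
# sorted in descending order; 'x1' is expected to dominate in this toy setup.
print(error_fi)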
692
from typing import Any, Callable, Dict, Hashable, List, Optional, Tuple

import numpy as np
import pandas as pd
import plotly.express as px
from category_encoders import TargetEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import r2_score
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeRegressor

from deepchecks import tabular
from deepchecks.core.errors import DeepchecksProcessError
from deepchecks.tabular import Dataset
from deepchecks.tabular.utils.feature_importance import _calculate_feature_importance
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.plot import colors
from deepchecks.utils.strings import format_number, format_percent

def error_model_display(error_fi: pd.Series,
                        error_model_predicted: pd.Series,
                        dataset: tabular.Dataset,
                        # score related parameters
                        model: Optional[Any],
                        scorer: Optional[Callable],
                        # Output related parameters
                        max_features_to_show: int,
                        min_feature_contribution: float,
                        n_display_samples: int,
                        min_segment_size: float,
                        random_state: int,
                        with_display: bool) -> Tuple[List, Dict]:
    """Calculate and display segments with large error discrepancies.

    Parameters
    ----------
    error_fi : pd.Series
        Feature Importances of the error model
    error_model_predicted : pd.Series
        Predictions of the values of the error model
    dataset : tabular.Dataset
        Dataset to create display from
    model : Optional[Any]
        Original model for calculating the score on tabular data (Must come with scorer)
    scorer : Optional[Callable]
        Scorer to calculate the output values of the segments (Must come with model)
    max_features_to_show : int
        Maximum number of features to output.
    min_feature_contribution : float
        Minimum value to consider a feature to output.
    n_display_samples : int
        Maximum number of values to represent in the display
    min_segment_size : float
        Minimum segment size to consider.
    random_state: int
        Random seed

    Returns
    -------
    Tuple[List, Dict]:
        List of display elements and Dict of segment description
    """
    n_samples_display = min(n_display_samples, len(dataset))
    error_col_name = 'Deepchecks model error'
    display_error = pd.Series(error_model_predicted, name=error_col_name, index=dataset.data.index)

    display = []
    value = {'scorer_name': scorer.name if scorer else None, 'feature_segments': {}}
    weak_color = '#d74949'
    ok_color = colors['Test']

    for feature in error_fi.keys()[:max_features_to_show]:
        if error_fi[feature] < min_feature_contribution:  # pylint: disable=unsubscriptable-object
            break

        data = pd.concat([dataset.data[feature], display_error], axis=1)
        value['feature_segments'][feature] = {}
        segment1_details = {}
        segment2_details = {}

        # Violin plot for categorical features, scatter plot for numerical features
        if feature in dataset.cat_features:
            # find categories with the weakest performance
            # if we use a scorer we want max mean to be in ok group and min mean when using error
            error_per_segment_ser = (
                data.groupby(feature)
                .agg(['mean', 'count'])[error_col_name]
                .sort_values('mean', ascending=not scorer)
            )
            cum_sum_ratio = error_per_segment_ser['count'].cumsum() / error_per_segment_ser['count'].sum()

            # Partition data into two groups - weak and ok:
            # In cum_sum_ratio the index is the categories sorted from "weakest" (the highest model error) to
            # strongest, and the value is the cumulative fraction of all data.
            # The weak segment contains all the weakest categories until they reach together a fraction of data
            # of at least min_segment_size.
            first_weakest_category_to_pass_min_segment_size = np.where(cum_sum_ratio.values >= min_segment_size)[0][0]
            in_segment_indices = np.arange(len(cum_sum_ratio)) <= first_weakest_category_to_pass_min_segment_size
            weak_categories = error_per_segment_ser.index[in_segment_indices]
            ok_categories = error_per_segment_ser.index[~in_segment_indices]

            # Calculate score for each group and assign label and color
            if scorer:
                ok_name_feature, segment1_details = get_segment_details(model, scorer, dataset,
                                                                        data[feature].isin(ok_categories))
            else:
                ok_name_feature, segment1_details = get_segment_details_using_error(error_col_name, data,
                                                                                    data[feature].isin(ok_categories))

            if with_display:
                color_map = {ok_name_feature: ok_color}

                if len(weak_categories) >= 1:
                    if scorer:
                        weak_name_feature, segment2_details = get_segment_details(model, scorer, dataset,
                                                                                  data[feature].isin(weak_categories))
                    else:
                        weak_name_feature, segment1_details = \
                            get_segment_details_using_error(error_col_name, data,
                                                            data[feature].isin(weak_categories))
                    color_map[weak_name_feature] = weak_color
                else:
                    weak_name_feature = None

                replace_dict = {x: weak_name_feature if x in weak_categories else ok_name_feature
                                for x in error_per_segment_ser.index}
                color_col = data[feature].replace(replace_dict)

                # Display
                display.append(px.violin(
                    data, y=error_col_name, x=feature, title=f'Segmentation of error by feature: {feature}',
                    box=False, labels={error_col_name: 'model error', 'color': 'Weak & OK Segments'},
                    color=color_col, color_discrete_map=color_map
                ))
        elif feature in dataset.numerical_features:
            # sample data for display
            np.random.seed(random_state)
            sampling_idx = np.random.choice(range(len(data)), size=n_samples_display, replace=False)
            data = data.iloc[sampling_idx]

            # Train tree to partition segments according to the model error
            tree_partitioner = DecisionTreeRegressor(
                max_depth=1,
                min_samples_leaf=min_segment_size + np.finfo(float).eps,
                random_state=random_state
            ).fit(data[[feature]], data[error_col_name])

            if len(tree_partitioner.tree_.threshold) > 1:
                threshold = tree_partitioner.tree_.threshold[0]  # pylint: disable=unsubscriptable-object
                color_col = data[feature].ge(threshold)
                sampled_dataset = dataset.data.iloc[sampling_idx]

                if scorer:
                    segment1_text, segment1_details = get_segment_details(model, scorer,
                                                                          dataset.copy(sampled_dataset), color_col)
                    segment2_text, segment2_details = get_segment_details(model, scorer,
                                                                          dataset.copy(sampled_dataset), ~color_col)
                    segment1_ok = segment1_details['score'] >= segment2_details['score']
                    color_col = color_col.replace([True, False], [segment1_text, segment2_text])
                else:
                    # If there is no scorer, we use the error calculation to describe the segments
                    # Colors are flipped, because lower error is better
                    segment1_text, segment1_details = get_segment_details_using_error(error_col_name, data, ~color_col)
                    segment2_text, segment2_details = get_segment_details_using_error(error_col_name, data, color_col)
                    segment1_ok = segment1_details['score'] < segment2_details['score']
                    color_col = color_col.replace([False, True], [segment1_text, segment2_text])

                # Segment with lower performance is assigned to the weak color
                if segment1_ok:
                    color_map = {segment1_text: ok_color, segment2_text: weak_color}
                    category_order = [segment2_text, segment1_text]
                else:
                    color_map = {segment1_text: weak_color, segment2_text: ok_color}
                    category_order = [segment1_text, segment2_text]
            else:
                color_col = data[error_col_name]
                color_map = None
                category_order = None

            if with_display:
                display.append(px.scatter(data, x=feature, y=error_col_name, color=color_col,
                                          title=f'Segmentation of error by the feature: {feature}',
                                          labels={error_col_name: 'model error', 'color': 'Weak & OK Segments'},
                                          category_orders={'color': category_order},
                                          color_discrete_map=color_map))

        if segment1_details:
            value['feature_segments'][feature]['segment1'] = segment1_details
        if segment2_details:
            value['feature_segments'][feature]['segment2'] = segment2_details

    return display if with_display else None, value

The provided code snippet includes necessary dependencies for implementing the `error_model_display_dataframe` function. Write a Python function `def error_model_display_dataframe(error_fi: pd.Series, error_model_predicted: pd.Series, dataset: pd.DataFrame, cat_features: List, max_features_to_show: int, min_feature_contribution: float, n_display_samples: int, min_segment_size: float, random_state: int, with_display: bool)` to solve the following problem:
Wrap dataframe with tabular.Dataset for error_model_display with no scorer.
Here is the function: def error_model_display_dataframe(error_fi: pd.Series, error_model_predicted: pd.Series, dataset: pd.DataFrame,
                                  cat_features: List, max_features_to_show: int, min_feature_contribution: float,
                                  n_display_samples: int, min_segment_size: float, random_state: int,
                                  with_display: bool):
    """Wrap dataframe with tabular.Dataset for error_model_display with no scorer."""
    return error_model_display(error_fi,
                               error_model_predicted,
                               tabular.Dataset(dataset, cat_features=cat_features),
                               None,
                               None,
                               max_features_to_show,
                               min_feature_contribution,
                               n_display_samples,
                               min_segment_size,
                               random_state,
                               with_display)
Wrap dataframe with tabular.Dataset for error_model_display with no scorer.
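Continuing the hypothetical sketch introduced after the previous record (error_fi, error_pred and test_df come from there), the wrapper can be fed the raw test frame directly. All parameter values below are illustrative, and the segment helpers that error_model_display ultimately calls are not shown in this record.

figures, segments = error_model_display_dataframe(error_fi, error_pred, test_df, cat_features=['region'],
                                                  max_features_to_show=3, min_feature_contribution=0.15,
                                                  n_display_samples=5000, min_segment_size=0.05,
                                                  random_state=42, with_display=True)
# figures holds plotly violin/scatter plots; segments maps each feature to its weak/ok segment details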
693
import logging
import warnings

_logger = logging.getLogger('deepchecks')
_logger.addHandler(_stream_handler)
_logger.setLevel(logging.INFO)

The provided code snippet includes necessary dependencies for implementing the `set_verbosity` function. Write a Python function `def set_verbosity(level: int)` to solve the following problem:
Set the deepchecks logger verbosity level. Same as doing logging.getLogger('deepchecks').setLevel(level). Control the package wide log level and the progress bars - progress bars are level INFO.
Examples
--------
>>> import logging
>>> import deepchecks
>>> # will disable progress bars
>>> deepchecks.set_verbosity(logging.WARNING)
>>> # will disable also any warnings deepchecks print
>>> deepchecks.set_verbosity(logging.ERROR)
Here is the function: def set_verbosity(level: int):
    """Set the deepchecks logger verbosity level.

    Same as doing logging.getLogger('deepchecks').setLevel(level).
    Control the package wide log level and the progress bars - progress bars are level INFO.

    Examples
    --------
    >>> import logging
    >>> import deepchecks
    >>> # will disable progress bars
    >>> deepchecks.set_verbosity(logging.WARNING)
    >>> # will disable also any warnings deepchecks print
    >>> deepchecks.set_verbosity(logging.ERROR)
    """
    _logger.setLevel(level)
    if level >= logging.ERROR:
        warnings.filterwarnings(
            action='ignore',
            message=r'.*',
            module=r'deepchecks.*'
        )
Set the deepchecks logger verbosity level. Same as doing logging.getLogger('deepchecks').setLevel(level). Control the package wide log level and the progress bars - progress bars are level INFO.
Examples
--------
>>> import logging
>>> import deepchecks
>>> # will disable progress bars
>>> deepchecks.set_verbosity(logging.WARNING)
>>> # will disable also any warnings deepchecks print
>>> deepchecks.set_verbosity(logging.ERROR)
694
import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
from braceexpand import braceexpand
from datasets import Dataset, load_dataset

from .model.text import TextNormalizer

def blank_caption_function(example, text_column, blank_caption_prob, rng=None):
    if (
        blank_caption_prob
        and (rng.random() if rng is not None else np.random.random()) < blank_caption_prob
    ):
        example[text_column] = ""
    return example
null
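A hypothetical example of how this map-style helper might be applied with `datasets.map`; the "caption" column name and the 0.3 probability are assumptions, not values from the original code.

from functools import partial
from datasets import Dataset

ds = Dataset.from_dict({"caption": ["a cat on a mat", "a red car"]})
# Blank roughly 30% of captions so the model also sees unconditional (caption-free) examples.
ds = ds.map(partial(blank_caption_function, text_column="caption", blank_caption_prob=0.3))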
695
import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
from braceexpand import braceexpand
from datasets import Dataset, load_dataset

from .model.text import TextNormalizer

def normalize_function(example, text_column, text_normalizer):
    example[text_column] = text_normalizer(example[text_column])
    return example
null
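A small illustration of the calling convention; `str.lower` stands in for the package's `TextNormalizer`, which is imported above but not shown in this record.

example = {"caption": "A Red CAR!"}
print(normalize_function(example, text_column="caption", text_normalizer=str.lower))
# -> {'caption': 'a red car!'}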
696
import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
from braceexpand import braceexpand
from datasets import Dataset, load_dataset

from .model.text import TextNormalizer

def filter_function(
    example,
    min_clip_score,
    max_clip_score,
    clip_score_column,
    filter_column,
    filter_value,
):
    if min_clip_score is not None and example[clip_score_column] < min_clip_score:
        return False
    if max_clip_score is not None and example[clip_score_column] > max_clip_score:
        return False
    if filter_column is not None and example[filter_column] != filter_value:
        return False
    return True
null
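A hypothetical example with `datasets.filter`; the column names, thresholds and filter value are assumptions chosen only to show which rows survive.

from functools import partial
from datasets import Dataset

ds = Dataset.from_dict({"clip_score": [0.1, 0.3, 0.5], "source": ["web", "web", "curated"]})
# Keep rows with 0.2 <= clip_score <= 0.45 whose source is "web": only the middle row passes.
ds = ds.filter(partial(filter_function, min_clip_score=0.2, max_clip_score=0.45,
                       clip_score_column="clip_score", filter_column="source", filter_value="web"))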
697
import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
from braceexpand import braceexpand
from datasets import Dataset, load_dataset

from .model.text import TextNormalizer

def shift_tokens_right(input_ids: np.array, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = np.zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1]
    shifted_input_ids[:, 0] = decoder_start_token_id
    return shifted_input_ids

def preprocess_function(
    examples,
    tokenizer,
    text_column,
    encoding_column,
    max_length,
    decoder_start_token_id,
):
    inputs = examples[text_column]
    # Setting padding="max_length" as we need fixed length inputs for jitted functions
    model_inputs = tokenizer(
        inputs,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="np",
    )

    # set up targets
    # Note: labels correspond to our target indices
    # decoder input ids are the same but shifted to the right with bos at the beginning (and without last token)
    labels = examples[encoding_column]
    labels = np.asarray(labels)

    # We need the labels, in addition to the decoder_input_ids, for the compute_loss function
    model_inputs["labels"] = labels

    # In our case, this prepends the bos token and removes the last one
    decoder_input_ids = shift_tokens_right(labels, decoder_start_token_id)
    model_inputs["decoder_input_ids"] = decoder_input_ids

    return model_inputs
null
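A quick worked example of the token shift used above; the start token id 0 is a placeholder, not the project's actual bos id.

import numpy as np

labels = np.array([[17, 23, 41, 7]])
print(shift_tokens_right(labels, decoder_start_token_id=0))
# -> [[ 0. 17. 23. 41.]]  (bos prepended, last token dropped; values are floats because np.zeros defaults to float64)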
698
import html
import math
import random
import re
from pathlib import Path

import emoji
import ftfy
from huggingface_hub import hf_hub_download
from unidecode import unidecode

person_token = [("a person", 282265), ("someone", 121194), ("somebody", 12219)]

The provided code snippet includes necessary dependencies for implementing the `replace_person_token` function. Write a Python function `def replace_person_token(t)` to solve the following problem:
Used for CC12M
Here is the function: def replace_person_token(t):
    "Used for CC12M"
    t = re.sub("<person>([,\s]*(and)*[,\s]*<person>)+", " people ", t)
    while "<person>" in t:
        t = t.replace(
            "<person>", f" {random.choices(*tuple(zip(*person_token)))[0]} ", 1
        )
    return t
Used for CC12M
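A small illustration; the exact output varies because the replacement is sampled from person_token, weighted by the listed counts.

print(replace_person_token("<person> and <person> walking a dog"))
# runs of adjacent <person> tokens collapse to " people "
print(replace_person_token("<person> walking a dog"))
# a lone <person> becomes a weighted random choice such as " a person ", " someone " or " somebody "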
699
import html
import math
import random
import re
from pathlib import Path

import emoji
import ftfy
from huggingface_hub import hf_hub_download
from unidecode import unidecode

def fix_html(t):
    # from OpenAI CLIP
    return html.unescape(html.unescape(t))
null
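A short example; unescaping twice also repairs text that was HTML-escaped twice upstream.

print(fix_html("&amp;amp; &lt;3"))   # -> "& <3"
print(fix_html("Tom &amp; Jerry"))   # -> "Tom & Jerry"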
700
import html
import math
import random
import re
from pathlib import Path

import emoji
import ftfy
from huggingface_hub import hf_hub_download
from unidecode import unidecode

def replace_punctuation_with_commas(t):
    return re.sub("[()[\].,|:;?!=+~\-\/{}]", ",", t)
null
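A short example of the substitution; every listed punctuation character becomes a comma.

print(replace_punctuation_with_commas("Hello, world! (really)"))
# -> "Hello, world, ,really,"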
701
import html
import math
import random
import re
from pathlib import Path

import emoji
import ftfy
from huggingface_hub import hf_hub_download
from unidecode import unidecode

def simplify_quotes(t):
    return re.sub("""['"`]""", ' " ', t)
null
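A short example; every single quote, double quote or backtick is replaced by a double quote padded with spaces.

print(simplify_quotes("He said `hello`"))
# -> 'He said  " hello " '  (note the extra padding spaces around the inserted quote)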