Dataset schema (one record per function):
    code      : string (length 66 to 870k)
    docstring : string (length 19 to 26.7k)
    func_name : string (length 1 to 138)
    language  : string (1 distinct value)
    repo      : string (length 7 to 68)
    path      : string (length 5 to 324)
    url       : string (length 46 to 389)
    license   : string (7 distinct values)
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode,
        AirhumidifierMjjsqOperationMode[preset_mode.title()],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode,
        AirhumidifierJsqsOperationMode[preset_mode.title()],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode,
        AirhumidifierJsqOperationMode[preset_mode.title()],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode,
        AirfreshOperationMode[preset_mode.title()],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
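The four records above share one pattern: the preset string from Home Assistant is title-cased and used as an enum member name. A minimal sketch of that lookup, using an invented OperationMode enum rather than the real python-miio enums:

from enum import Enum

class OperationMode(Enum):  # hypothetical stand-in for the python-miio enums
    Auto = "auto"
    Silent = "silent"
    Strong = "strong"

preset_mode = "silent"  # as passed in by Home Assistant
print(OperationMode[preset_mode.title()])  # OperationMode.Silent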
async def async_reset_filter(self):
    """Reset the filter lifetime and usage."""
    if self._device_features & FEATURE_RESET_FILTER == 0:
        return

    await self._try_command(
        "Resetting the filter lifetime of the miio device failed.",
        self._device.reset_filter,
    )
Reset the filter lifetime and usage.
async_reset_filter
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode,
        AirfreshT2017OperationMode[preset_mode.title()],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_reset_filter(self):
    """Reset the filter lifetime and usage."""
    if self._device_features & FEATURE_RESET_FILTER == 0:
        return

    await self._try_command(
        "Resetting the upper filter lifetime of the miio device failed.",
        self._device.reset_upper_filter,
    )
    await self._try_command(
        "Resetting the dust filter lifetime of the miio device failed.",
        self._device.reset_dust_filter,
    )
Reset the filter lifetime and usage.
async_reset_filter
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_reset_filter(self):
    """Reset the filter lifetime and usage."""
    if self._device_features & FEATURE_RESET_FILTER == 0:
        return

    await self._try_command(
        "Resetting filter lifetime of the miio device failed.",
        self._device.reset_filter,
    )
Reset the filter lifetime and usage.
async_reset_filter
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    if preset_mode == SPEED_OFF:
        await self.async_turn_off()
        return

    if self._natural_mode:
        await self._try_command(
            "Setting fan speed of the miio device failed.",
            self._device.set_natural_speed,
            FAN_PRESET_MODE_VALUES[preset_mode],
        )
    else:
        await self._try_command(
            "Setting fan speed of the miio device failed.",
            self._device.set_direct_speed,
            FAN_PRESET_MODE_VALUES[preset_mode],
        )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_percentage(self, percentage: int) -> None:
    """Set the speed percentage of the fan."""
    _LOGGER.debug("Setting the fan speed percentage to: %s", percentage)

    if percentage == 0:
        await self.async_turn_off()
        return

    if self._natural_mode:
        await self._try_command(
            "Setting fan speed percentage of the miio device failed.",
            self._device.set_natural_speed,
            percentage,
        )
    else:
        await self._try_command(
            "Setting fan speed percentage of the miio device failed.",
            self._device.set_direct_speed,
            percentage,
        )
Set the speed percentage of the fan.
async_set_percentage
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_direction(self, direction: str) -> None:
    """Set the direction of the fan."""
    if direction == "forward":
        direction = "right"

    if direction == "reverse":
        direction = "left"

    if self._oscillate:
        await self._try_command(
            "Setting oscillate off of the miio device failed.",
            self._device.set_oscillate,
            False,
        )

    await self._try_command(
        "Setting move direction of the miio device failed.",
        self._device.set_rotate,
        FanMoveDirection(direction),
    )
Set the direction of the fan.
async_set_direction
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_delay_off(self, delay_off_countdown: int) -> None:
    """Set scheduled off timer in minutes."""
    await self._try_command(
        "Setting delay off miio device failed.",
        self._device.delay_off,
        delay_off_countdown * 60,
    )
Set scheduled off timer in minutes.
async_set_delay_off
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    if preset_mode == SPEED_OFF:
        await self.async_turn_off()
        return

    await self._try_command(
        "Setting fan speed of the miio device failed.",
        self._device.set_speed,
        FAN_PRESET_MODE_VALUES_P5[preset_mode],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_percentage(self, percentage: int) -> None:
    """Set the speed percentage of the fan."""
    _LOGGER.debug("Setting the fan speed percentage to: %s", percentage)

    if percentage == 0:
        await self.async_turn_off()
        return

    await self._try_command(
        "Setting fan speed percentage of the miio device failed.",
        self._device.set_speed,
        percentage,
    )
Set the speed percentage of the fan.
async_set_percentage
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_delay_off(self, delay_off_countdown: int) -> None:
    """Set scheduled off timer in minutes."""
    await self._try_command(
        "Setting delay off miio device failed.",
        self._device.delay_off,
        delay_off_countdown,
    )
Set scheduled off timer in minutes.
async_set_delay_off
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode,
        FanLeshowOperationMode[preset_mode.title()],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_percentage(self, percentage: int) -> None:
    """Set the speed percentage of the fan."""
    _LOGGER.debug("Setting the fan speed percentage to: %s", percentage)

    if percentage == 0:
        await self.async_turn_off()
        return

    await self._try_command(
        "Setting fan speed percentage of the miio device failed.",
        self._device.set_speed,
        percentage,
    )
Set the speed percentage of the fan.
async_set_percentage
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_delay_off(self, delay_off_countdown: int) -> None:
    """Set scheduled off timer in minutes."""
    await self._try_command(
        "Setting delay off miio device failed.",
        self._device.delay_off,
        delay_off_countdown,
    )
Set scheduled off timer in minutes.
async_set_delay_off
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)

    if not self._state:
        await self._try_command(
            "Turning the miio device on failed.", self._device.on
        )

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_speed,
        FAN_PRESET_MODES_1C[preset_mode],
    )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_percentage(self, percentage: int) -> None:
    """Set the speed percentage of the fan."""
    _LOGGER.debug("Setting the fan speed percentage to: %s", percentage)

    if percentage == 0:
        await self.async_turn_off()
        return

    if not self._state:
        await self._try_command(
            "Turning the miio device on failed.", self._device.on
        )

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_speed,
        FAN_PRESET_MODES_1C[
            percentage_to_ordered_list_item(FAN_SPEEDS_1C, percentage)
        ],
    )
Set the speed percentage of the fan.
async_set_percentage
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
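The record above maps a percentage onto an ordered speed list via Home Assistant's percentage_to_ordered_list_item helper. A rough standalone sketch of that mapping, similar in spirit to the helper but with invented speed labels (not the real FAN_SPEEDS_1C values):

import math

def percentage_to_item(ordered_list, percentage):
    # each item covers an equal share of the 1..100 range
    index = math.ceil(percentage / (100 / len(ordered_list))) - 1
    return ordered_list[max(index, 0)]

SPEEDS = ["low", "medium", "high"]  # invented labels
print(percentage_to_item(SPEEDS, 50))  # medium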
async def async_set_delay_off(self, delay_off_countdown: int) -> None:
    """Set scheduled off timer in minutes."""
    await self._try_command(
        "Setting delay off miio device failed.",
        self._device.delay_off,
        delay_off_countdown,
    )
Set scheduled off timer in minutes.
async_set_delay_off
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None:
    """Set the preset mode of the fan."""
    _LOGGER.debug("Setting the preset mode to: %s", preset_mode)
    _LOGGER.debug(
        "Calling set_mode_and_speed with parameters: %s",
        self._preset_modes_to_mode_speed[preset_mode],
    )

    # The following is true on AirDogX5 with firmware 1.3.5_0005.
    # Maybe this is different for other models. Needs testing.
    # It looks like the device was not designed to switch from any arbitrary
    # mode to any other mode. Some of the combinations produce unexpected
    # results.
    #
    # For example, switching from 'Auto' to 'Speed X' switches to Manual mode,
    # but always sets speed to 1, regardless of the speed parameter.
    #
    # Switching from 'Night mode' to 'Speed X' sets the device in Auto mode
    # with speed X. This 'Auto X' state is quite strange and does not seem to
    # be useful. Furthermore, we request Manual mode and get Auto.
    # Switching from 'Auto X' mode to 'Manual X' works just fine.
    # Switching from 'Auto X' mode to 'Manual Y' switches to 'Manual X'.
    #
    # Here is a full table of device behaviour:
    # FROM             TO             RESULT
    # 'Night mode' ->
    #                  'Auto'         Good
    #                  'Speed 1'      'Auto 1' + repeat -> Good
    #                  'Speed 2'      'Auto 2' + repeat -> Good
    #                  'Speed 3'      'Auto 3' + repeat -> Good
    #                  'Speed 4'      'Auto 4' + repeat -> Good
    # 'Speed 1' ->
    #                  'Night mode'   Good
    #                  'Auto'         Good
    # 'Speed 2' ->
    #                  'Night mode'   Good
    #                  'Auto'         Good
    # 'Speed 3' ->
    #                  'Night mode'   Good
    #                  'Auto'         Good
    # 'Speed 4' ->
    #                  'Night mode'   Good
    # 'Auto' ->
    #                  'Night mode'   Good
    #                  'Speed 1'      Good
    #                  'Speed 2'      'Speed 1' + repeat -> Good
    #                  'Speed 3'      'Speed 1' + repeat -> Good
    #                  'Speed 4'      'Speed 1' + repeat -> Good
    #
    # To allow switching from any mode to any other mode, the command is
    # repeated twice when switching from 'Night mode' or 'Auto' to 'Speed X'.

    await self._try_command(
        "Setting preset mode of the miio device failed.",
        self._device.set_mode_and_speed,
        # Corresponding mode and speed parameters are in a tuple
        *self._preset_modes_to_mode_speed[preset_mode],
    )

    if (
        self._state_attrs[ATTR_MODE] in ("auto", "sleep")
        and self._preset_modes_to_mode_speed[preset_mode][0].value == "manual"
    ):
        await self._try_command(
            "Setting preset mode of the miio device failed.",
            self._device.set_mode_and_speed,
            # Corresponding mode and speed parameters are in a tuple
            *self._preset_modes_to_mode_speed[preset_mode],
        )

    self._state_attrs.update(
        {
            ATTR_MODE: self._preset_modes_to_mode_speed[preset_mode][0].value,
            ATTR_SPEED: self._preset_modes_to_mode_speed[preset_mode][1],
        }
    )
    self._skip_update = True
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
def get_from_config():
    """Get benchmarks configuration from the config.json file"""
    current_path = Path(__file__).resolve().parent

    config_path = current_path / "config.json"
    with open(config_path, "r") as config_file:
        config_file = "".join(line for line in config_file if line and "//" not in line)
        config = json.loads(config_file)

    profile = os.getenv("SKLBENCH_PROFILE", config["profile"])

    n_jobs_vals_env = os.getenv("SKLBENCH_NJOBS")
    if n_jobs_vals_env:
        n_jobs_vals = json.loads(n_jobs_vals_env)
    else:
        n_jobs_vals = config["n_jobs_vals"]
    if not n_jobs_vals:
        n_jobs_vals = list(range(1, 1 + cpu_count()))

    cache_path = current_path / "cache"
    cache_path.mkdir(exist_ok=True)
    (cache_path / "estimators").mkdir(exist_ok=True)
    (cache_path / "tmp").mkdir(exist_ok=True)

    save_estimators = os.getenv("SKLBENCH_SAVE_ESTIMATORS", config["save_estimators"])
    save_dir = os.getenv("ASV_COMMIT", "new")[:8]

    if save_estimators:
        (cache_path / "estimators" / save_dir).mkdir(exist_ok=True)

    base_commit = os.getenv("SKLBENCH_BASE_COMMIT", config["base_commit"])

    bench_predict = os.getenv("SKLBENCH_PREDICT", config["bench_predict"])
    bench_transform = os.getenv("SKLBENCH_TRANSFORM", config["bench_transform"])

    return (
        profile,
        n_jobs_vals,
        save_estimators,
        save_dir,
        base_commit,
        bench_predict,
        bench_transform,
    )
Get benchmarks configuration from the config.json file
get_from_config
python
scikit-learn/scikit-learn
asv_benchmarks/benchmarks/common.py
https://github.com/scikit-learn/scikit-learn/blob/master/asv_benchmarks/benchmarks/common.py
BSD-3-Clause
def get_estimator_path(benchmark, directory, params, save=False):
    """Get path of pickled fitted estimator"""
    path = Path(__file__).resolve().parent / "cache"
    path = (path / "estimators" / directory) if save else (path / "tmp")

    filename = (
        benchmark.__class__.__name__
        + "_estimator_"
        + "_".join(list(map(str, params)))
        + ".pkl"
    )

    return path / filename
Get path of pickled fitted estimator
get_estimator_path
python
scikit-learn/scikit-learn
asv_benchmarks/benchmarks/common.py
https://github.com/scikit-learn/scikit-learn/blob/master/asv_benchmarks/benchmarks/common.py
BSD-3-Clause
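To illustrate how get_estimator_path composes a cache filename, here is a minimal sketch with a made-up benchmark class and parameter tuple; only the string-building logic is reproduced:

from pathlib import Path

class KMeansBenchmark:  # hypothetical Benchmark subclass
    pass

benchmark = KMeansBenchmark()
params = ("dense", 4)
filename = (
    benchmark.__class__.__name__
    + "_estimator_"
    + "_".join(map(str, params))
    + ".pkl"
)
print(Path("cache") / "tmp" / filename)
# cache/tmp/KMeansBenchmark_estimator_dense_4.pkl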
def make_data(self, params):
    """Return the dataset for a combination of parameters"""
    # The datasets are cached using joblib.Memory so it's fast and can be
    # called for each repeat
    pass
Return the dataset for a combination of parameters
make_data
python
scikit-learn/scikit-learn
asv_benchmarks/benchmarks/common.py
https://github.com/scikit-learn/scikit-learn/blob/master/asv_benchmarks/benchmarks/common.py
BSD-3-Clause
def setup_cache(self):
    """Pickle a fitted estimator for all combinations of parameters"""
    # This is run once per benchmark class.
    clear_tmp()

    param_grid = list(itertools.product(*self.params))

    for params in param_grid:
        if self.skip(params):
            continue

        estimator = self.make_estimator(params)
        X, _, y, _ = self.make_data(params)

        estimator.fit(X, y)

        est_path = get_estimator_path(
            self, Benchmark.save_dir, params, Benchmark.save_estimators
        )
        with est_path.open(mode="wb") as f:
            pickle.dump(estimator, f)
Pickle a fitted estimator for all combinations of parameters
setup_cache
python
scikit-learn/scikit-learn
asv_benchmarks/benchmarks/common.py
https://github.com/scikit-learn/scikit-learn/blob/master/asv_benchmarks/benchmarks/common.py
BSD-3-Clause
def setup(self, *params):
    """Generate dataset and load the fitted estimator"""
    # This is run once per combination of parameters and per repeat so we
    # need to avoid doing expensive operations there.
    if self.skip(params):
        raise NotImplementedError

    self.X, self.X_val, self.y, self.y_val = self.make_data(params)

    est_path = get_estimator_path(
        self, Benchmark.save_dir, params, Benchmark.save_estimators
    )
    with est_path.open(mode="rb") as f:
        self.estimator = pickle.load(f)

    self.make_scorers()
Generate dataset and load the fitted estimator
setup
python
scikit-learn/scikit-learn
asv_benchmarks/benchmarks/common.py
https://github.com/scikit-learn/scikit-learn/blob/master/asv_benchmarks/benchmarks/common.py
BSD-3-Clause
def load_data(dtype=np.float32, order="C", random_state=13):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_covtype(
        download_if_missing=True, shuffle=True, random_state=random_state
    )
    X = check_array(data["data"], dtype=dtype, order=order)
    y = (data["target"] != 1).astype(int)

    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]

    # Standardize first 10 features (the numerical ones)
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
Load the data, then cache and memmap the train/test split
load_data
python
scikit-learn/scikit-learn
benchmarks/bench_covertype.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_covertype.py
BSD-3-Clause
def print_outlier_ratio(y):
    """
    Helper function to show the distinct value count of element in the target.
    Useful indicator for the datasets used in bench_isolation_forest.py.
    """
    uniq, cnt = np.unique(y, return_counts=True)
    print("----- Target count values: ")
    for u, c in zip(uniq, cnt):
        print("------ %s -> %d occurrences" % (str(u), c))
    print("----- Outlier ratio: %.5f" % (np.min(cnt) / len(y)))
Helper function to show the distinct value count of element in the target. Useful indicator for the datasets used in bench_isolation_forest.py.
print_outlier_ratio
python
scikit-learn/scikit-learn
benchmarks/bench_isolation_forest.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_isolation_forest.py
BSD-3-Clause
def get_data(
    n_samples_train, n_samples_test, n_features, contamination=0.1, random_state=0
):
    """Function based on code from:
    https://scikit-learn.org/stable/auto_examples/ensemble/plot_isolation_forest.html#sphx-glr-auto-examples-ensemble-plot-isolation-forest-py
    """
    rng = np.random.RandomState(random_state)

    X = 0.3 * rng.randn(n_samples_train, n_features)
    X_train = np.r_[X + 2, X - 2]

    X = 0.3 * rng.randn(n_samples_test, n_features)
    X_test = np.r_[X + 2, X - 2]

    n_outliers = int(np.floor(contamination * n_samples_test))
    X_outliers = rng.uniform(low=-4, high=4, size=(n_outliers, n_features))

    outlier_idx = rng.choice(np.arange(0, n_samples_test), n_outliers, replace=False)
    X_test[outlier_idx, :] = X_outliers

    return X_train, X_test
Function based on code from: https://scikit-learn.org/stable/auto_examples/ensemble/plot_isolation_forest.html#sphx-glr-auto-examples-ensemble-plot-isolation-forest-py
get_data
python
scikit-learn/scikit-learn
benchmarks/bench_isolation_forest_predict.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_isolation_forest_predict.py
BSD-3-Clause
def bench_isotonic_regression(Y):
    """
    Runs a single iteration of isotonic regression on the input data,
    and reports the total time taken (in seconds).
    """
    gc.collect()

    tstart = default_timer()
    isotonic_regression(Y)
    return default_timer() - tstart
Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds).
bench_isotonic_regression
python
scikit-learn/scikit-learn
benchmarks/bench_isotonic.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_isotonic.py
BSD-3-Clause
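The same collect-then-time pattern works for any callable; a minimal sketch, where the helper name time_once is invented for illustration:

import gc
from timeit import default_timer

def time_once(func, *args):
    gc.collect()  # keep garbage-collection pauses out of the measurement
    tstart = default_timer()
    func(*args)
    return default_timer() - tstart

print(time_once(sorted, list(range(100_000))))  # elapsed seconds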
def load_data(dtype=np.float32, order="F"):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_openml("mnist_784", as_frame=True)
    X = check_array(data["data"], dtype=dtype, order=order)
    y = data["target"]

    # Normalize features
    X = X / 255

    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 60000
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]

    return X_train, X_test, y_train, y_test
Load the data, then cache and memmap the train/test split
load_data
python
scikit-learn/scikit-learn
benchmarks/bench_mnist.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_mnist.py
BSD-3-Clause
def benchmark(
    metrics=tuple(v for k, v in sorted(METRICS.items())),
    formats=tuple(v for k, v in sorted(FORMATS.items())),
    samples=1000,
    classes=4,
    density=0.2,
    n_times=5,
):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros(
        (len(metrics), len(formats), len(samples), len(classes), len(density)),
        dtype=float,
    )
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        _, y_true = make_multilabel_classification(
            n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42
        )
        _, y_pred = make_multilabel_classification(
            n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84
        )
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                out[k, j].flat[i] = t
    return out
Times metric calculations for a number of inputs

Parameters
----------
metrics : array-like of callables (1d or 0d)
    The metric functions to time.
formats : array-like of callables (1d or 0d)
    These may transform a dense indicator matrix into multilabel representation.
samples : array-like of ints (1d or 0d)
    The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
    The number of classes in the input.
density : array-like of ints (1d or 0d)
    The density of positive labels in the input.
n_times : int
    Time calling the metric n_times times.

Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
    Time in seconds.
benchmark
python
scikit-learn/scikit-learn
benchmarks/bench_multilabel_metrics.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_multilabel_metrics.py
BSD-3-Clause
def _tabulate(results, metrics, formats):
    """Prints results by metric and format

    Uses the last ([-1]) value of other fields
    """
    column_width = max(max(len(k) for k in formats) + 1, 8)
    first_width = max(len(k) for k in metrics)
    head_fmt = "{:<{fw}s}" + "{:>{cw}s}" * len(formats)
    row_fmt = "{:<{fw}s}" + "{:>{cw}.3f}" * len(formats)
    print(head_fmt.format("Metric", *formats, cw=column_width, fw=first_width))
    for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
        print(row_fmt.format(metric, *row, cw=column_width, fw=first_width))
Prints results by metric and format Uses the last ([-1]) value of other fields
_tabulate
python
scikit-learn/scikit-learn
benchmarks/bench_multilabel_metrics.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_multilabel_metrics.py
BSD-3-Clause
def _plot(
    results,
    metrics,
    formats,
    title,
    x_ticks,
    x_label,
    format_markers=("x", "|", "o", "+"),
    metric_colors=("c", "m", "y", "k", "g", "r", "b"),
):
    """
    Plot the results by metric, format and some other variable given by
    x_label
    """
    fig = plt.figure("scikit-learn multilabel metrics benchmarks")
    plt.title(title)
    ax = fig.add_subplot(111)
    for i, metric in enumerate(metrics):
        for j, format in enumerate(formats):
            ax.plot(
                x_ticks,
                results[i, j].flat,
                label="{}, {}".format(metric, format),
                marker=format_markers[j],
                color=metric_colors[i % len(metric_colors)],
            )
    ax.set_xlabel(x_label)
    ax.set_ylabel("Time (s)")
    ax.legend()
    plt.show()
Plot the results by metric, format and some other variable given by x_label
_plot
python
scikit-learn/scikit-learn
benchmarks/bench_multilabel_metrics.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_multilabel_metrics.py
BSD-3-Clause
def print_outlier_ratio(y):
    """
    Helper function to show the distinct value count of element in the target.
    Useful indicator for the datasets used in bench_isolation_forest.py.
    """
    uniq, cnt = np.unique(y, return_counts=True)
    print("----- Target count values: ")
    for u, c in zip(uniq, cnt):
        print("------ %s -> %d occurrences" % (str(u), c))
    print("----- Outlier ratio: %.5f" % (np.min(cnt) / len(y)))
Helper function to show the distinct value count of element in the target. Useful indicator for the datasets used in bench_isolation_forest.py.
print_outlier_ratio
python
scikit-learn/scikit-learn
benchmarks/bench_online_ocsvm.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_online_ocsvm.py
BSD-3-Clause
def autolabel_auc(rects, ax):
    """Attach a text label above each bar displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.text(
            rect.get_x() + rect.get_width() / 2.0,
            1.05 * height,
            "%.3f" % height,
            ha="center",
            va="bottom",
        )
Attach a text label above each bar displaying its height.
autolabel_auc
python
scikit-learn/scikit-learn
benchmarks/bench_online_ocsvm.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_online_ocsvm.py
BSD-3-Clause
def autolabel_time(rects, ax):
    """Attach a text label above each bar displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.text(
            rect.get_x() + rect.get_width() / 2.0,
            1.05 * height,
            "%.1f" % height,
            ha="center",
            va="bottom",
        )
Attach a text label above each bar displaying its height.
autolabel_time
python
scikit-learn/scikit-learn
benchmarks/bench_online_ocsvm.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_online_ocsvm.py
BSD-3-Clause
def _nls_subproblem(
    X, W, H, tol, max_iter, alpha=0.0, l1_ratio=0.0, sigma=0.01, beta=0.1
):
    """Non-negative least square solver

    Solves a non-negative least squares subproblem using the projected
    gradient descent algorithm.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.

    W : array-like, shape (n_samples, n_components)
        Constant matrix.

    H : array-like, shape (n_components, n_features)
        Initial guess for the solution.

    tol : float
        Tolerance of the stopping condition.

    max_iter : int
        Maximum number of iterations before timing out.

    alpha : double, default: 0.
        Constant that multiplies the regularization terms. Set it to zero to
        have no regularization.

    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.

    sigma : float
        Constant used in the sufficient decrease condition checked by the
        line search. Smaller values lead to a looser sufficient decrease
        condition, thus reducing the time taken by the line search, but
        potentially increasing the number of iterations of the projected
        gradient procedure. 0.01 is a commonly used value in the optimization
        literature.

    beta : float
        Factor by which the step size is decreased (resp. increased) until
        (resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow to find a better step size but lead to longer
        line search. 0.1 is a commonly used value in the optimization
        literature.

    Returns
    -------
    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    grad : array-like, shape (n_components, n_features)
        The gradient.

    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    https://www.csie.ntu.edu.tw/~cjlin/nmf/
    """
    WtX = safe_sparse_dot(W.T, X)
    WtW = np.dot(W.T, W)

    # values justified in the paper (alpha is renamed gamma)
    gamma = 1
    for n_iter in range(1, max_iter + 1):
        grad = np.dot(WtW, H) - WtX
        if alpha > 0 and l1_ratio == 1.0:
            grad += alpha
        elif alpha > 0:
            grad += alpha * (l1_ratio + (1 - l1_ratio) * H)

        # The following multiplication with a boolean array is more than twice
        # as fast as indexing into grad.
        if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
            break

        Hp = H

        for inner_iter in range(20):
            # Gradient step.
            Hn = H - gamma * grad
            # Projection step.
            Hn *= Hn > 0
            d = Hn - H
            gradd = np.dot(grad.ravel(), d.ravel())
            dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
            suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
            if inner_iter == 0:
                decr_gamma = not suff_decr

            if decr_gamma:
                if suff_decr:
                    H = Hn
                    break
                else:
                    gamma *= beta
            elif not suff_decr or (Hp == Hn).all():
                H = Hp
                break
            else:
                gamma /= beta
                Hp = Hn

    if n_iter == max_iter:
        warnings.warn("Iteration limit reached in nls subproblem.", ConvergenceWarning)

    return H, grad, n_iter
Non-negative least square solver

Solves a non-negative least squares subproblem using the projected gradient
descent algorithm.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Constant matrix.
W : array-like, shape (n_samples, n_components)
    Constant matrix.
H : array-like, shape (n_components, n_features)
    Initial guess for the solution.
tol : float
    Tolerance of the stopping condition.
max_iter : int
    Maximum number of iterations before timing out.
alpha : double, default: 0.
    Constant that multiplies the regularization terms. Set it to zero to have
    no regularization.
l1_ratio : double, default: 0.
    The regularization mixing parameter, with 0 <= l1_ratio <= 1.
    For l1_ratio = 0 the penalty is an L2 penalty.
    For l1_ratio = 1 it is an L1 penalty.
    For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
    Constant used in the sufficient decrease condition checked by the line
    search. Smaller values lead to a looser sufficient decrease condition,
    thus reducing the time taken by the line search, but potentially
    increasing the number of iterations of the projected gradient procedure.
    0.01 is a commonly used value in the optimization literature.
beta : float
    Factor by which the step size is decreased (resp. increased) until
    (resp. as long as) the sufficient decrease condition is satisfied.
    Larger values allow to find a better step size but lead to longer line
    search. 0.1 is a commonly used value in the optimization literature.

Returns
-------
H : array-like, shape (n_components, n_features)
    Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
    The gradient.
n_iter : int
    The number of iterations done by the algorithm.

References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
https://www.csie.ntu.edu.tw/~cjlin/nmf/
_nls_subproblem
python
scikit-learn/scikit-learn
benchmarks/bench_plot_nmf.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_plot_nmf.py
BSD-3-Clause
def norm_diff(A, norm=2, msg=True, random_state=None):
    """
    Compute the norm diff with the original matrix, when randomized
    SVD is called with *params.

    norm: 2 => spectral; 'fro' => Frobenius
    """
    if msg:
        print("... computing %s norm ..." % norm)
    if norm == 2:
        # s = sp.linalg.norm(A, ord=2)  # slow
        v0 = _init_arpack_v0(min(A.shape), random_state)
        value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False, v0=v0)
    else:
        if sp.sparse.issparse(A):
            value = sp.sparse.linalg.norm(A, ord=norm)
        else:
            value = sp.linalg.norm(A, ord=norm)
    return value
Compute the norm diff with the original matrix, when randomized SVD is called with *params. norm: 2 => spectral; 'fro' => Frobenius
norm_diff
python
scikit-learn/scikit-learn
benchmarks/bench_plot_randomized_svd.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_plot_randomized_svd.py
BSD-3-Clause
def bench_scikit_tree_classifier(X, Y):
    """Benchmark with scikit-learn decision tree classifier"""
    from sklearn.tree import DecisionTreeClassifier

    gc.collect()

    # start time
    tstart = datetime.now()
    clf = DecisionTreeClassifier()
    clf.fit(X, Y).predict(X)
    delta = datetime.now() - tstart
    # stop time

    scikit_classifier_results.append(delta.seconds + delta.microseconds / mu_second)
Benchmark with scikit-learn decision tree classifier
bench_scikit_tree_classifier
python
scikit-learn/scikit-learn
benchmarks/bench_tree.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_tree.py
BSD-3-Clause
def bench_scikit_tree_regressor(X, Y):
    """Benchmark with scikit-learn decision tree regressor"""
    from sklearn.tree import DecisionTreeRegressor

    gc.collect()

    # start time
    tstart = datetime.now()
    clf = DecisionTreeRegressor()
    clf.fit(X, Y).predict(X)
    delta = datetime.now() - tstart
    # stop time

    scikit_regressor_results.append(delta.seconds + delta.microseconds / mu_second)
Benchmark with scikit-learn decision tree regressor
bench_scikit_tree_regressor
python
scikit-learn/scikit-learn
benchmarks/bench_tree.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_tree.py
BSD-3-Clause
def load_data(dtype=np.float32, order="C", shuffle=True, seed=0):
    """Load the data, then cache and memmap the train/test split"""
    print("Loading dataset...")
    data = fetch_openml("mnist_784", as_frame=True)

    X = check_array(data["data"], dtype=dtype, order=order)
    y = data["target"]

    if shuffle:
        X, y = _shuffle(X, y, random_state=seed)

    # Normalize features
    X /= 255
    return X, y
Load the data, then cache and memmap the train/test split
load_data
python
scikit-learn/scikit-learn
benchmarks/bench_tsne_mnist.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_tsne_mnist.py
BSD-3-Clause
def nn_accuracy(X, X_embedded, k=1):
    """Accuracy of the first nearest neighbor"""
    knn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
    _, neighbors_X = knn.fit(X).kneighbors()
    _, neighbors_X_embedded = knn.fit(X_embedded).kneighbors()
    return np.mean(neighbors_X == neighbors_X_embedded)
Accuracy of the first nearest neighbor
nn_accuracy
python
scikit-learn/scikit-learn
benchmarks/bench_tsne_mnist.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_tsne_mnist.py
BSD-3-Clause
def bhtsne(X):
    """Wrapper for the reference lvdmaaten/bhtsne implementation."""
    # PCA preprocessing is done elsewhere in the benchmark script
    n_iter = -1  # TODO find a way to report the number of iterations
    return (
        run_bh_tsne(
            X,
            use_pca=False,
            perplexity=args.perplexity,
            verbose=args.verbose > 0,
        ),
        n_iter,
    )
Wrapper for the reference lvdmaaten/bhtsne implementation.
bhtsne
python
scikit-learn/scikit-learn
benchmarks/bench_tsne_mnist.py
https://github.com/scikit-learn/scikit-learn/blob/master/benchmarks/bench_tsne_mnist.py
BSD-3-Clause
def has_openmp_flags(target):
    """Return whether target sources use OpenMP flags.

    Make sure that both compiler and linker source use OpenMP.
    Look at `get_meson_info` docstring to see what `target` looks like.
    """
    target_sources = target["target_sources"]

    target_use_openmp_flags = any(
        has_source_openmp_flags(target_source) for target_source in target_sources
    )

    if not target_use_openmp_flags:
        return False

    # When the target use OpenMP we expect a compiler + linker source and we
    # want to make sure that both the compiler and the linker use OpenMP
    assert len(target_sources) == 2
    compiler_source, linker_source = target_sources
    assert "compiler" in compiler_source
    assert "linker" in linker_source

    compiler_use_openmp_flags = any(
        "openmp" in arg for arg in compiler_source["parameters"]
    )
    linker_use_openmp_flags = any(
        "openmp" in arg for arg in linker_source["parameters"]
    )

    assert compiler_use_openmp_flags == linker_use_openmp_flags
    return compiler_use_openmp_flags
Return whether target sources use OpenMP flags. Make sure that both compiler and linker source use OpenMP. Look at `get_meson_info` docstring to see what `target` looks like.
has_openmp_flags
python
scikit-learn/scikit-learn
build_tools/check-meson-openmp-dependencies.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/check-meson-openmp-dependencies.py
BSD-3-Clause
def get_canonical_name_meson(target, build_path):
    """Return a name based on generated shared library.

    The goal is to return a name that can be easily matched with the output
    from `git_grep_info`.

    Look at `get_meson_info` docstring to see what `target` looks like.
    """
    # Expect a list with one element with the name of the shared library
    assert len(target["filename"]) == 1

    shared_library_path = Path(target["filename"][0])
    shared_library_relative_path = shared_library_path.relative_to(
        build_path.absolute()
    )
    # Needed on Windows to match git grep output
    rel_path = shared_library_relative_path.as_posix()
    # OS-specific naming of the shared library .cpython- on POSIX and
    # something like .cp312- on Windows
    pattern = r"\.(cpython|cp\d+)-.+"
    return re.sub(pattern, "", str(rel_path))
Return a name based on generated shared library. The goal is to return a name that can be easily matched with the output from `git_grep_info`. Look at `get_meson_info` docstring to see what `target` looks like.
get_canonical_name_meson
python
scikit-learn/scikit-learn
build_tools/check-meson-openmp-dependencies.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/check-meson-openmp-dependencies.py
BSD-3-Clause
def get_meson_info():
    """Return names of extension that use OpenMP based on meson introspect output.

    The meson introspect json info is a list of targets where a target is a
    dict that looks like this (parts not used in this script are not shown
    for simplicity):

    {
      'name': '_k_means_elkan.cpython-312-x86_64-linux-gnu',
      'filename': [
        '<meson_build_dir>/sklearn/cluster/_k_means_elkan.cpython-312-x86_64-linux-gnu.so'
      ],
      'target_sources': [
        {
          'compiler': ['ccache', 'cc'],
          'parameters': [
            '-Wall',
            '-std=c11',
            '-fopenmp',
            ...
          ],
          ...
        },
        {
          'linker': ['cc'],
          'parameters': [
            '-shared',
            '-fPIC',
            '-fopenmp',
            ...
          ]
        }
      ]
    }
    """
    build_path = Path("build/introspect")
    subprocess.check_call(["meson", "setup", build_path, "--reconfigure"])

    json_out = subprocess.check_output(
        ["meson", "introspect", build_path, "--targets"], text=True
    )
    target_list = json.loads(json_out)
    meson_targets = [target for target in target_list if has_openmp_flags(target)]

    return [get_canonical_name_meson(each, build_path) for each in meson_targets]
Return names of extension that use OpenMP based on meson introspect output.

The meson introspect json info is a list of targets where a target is a dict
that looks like this (parts not used in this script are not shown for
simplicity):

{
  'name': '_k_means_elkan.cpython-312-x86_64-linux-gnu',
  'filename': [
    '<meson_build_dir>/sklearn/cluster/_k_means_elkan.cpython-312-x86_64-linux-gnu.so'
  ],
  'target_sources': [
    {
      'compiler': ['ccache', 'cc'],
      'parameters': ['-Wall', '-std=c11', '-fopenmp', ...],
      ...
    },
    {
      'linker': ['cc'],
      'parameters': ['-shared', '-fPIC', '-fopenmp', ...]
    }
  ]
}
get_meson_info
python
scikit-learn/scikit-learn
build_tools/check-meson-openmp-dependencies.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/check-meson-openmp-dependencies.py
BSD-3-Clause
def get_git_grep_info():
    """Return names of extensions that use OpenMP based on git grep regex."""
    git_grep_filenames = subprocess.check_output(
        ["git", "grep", "-lP", "cython.*parallel|_openmp_helpers"], text=True
    ).splitlines()
    git_grep_filenames = [f for f in git_grep_filenames if ".pyx" in f]

    return [get_canonical_name_git_grep(each) for each in git_grep_filenames]
Return names of extensions that use OpenMP based on git grep regex.
get_git_grep_info
python
scikit-learn/scikit-learn
build_tools/check-meson-openmp-dependencies.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/check-meson-openmp-dependencies.py
BSD-3-Clause
def get_contributors():
    """Get the list of contributor profiles. Require admin rights."""
    # get core devs and contributor experience team
    core_devs = []
    documentation_team = []
    contributor_experience_team = []
    comm_team = []
    core_devs_slug = "core-devs"
    contributor_experience_team_slug = "contributor-experience-team"
    comm_team_slug = "communication-team"
    documentation_team_slug = "documentation-team"

    entry_point = "https://api.github.com/orgs/scikit-learn/"

    for team_slug, lst in zip(
        (
            core_devs_slug,
            contributor_experience_team_slug,
            comm_team_slug,
            documentation_team_slug,
        ),
        (core_devs, contributor_experience_team, comm_team, documentation_team),
    ):
        print(f"Retrieving {team_slug}\n")
        for page in [1, 2]:  # 30 per page
            reply = get(f"{entry_point}teams/{team_slug}/members?page={page}")
            lst.extend(reply.json())

    # get members of scikit-learn on GitHub
    print("Retrieving members\n")
    members = []
    for page in [1, 2, 3]:  # 30 per page
        reply = get(f"{entry_point}members?page={page}")
        members.extend(reply.json())

    # keep only the logins
    core_devs = set(c["login"] for c in core_devs)
    documentation_team = set(c["login"] for c in documentation_team)
    contributor_experience_team = set(c["login"] for c in contributor_experience_team)
    comm_team = set(c["login"] for c in comm_team)
    members = set(c["login"] for c in members)

    # add missing contributors with GitHub accounts
    members |= {"dubourg", "mbrucher", "thouis", "jarrodmillman"}
    # add missing contributors without GitHub accounts
    members |= {"Angel Soler Gollonet"}
    # remove CI bots
    members -= {"sklearn-ci", "sklearn-wheels", "sklearn-lgtm"}
    contributor_experience_team -= (
        core_devs  # remove ogrisel from contributor_experience_team
    )

    emeritus = (
        members
        - core_devs
        - contributor_experience_team
        - comm_team
        - documentation_team
    )

    # hard coded
    emeritus_contributor_experience_team = {
        "cmarmo",
    }
    emeritus_comm_team = {"reshamas"}

    # Up-to-now, we can subtract the team emeritus from the original emeritus
    emeritus -= emeritus_contributor_experience_team | emeritus_comm_team

    comm_team -= {"reshamas"}  # in the comm team but not on the web page

    # get profiles from GitHub
    core_devs = [get_profile(login) for login in core_devs]
    emeritus = [get_profile(login) for login in emeritus]
    contributor_experience_team = [
        get_profile(login) for login in contributor_experience_team
    ]
    emeritus_contributor_experience_team = [
        get_profile(login) for login in emeritus_contributor_experience_team
    ]
    comm_team = [get_profile(login) for login in comm_team]
    emeritus_comm_team = [get_profile(login) for login in emeritus_comm_team]
    documentation_team = [get_profile(login) for login in documentation_team]

    # sort by last name
    core_devs = sorted(core_devs, key=key)
    emeritus = sorted(emeritus, key=key)
    contributor_experience_team = sorted(contributor_experience_team, key=key)
    emeritus_contributor_experience_team = sorted(
        emeritus_contributor_experience_team, key=key
    )
    documentation_team = sorted(documentation_team, key=key)
    comm_team = sorted(comm_team, key=key)
    emeritus_comm_team = sorted(emeritus_comm_team, key=key)

    return (
        core_devs,
        emeritus,
        contributor_experience_team,
        emeritus_contributor_experience_team,
        comm_team,
        emeritus_comm_team,
        documentation_team,
    )
Get the list of contributor profiles. Require admin rights.
get_contributors
python
scikit-learn/scikit-learn
build_tools/generate_authors_table.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/generate_authors_table.py
BSD-3-Clause
def get_profile(login):
    """Get the GitHub profile from login"""
    print("get profile for %s" % (login,))
    try:
        profile = get("https://api.github.com/users/%s" % login).json()
    except requests.exceptions.HTTPError:
        return dict(name=login, avatar_url=LOGO_URL, html_url="")

    if profile["name"] is None:
        profile["name"] = profile["login"]

    # fix missing names
    missing_names = {
        "bthirion": "Bertrand Thirion",
        "dubourg": "Vincent Dubourg",
        "Duchesnay": "Edouard Duchesnay",
        "Lars": "Lars Buitinck",
        "MechCoder": "Manoj Kumar",
    }
    if profile["name"] in missing_names:
        profile["name"] = missing_names[profile["name"]]

    return profile
Get the GitHub profile from login
get_profile
python
scikit-learn/scikit-learn
build_tools/generate_authors_table.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/generate_authors_table.py
BSD-3-Clause
def key(profile):
    """Get a sorting key based on the lower case last name, then firstname"""
    components = profile["name"].lower().split(" ")
    return " ".join([components[-1]] + components[:-1])
Get a sorting key based on the lower case last name, then firstname
key
python
scikit-learn/scikit-learn
build_tools/generate_authors_table.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/generate_authors_table.py
BSD-3-Clause
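A quick demonstration of the sort order key() produces, using invented profile dicts:

def key(profile):
    components = profile["name"].lower().split(" ")
    return " ".join([components[-1]] + components[:-1])

profiles = [{"name": "Ada Lovelace"}, {"name": "Alan Turing"}, {"name": "Grace Hopper"}]
print([p["name"] for p in sorted(profiles, key=key)])
# ['Grace Hopper', 'Ada Lovelace', 'Alan Turing']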
def get_versions(versions_file):
    """Get the versions of the packages used in the linter job.

    Parameters
    ----------
    versions_file : str
        The path to the file that contains the versions of the packages.

    Returns
    -------
    versions : dict
        A dictionary with the versions of the packages.
    """
    # Read from the given path rather than a hardcoded "versions.txt",
    # so the documented parameter is actually used.
    with open(versions_file, "r") as f:
        return dict(line.strip().split("=") for line in f)
Get the versions of the packages used in the linter job.

Parameters
----------
versions_file : str
    The path to the file that contains the versions of the packages.

Returns
-------
versions : dict
    A dictionary with the versions of the packages.
get_versions
python
scikit-learn/scikit-learn
build_tools/get_comment.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/get_comment.py
BSD-3-Clause
def get_step_message(log, start, end, title, message, details):
    """Get the message for a specific test.

    Parameters
    ----------
    log : str
        The log of the linting job.

    start : str
        The string that marks the start of the test.

    end : str
        The string that marks the end of the test.

    title : str
        The title for this section.

    message : str
        The message to be added at the beginning of the section.

    details : bool
        Whether to add the details of each step.

    Returns
    -------
    message : str
        The message to be added to the comment.
    """
    if end not in log:
        return ""
    res = (
        f"-----------------------------------------------\n### {title}\n\n{message}\n\n"
    )
    if details:
        res += (
            "<details>\n\n```\n"
            + log[log.find(start) + len(start) + 1 : log.find(end) - 1]
            + "\n```\n\n</details>\n\n"
        )
    return res
Get the message for a specific test.

Parameters
----------
log : str
    The log of the linting job.
start : str
    The string that marks the start of the test.
end : str
    The string that marks the end of the test.
title : str
    The title for this section.
message : str
    The message to be added at the beginning of the section.
details : bool
    Whether to add the details of each step.

Returns
-------
message : str
    The message to be added to the comment.
get_step_message
python
scikit-learn/scikit-learn
build_tools/get_comment.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/get_comment.py
BSD-3-Clause
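The slicing expression in get_step_message extracts the text strictly between the start and end markers, dropping the newline after start and the one before end. A toy illustration with an invented log:

log = "...\nSTART\nE501 line too long\nEND\n..."
start, end = "START", "END"
print(log[log.find(start) + len(start) + 1 : log.find(end) - 1])
# E501 line too long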
def get_headers(token):
    """Get the headers for the GitHub API."""
    return {
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {token}",
        "X-GitHub-Api-Version": "2022-11-28",
    }
Get the headers for the GitHub API.
get_headers
python
scikit-learn/scikit-learn
build_tools/get_comment.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/get_comment.py
BSD-3-Clause
def create_or_update_comment(comment, message, repo, pr_number, token):
    """Create a new comment or update existing one."""
    # repo is in the form of "org/repo"
    if comment is not None:
        print("updating existing comment")
        # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#update-an-issue-comment
        response = requests.patch(
            f"https://api.github.com/repos/{repo}/issues/comments/{comment['id']}",
            headers=get_headers(token),
            json={"body": message},
        )
    else:
        print("creating new comment")
        # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#create-an-issue-comment
        response = requests.post(
            f"https://api.github.com/repos/{repo}/issues/{pr_number}/comments",
            headers=get_headers(token),
            json={"body": message},
        )

    response.raise_for_status()
Create a new comment or update existing one.
create_or_update_comment
python
scikit-learn/scikit-learn
build_tools/get_comment.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/get_comment.py
BSD-3-Clause
def make_distributor_init_64_bits(
    distributor_init,
    vcomp140_dll_filename,
    msvcp140_dll_filename,
):
    """Create a _distributor_init.py file for 64-bit architectures.

    This file is imported first when importing the sklearn package so as
    to pre-load the vendored vcomp140.dll and msvcp140.dll.
    """
    with open(distributor_init, "wt") as f:
        f.write(
            textwrap.dedent(
                """
            '''Helper to preload vcomp140.dll and msvcp140.dll to prevent
            "not found" errors.

            Once vcomp140.dll and msvcp140.dll are preloaded, the namespace
            is made available to any subsequent vcomp140.dll and
            msvcp140.dll. This is created as part of the scripts that build
            the wheel.
            '''

            import os
            import os.path as op
            from ctypes import WinDLL

            if os.name == "nt":
                libs_path = op.join(op.dirname(__file__), ".libs")
                vcomp140_dll_filename = op.join(libs_path, "{0}")
                msvcp140_dll_filename = op.join(libs_path, "{1}")
                WinDLL(op.abspath(vcomp140_dll_filename))
                WinDLL(op.abspath(msvcp140_dll_filename))
            """.format(
                    vcomp140_dll_filename,
                    msvcp140_dll_filename,
                )
            )
        )
Create a _distributor_init.py file for 64-bit architectures. This file is imported first when importing the sklearn package so as to pre-load the vendored vcomp140.dll and msvcp140.dll.
make_distributor_init_64_bits
python
scikit-learn/scikit-learn
build_tools/github/vendor.py
https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/github/vendor.py
BSD-3-Clause
def _get_guide(*refs, is_developer=False):
    """Get the rst to refer to user/developer guide.

    `refs` is several references that can be used in the :ref:`...` directive.
    """
    if len(refs) == 1:
        ref_desc = f":ref:`{refs[0]}` section"
    elif len(refs) == 2:
        ref_desc = f":ref:`{refs[0]}` and :ref:`{refs[1]}` sections"
    else:
        ref_desc = ", ".join(f":ref:`{ref}`" for ref in refs[:-1])
        ref_desc += f", and :ref:`{refs[-1]}` sections"

    guide_name = "Developer" if is_developer else "User"
    return f"**{guide_name} guide.** See the {ref_desc} for further details."
Get the rst to refer to user/developer guide. `refs` is several references that can be used in the :ref:`...` directive.
_get_guide
python
scikit-learn/scikit-learn
doc/api_reference.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/api_reference.py
BSD-3-Clause
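Example outputs of _get_guide for one and two references; the ref names here are made up:

def _get_guide(*refs, is_developer=False):
    if len(refs) == 1:
        ref_desc = f":ref:`{refs[0]}` section"
    elif len(refs) == 2:
        ref_desc = f":ref:`{refs[0]}` and :ref:`{refs[1]}` sections"
    else:
        ref_desc = ", ".join(f":ref:`{ref}`" for ref in refs[:-1])
        ref_desc += f", and :ref:`{refs[-1]}` sections"
    guide_name = "Developer" if is_developer else "User"
    return f"**{guide_name} guide.** See the {ref_desc} for further details."

print(_get_guide("clustering"))
# **User guide.** See the :ref:`clustering` section for further details.
print(_get_guide("svm", "kernels", is_developer=True))
# **Developer guide.** See the :ref:`svm` and :ref:`kernels` sections for further details.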
def _get_submodule(module_name, submodule_name):
    """Get the submodule docstring and automatically add the hook.

    `module_name` is e.g. `sklearn.feature_extraction`, and `submodule_name`
    is e.g. `image`, so we get the docstring and hook for
    `sklearn.feature_extraction.image` submodule. `module_name` is used to
    reset the current module because autosummary automatically changes the
    current module.
    """
    lines = [
        f".. automodule:: {module_name}.{submodule_name}",
        f".. currentmodule:: {module_name}",
    ]
    return "\n\n".join(lines)
Get the submodule docstring and automatically add the hook. `module_name` is e.g. `sklearn.feature_extraction`, and `submodule_name` is e.g. `image`, so we get the docstring and hook for `sklearn.feature_extraction.image` submodule. `module_name` is used to reset the current module because autosummary automatically changes the current module.
_get_submodule
python
scikit-learn/scikit-learn
doc/api_reference.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/api_reference.py
BSD-3-Clause
def add_js_css_files(app, pagename, templatename, context, doctree):
    """Load additional JS and CSS files only for certain pages.

    Note that `html_js_files` and `html_css_files` are included in all pages
    and should be used for the ones that are used by multiple pages. All
    page-specific JS and CSS files should be added here instead.
    """
    if pagename == "api/index":
        # External: jQuery and DataTables
        app.add_js_file("https://code.jquery.com/jquery-3.7.0.js")
        app.add_js_file("https://cdn.datatables.net/2.0.0/js/dataTables.min.js")
        app.add_css_file(
            "https://cdn.datatables.net/2.0.0/css/dataTables.dataTables.min.css"
        )
        # Internal: API search initialization and styling
        app.add_js_file("scripts/api-search.js")
        app.add_css_file("styles/api-search.css")
    elif pagename == "index":
        app.add_css_file("styles/index.css")
    elif pagename.startswith("modules/generated/"):
        app.add_css_file("styles/api.css")
Load additional JS and CSS files only for certain pages. Note that `html_js_files` and `html_css_files` are included in all pages and should be used for the ones that are used by multiple pages. All page-specific JS and CSS files should be added here instead.
add_js_css_files
python
scikit-learn/scikit-learn
doc/conf.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/conf.py
BSD-3-Clause
def make_carousel_thumbs(app, exception):
    """produces the final resized carousel images"""
    if exception is not None:
        return
    print("Preparing carousel images")

    image_dir = os.path.join(app.builder.outdir, "_images")
    for glr_plot, max_width in carousel_thumbs.items():
        image = os.path.join(image_dir, glr_plot)
        if os.path.exists(image):
            c_thumb = os.path.join(image_dir, glr_plot[:-4] + "_carousel.png")
            sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
produces the final resized carousel images
make_carousel_thumbs
python
scikit-learn/scikit-learn
doc/conf.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/conf.py
BSD-3-Clause
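A registration sketch, assuming the usual Sphinx extension `setup`; the `(app, exception)` signature matches the `build-finished` event:

def setup(app):
    # Runs after the full build; `exception` is None on success.
    app.connect("build-finished", make_carousel_thumbs)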
def skip_properties(app, what, name, obj, skip, options): """Skip properties that are fitted attributes""" if isinstance(obj, property): if name.endswith("_") and not name.startswith("_"): return True return skip
Skip properties that are fitted attributes
skip_properties
python
scikit-learn/scikit-learn
doc/conf.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/conf.py
BSD-3-Clause
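The six-argument signature matches autodoc's `autodoc-skip-member` event; a plausible registration sketch:

def setup(app):
    # autodoc consults the callback for every candidate member;
    # returning True skips it, returning `skip` keeps the default.
    app.connect("autodoc-skip-member", skip_properties)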
def infer_next_release_versions(): """Infer the most likely next release versions to make.""" all_version_full = {"rc": "0.99.0rc1", "final": "0.99.0", "bf": "0.98.1"} all_version_short = {"rc": "0.99", "final": "0.99", "bf": "0.98"} all_previous_tag = {"rc": "unused", "final": "0.98.33", "bf": "0.97.22"} try: # Fetch the version switcher JSON; see `html_theme_options` for more details versions_json = json.loads( urlopen(html_theme_options["switcher"]["json_url"], timeout=10).read() ) # See `build_tools/circle/list_versions.py`, stable is always the second entry stable_version = parse(versions_json[1]["version"]) last_stable_version = parse(versions_json[2]["version"]) next_major_minor = f"{stable_version.major}.{stable_version.minor + 1}" # RC all_version_full["rc"] = f"{next_major_minor}.0rc1" all_version_short["rc"] = next_major_minor # Major/Minor final all_version_full["final"] = f"{next_major_minor}.0" all_version_short["final"] = next_major_minor all_previous_tag["final"] = stable_version.base_version # Bug-fix all_version_full["bf"] = ( f"{stable_version.major}.{stable_version.minor}.{stable_version.micro + 1}" ) all_version_short["bf"] = f"{stable_version.major}.{stable_version.minor}" all_previous_tag["bf"] = last_stable_version.base_version except Exception as e: logger.warning( "Failed to infer all possible next release versions because of " f"{type(e).__name__}: {e}" ) return { "version_full": all_version_full, "version_short": all_version_short, "previous_tag": all_previous_tag, }
Infer the most likely next release versions to make.
infer_next_release_versions
python
scikit-learn/scikit-learn
doc/conf.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/conf.py
BSD-3-Clause
def pytest_collection_modifyitems(config, items): """Called after collect is completed. Parameters ---------- config : pytest config items : list of collected items """ skip_doctests = False if np_base_version < parse_version("2"): # TODO: configure numpy to output scalar arrays as regular Python scalars # once possible to improve readability of the tests docstrings. # https://numpy.org/neps/nep-0051-scalar-representation.html#implementation reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2" skip_doctests = True if sp_version < parse_version("1.14"): reason = "Scipy sparse matrix repr has changed in scipy 1.14" skip_doctests = True # Normally doctest has the entire module's scope. Here we set globs to an empty dict # to remove the module's scope: # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context for item in items: if isinstance(item, DoctestItem): item.dtest.globs = {} if skip_doctests: skip_marker = pytest.mark.skip(reason=reason) for item in items: if isinstance(item, DoctestItem): item.add_marker(skip_marker)
Called after collect is completed. Parameters ---------- config : pytest config items : list of collected items
pytest_collection_modifyitems
python
scikit-learn/scikit-learn
doc/conftest.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/conftest.py
BSD-3-Clause
def add_content(self, more_content): """Override default behavior to add only the first line of the docstring. Modified based on the part of processing docstrings in the original implementation of this method. https://github.com/sphinx-doc/sphinx/blob/faa33a53a389f6f8bc1f6ae97d6015fa92393c4a/sphinx/ext/autodoc/__init__.py#L609-L622 """ sourcename = self.get_sourcename() docstrings = self.get_doc() if docstrings is not None: if not docstrings: docstrings.append([]) # Get the first non-empty line of the processed docstring; this could lead # to unexpected results if the object does not have a short summary line. short_summary = next( (s for s in self.process_doc(docstrings) if s), "<no summary>" ) self.add_line(short_summary, sourcename, 0)
Override default behavior to add only the first line of the docstring. Modified based on the part of processing docstrings in the original implementation of this method. https://github.com/sphinx-doc/sphinx/blob/faa33a53a389f6f8bc1f6ae97d6015fa92393c4a/sphinx/ext/autodoc/__init__.py#L609-L622
add_content
python
scikit-learn/scikit-learn
doc/sphinxext/autoshortsummary.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/autoshortsummary.py
BSD-3-Clause
def _linkcode_resolve(domain, info, package, url_fmt, revision): """Determine a link to online source for a class/method/function This is called by sphinx.ext.linkcode An example with a long-untouched module that everyone has >>> _linkcode_resolve('py', {'module': 'tty', ... 'fullname': 'setraw'}, ... package='tty', ... url_fmt='https://hg.python.org/cpython/file/' ... '{revision}/Lib/{package}/{path}#L{lineno}', ... revision='xxxx') 'https://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' """ if revision is None: return if domain not in ("py", "pyx"): return if not info.get("module") or not info.get("fullname"): return class_name = info["fullname"].split(".")[0] module = __import__(info["module"], fromlist=[class_name]) obj = attrgetter(info["fullname"])(module) # Unwrap the object to get the correct source # file in case that is wrapped by a decorator obj = inspect.unwrap(obj) try: fn = inspect.getsourcefile(obj) except Exception: fn = None if not fn: try: fn = inspect.getsourcefile(sys.modules[obj.__module__]) except Exception: fn = None if not fn: return fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__)) try: lineno = inspect.getsourcelines(obj)[1] except Exception: lineno = "" return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
Determine a link to online source for a class/method/function This is called by sphinx.ext.linkcode An example with a long-untouched module that everyone has >>> _linkcode_resolve('py', {'module': 'tty', ... 'fullname': 'setraw'}, ... package='tty', ... url_fmt='https://hg.python.org/cpython/file/' ... '{revision}/Lib/{package}/{path}#L{lineno}', ... revision='xxxx') 'https://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
_linkcode_resolve
python
scikit-learn/scikit-learn
doc/sphinxext/github_link.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/github_link.py
BSD-3-Clause
def override_pst_pagetoc(app, pagename, templatename, context, doctree): """Overrides the `generate_toc_html` function of pydata-sphinx-theme for API.""" @cache def generate_api_toc_html(kind="html"): """Generate the in-page toc for an API page. This relies on the `generate_toc_html` function added by pydata-sphinx-theme into the context. We save the original function into `pst_generate_toc_html` and override `generate_toc_html` with this function for generated API pages. The pagetoc of an API page would look like the following: <ul class="visible ..."> <-- Unwrap <li class="toc-h1 ..."> <-- Unwrap <a class="..." href="#">{{obj}}</a> <-- Decompose <ul class="visible ..."> <li class="toc-h2 ..."> ...object <ul class="..."> <-- Set visible if exists <li class="toc-h3 ...">...method 1</li> <-- Shorten <li class="toc-h3 ...">...method 2</li> <-- Shorten ...more methods <-- Shorten </ul> </li> <li class="toc-h2 ...">...gallery examples</li> </ul> </li> <-- Unwrapped </ul> <-- Unwrapped """ soup = context["pst_generate_toc_html"](kind="soup") try: # Unwrap the outermost level soup.ul.unwrap() soup.li.unwrap() soup.a.decompose() # Get all toc-h2 level entries, where the first one should be the function # or class, and the second one, if exists, should be the examples; there # should be no more than two entries at this level for generated API pages lis = soup.ul.select("li.toc-h2") main_li = lis[0] meth_list = main_li.ul if meth_list is not None: # This is a class API page, we remove the class name from the method # names to make them better fit into the secondary sidebar; also we # make the toc-h3 level entries always visible to more easily navigate # through the methods meth_list["class"].append("visible") for meth in meth_list.find_all("li", {"class": "toc-h3"}): target = meth.a.code.span target.string = target.string.split(".", 1)[1] # This corresponds to the behavior of `generate_toc_html` return str(soup) if kind == "html" else soup except Exception as e: # Upon any failure we return the original pagetoc logger.warning( f"Failed to generate API pagetoc for {pagename}: {e}; falling back" ) return context["pst_generate_toc_html"](kind=kind) # Override the pydata-sphinx-theme implementation for generate API pages if pagename.startswith("modules/generated/"): context["pst_generate_toc_html"] = context["generate_toc_html"] context["generate_toc_html"] = generate_api_toc_html
Overrides the `generate_toc_html` function of pydata-sphinx-theme for API.
override_pst_pagetoc
python
scikit-learn/scikit-learn
doc/sphinxext/override_pst_pagetoc.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/override_pst_pagetoc.py
BSD-3-Clause
def generate_api_toc_html(kind="html"): """Generate the in-page toc for an API page. This relies on the `generate_toc_html` function added by pydata-sphinx-theme into the context. We save the original function into `pst_generate_toc_html` and override `generate_toc_html` with this function for generated API pages. The pagetoc of an API page would look like the following: <ul class="visible ..."> <-- Unwrap <li class="toc-h1 ..."> <-- Unwrap <a class="..." href="#">{{obj}}</a> <-- Decompose <ul class="visible ..."> <li class="toc-h2 ..."> ...object <ul class="..."> <-- Set visible if exists <li class="toc-h3 ...">...method 1</li> <-- Shorten <li class="toc-h3 ...">...method 2</li> <-- Shorten ...more methods <-- Shorten </ul> </li> <li class="toc-h2 ...">...gallery examples</li> </ul> </li> <-- Unwrapped </ul> <-- Unwrapped """ soup = context["pst_generate_toc_html"](kind="soup") try: # Unwrap the outermost level soup.ul.unwrap() soup.li.unwrap() soup.a.decompose() # Get all toc-h2 level entries, where the first one should be the function # or class, and the second one, if exists, should be the examples; there # should be no more than two entries at this level for generated API pages lis = soup.ul.select("li.toc-h2") main_li = lis[0] meth_list = main_li.ul if meth_list is not None: # This is a class API page, we remove the class name from the method # names to make them better fit into the secondary sidebar; also we # make the toc-h3 level entries always visible to more easily navigate # through the methods meth_list["class"].append("visible") for meth in meth_list.find_all("li", {"class": "toc-h3"}): target = meth.a.code.span target.string = target.string.split(".", 1)[1] # This corresponds to the behavior of `generate_toc_html` return str(soup) if kind == "html" else soup except Exception as e: # Upon any failure we return the original pagetoc logger.warning( f"Failed to generate API pagetoc for {pagename}: {e}; falling back" ) return context["pst_generate_toc_html"](kind=kind)
Generate the in-page toc for an API page. This relies on the `generate_toc_html` function added by pydata-sphinx-theme into the context. We save the original function into `pst_generate_toc_html` and override `generate_toc_html` with this function for generated API pages. The pagetoc of an API page would look like the following: <ul class="visible ..."> <-- Unwrap <li class="toc-h1 ..."> <-- Unwrap <a class="..." href="#">{{obj}}</a> <-- Decompose <ul class="visible ..."> <li class="toc-h2 ..."> ...object <ul class="..."> <-- Set visible if exists <li class="toc-h3 ...">...method 1</li> <-- Shorten <li class="toc-h3 ...">...method 2</li> <-- Shorten ...more methods <-- Shorten </ul> </li> <li class="toc-h2 ...">...gallery examples</li> </ul> </li> <-- Unwrapped </ul> <-- Unwrapped
generate_api_toc_html
python
scikit-learn/scikit-learn
doc/sphinxext/override_pst_pagetoc.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/override_pst_pagetoc.py
BSD-3-Clause
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None): """Sphinx role for linking to a user profile. Defaults to linking to GitHub profiles, but the profile URIs can be configured via the ``issues_user_uri`` config value. Examples: :: :user:`sloria` Anchor text also works: :: :user:`Steven Loria <sloria>` """ options = options or {} content = content or [] has_explicit_title, title, target = split_explicit_title(text) target = utils.unescape(target).strip() title = utils.unescape(title).strip() config = inliner.document.settings.env.app.config if config.issues_user_uri: ref = config.issues_user_uri.format(user=target) else: ref = "https://github.com/{0}".format(target) if has_explicit_title: text = title else: text = "@{0}".format(target) link = nodes.reference(text=text, refuri=ref, **options) return [link], []
Sphinx role for linking to a user profile. Defaults to linking to GitHub profiles, but the profile URIs can be configured via the ``issues_user_uri`` config value. Examples: :: :user:`sloria` Anchor text also works: :: :user:`Steven Loria <sloria>`
user_role
python
scikit-learn/scikit-learn
doc/sphinxext/sphinx_issues.py
https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/sphinx_issues.py
BSD-3-Clause
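A registration sketch for the role, assuming a standard Sphinx extension `setup`; the config-value name is taken from the function body, the rest is illustrative:

def setup(app):
    app.add_config_value("issues_user_uri", default=None, rebuild="html")
    app.add_role("user", user_role)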
def plot_digits(X, title): """Small helper function to plot 100 digits.""" fig, axs = plt.subplots(nrows=10, ncols=10, figsize=(8, 8)) for img, ax in zip(X, axs.ravel()): ax.imshow(img.reshape((16, 16)), cmap="Greys") ax.axis("off") fig.suptitle(title, fontsize=24)
Small helper function to plot 100 digits.
plot_digits
python
scikit-learn/scikit-learn
examples/applications/plot_digits_denoising.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_digits_denoising.py
BSD-3-Clause
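A self-contained usage sketch with synthetic data; random 16x16 patches stand in for the digit images used in the example:

import matplotlib.pyplot as plt
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(100, 16 * 16)  # 100 flattened 16x16 images
plot_digits(X, "100 random patches")
plt.show()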
def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.90, hspace=0.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(())
Helper function to plot a gallery of portraits
plot_gallery
python
scikit-learn/scikit-learn
examples/applications/plot_face_recognition.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_face_recognition.py
BSD-3-Clause
def benchmark_influence(conf): """ Benchmark influence of `changing_param` on both MSE and latency. """ prediction_times = [] prediction_powers = [] complexities = [] for param_value in conf["changing_param_values"]: conf["tuned_params"][conf["changing_param"]] = param_value estimator = conf["estimator"](**conf["tuned_params"]) print("Benchmarking %s" % estimator) estimator.fit(conf["data"]["X_train"], conf["data"]["y_train"]) conf["postfit_hook"](estimator) complexity = conf["complexity_computer"](estimator) complexities.append(complexity) start_time = time.time() for _ in range(conf["n_samples"]): y_pred = estimator.predict(conf["data"]["X_test"]) elapsed_time = (time.time() - start_time) / float(conf["n_samples"]) prediction_times.append(elapsed_time) pred_score = conf["prediction_performance_computer"]( conf["data"]["y_test"], y_pred ) prediction_powers.append(pred_score) print( "Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % ( complexity, conf["prediction_performance_label"], pred_score, elapsed_time, ) ) return prediction_powers, prediction_times, complexities
Benchmark influence of `changing_param` on both MSE and latency.
benchmark_influence
python
scikit-learn/scikit-learn
examples/applications/plot_model_complexity_influence.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_model_complexity_influence.py
BSD-3-Clause
def plot_influence(conf, mse_values, prediction_times, complexities): """ Plot influence of model complexity on both accuracy and latency. """ fig = plt.figure() fig.subplots_adjust(right=0.75) # first axes (prediction error) ax1 = fig.add_subplot(111) line1 = ax1.plot(complexities, mse_values, c="tab:blue", ls="-")[0] ax1.set_xlabel("Model Complexity (%s)" % conf["complexity_label"]) y1_label = conf["prediction_performance_label"] ax1.set_ylabel(y1_label) ax1.spines["left"].set_color(line1.get_color()) ax1.yaxis.label.set_color(line1.get_color()) ax1.tick_params(axis="y", colors=line1.get_color()) # second axes (latency) ax2 = fig.add_subplot(111, sharex=ax1, frameon=False) line2 = ax2.plot(complexities, prediction_times, c="tab:orange", ls="-")[0] ax2.yaxis.tick_right() ax2.yaxis.set_label_position("right") y2_label = "Time (s)" ax2.set_ylabel(y2_label) ax1.spines["right"].set_color(line2.get_color()) ax2.yaxis.label.set_color(line2.get_color()) ax2.tick_params(axis="y", colors=line2.get_color()) plt.legend( (line1, line2), ("prediction error", "prediction latency"), loc="upper center" ) plt.title( "Influence of varying '%s' on %s" % (conf["changing_param"], conf["estimator"].__name__) )
Plot influence of model complexity on both accuracy and latency.
plot_influence
python
scikit-learn/scikit-learn
examples/applications/plot_model_complexity_influence.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_model_complexity_influence.py
BSD-3-Clause
def stream_reuters_documents(data_path=None): """Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys. """ DOWNLOAD_URL = "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.tar.gz" ARCHIVE_SHA256 = "3bae43c9b14e387f76a61b6d82bf98a4fb5d3ef99ef7e7075ff2ccbcf59f9d30" ARCHIVE_FILENAME = "reuters21578.tar.gz" if data_path is None: data_path = Path(get_data_home()) / "reuters" else: data_path = Path(data_path) if not data_path.exists(): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) data_path.mkdir(parents=True, exist_ok=True) def progress(blocknum, bs, size): total_sz_mb = "%.2f MB" % (size / 1e6) current_sz_mb = "%.2f MB" % ((blocknum * bs) / 1e6) if _not_in_sphinx(): sys.stdout.write("\rdownloaded %s / %s" % (current_sz_mb, total_sz_mb)) archive_path = data_path / ARCHIVE_FILENAME urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): sys.stdout.write("\r") # Check that the archive was not tampered: assert sha256(archive_path.read_bytes()).hexdigest() == ARCHIVE_SHA256 print("untarring Reuters dataset...") with tarfile.open(archive_path, "r:gz") as fp: fp.extractall(data_path, filter="data") print("done.") parser = ReutersParser() for filename in data_path.glob("*.sgm"): for doc in parser.parse(open(filename, "rb")): yield doc
Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys.
stream_reuters_documents
python
scikit-learn/scikit-learn
examples/applications/plot_out_of_core_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_out_of_core_classification.py
BSD-3-Clause
def get_minibatch(doc_iter, size, pos_class=positive_class): """Extract a minibatch of examples, return a tuple X_text, y. Note: `size` counts documents before excluding invalid docs with no topics assigned. """ data = [ ("{title}\n\n{body}".format(**doc), pos_class in doc["topics"]) for doc in itertools.islice(doc_iter, size) if doc["topics"] ] if not len(data): return np.asarray([], dtype=int), np.asarray([], dtype=int) X_text, y = zip(*data) return X_text, np.asarray(y, dtype=int)
Extract a minibatch of examples, return a tuple X_text, y. Note: `size` counts documents before excluding invalid docs with no topics assigned.
get_minibatch
python
scikit-learn/scikit-learn
examples/applications/plot_out_of_core_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_out_of_core_classification.py
BSD-3-Clause
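A usage sketch combining the two streaming helpers above; `positive_class` is a module-level topic name (e.g. "acq") in the original script:

doc_stream = stream_reuters_documents()
X_text, y = get_minibatch(doc_stream, size=1000)
# y is 1 for documents tagged with the positive class, 0 otherwise.
print(len(X_text), y.mean())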
def autolabel(rectangles): """Attach some text via autolabel on rectangles.""" for rect in rectangles: height = rect.get_height() ax.text( rect.get_x() + rect.get_width() / 2.0, 1.05 * height, "%.4f" % height, ha="center", va="bottom", ) plt.setp(plt.xticks()[1], rotation=30)
Attach some text via autolabel on rectangles.
autolabel
python
scikit-learn/scikit-learn
examples/applications/plot_out_of_core_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_out_of_core_classification.py
BSD-3-Clause
def atomic_benchmark_estimator(estimator, X_test, verbose=False): """Measure runtime prediction of each instance.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=float) for i in range(n_instances): instance = X_test[[i], :] start = time.time() estimator.predict(instance) runtimes[i] = time.time() - start if verbose: print( "atomic_benchmark runtimes:", min(runtimes), np.percentile(runtimes, 50), max(runtimes), ) return runtimes
Measure runtime prediction of each instance.
atomic_benchmark_estimator
python
scikit-learn/scikit-learn
examples/applications/plot_prediction_latency.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_prediction_latency.py
BSD-3-Clause
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose): """Measure runtime prediction of the whole input.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_bulk_repeats, dtype=float) for i in range(n_bulk_repeats): start = time.time() estimator.predict(X_test) runtimes[i] = time.time() - start runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes))) if verbose: print( "bulk_benchmark runtimes:", min(runtimes), np.percentile(runtimes, 50), max(runtimes), ) return runtimes
Measure runtime prediction of the whole input.
bulk_benchmark_estimator
python
scikit-learn/scikit-learn
examples/applications/plot_prediction_latency.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_prediction_latency.py
BSD-3-Clause
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False): """ Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds. """ atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose) return atomic_runtimes, bulk_runtimes
Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds.
benchmark_estimator
python
scikit-learn/scikit-learn
examples/applications/plot_prediction_latency.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_prediction_latency.py
BSD-3-Clause
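A usage sketch with a small synthetic regression task, assuming the two benchmark helpers above are in scope; `Ridge` is just a convenient estimator with a fast `predict`:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge

X, y = make_regression(n_samples=1100, n_features=50, random_state=0)
model = Ridge().fit(X[:1000], y[:1000])
atomic, bulk = benchmark_estimator(model, X[1000:])
print("atomic median: %.2e s" % np.median(atomic))
print("bulk median:   %.2e s" % np.median(bulk))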
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False): """Generate a regression dataset with the given parameters.""" if verbose: print("generating dataset...") X, y, coef = make_regression( n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True ) random_seed = 13 X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=n_train, test_size=n_test, random_state=random_seed ) X_train, y_train = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print("ok") return X_train, y_train, X_test, y_test
Generate a regression dataset with the given parameters.
generate_dataset
python
scikit-learn/scikit-learn
examples/applications/plot_prediction_latency.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_prediction_latency.py
BSD-3-Clause
def boxplot_runtimes(runtimes, pred_type, configuration): """ Plot a new `Figure` with boxplots of prediction runtimes. Parameters ---------- runtimes : list of `np.array` of latencies in micro-seconds pred_type : 'bulk' or 'atomic' configuration : dict of the benchmark configuration, including the estimators that generated the runtimes """ fig, ax1 = plt.subplots(figsize=(10, 6)) bp = plt.boxplot( runtimes, ) cls_infos = [ "%s\n(%d %s)" % ( estimator_conf["name"], estimator_conf["complexity_computer"](estimator_conf["instance"]), estimator_conf["complexity_label"], ) for estimator_conf in configuration["estimators"] ] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp["boxes"], color="black") plt.setp(bp["whiskers"], color="black") plt.setp(bp["fliers"], color="red", marker="+") ax1.yaxis.grid(True, linestyle="-", which="major", color="lightgrey", alpha=0.5) ax1.set_axisbelow(True) ax1.set_title( "Prediction Time per Instance - %s, %d feats." % (pred_type.capitalize(), configuration["n_features"]) ) ax1.set_ylabel("Prediction Time (us)") plt.show()
Plot a new `Figure` with boxplots of prediction runtimes. Parameters ---------- runtimes : list of `np.array` of latencies in micro-seconds pred_type : 'bulk' or 'atomic' configuration : dict of the benchmark configuration, including the estimators that generated the runtimes
boxplot_runtimes
python
scikit-learn/scikit-learn
examples/applications/plot_prediction_latency.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_prediction_latency.py
BSD-3-Clause
def n_feature_influence(estimators, n_train, n_test, n_features, percentile): """ Estimate influence of the number of features on prediction time. Parameters ---------- estimators : dict of (name (str), estimator) to benchmark n_train : number of training instances (int) n_test : number of testing instances (int) n_features : list of feature-space dimensionality to test (int) percentile : percentile at which to measure the speed (int [0-100]) Returns ------- percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us)) """ percentiles = defaultdict(defaultdict) for n in n_features: print("benchmarking with %d features" % n) X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n) for cls_name, estimator in estimators.items(): estimator.fit(X_train, y_train) gc.collect() runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False) percentiles[cls_name][n] = 1e6 * np.percentile(runtimes, percentile) return percentiles
Estimate influence of the number of features on prediction time. Parameters ---------- estimators : dict of (name (str), estimator) to benchmark n_train : number of training instances (int) n_test : number of testing instances (int) n_features : list of feature-space dimensionality to test (int) percentile : percentile at which to measure the speed (int [0-100]) Returns ------- percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us))
n_feature_influence
python
scikit-learn/scikit-learn
examples/applications/plot_prediction_latency.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_prediction_latency.py
BSD-3-Clause
def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid)
Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages
construct_grids
python
scikit-learn/scikit-learn
examples/applications/plot_species_distribution_modeling.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_species_distribution_modeling.py
BSD-3-Clause
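A usage sketch; `fetch_species_distributions` downloads the dataset on first call:

from sklearn.datasets import fetch_species_distributions

data = fetch_species_distributions()
xgrid, ygrid = construct_grids(data)
print(xgrid.shape, ygrid.shape)  # 1-D longitude and latitude grids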
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid): """Create a bunch with information about a particular organism. This will use the test/train record arrays to extract the data specific to the given species name. """ bunch = Bunch(name=" ".join(species_name.split("_")[:2])) species_name = species_name.encode("ascii") points = dict(test=test, train=train) for label, pts in points.items(): # choose points associated with the desired species pts = pts[pts["species"] == species_name] bunch["pts_%s" % label] = pts # determine coverage values for each of the training & testing points ix = np.searchsorted(xgrid, pts["dd long"]) iy = np.searchsorted(ygrid, pts["dd lat"]) bunch["cov_%s" % label] = coverages[:, -iy, ix].T return bunch
Create a bunch with information about a particular organism. This will use the test/train record arrays to extract the data specific to the given species name.
create_species_bunch
python
scikit-learn/scikit-learn
examples/applications/plot_species_distribution_modeling.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_species_distribution_modeling.py
BSD-3-Clause
def build_projection_operator(l_x, n_dir): """Compute the tomography design matrix. Parameters ---------- l_x : int linear size of image array n_dir : int number of angles at which projections are acquired. Returns ------- p : sparse matrix of shape (n_dir * l_x, l_x**2) """ X, Y = _generate_center_coordinates(l_x) angles = np.linspace(0, np.pi, n_dir, endpoint=False) data_inds, weights, camera_inds = [], [], [] data_unravel_indices = np.arange(l_x**2) data_unravel_indices = np.hstack((data_unravel_indices, data_unravel_indices)) for i, angle in enumerate(angles): Xrot = np.cos(angle) * X - np.sin(angle) * Y inds, w = _weights(Xrot, dx=1, orig=X.min()) mask = np.logical_and(inds >= 0, inds < l_x) weights += list(w[mask]) camera_inds += list(inds[mask] + i * l_x) data_inds += list(data_unravel_indices[mask]) proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds))) return proj_operator
Compute the tomography design matrix. Parameters ---------- l_x : int linear size of image array n_dir : int number of angles at which projections are acquired. Returns ------- p : sparse matrix of shape (n_dir * l_x, l_x**2)
build_projection_operator
python
scikit-learn/scikit-learn
examples/applications/plot_tomography_l1_reconstruction.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/plot_tomography_l1_reconstruction.py
BSD-3-Clause
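A shape check, assuming the private helpers `_generate_center_coordinates` and `_weights` from the same script are in scope:

l_x, n_dir = 128, 40
proj = build_projection_operator(l_x, n_dir)
print(proj.shape)  # (n_dir * l_x, l_x ** 2) == (5120, 16384)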
def index(redirects, index_map, k): """Find the index of an article name after redirect resolution""" k = redirects.get(k, k) return index_map.setdefault(k, len(index_map))
Find the index of an article name after redirect resolution
index
python
scikit-learn/scikit-learn
examples/applications/wikipedia_principal_eigenvector.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/wikipedia_principal_eigenvector.py
BSD-3-Clause
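A small worked example of the redirect-aware indexing:

redirects = {"Old_title": "New_title"}
index_map = {}
print(index(redirects, index_map, "Old_title"))  # 0, resolved to "New_title"
print(index(redirects, index_map, "New_title"))  # 0, same entry as above
print(index(redirects, index_map, "Other"))      # 1, a fresh index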
def get_redirects(redirects_filename): """Parse the redirections and build a transitively closed map out of them.""" redirects = {} print("Parsing the NT redirect file") for l, line in enumerate(BZ2File(redirects_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue redirects[short_name(split[0])] = short_name(split[2]) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) # compute the transitive closure print("Computing the transitive closure of the redirect relation") for l, source in enumerate(redirects.keys()): transitive_target = None target = redirects[source] seen = {source} while True: transitive_target = target target = redirects.get(target) if target is None or target in seen: break seen.add(target) redirects[source] = transitive_target if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) return redirects
Parse the redirections and build a transitively closed map out of them.
get_redirects
python
scikit-learn/scikit-learn
examples/applications/wikipedia_principal_eigenvector.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/wikipedia_principal_eigenvector.py
BSD-3-Clause
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None): """Extract the adjacency graph as a scipy sparse matrix Redirects are resolved first. Returns X, the scipy sparse adjacency matrix, redirects as python dict from article names to article names and index_map a python dict from article names to python int (article indexes). """ print("Computing the redirect map") redirects = get_redirects(redirects_filename) print("Computing the integer index map") index_map = dict() links = list() for l, line in enumerate(BZ2File(page_links_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue i = index(redirects, index_map, short_name(split[0])) j = index(redirects, index_map, short_name(split[2])) links.append((i, j)) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) if limit is not None and l >= limit - 1: break print("Computing the adjacency matrix") X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32) for i, j in links: X[i, j] = 1.0 del links print("Converting to CSR representation") X = X.tocsr() print("CSR conversion done") return X, redirects, index_map
Extract the adjacency graph as a scipy sparse matrix Redirects are resolved first. Returns X, the scipy sparse adjacency matrix, redirects as python dict from article names to article names and index_map a python dict from article names to python int (article indexes).
get_adjacency_matrix
python
scikit-learn/scikit-learn
examples/applications/wikipedia_principal_eigenvector.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/applications/wikipedia_principal_eigenvector.py
BSD-3-Clause
def predict_proba(self, X): """Min-max scale output of `decision_function` to [0, 1].""" df = self.decision_function(X) calibrated_df = (df - self.df_min_) / (self.df_max_ - self.df_min_) proba_pos_class = np.clip(calibrated_df, 0, 1) proba_neg_class = 1 - proba_pos_class proba = np.c_[proba_neg_class, proba_pos_class] return proba
Min-max scale output of `decision_function` to [0, 1].
predict_proba
python
scikit-learn/scikit-learn
examples/calibration/plot_calibration_curve.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/calibration/plot_calibration_curve.py
BSD-3-Clause
def predict_proba(self, X): """Min-max scale output of `decision_function` to [0,1].""" df = self.decision_function(X) calibrated_df = (df - self.df_min_) / (self.df_max_ - self.df_min_) proba_pos_class = np.clip(calibrated_df, 0, 1) proba_neg_class = 1 - proba_pos_class proba = np.c_[proba_neg_class, proba_pos_class] return proba
Min-max scale output of `decision_function` to [0,1].
predict_proba
python
scikit-learn/scikit-learn
examples/calibration/plot_compare_calibration.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/calibration/plot_compare_calibration.py
BSD-3-Clause
def generate_data(n_samples, n_features): """Generate random blob-ish data with noisy features. This returns an array of input data with shape `(n_samples, n_features)` and an array of `n_samples` target labels. Only one feature contains discriminative information, the other features contain only noise. """ X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]]) # add non-discriminative features if n_features > 1: X = np.hstack([X, np.random.randn(n_samples, n_features - 1)]) return X, y
Generate random blob-ish data with noisy features. This returns an array of input data with shape `(n_samples, n_features)` and an array of `n_samples` target labels. Only one feature contains discriminative information, the other features contain only noise.
generate_data
python
scikit-learn/scikit-learn
examples/classification/plot_lda.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/classification/plot_lda.py
BSD-3-Clause
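A quick usage sketch:

X, y = generate_data(n_samples=200, n_features=10)
print(X.shape, y.shape)  # (200, 10) (200,)
# Only X[:, 0] is discriminative; the remaining 9 columns are pure noise.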
def _classifier_has(attr): """Check if we can delegate a method to the underlying classifier. First, we check the first fitted classifier if available, otherwise we check the unfitted classifier. """ return lambda estimator: ( hasattr(estimator.classifier_, attr) if hasattr(estimator, "classifier_") else hasattr(estimator.classifier, attr) )
Check if we can delegate a method to the underlying classifier. First, we check the first fitted classifier if available, otherwise we check the unfitted classifier.
_classifier_has
python
scikit-learn/scikit-learn
examples/cluster/plot_inductive_clustering.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/cluster/plot_inductive_clustering.py
BSD-3-Clause
def bench_k_means(kmeans, name, data, labels): """Benchmark to evaluate the KMeans initialization methods. Parameters ---------- kmeans : KMeans instance A :class:`~sklearn.cluster.KMeans` instance with the initialization already set. name : str Name given to the strategy. It will be used to show the results in a table. data : ndarray of shape (n_samples, n_features) The data to cluster. labels : ndarray of shape (n_samples,) The labels used to compute the clustering metrics, which require some supervision. """ t0 = time() estimator = make_pipeline(StandardScaler(), kmeans).fit(data) fit_time = time() - t0 results = [name, fit_time, estimator[-1].inertia_] # Define the metrics which require only the true labels and estimator # labels clustering_metrics = [ metrics.homogeneity_score, metrics.completeness_score, metrics.v_measure_score, metrics.adjusted_rand_score, metrics.adjusted_mutual_info_score, ] results += [m(labels, estimator[-1].labels_) for m in clustering_metrics] # The silhouette score requires the full dataset results += [ metrics.silhouette_score( data, estimator[-1].labels_, metric="euclidean", sample_size=300, ) ] # Show the results formatter_result = ( "{:9s}\t{:.3f}s\t{:.0f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}" ) print(formatter_result.format(*results))
Benchmark to evaluate the KMeans initialization methods. Parameters ---------- kmeans : KMeans instance A :class:`~sklearn.cluster.KMeans` instance with the initialization already set. name : str Name given to the strategy. It will be used to show the results in a table. data : ndarray of shape (n_samples, n_features) The data to cluster. labels : ndarray of shape (n_samples,) The labels used to compute the clustering metrics, which require some supervision.
bench_k_means
python
scikit-learn/scikit-learn
examples/cluster/plot_kmeans_digits.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/cluster/plot_kmeans_digits.py
BSD-3-Clause
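A usage sketch mirroring the original example, assuming the imports used inside the function (`time`, `make_pipeline`, `StandardScaler`, `metrics`) are in scope:

from sklearn.cluster import KMeans
from sklearn.datasets import load_digits

data, labels = load_digits(return_X_y=True)
kmeans = KMeans(init="k-means++", n_clusters=10, n_init=4, random_state=0)
bench_k_means(kmeans=kmeans, name="k-means++", data=data, labels=labels)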
def ricker_function(resolution, center, width): """Discrete sub-sampled Ricker (Mexican hat) wavelet""" x = np.linspace(0, resolution - 1, resolution) x = ( (2 / (np.sqrt(3 * width) * np.pi**0.25)) * (1 - (x - center) ** 2 / width**2) * np.exp(-((x - center) ** 2) / (2 * width**2)) ) return x
Discrete sub-sampled Ricker (Mexican hat) wavelet
ricker_function
python
scikit-learn/scikit-learn
examples/decomposition/plot_sparse_coding.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/decomposition/plot_sparse_coding.py
BSD-3-Clause
def ricker_matrix(width, resolution, n_components): """Dictionary of Ricker (Mexican hat) wavelets""" centers = np.linspace(0, resolution - 1, n_components) D = np.empty((n_components, resolution)) for i, center in enumerate(centers): D[i] = ricker_function(resolution, center, width) D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis] return D
Dictionary of Ricker (Mexican hat) wavelets
ricker_matrix
python
scikit-learn/scikit-learn
examples/decomposition/plot_sparse_coding.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/decomposition/plot_sparse_coding.py
BSD-3-Clause
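A quick check that the dictionary rows are unit-norm, assuming `ricker_function` above is in scope:

import numpy as np

D = ricker_matrix(width=100, resolution=1024, n_components=30)
print(D.shape)                                  # (30, 1024)
print(np.allclose((D ** 2).sum(axis=1), 1.0))   # True: rows have unit L2 norm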
def fit(self, X, y): """ Fit the estimator to the training data. """ self.classes_ = sorted(set(y)) # Custom attribute to track if the estimator is fitted self._is_fitted = True return self
Fit the estimator to the training data.
fit
python
scikit-learn/scikit-learn
examples/developing_estimators/sklearn_is_fitted.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/developing_estimators/sklearn_is_fitted.py
BSD-3-Clause
def predict(self, X): """ Perform predictions. If the estimator is not fitted, raise NotFittedError. """ check_is_fitted(self) # Perform prediction logic predictions = [self.classes_[0]] * len(X) return predictions
Perform predictions. If the estimator is not fitted, raise NotFittedError.
predict
python
scikit-learn/scikit-learn
examples/developing_estimators/sklearn_is_fitted.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/developing_estimators/sklearn_is_fitted.py
BSD-3-Clause
def score(self, X, y): """ Calculate the score. If the estimator is not fitted, raise NotFittedError. """ check_is_fitted(self) # Perform scoring logic return 0.5
Calculate the score. If the estimator is not fitted, raise NotFittedError.
score
python
scikit-learn/scikit-learn
examples/developing_estimators/sklearn_is_fitted.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/developing_estimators/sklearn_is_fitted.py
BSD-3-Clause
def _f(self, s1, s2): """ Kernel value between a pair of sequences. """ return sum( [1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2] )
Kernel value between a pair of sequences.
_f
python
scikit-learn/scikit-learn
examples/gaussian_process/plot_gpr_on_structured_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/gaussian_process/plot_gpr_on_structured_data.py
BSD-3-Clause
def plot_gpr_samples(gpr_model, n_samples, ax): """Plot samples drawn from the Gaussian process model. If the Gaussian process model is not trained, the samples are drawn from the prior distribution. Otherwise, they are drawn from the posterior distribution. Be aware that a sample here corresponds to a function. Parameters ---------- gpr_model : `GaussianProcessRegressor` A :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model. n_samples : int The number of samples to draw from the Gaussian process distribution. ax : matplotlib axis The matplotlib axis where to plot the samples. """ x = np.linspace(0, 5, 100) X = x.reshape(-1, 1) y_mean, y_std = gpr_model.predict(X, return_std=True) y_samples = gpr_model.sample_y(X, n_samples) for idx, single_prior in enumerate(y_samples.T): ax.plot( x, single_prior, linestyle="--", alpha=0.7, label=f"Sampled function #{idx + 1}", ) ax.plot(x, y_mean, color="black", label="Mean") ax.fill_between( x, y_mean - y_std, y_mean + y_std, alpha=0.1, color="black", label=r"$\pm$ 1 std. dev.", ) ax.set_xlabel("x") ax.set_ylabel("y") ax.set_ylim([-3, 3])
Plot samples drawn from the Gaussian process model. If the Gaussian process model is not trained, the samples are drawn from the prior distribution. Otherwise, they are drawn from the posterior distribution. Be aware that a sample here corresponds to a function. Parameters ---------- gpr_model : `GaussianProcessRegressor` A :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model. n_samples : int The number of samples to draw from the Gaussian process distribution. ax : matplotlib axis The matplotlib axis where to plot the samples.
plot_gpr_samples
python
scikit-learn/scikit-learn
examples/gaussian_process/plot_gpr_prior_posterior.py
https://github.com/scikit-learn/scikit-learn/blob/master/examples/gaussian_process/plot_gpr_prior_posterior.py
BSD-3-Clause
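A usage sketch; an unfitted `GaussianProcessRegressor` yields samples from the prior, as the docstring notes:

import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

gpr = GaussianProcessRegressor(kernel=RBF())
fig, ax = plt.subplots()
plot_gpr_samples(gpr, n_samples=5, ax=ax)
ax.set_title("Samples from the GP prior")
ax.legend(loc="lower right")
plt.show()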