from sklearn.datasets import load_iris
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
import numpy as np


def iris() -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    returns a tuple of numpy arrays containing the
    iris dataset split into training and testing sets
    after being normalized and one-hot encoded 
    """
    data = load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data,
        data.target,
        test_size=0.3,
        random_state=8675309,
    )
    # Fit the scaler on the training set only, then apply the same
    # transformation to the test set to avoid data leakage.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # Fit a single encoder on the training labels so the train and test
    # one-hot columns line up consistently.
    encoder = OneHotEncoder()
    y_train = encoder.fit_transform(y_train.reshape(-1, 1)).toarray()
    y_test = encoder.transform(y_test.reshape(-1, 1)).toarray()
    return X_train, X_test, y_train, y_test
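

# A minimal usage sketch (an added illustration, not part of the original
# module): call iris() and check the resulting shapes. With test_size=0.3
# on the 150-sample iris dataset, the split should yield 105 training rows
# and 45 test rows, with 4 feature columns and 3 one-hot target columns.
if __name__ == "__main__":
    X_train, X_test, y_train, y_test = iris()
    print("X_train:", X_train.shape)  # expected (105, 4)
    print("X_test:", X_test.shape)    # expected (45, 4)
    print("y_train:", y_train.shape)  # expected (105, 3)
    print("y_test:", y_test.shape)    # expected (45, 3)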