python_code | repo_name | file_path
---|---|---|
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict
from pyspark.sql import SparkSession
from .conftest import _default_conf, get_spark_i_know_what_i_am_doing
# sparksession.py is copied from spark-rapids
def _from_scala_map(scala_map) -> Dict[str, Any]: # type: ignore
ret = {}
# The value we get is a scala map, not a java map, so we need to jump through some hoops
keys = scala_map.keys().iterator() # type: ignore
while keys.hasNext(): # type: ignore
key = keys.next() # type: ignore
ret[key] = scala_map.get(key).get() # type: ignore
return ret # type: ignore
_spark = get_spark_i_know_what_i_am_doing()
# Have to reach into a private member to get access to the API we need
_orig_conf = _from_scala_map(_spark.conf._jconf.getAll()) # type: ignore
_orig_conf_keys = _orig_conf.keys() # type: ignore
class CleanSparkSession:
"""
A context manager to auto reset spark conf.
"""
def __init__(self, conf: Dict[str, Any] = {}) -> None:
self.conf = conf
self.spark = _spark
def __enter__(self) -> SparkSession:
self._reset_spark_session_conf()
self._set_all_confs(self.conf)
return self.spark
def __exit__(self, *args: Any) -> None:
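# Note: nothing is restored here on exit; the reset happens in __enter__,
# so each new CleanSparkSession context starts from the conf captured at
# import time.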
pass
def _set_all_confs(self, conf: Dict[str, Any]) -> None:
newconf = _default_conf.copy()
newconf.update(conf)
for key, value in newconf.items():
if self.spark.conf.get(key, None) != value:
self.spark.conf.set(key, value)
def _reset_spark_session_conf(self) -> None:
"""Reset all of the configs for a given spark session."""
self._set_all_confs(_orig_conf)
# Have to reach into a private member to get access to the API we need
current_keys = _from_scala_map(self.spark.conf._jconf.getAll()).keys() # type: ignore
for key in current_keys:
if key not in _orig_conf_keys:
self.spark.conf.unset(key)
| spark-rapids-ml-branch-23.10 | python/tests/sparksession.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
from pyspark import Row, TaskContext
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCols, HasOutputCols
from pyspark.sql import DataFrame
from pyspark.sql.types import StructType
from spark_rapids_ml.core import (
CumlT,
FitInputType,
_ConstructFunc,
_CumlEstimator,
_CumlModel,
_EvaluateFunc,
_TransformFunc,
param_alias,
transform_evaluate,
)
from spark_rapids_ml.params import _CumlClass, _CumlParams
from spark_rapids_ml.utils import PartitionDescriptor
from .utils import assert_params, get_default_cuml_parameters
class CumlDummy(object):
"""
A dummy class to mimic a cuml python class
"""
def __init__(self, a: float = 10.0, b: int = 20, k: int = 30, x: float = 40.0) -> None: # type: ignore
super().__init__()
self.a = a # alpha
self.b = b # dropped
self.k = k # k
self.x = x # extra, keep w/ default
class SparkRapidsMLDummyClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {
"alpha": "a", # direct map, different names
"beta": None, # unmapped, raise error if defined on Spark side
"gamma": "", # unmapped, ignore value from Spark side
"k": "k", # direct map, same name
}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {"a": 10.0, "k": 30, "x": 40.0}
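# Expected effect of the mapping above (a sketch of the behavior exercised by
# test_dummy_params below): constructing SparkRapidsMLDummy(alpha=2.0) should
# surface as cuml_params["a"] == 2.0, setting beta should raise a ValueError,
# and gamma is accepted on the Spark side but never forwarded to cuML.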
class _SparkRapidsMLDummyParams(_CumlParams):
"""
Params for Spark Dummy class
"""
alpha = Param(
Params._dummy(), # type: ignore
"alpha",
"alpha dummy param",
TypeConverters.toFloat,
)
beta = Param(
Params._dummy(), # type: ignore
"beta",
"beta dummy param ",
TypeConverters.toInt,
)
gamma = Param(
Params._dummy(), # type: ignore
"gamma",
"gamma dummy param ",
TypeConverters.toString,
)
k = Param(
Params._dummy(), # type: ignore
"k",
"k dummy param ",
TypeConverters.toInt,
)
def __init__(self, *args: Any):
super(_SparkRapidsMLDummyParams, self).__init__(*args)
self._setDefault(
alpha=1.0,
# beta=2, # leave undefined to test mapping to None
gamma="three",
k=4,
)
class SparkRapidsMLDummy(
SparkRapidsMLDummyClass,
_CumlEstimator,
_SparkRapidsMLDummyParams,
HasInputCols,
HasOutputCols,
):
"""
PySpark estimator of CumlDummy
"""
def __init__(
self,
m: int = 0,
n: int = 0,
partition_num: int = 0,
runtime_check: bool = True,
**kwargs: Any,
) -> None:
#
super().__init__()
self.set_params(**kwargs)
self.m = m
self.n = n
self.partition_num = partition_num
self.runtime_check = runtime_check
"""
PySpark estimator of CumlDummy
"""
def setInputCols(self, value: List[str]) -> "SparkRapidsMLDummy":
return self._set(inputCols=value)
def setOutputCols(self, value: List[str]) -> "SparkRapidsMLDummy":
return self._set(outputCols=value)
def setAlpha(self, value: float) -> "SparkRapidsMLDummy":
return self.set_params(**{"alpha": value})
def setBeta(self, value: int) -> "SparkRapidsMLDummy":
raise ValueError("Not supported")
def setGamma(self, value: str) -> "SparkRapidsMLDummy":
return self.set_params(**{"gamma": value})
def setK(self, value: int) -> "SparkRapidsMLDummy":
return self.set_params(**{"k": value})
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
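# The returned callable runs on each training worker: it receives the
# partitioned input data plus a params dict that (per the assertions below)
# carries the cuML handle, per-partition sizes, column count, and the resolved
# cuML init params, and the dict it returns is collected into the model row.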
num_workers = self.num_workers
partition_num = self.partition_num
m = self.m
n = self.n
# if the common framework tries to pickle the whole class,
# it will throw an exception since the dataset is not picklable.
self.test_pickle_dataframe = dataset
runtime_check = self.runtime_check
def _cuml_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
context = TaskContext.get()
assert context is not None
assert param_alias.handle in params
assert param_alias.part_sizes in params
assert param_alias.num_cols in params
pd = PartitionDescriptor.build(
params[param_alias.part_sizes], params[param_alias.num_cols]
)
assert param_alias.cuml_init in params
init_params = params[param_alias.cuml_init]
dummy = CumlDummy(**init_params)
if runtime_check:
assert pd.rank == context.partitionId()
assert len(pd.parts_rank_size) == partition_num
assert pd.m == m
assert pd.n == n
assert init_params == {"a": 100, "k": 4, "x": 40.0}
assert dummy.a == 100
assert dummy.b == 20
assert dummy.k == 4
assert dummy.x == 40.0
import time
# sleep for 1 sec to bypass https://issues.apache.org/jira/browse/SPARK-40932
time.sleep(1)
return {
"dtype": np.dtype(np.float32).name,
"n_cols": n,
"model_attribute_a": [1024],
"model_attribute_b": "hello dummy",
}
return _cuml_fit
def _out_schema(self) -> Union[StructType, str]:
return (
"dtype string, n_cols int, model_attribute_a int, model_attribute_b string"
)
def _create_pyspark_model(self, result: Row) -> "SparkRapidsMLDummyModel":
assert result.dtype == np.dtype(np.float32).name
assert result.n_cols == self.n
assert result.model_attribute_a == 1024
assert result.model_attribute_b == "hello dummy"
return SparkRapidsMLDummyModel.from_row(result)
class SparkRapidsMLDummyModel(
SparkRapidsMLDummyClass,
_CumlModel,
_SparkRapidsMLDummyParams,
HasInputCols,
HasOutputCols,
):
"""
PySpark model of CumlDummy
"""
def __init__(
self,
dtype: str,
n_cols: int,
model_attribute_a: int,
model_attribute_b: str,
not_used: int = 1,
**kwargs: Any,
) -> None:
super().__init__(
dtype=dtype,
n_cols=n_cols,
model_attribute_a=model_attribute_a,
model_attribute_b=model_attribute_b,
) # type: ignore
self.model_attribute_a = model_attribute_a
self.model_attribute_b = model_attribute_b
self.set_params(**kwargs)
def setInputCols(self, value: List[str]) -> "SparkRapidsMLDummyModel":
return self._set(inputCols=value)
def setOutputCols(self, value: List[str]) -> "SparkRapidsMLDummyModel":
return self._set(outputCols=value)
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
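# Returns a (construct, transform, evaluate) triple: construct builds the cuML
# object on the executor, transform maps each pandas batch to output rows, and
# evaluate is None because this dummy model does not support evaluation.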
model_attribute_a = self.model_attribute_a
# if the common framework tries to pickle the whole class,
# it will throw an exception since the dataset is not picklable.
self.test_pickle_dataframe = dataset
output_cols = self.getInputCols()
def _construct_dummy() -> CumlT:
dummy = CumlDummy(a=101, b=102, k=103)
return dummy
def _dummy_transform(
dummy: CumlT, df: Union[pd.DataFrame, np.ndarray]
) -> pd.DataFrame:
assert dummy.a == 101
assert dummy.b == 102
assert dummy.k == 103
assert model_attribute_a == 1024
if isinstance(df, pd.DataFrame):
col_mapper = dict(zip(df.columns, output_cols))
return df.rename(columns=col_mapper)
else:
# TODO: implement when adding single column test
raise NotImplementedError()
return _construct_dummy, _dummy_transform, None
def _out_schema(self, input_schema: StructType) -> Union[StructType, str]:
return input_schema
def test_default_cuml_params() -> None:
cuml_params = get_default_cuml_parameters([CumlDummy], ["b"])
spark_params = SparkRapidsMLDummy()._get_cuml_params_default()
assert cuml_params == spark_params
def test_dummy_params(gpu_number: int, tmp_path: str) -> None:
# Default constructor
default_spark_params = {
"alpha": 1.0, # a
# "beta": 2, # should raise exception if defined
"gamma": "three", # should be ignored
"k": 4, # k
}
default_cuml_params = {
"a": 1.0, # default value for Spark 'alpha'
# "b": 20 # should be dropped
"k": 4, # default value for Spark 'k'
"x": 40.0, # default value for cuML
}
default_dummy = SparkRapidsMLDummy()
assert_params(default_dummy, default_spark_params, default_cuml_params)
# Spark constructor (with ignored param "gamma")
spark_params = {"alpha": 2.0, "gamma": "test", "k": 1}
spark_dummy = SparkRapidsMLDummy(
m=0, n=0, partition_num=0, runtime_check=True, **spark_params
)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(spark_params)
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update({"a": 2.0, "k": 1})
assert_params(spark_dummy, expected_spark_params, expected_cuml_params)
# cuML constructor
cuml_params = {"a": 1.1, "k": 2, "x": 3.3}
cuml_dummy = SparkRapidsMLDummy(
m=0, n=0, partition_num=0, runtime_check=True, **cuml_params
)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(
{
"alpha": 1.1,
"k": 2,
}
)
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update(cuml_params)
assert_params(cuml_dummy, expected_spark_params, expected_cuml_params)
# Estimator persistence
path = tmp_path + "/dummy_tests"
estimator_path = f"{path}/dummy_estimator"
cuml_dummy.write().overwrite().save(estimator_path)
loaded_dummy = SparkRapidsMLDummy.load(estimator_path)
assert_params(loaded_dummy, expected_spark_params, expected_cuml_params)
# Spark constructor (with error param "beta")
spark_params = {"alpha": 2.0, "beta": 0, "k": 1}
with pytest.raises(ValueError, match="Spark Param 'beta' is not supported by cuML"):
spark_dummy = SparkRapidsMLDummy(
m=0, n=0, partition_num=0, runtime_check=True, **spark_params
)
# cuML constructor (with unsupported param "b")
cuml_params = {"a": 1.1, "b": 0, "k": 2, "x": 3.3}
with pytest.raises(ValueError, match="Unsupported param 'b'"):
cuml_dummy = SparkRapidsMLDummy(
m=0, n=0, partition_num=0, runtime_check=True, **cuml_params
)
# test the parameter copy
dummy = SparkRapidsMLDummy()
dummy2 = dummy.copy({dummy.alpha: 1111})
assert dummy.getOrDefault(dummy.alpha) == 1
assert dummy.cuml_params["a"] == 1
assert dummy2.getOrDefault(dummy.alpha) == 1111
assert dummy2.cuml_params["a"] == 1111
def test_dummy(gpu_number: int, tmp_path: str) -> None:
data = [
[1.0, 4.0, 4.0, 4.0],
[2.0, 2.0, 2.0, 2.0],
[3.0, 3.0, 3.0, 2.0],
[3.0, 3.0, 3.0, 2.0],
[5.0, 2.0, 1.0, 3.0],
]
m = len(data)
n = len(data[0])
input_cols = ["c1", "c2", "c3", "c4"]
max_records_per_batch = 1
def assert_estimator(dummy: SparkRapidsMLDummy) -> None:
assert dummy.getInputCols() == input_cols
assert dummy.cuml_params == {"a": 100, "k": 4, "x": 40.0}
assert dummy.num_workers == gpu_number
def ceiling_division(n: int, d: int) -> int:
return -(n // -d)
# Generate estimator
dummy = SparkRapidsMLDummy(
inputCols=input_cols,
a=100,
num_workers=gpu_number,
partition_num=ceiling_division(m, max_records_per_batch),
m=m,
n=n,
)
assert_estimator(dummy)
# Estimator persistence
path = tmp_path + "/dummy_tests"
estimator_path = f"{path}/dummy_estimator"
dummy.write().overwrite().save(estimator_path)
dummy_loaded = SparkRapidsMLDummy.load(estimator_path)
assert_estimator(dummy_loaded)
def assert_model(model: SparkRapidsMLDummyModel) -> None:
assert model.model_attribute_a == 1024
assert model.model_attribute_b == "hello dummy"
assert model.cuml_params == {"a": 100, "k": 4, "x": 40.0}
assert model.num_workers == gpu_number
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_records_per_batch)}
from .sparksession import CleanSparkSession
# Estimator fit and get a model
with CleanSparkSession(conf) as spark:
df = spark.sparkContext.parallelize(data).toDF(input_cols)
model: SparkRapidsMLDummyModel = dummy.fit(df)
assert_model(model)
# Model persistence
model_path = f"{path}/dummy_model"
model.write().overwrite().save(model_path)
model_loaded = SparkRapidsMLDummyModel.load(model_path)
assert_model(model_loaded)
dummy2 = dummy.copy()
assert dummy2.cuml_params["a"] == 100
with pytest.raises(
Exception,
match="assert {'a': 9876.0, 'k': 4, 'x': 40.0} == {'a': 100, 'k': 4, 'x': 40.0}",
):
dummy2.fit(df, {dummy2.alpha: 9876.0})
assert dummy2.cuml_params["a"] == 100
assert dummy2.getOrDefault(dummy2.alpha) == 100
dummy3 = SparkRapidsMLDummy(
inputCols=input_cols,
a=100,
num_workers=gpu_number,
partition_num=ceiling_division(m, max_records_per_batch),
m=m,
n=n,
runtime_check=False, # don't assert on the runtime.
)
model3 = dummy3.fit(df, {dummy3.alpha: 9876.0})
assert dummy3.cuml_params["a"] == 100
assert dummy3.getOrDefault(dummy3.alpha) == 100
assert model3.cuml_params["a"] == 9876.0
assert model3.getOrDefault(model3.alpha) == 9876.0
# Transform the training dataset with a clean spark
with CleanSparkSession() as clean_spark:
test_df = clean_spark.sparkContext.parallelize(data, m).toDF(input_cols)
transformed_df = model.transform(test_df)
ret = transformed_df.collect()
assert len(ret) == m
# Compare data
for x, y in zip(ret, data):
for i in range(n):
assert x[i] == y[i]
def test_num_workers_validation() -> None:
from .sparksession import CleanSparkSession
with CleanSparkSession() as spark:
data = [
[1.0, 4.0, 4.0, 4.0],
[2.0, 2.0, 2.0, 2.0],
[3.0, 3.0, 3.0, 2.0],
[3.0, 3.0, 3.0, 2.0],
[5.0, 2.0, 1.0, 3.0],
]
m = len(data)
n = len(data[0])
input_cols = ["c1", "c2", "c3", "c4"]
df = spark.sparkContext.parallelize(data).toDF(input_cols)
dummy = SparkRapidsMLDummy(
inputCols=input_cols,
a=100,
num_workers=55,
partition_num=1,
m=m,
n=n,
)
with pytest.raises(
ValueError,
match=r"The num_workers \(55\) should be less than or equal to spark default parallelism",
):
dummy.fit(df)
| spark-rapids-ml-branch-23.10 | python/tests/test_common_estimator.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import math
from typing import Any, Dict, List, Tuple, Type, TypeVar, Union, cast
import numpy as np
import pytest
from _pytest.logging import LogCaptureFixture
from cuml import accuracy_score
from pyspark.ml.classification import (
RandomForestClassificationModel as SparkRFClassificationModel,
)
from pyspark.ml.classification import RandomForestClassifier as SparkRFClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.linalg import Vectors
from pyspark.ml.param import Param
from pyspark.ml.regression import RandomForestRegressionModel as SparkRFRegressionModel
from pyspark.ml.regression import RandomForestRegressor as SparkRFRegressor
from pyspark.ml.tuning import CrossValidator as SparkCrossValidator
from pyspark.ml.tuning import CrossValidatorModel, ParamGridBuilder
from pyspark.sql.types import DoubleType
from sklearn.metrics import r2_score
from spark_rapids_ml.classification import (
RandomForestClassificationModel,
RandomForestClassifier,
)
from spark_rapids_ml.regression import (
RandomForestRegressionModel,
RandomForestRegressor,
)
from spark_rapids_ml.tuning import CrossValidator
from .sparksession import CleanSparkSession
from .utils import (
array_equal,
assert_params,
create_pyspark_dataframe,
cuml_supported_data_types,
feature_types,
get_default_cuml_parameters,
idfn,
make_classification_dataset,
make_regression_dataset,
pyspark_supported_feature_types,
)
RandomForest = TypeVar(
"RandomForest", Type[RandomForestClassifier], Type[RandomForestRegressor]
)
RandomForestEvaluator = TypeVar(
"RandomForestEvaluator",
Type[MulticlassClassificationEvaluator],
Type[RegressionEvaluator],
)
RandomForestModel = TypeVar(
"RandomForestModel",
Type[RandomForestClassificationModel],
Type[RandomForestRegressionModel],
)
RandomForestType = TypeVar(
"RandomForestType",
Type[SparkRFClassifier],
Type[SparkRFRegressor],
Type[RandomForestClassifier],
Type[RandomForestRegressor],
)
RandomForestModelType = TypeVar(
"RandomForestModelType",
Type[SparkRFClassificationModel],
Type[SparkRFRegressionModel],
Type[RandomForestClassificationModel],
Type[RandomForestRegressionModel],
)
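# These TypeVars let the parametrized tests below accept either the classifier
# or regressor variants (Spark ML or spark-rapids-ml) interchangeably.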
@pytest.mark.parametrize("Estimator", [RandomForestClassifier, RandomForestRegressor])
def test_default_cuml_params(Estimator: RandomForest) -> None:
from cuml.ensemble.randomforest_common import BaseRandomForestModel
cuml_params = get_default_cuml_parameters(
[BaseRandomForestModel],
[
"handle",
"output_type",
"accuracy_metric",
"dtype",
"criterion",
"min_weight_fraction_leaf",
"max_leaf_nodes",
"min_impurity_split",
"oob_score",
"n_jobs",
"warm_start",
"class_weight",
],
)
spark_params = Estimator()._get_cuml_params_default()
assert cuml_params == spark_params
@pytest.mark.parametrize("RFEstimator", [RandomForestClassifier, RandomForestRegressor])
def test_random_forest_params(
tmp_path: str, RFEstimator: RandomForest, caplog: LogCaptureFixture
) -> None:
# Default params
default_spark_params = {
"maxBins": 32,
"maxDepth": 5,
"numTrees": 20,
"bootstrap": True,
"featureSubsetStrategy": "auto",
}
default_cuml_params = {
"n_bins": 32,
"n_estimators": 20,
"max_depth": 5,
"bootstrap": True,
"max_features": "auto",
}
est = RFEstimator()
assert_params(est, default_spark_params, default_cuml_params)
# Spark ML Params
spark_params: Dict[str, Any] = {
"maxBins": 17,
"maxDepth": 9,
"numTrees": 17,
"featureSubsetStrategy": "onethird",
}
est = RFEstimator(**spark_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(spark_params)
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update(
{
"n_bins": 17,
"max_depth": 9,
"n_estimators": 17,
"max_features": 1 / 3.0,
}
)
assert_params(est, expected_spark_params, expected_cuml_params)
# Estimator persistence
path = tmp_path + "/random_forest_classifier_tests"
estimator_path = f"{path}/random_forest_classifier_tests"
est.write().overwrite().save(estimator_path)
loaded_est = RandomForestClassifier.load(estimator_path)
assert_params(loaded_est, expected_spark_params, expected_cuml_params)
if RFEstimator == RandomForestRegressor:
est = RFEstimator(impurity="variance")
assert est.cuml_params["split_criterion"] == "mse"
# make sure no warning when enabling float64 inputs
rf_float32 = RFEstimator(float32_inputs=False)
assert "float32_inputs to False" not in caplog.text
assert not rf_float32._float32_inputs
rf_est_model_classes = [
# (estimator, model, n_classes)
(RandomForestClassifier, RandomForestClassificationModel, 2),
(RandomForestClassifier, RandomForestClassificationModel, 4),
(RandomForestRegressor, RandomForestRegressionModel, -1),
]
@pytest.mark.parametrize("est_model_classes", rf_est_model_classes, ids=idfn)
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("data_shape", [(100, 8)], ids=idfn)
def test_random_forest_basic(
tmp_path: str,
est_model_classes: Tuple[RandomForest, RandomForestModel, int],
feature_type: str,
data_type: np.dtype,
data_shape: Tuple[int, int],
) -> None:
RFEstimator, RFEstimatorModel, n_classes = est_model_classes
# Train a toy model
if RFEstimator == RandomForestClassifier:
X, _, y, _ = make_classification_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
n_classes=n_classes,
n_informative=8,
n_redundant=0,
n_repeated=0,
)
else:
X, _, y, _ = make_regression_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
)
with CleanSparkSession() as spark:
df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X, y
)
est = RFEstimator()
est.setFeaturesCol(features_col)
assert est.getFeaturesCol() == features_col
assert label_col is not None
est.setLabelCol(label_col)
assert est.getLabelCol() == label_col
def assert_model(lhs: RandomForestModel, rhs: RandomForestModel) -> None:
assert lhs.cuml_params == rhs.cuml_params
# Vector and array(double) type will be cast to array(float) by default
assert lhs.dtype == np.dtype(np.float32).name
assert lhs.dtype == rhs.dtype
assert lhs.n_cols == rhs.n_cols
assert lhs.n_cols == data_shape[1]
if isinstance(lhs, RandomForestClassificationModel):
assert lhs.numClasses == rhs.numClasses
assert lhs.numClasses == n_classes
# train a model
model = est.fit(df)
assert (
model.transform(df).schema[model.getPredictionCol()].dataType
== DoubleType()
)
# model persistence
path = tmp_path + "/random_forest_tests"
model_path = f"{path}/random_forest_tests"
model.write().overwrite().save(model_path)
model_loaded = RFEstimatorModel.load(model_path)
assert_model(model, model_loaded)
@pytest.mark.parametrize("data_type", ["byte", "short", "int", "long"])
@pytest.mark.parametrize("RFEstimator", [RandomForestClassifier, RandomForestRegressor])
def test_random_forest_numeric_type(
gpu_number: int, RFEstimator: RandomForest, data_type: str
) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
[1, 4, 4, 4, 0],
[2, 2, 2, 2, 1],
[3, 3, 3, 2, 2],
[3, 3, 3, 2, 3],
[5, 2, 1, 3, 4],
]
with CleanSparkSession() as spark:
feature_cols = ["c1", "c2", "c3", "c4"]
schema = (
", ".join([f"{c} {data_type}" for c in feature_cols])
+ f", label {data_type}"
)
df = spark.createDataFrame(data, schema=schema)
lr = RFEstimator(num_workers=gpu_number)
lr.setFeaturesCol(feature_cols)
lr.fit(df)
from .conftest import _gpu_number
if _gpu_number > 1:
num_workers = [1, _gpu_number]
else:
num_workers = [1]
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_shape", [(2000, 8)], ids=idfn)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.parametrize("n_classes", [2, 4])
@pytest.mark.parametrize("num_workers", num_workers)
@pytest.mark.slow
def test_random_forest_classifier(
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
n_classes: int,
num_workers: int,
) -> None:
X_train, X_test, y_train, y_test = make_classification_dataset(
datatype=cast(np.dtype, np.float32),
nrows=data_shape[0],
ncols=data_shape[1],
n_classes=n_classes,
n_informative=8,
n_redundant=0,
n_repeated=0,
)
rf_params: Dict[str, Any] = {
"n_estimators": 20,
"n_bins": 64,
"max_depth": 6,
"bootstrap": False,
"max_features": 1.0,
}
from cuml import RandomForestClassifier as cuRf
cu_rf = cuRf(n_streams=1, **rf_params)
cu_rf.fit(X_train, y_train)
cu_preds = cu_rf.predict(X_test)
cu_preds_proba = cu_rf.predict_proba(X_test)
cu_acc = accuracy_score(y_test, cu_preds)
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
train_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X_train, y_train
)
assert label_col is not None
spark_rf = RandomForestClassifier(
num_workers=num_workers,
**rf_params,
)
spark_rf.setFeaturesCol(features_col)
spark_rf.setLabelCol(label_col)
spark_rf_model: RandomForestClassificationModel = spark_rf.fit(train_df)
test_df, _, _ = create_pyspark_dataframe(
spark, feature_type, data_type, X_test, y_test
)
result = spark_rf_model.transform(test_df).collect()
pred_result = [row.prediction for row in result]
# no need to compare all feature types.
if feature_type == feature_types.vector:
spark_cpu_result = spark_rf_model.cpu().transform(test_df).collect()
spark_cpu_pred_result = [row.prediction for row in spark_cpu_result]
# Due to precision differences, not all predictions are guaranteed to match; require at least 99% agreement.
commons = np.count_nonzero(
np.array(spark_cpu_pred_result) == np.array(pred_result)
)
assert commons / len(spark_cpu_pred_result) >= 0.99
spark_acc = accuracy_score(y_test, np.array(pred_result))
# Vector features are converted to array<double> by default, which can
# introduce precision differences for random forest.
if num_workers == 1 and not (
data_type == np.float32 and feature_type == feature_types.vector
):
assert cu_acc == spark_acc
pred_proba_result = [row.probability for row in result]
np.testing.assert_allclose(pred_proba_result, cu_preds_proba, rtol=1e-3)
else:
assert cu_acc - spark_acc < 0.07
# for multi-class classification evaluation
if n_classes > 2:
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator(
predictionCol=spark_rf_model.getPredictionCol(),
labelCol=spark_rf_model.getLabelCol(),
)
spark_cuml_f1_score = spark_rf_model._transformEvaluate(test_df, evaluator)
transformed_df = spark_rf_model.transform(test_df)
pyspark_f1_score = evaluator.evaluate(transformed_df)
assert math.fabs(pyspark_f1_score - spark_cuml_f1_score[0]) < 1e-6
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_shape", [(2000, 8)], ids=idfn)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.parametrize("num_workers", num_workers)
@pytest.mark.slow
def test_random_forest_regressor(
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
num_workers: int,
) -> None:
X_train, X_test, y_train, y_test = make_regression_dataset(
datatype=cast(np.dtype, np.float32),
nrows=data_shape[0],
ncols=data_shape[1],
)
rf_params: Dict[str, Any] = {
"n_estimators": 20,
"n_bins": 64,
"max_depth": 6,
"bootstrap": False,
"max_features": 1.0,
"random_state": 1.0,
}
from cuml import RandomForestRegressor as cuRf
cu_rf = cuRf(n_streams=1, **rf_params)
cu_rf.fit(X_train, y_train)
cu_preds = cu_rf.predict(X_test)
cu_acc = r2_score(y_test, cu_preds)
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
train_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X_train, y_train
)
assert label_col is not None
spark_rf = RandomForestRegressor(
num_workers=num_workers,
**rf_params,
)
spark_rf.setFeaturesCol(features_col)
spark_rf.setLabelCol(label_col)
spark_rf_model = spark_rf.fit(train_df)
test_df, _, _ = create_pyspark_dataframe(spark, feature_type, data_type, X_test)
result = spark_rf_model.transform(test_df).collect()
pred_result = [row.prediction for row in result]
if feature_type == feature_types.vector:
# no need to compare all feature types.
spark_cpu_result = spark_rf_model.cpu().transform(test_df).collect()
spark_cpu_pred_result = [row.prediction for row in spark_cpu_result]
assert array_equal(spark_cpu_pred_result, pred_result)
spark_acc = r2_score(y_test, np.array(pred_result))
# Vector features are converted to array<double> by default, which can
# introduce precision differences for random forest.
if num_workers == 1 and not (
data_type == np.float32 and feature_type == feature_types.vector
):
assert pytest.approx(cu_acc) == spark_acc
else:
assert cu_acc - spark_acc < 0.09
@pytest.mark.parametrize("rf_type", [RandomForestClassifier, RandomForestRegressor])
@pytest.mark.parametrize(
"feature_subset", ["auto", "all", "0.85", "2", "onethird", "log2", "sqrt", "foo"]
)
def test_random_forest_featuresubset(
rf_type: RandomForestType,
feature_subset: str,
) -> None:
with CleanSparkSession() as spark:
df = spark.createDataFrame(
[
(1.0, Vectors.dense(1.0, 0.0)),
(1.0, Vectors.dense(0.8, 1.0)),
(0.0, Vectors.dense(0.2, 0.8)),
(0.0, Vectors.sparse(2, [1], [1.0])),
(1.0, Vectors.dense(1.0, 0.0)),
(1.0, Vectors.dense(0.8, 1.0)),
(0.0, Vectors.dense(0.2, 0.8)),
(0.0, Vectors.sparse(2, [1], [1.0])),
],
["label", "features"],
)
if feature_subset != "foo":
rf = rf_type(
numTrees=3,
maxDepth=2,
labelCol="label",
seed=42,
featureSubsetStrategy=feature_subset,
)
m = rf.fit(df)
else:
with pytest.raises(ValueError):
rf = rf_type(
numTrees=3,
maxDepth=2,
labelCol="label",
seed=42,
featureSubsetStrategy=feature_subset,
)
@pytest.mark.compat
@pytest.mark.parametrize(
"rf_types",
[
(SparkRFClassifier, SparkRFClassificationModel),
(RandomForestClassifier, RandomForestClassificationModel),
],
)
@pytest.mark.parametrize("impurity", ["gini", "entropy"])
def test_random_forest_classifier_spark_compat(
rf_types: Tuple[RandomForestType, RandomForestModelType],
gpu_number: int,
tmp_path: str,
impurity: str,
) -> None:
# based on https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.classification.RandomForestClassifier.html
# cuML does not support single-feature input, so an expanded dataset is used
_RandomForestClassifier, _RandomForestClassificationModel = rf_types
with CleanSparkSession() as spark:
df = spark.createDataFrame(
[
(1.0, Vectors.dense(1.0, 0.0)),
(1.0, Vectors.dense(0.8, 1.0)),
(0.0, Vectors.dense(0.2, 0.8)),
(0.0, Vectors.sparse(2, [1], [1.0])),
(1.0, Vectors.dense(1.0, 0.0)),
(1.0, Vectors.dense(0.8, 1.0)),
(0.0, Vectors.dense(0.2, 0.8)),
(0.0, Vectors.sparse(2, [1], [1.0])),
],
["label", "features"],
)
rf = _RandomForestClassifier(
numTrees=3, maxDepth=2, labelCol="label", seed=42, impurity=impurity
)
rf.setLeafCol("leafId")
assert rf.getLeafCol() == "leafId"
if isinstance(rf, RandomForestClassifier):
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
rf.num_workers = gpu_number
df = df.repartition(gpu_number)
assert rf.getMinWeightFractionPerNode() == 0.0
assert rf.getNumTrees() == 3
assert rf.getMaxDepth() == 2
assert rf.getSeed() == 42
assert rf.getFeaturesCol() == "features"
assert rf.getLabelCol() == "label"
model = rf.fit(df)
assert model.getFeaturesCol() == "features"
assert model.getLabelCol() == "label"
assert model.getBootstrap()
model.setRawPredictionCol("newRawPrediction")
assert model.getRawPredictionCol() == "newRawPrediction"
featureImportances = model.featureImportances
assert np.allclose(model.treeWeights, [1.0, 1.0, 1.0])
if isinstance(rf, SparkRFClassifier):
assert featureImportances == Vectors.sparse(2, {0: 1.0})
else:
# TODO: investigate difference
assert featureImportances == Vectors.sparse(2, {})
test0 = spark.createDataFrame([(Vectors.dense(-1.0, 0.0),)], ["features"])
example = test0.head()
if example:
model.predict(example.features)
model.predictRaw(example.features)
model.predictProbability(example.features)
result = model.transform(test0).head()
if result:
if isinstance(model, SparkRFClassificationModel):
assert result.prediction == 0.0
assert np.argmax(result.probability) == 0
else:
# TODO: investigate difference
assert result.prediction == 1.0
assert np.argmax(result.probability) == 1
if isinstance(model, SparkRFClassificationModel):
assert np.argmax(result.newRawPrediction) == 0
assert result.leafId == Vectors.dense([0.0, 0.0, 0.0])
else:
with pytest.raises((NotImplementedError, AttributeError)):
assert np.argmax(result.newRawPrediction) == 0
with pytest.raises((NotImplementedError, AttributeError)):
assert result.leafId == Vectors.dense([0.0, 0.0, 0.0])
test1 = spark.createDataFrame([(Vectors.sparse(2, [0], [1.0]),)], ["features"])
example = test1.head()
if example:
assert model.transform(test1).head().prediction == 1.0
trees = model.trees
assert len(trees) == 3
rfc_path = tmp_path + "/rfc"
rf.save(rfc_path)
rf2 = _RandomForestClassifier.load(rfc_path)
assert rf2.getNumTrees() == 3
model_path = tmp_path + "/rfc_model"
model.save(model_path)
model2 = _RandomForestClassificationModel.load(model_path)
assert model.transform(test0).take(1) == model2.transform(test0).take(1)
assert model.featureImportances == model2.featureImportances
@pytest.mark.compat
@pytest.mark.parametrize(
"rf_types",
[
(SparkRFRegressor, SparkRFRegressionModel),
(RandomForestRegressor, RandomForestRegressionModel),
],
)
def test_random_forest_regressor_spark_compat(
rf_types: Tuple[RandomForestType, RandomForestModelType],
gpu_number: int,
tmp_path: str,
) -> None:
# based on https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.regression.RandomForestRegressor.html
# cuML does not support single-feature input, so an expanded dataset is used
_RandomForestRegressor, _RandomForestRegressionModel = rf_types
with CleanSparkSession() as spark:
df = spark.createDataFrame(
[(1.0, Vectors.dense(1.0, 1.0)), (0.0, Vectors.sparse(2, [], []))],
["label", "features"],
)
rf = _RandomForestRegressor(numTrees=2, maxDepth=2)
rf.setSeed(42)
assert rf.getMaxDepth() == 2
assert rf.getMinWeightFractionPerNode() == 0.0
assert rf.getNumTrees() == 2
assert rf.getSeed() == 42
if isinstance(rf, RandomForestRegressor):
# force single GPU worker while testing compat
rf.num_workers = 1
model = rf.fit(df)
model.setLeafCol("leafId")
assert np.allclose(model.treeWeights, [1.0, 1.0])
if isinstance(model, SparkRFRegressionModel):
assert model.featureImportances == Vectors.sparse(2, {1: 1.0})
else:
# need to investigate
assert model.featureImportances == Vectors.sparse(2, {})
assert model.getBootstrap()
assert model.getSeed() == 42
assert model.getLeafCol() == "leafId"
test0 = spark.createDataFrame([(Vectors.dense(-1.0, -1.0),)], ["features"])
example = test0.head()
if example:
assert model.predict(example.features) == 0.0
assert model.predictLeaf(example.features) == Vectors.dense([0.0, 0.0])
result = model.transform(test0).head()
assert result.prediction == 0.0
assert len(model.trees) == 2
if isinstance(model, SparkRFRegressionModel):
assert result.leafId == Vectors.dense([0.0, 0.0])
else:
with pytest.raises((NotImplementedError, AttributeError)):
result.leafId
assert model.numFeatures == 2
assert model.getNumTrees == 2 # implemented as a property
test1 = spark.createDataFrame([(Vectors.sparse(2, [0], [1.0]),)], ["features"])
result = model.transform(test1).head()
if result:
assert result.prediction == 0.0
rfr_path = tmp_path + "/rfr"
rf.save(rfr_path)
rf2 = _RandomForestRegressor.load(rfr_path)
assert rf2.getNumTrees() == 2 # implemented as a method
model_path = tmp_path + "/rfr_model"
model.save(model_path)
model2 = _RandomForestRegressionModel.load(model_path)
assert model.featureImportances == model2.featureImportances
assert model.transform(test0).take(1) == model2.transform(test0).take(1)
@pytest.mark.parametrize("RFEstimator", [RandomForestClassifier, RandomForestRegressor])
@pytest.mark.parametrize("feature_type", [feature_types.vector])
@pytest.mark.parametrize("data_type", [np.float32])
def test_fit_multiple_in_single_pass(
RFEstimator: RandomForest,
feature_type: str,
data_type: np.dtype,
) -> None:
X_train, _, y_train, _ = make_classification_dataset(
datatype=data_type,
nrows=100,
ncols=5,
)
with CleanSparkSession() as spark:
train_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X_train, y_train
)
assert label_col is not None
rf = RFEstimator(bootstrap=False, max_features=1.0, random_state=1.0)
rf.setFeaturesCol(features_col)
rf.setLabelCol(label_col)
initial_rf = rf.copy()
param_maps: List[Dict[Param, Any]] = [
# all supported pyspark parameters
{
rf.maxDepth: 3,
rf.maxBins: 3,
rf.numTrees: 5,
rf.featureSubsetStrategy: "onethird",
rf.impurity: "entropy"
if isinstance(rf, RandomForestClassifier)
else "variance",
rf.minInstancesPerNode: 2,
},
# different values for all supported pyspark parameters
{
rf.maxDepth: 4,
rf.maxBins: 4,
rf.numTrees: 6,
rf.featureSubsetStrategy: "sqrt",
rf.impurity: "gini"
if isinstance(rf, RandomForestClassifier)
else "variance",
rf.minInstancesPerNode: 3,
},
# part of all supported pyspark parameters.
{rf.maxDepth: 5, rf.maxBins: 5, rf.featureSubsetStrategy: "log2"},
{rf.maxDepth: 6, rf.maxBins: 6, rf.numTrees: 8},
]
models = rf.fit(train_df, param_maps)
def get_num_trees(
model: Union[RandomForestClassificationModel, RandomForestRegressionModel]
) -> int:
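# model._model_json appears to hold one JSON dump per training worker; each
# decodes to a list of trees, so flatten across the dumps and count entries.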
model_jsons = cast(List[str], model._model_json)
trees = [
None for trees_json in model_jsons for trees in json.loads(trees_json)
]
return len(trees)
for i, param_map in enumerate(param_maps):
rf = initial_rf.copy()
single_model = rf.fit(train_df, param_map)
assert single_model._treelite_model == models[i]._treelite_model
assert models[i].getMaxDepth() == param_map[rf.maxDepth]
assert models[i].getMaxBins() == param_map[rf.maxBins]
assert (
models[i].getFeatureSubsetStrategy()
== param_map[rf.featureSubsetStrategy]
if rf.featureSubsetStrategy in param_map
else single_model.getFeatureSubsetStrategy()
)
assert (
models[i].getImpurity() == param_map[rf.impurity]
if rf.impurity in param_map
else single_model.getImpurity()
)
assert (
models[i].getMinInstancesPerNode() == param_map[rf.minInstancesPerNode]
if rf.minInstancesPerNode in param_map
else single_model.getMinInstancesPerNode()
)
assert (
get_num_trees(models[i]) == param_map[rf.numTrees]
if rf.numTrees in param_map
else single_model.getNumTrees
)
@pytest.mark.parametrize(
"estimator_evaluator",
[
(RandomForestClassifier, MulticlassClassificationEvaluator),
(RandomForestRegressor, RegressionEvaluator),
],
)
@pytest.mark.parametrize("feature_type", [feature_types.vector])
@pytest.mark.parametrize("data_type", [np.float32])
@pytest.mark.parametrize("data_shape", [(100, 8)], ids=idfn)
def test_crossvalidator_random_forest(
estimator_evaluator: Tuple[RandomForest, RandomForestEvaluator],
feature_type: str,
data_type: np.dtype,
data_shape: Tuple[int, int],
) -> None:
RF, Evaluator = estimator_evaluator
# Train a toy model
if RF == RandomForestClassifier:
X, _, y, _ = make_classification_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
n_classes=4,
n_informative=data_shape[1],
n_redundant=0,
n_repeated=0,
)
else:
X, _, y, _ = make_regression_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
)
with CleanSparkSession() as spark:
df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X, y
)
assert label_col is not None
rfc = RF()
rfc.setFeaturesCol(features_col)
rfc.setLabelCol(label_col)
evaluator = Evaluator()
evaluator.setLabelCol(label_col)
grid = (
ParamGridBuilder()
.addGrid(rfc.maxDepth, [2, 4])
.addGrid(rfc.maxBins, [3, 5])
.build()
)
cv = CrossValidator(
estimator=rfc,
estimatorParamMaps=grid,
evaluator=evaluator,
numFolds=2,
seed=1,
)
# without exception
model: CrossValidatorModel = cv.fit(df)
spark_cv = SparkCrossValidator(
estimator=rfc,
estimatorParamMaps=grid,
evaluator=evaluator,
numFolds=2,
seed=1,
)
spark_cv_model = spark_cv.fit(df)
assert array_equal(model.avgMetrics, spark_cv_model.avgMetrics)
| spark-rapids-ml-branch-23.10 | python/tests/test_random_forest.py |
from typing import List, Tuple
import numpy as np
import pandas as pd
import pytest
from _pytest.logging import LogCaptureFixture
from pyspark.sql import DataFrame
from sklearn.datasets import make_blobs
from spark_rapids_ml.core import alias
from spark_rapids_ml.knn import NearestNeighbors
from .sparksession import CleanSparkSession
from .utils import (
array_equal,
create_pyspark_dataframe,
get_default_cuml_parameters,
idfn,
pyspark_supported_feature_types,
)
def test_default_cuml_params(caplog: LogCaptureFixture) -> None:
from cuml import NearestNeighbors as CumlNearestNeighbors
from cuml.neighbors.nearest_neighbors_mg import (
NearestNeighborsMG, # to include the batch_size parameter that exists in the MG class
)
cuml_params = get_default_cuml_parameters(
[CumlNearestNeighbors, NearestNeighborsMG],
[
"handle",
"algorithm",
"metric",
"p",
"algo_params",
"metric_expanded",
"metric_params",
"output_type",
],
)
spark_params = NearestNeighbors()._get_cuml_params_default()
assert cuml_params == spark_params
# setting float32_inputs=False should warn: NearestNeighbors only accepts float32, so the flag is kept as True
nn_float32 = NearestNeighbors(float32_inputs=False)
assert "float32_inputs to False" in caplog.text
assert nn_float32._float32_inputs
def test_example(gpu_number: int, tmp_path: str) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
([1.0, 1.0], "a"),
([2.0, 2.0], "b"),
([3.0, 3.0], "c"),
([4.0, 4.0], "d"),
([5.0, 5.0], "e"),
([6.0, 6.0], "f"),
([7.0, 7.0], "g"),
([8.0, 8.0], "h"),
]
query = [
([0.0, 0.0], "qa"),
([1.0, 1.0], "qb"),
([4.1, 4.1], "qc"),
([8.0, 8.0], "qd"),
([9.0, 9.0], "qe"),
]
topk = 2
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(2)}
with CleanSparkSession(conf) as spark:
schema = f"features array<float>, metadata string"
data_df = spark.createDataFrame(data, schema)
query_df = spark.createDataFrame(query, schema)
gpu_knn = NearestNeighbors(num_workers=gpu_number)
gpu_knn = gpu_knn.setInputCol("features")
gpu_knn = gpu_knn.setK(topk)
with pytest.raises(NotImplementedError):
gpu_knn.save(tmp_path + "/knn_estimator")
gpu_model = gpu_knn.fit(data_df)
with pytest.raises(NotImplementedError):
gpu_model.save(tmp_path + "/knn_model")
(item_df_withid, query_df_withid, knn_df) = gpu_model.kneighbors(query_df)
item_df_withid.show()
query_df_withid.show()
knn_df.show()
# check knn results
import math
distances_df = knn_df.select("distances")
indices_df = knn_df.select("indices")
distance_rows = distances_df.collect()
distances = [r.distances for r in distance_rows]
index_rows = indices_df.collect()
indices = [r.indices for r in index_rows]
def assert_distances_equal(distances: List[List[float]]) -> None:
assert len(distances) == len(query)
assert array_equal(distances[0], [math.sqrt(2.0), math.sqrt(8.0)])
assert array_equal(distances[1], [0.0, math.sqrt(2.0)])
assert array_equal(
distances[2], [math.sqrt(0.01 + 0.01), math.sqrt(0.81 + 0.81)]
)
assert array_equal(distances[3], [0.0, math.sqrt(2.0)])
assert array_equal(distances[4], [math.sqrt(2.0), math.sqrt(8.0)])
item_ids = list(
item_df_withid.select(alias.row_number).toPandas()[alias.row_number]
)
def assert_indices_equal(indices: List[List[int]]) -> None:
assert len(indices) == len(query)
assert indices[0] == [item_ids[0], item_ids[1]]
assert indices[1] == [item_ids[0], item_ids[1]]
assert indices[2] == [item_ids[3], item_ids[4]]
assert indices[3] == [item_ids[7], item_ids[6]]
assert indices[4] == [item_ids[7], item_ids[6]]
assert_distances_equal(distances=distances)
assert_indices_equal(indices=indices)
# test transform: throw an error if transform function is called
with pytest.raises(NotImplementedError):
gpu_model.transform(query_df)
# test exactNearestNeighborsJoin
knnjoin_df = gpu_model.exactNearestNeighborsJoin(query_df, distCol="distCol")
knnjoin_df.show()
assert len(knnjoin_df.dtypes) == 3
assert knnjoin_df.dtypes[0] == (
"item_df",
"struct<features:array<float>,metadata:string>",
)
assert knnjoin_df.dtypes[1] == (
"query_df",
"struct<features:array<float>,metadata:string>",
)
assert knnjoin_df.dtypes[2] == ("distCol", "float")
def assert_knn_metadata_equal(knn_metadata: List[List[str]]) -> None:
"""
This is equivalent to assert_indices_equal but replaces indices with item_metadata.
"""
assert len(knn_metadata) == len(query)
assert knn_metadata[0] == ["a", "b"]
assert knn_metadata[1] == ["a", "b"]
assert knn_metadata[2] == ["d", "e"]
assert knn_metadata[3] == ["h", "g"]
assert knn_metadata[4] == ["h", "g"]
reconstructed_knn_df = reconstruct_knn_df(
knnjoin_df=knnjoin_df, row_identifier_col="metadata", distCol="distCol"
)
reconstructed_rows = reconstructed_knn_df.collect()
reconstructed_knn_metadata = [r.indices for r in reconstructed_rows]
assert_knn_metadata_equal(reconstructed_knn_metadata)
reconstructed_distances = [r.distances for r in reconstructed_rows]
assert_distances_equal(reconstructed_distances)
reconstructed_query_ids = [r.query_id for r in reconstructed_rows]
assert reconstructed_query_ids == ["qa", "qb", "qc", "qd", "qe"]
knnjoin_items = (
knnjoin_df.select(
knnjoin_df["item_df.features"].alias("features"),
knnjoin_df["item_df.metadata"].alias("metadata"),
)
.distinct()
.sort("metadata")
.collect()
)
assert len(knnjoin_items) == 6
assert knnjoin_items[0]["features"] == data[0][0]
assert knnjoin_items[0]["metadata"] == data[0][1]
assert knnjoin_items[1]["features"] == data[1][0]
assert knnjoin_items[1]["metadata"] == data[1][1]
assert knnjoin_items[2]["features"] == data[3][0]
assert knnjoin_items[2]["metadata"] == data[3][1]
assert knnjoin_items[3]["features"] == data[4][0]
assert knnjoin_items[3]["metadata"] == data[4][1]
assert knnjoin_items[4]["features"] == data[6][0]
assert knnjoin_items[4]["metadata"] == data[6][1]
assert knnjoin_items[5]["features"] == data[7][0]
assert knnjoin_items[5]["metadata"] == data[7][1]
knnjoin_queries = (
knnjoin_df.select(
knnjoin_df["query_df.features"].alias("features"),
knnjoin_df["query_df.metadata"].alias("metadata"),
)
.distinct()
.sort("metadata")
.collect()
)
assert len(knnjoin_queries) == len(query)
for i in range(len(knnjoin_queries)):
if i == 2:
assert array_equal(knnjoin_queries[i]["features"], query[i][0])
else:
assert knnjoin_queries[i]["features"] == query[i][0]
assert knnjoin_queries[i]["metadata"] == query[i][1]
def test_example_with_id(gpu_number: int) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
(101, [1.0, 1.0], "a"),
(102, [2.0, 2.0], "b"),
(103, [3.0, 3.0], "c"),
(104, [4.0, 4.0], "d"),
(105, [5.0, 5.0], "e"),
(106, [6.0, 6.0], "f"),
(107, [7.0, 7.0], "g"),
(108, [8.0, 8.0], "h"),
]
query = [
(201, [0.0, 0.0], "qa"),
(202, [1.0, 1.0], "qb"),
(203, [4.1, 4.1], "qc"),
(204, [8.0, 8.0], "qd"),
(205, [9.0, 9.0], "qe"),
]
topk = 2
with CleanSparkSession() as spark:
schema = f"id int, features array<float>, metadata string"
data_df = spark.createDataFrame(data, schema)
query_df = spark.createDataFrame(query, schema)
gpu_knn = NearestNeighbors(num_workers=gpu_number)
gpu_knn = gpu_knn.setInputCol("features")
gpu_knn = gpu_knn.setIdCol("id")
gpu_knn = gpu_knn.setK(topk)
gpu_model = gpu_knn.fit(data_df)
item_df_withid, query_df_withid, knn_df = gpu_model.kneighbors(query_df)
item_df_withid.show()
query_df_withid.show()
knn_df.show()
distances_df = knn_df.select("distances")
indices_df = knn_df.select("indices")
indices = indices_df.collect()
indices = [r[0] for r in indices]
def assert_indices_equal(indices: List[List[int]]) -> None:
assert indices[0] == [101, 102]
assert indices[1] == [101, 102]
assert indices[2] == [104, 105]
assert indices[3] == [108, 107]
assert indices[4] == [108, 107]
# test exactNearestNeighborsJoin
knnjoin_df = gpu_model.exactNearestNeighborsJoin(query_df, distCol="distCol")
knnjoin_df.show()
assert len(knnjoin_df.dtypes) == 3
assert knnjoin_df.dtypes[0] == (
"item_df",
f"struct<id:int,features:array<float>,metadata:string>",
)
assert knnjoin_df.dtypes[1] == (
"query_df",
"struct<id:int,features:array<float>,metadata:string>",
)
assert knnjoin_df.dtypes[2] == ("distCol", "float")
reconstructed_knn_df = reconstruct_knn_df(
knnjoin_df=knnjoin_df, row_identifier_col="id", distCol="distCol"
)
reconstructed_rows = reconstructed_knn_df.collect()
reconstructed_knn_indices = [r.indices for r in reconstructed_rows]
assert_indices_equal(reconstructed_knn_indices)
reconstructed_query_ids = [r.query_id for r in reconstructed_rows]
assert reconstructed_query_ids == [201, 202, 203, 204, 205]
@pytest.mark.parametrize(
"feature_type", pyspark_supported_feature_types
) # vector feature type will be converted to float32 to be compatible with cuml multi-gpu NearestNeighbors Class
@pytest.mark.parametrize("data_shape", [(1000, 50)], ids=idfn)
@pytest.mark.parametrize("data_type", [np.float32])
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.parametrize(
"batch_size", [100, 10000]
) # larger batch_size higher query throughput, yet more memory
@pytest.mark.slow
def test_nearest_neighbors(
gpu_number: int,
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
batch_size: int,
) -> None:
n_neighbors = 5
n_clusters = 10
batch_size = batch_size
X, _ = make_blobs(
n_samples=data_shape[0],
n_features=data_shape[1],
centers=n_clusters,
random_state=0,
) # make_blobs creates a random dataset of isotropic gaussian blobs.
# obtain cuml results
from cuml import NearestNeighbors as cuNN
cuml_knn = cuNN(n_neighbors=n_neighbors, output_type="numpy")
cuml_knn.fit(X)
cuml_distances, cuml_indices = cuml_knn.kneighbors(X)
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
data_df, features_col, _ = create_pyspark_dataframe(
spark, feature_type, data_type, X, None
)
knn_est = NearestNeighbors(
n_neighbors=n_neighbors, batch_size=batch_size
).setInputCol(features_col)
# test kneighbors: obtain spark results
knn_model = knn_est.fit(data_df)
query_df = data_df
(item_df_withid, query_df_withid, knn_df) = knn_model.kneighbors(query_df)
distances_df = knn_df.select("distances")
indices_df = knn_df.select("indices")
# test kneighbors: compare spark results with cuml results
distances = distances_df.collect()
distances = [r[0] for r in distances]
indices = indices_df.collect()
indices = [r[0] for r in indices]
# test kneighbors: compare top-1 nn indices(self) and distances(self)
self_index = [knn[0] for knn in indices]
assert self_index == list(
item_df_withid.select(alias.row_number).toPandas()[alias.row_number]
)
# test kneighbors: compare distances
assert len(distances) == len(cuml_distances)
for i in range(len(distances)):
assert array_equal(distances[i], cuml_distances[i])
# test exactNearestNeighborsJoin
with pytest.raises(ValueError):
knn_model.exactNearestNeighborsJoin(query_df_withid)
knn_model.setIdCol(item_df_withid.dtypes[0][0])
knnjoin_df = knn_model.exactNearestNeighborsJoin(query_df_withid)
reconstructed_knn_df = reconstruct_knn_df(
knnjoin_df, row_identifier_col=knn_model.getIdCol()
)
assert reconstructed_knn_df.collect() == knn_df.collect()
def test_lsh_spark_compat(gpu_number: int) -> None:
from pyspark.ml.feature import BucketedRandomProjectionLSH
from pyspark.ml.linalg import Vectors
from pyspark.sql.functions import col
# reduce the number of GPUs for toy dataset to avoid empty partition.
# cuml backend requires k <= the number of rows in the smallest index partition.
gpu_number = min(gpu_number, 2)
topk = 2
with CleanSparkSession() as spark:
dataA = [
(0, Vectors.dense([1.0, 1.0])),
(1, Vectors.dense([1.0, -1.0])),
(2, Vectors.dense([-1.0, -1.0])),
(3, Vectors.dense([-1.0, 1.0])),
(4, Vectors.dense([100.0, 100.0])),
(5, Vectors.dense([100.0, -100.0])),
(6, Vectors.dense([-100.0, -100.0])),
(7, Vectors.dense([-100.0, 100.0])),
]
dfA = spark.createDataFrame(dataA, ["id", "features"])
dataB = [
(4, Vectors.dense([1.0, 0.0])),
(5, Vectors.dense([-1.0, 0.0])),
(6, Vectors.dense([0.0, 1.0])),
(7, Vectors.dense([0.0, -1.0])),
]
dfB = spark.createDataFrame(dataB, ["id", "features"])
dfA.show()
dfB.show()
# get CPU results
brp = BucketedRandomProjectionLSH(
inputCol="features", outputCol="hashes", bucketLength=5.0, numHashTables=3
)
model = brp.fit(dfA)
spark_res = model.approxSimilarityJoin(
dfA, dfB, 1.5, distCol="EuclideanDistance"
)
spark_res.show(truncate=False)
# get GPU results with exactNearestNeighborsJoin(dfA, dfB, k, distCol="EuclideanDistance")
gpu_knn = NearestNeighbors(num_workers=gpu_number, inputCol="features").setK(
topk
)
gpu_model = gpu_knn.fit(dfA)
gpu_res = gpu_model.exactNearestNeighborsJoin(
query_df=dfB, distCol="EuclideanDistance"
)
gpu_res.show(truncate=False)
# check results
def check_dtypes(res_df: DataFrame, from_spark: bool) -> None:
assert len(res_df.dtypes) == 3
assert res_df.dtypes[0][0] == ("datasetA" if from_spark else "item_df")
assert res_df.dtypes[1][0] == ("datasetB" if from_spark else "query_df")
assert res_df.dtypes[2][0] == ("EuclideanDistance")
if from_spark:
assert res_df.dtypes[0][1].startswith(
"struct<id:bigint,features:vector"
)
assert res_df.dtypes[1][1].startswith(
"struct<id:bigint,features:vector"
)
assert res_df.dtypes[2][1] == "double"
else:
assert res_df.dtypes[0][1] == "struct<id:bigint,features:vector>"
assert res_df.dtypes[1][1] == "struct<id:bigint,features:vector>"
assert res_df.dtypes[2][1] == "float"
check_dtypes(res_df=spark_res, from_spark=True)
check_dtypes(res_df=gpu_res, from_spark=False)
items = gpu_res.select(
gpu_res["item_df.id"], gpu_res["item_df.features"]
).collect()
assert len(items) == topk * len(dataB)
for item in items:
id = item.id
features = item.features
assert features == dataA[id][1]
queries = gpu_res.select(
gpu_res["query_df.id"], gpu_res["query_df.features"]
).collect()
for query in queries:
id = query.id
features = query.features
assert features == dataB[id - 4][1]
knn_results = reconstruct_knn_df(
gpu_res, row_identifier_col="id", distCol="EuclideanDistance"
).collect()
assert knn_results[0]["query_id"] == 4
assert knn_results[0]["indices"] == [1, 0] or knn_results[0]["indices"] == [
0,
1,
]
assert knn_results[0]["distances"] == [1.0, 1.0]
assert knn_results[1]["query_id"] == 5
assert knn_results[1]["indices"] == [2, 3] or knn_results[1]["indices"] == [
3,
2,
]
assert knn_results[1]["distances"] == [1.0, 1.0]
assert knn_results[2]["query_id"] == 6
assert knn_results[2]["indices"] == [3, 0] or knn_results[2]["indices"] == [
0,
3,
]
assert knn_results[2]["distances"] == [1.0, 1.0]
assert knn_results[3]["query_id"] == 7
assert knn_results[3]["indices"] == [2, 1] or knn_results[3]["indices"] == [
1,
2,
]
assert knn_results[3]["distances"] == [1.0, 1.0]
def reconstruct_knn_df(
knnjoin_df: DataFrame, row_identifier_col: str, distCol: str = "distCol"
) -> DataFrame:
"""
Takes the dataframe returned by exactNearestNeighborsJoin (knnjoin_df) and
reconstructs the dataframe that kneighbors would return (knn_df).
"""
knn_df: DataFrame = knnjoin_df.select(
knnjoin_df[f"query_df.{row_identifier_col}"].alias(f"query_id"),
knnjoin_df[f"item_df.{row_identifier_col}"].alias("index"),
knnjoin_df[distCol].alias("distance"),
)
def functor(pdf: pd.DataFrame) -> pd.DataFrame:
pdf = pdf.sort_values(by=["distance"])
indices = pdf["index"].tolist()
distances = pdf["distance"].tolist()
query_id = pdf[f"query_id"].tolist()[0]
return pd.DataFrame(
{"query_id": [query_id], "indices": [indices], "distances": [distances]}
)
knn_df = knn_df.groupBy("query_id").applyInPandas(
functor,
schema=f"query_id {knn_df.dtypes[0][1]}, "
+ f"indices array<{knn_df.dtypes[1][1]}>, "
+ f"distances array<{knn_df.dtypes[2][1]}>",
)
knn_df = knn_df.sort("query_id")
return knn_df
| spark-rapids-ml-branch-23.10 | python/tests/test_nearest_neighbors.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Tuple, Type, TypeVar
import numpy as np
import pytest
from _pytest.logging import LogCaptureFixture
from pyspark.ml.feature import PCA as SparkPCA
from pyspark.ml.feature import PCAModel as SparkPCAModel
from pyspark.ml.functions import array_to_vector
from pyspark.ml.linalg import DenseMatrix, Vectors
from pyspark.sql.functions import col
from sklearn.datasets import make_blobs
from spark_rapids_ml.feature import PCA, PCAModel
from .sparksession import CleanSparkSession
from .utils import (
array_equal,
assert_params,
create_pyspark_dataframe,
cuml_supported_data_types,
get_default_cuml_parameters,
idfn,
pyspark_supported_feature_types,
)
PCAType = TypeVar("PCAType", Type[SparkPCA], Type[PCA])
PCAModelType = TypeVar("PCAModelType", Type[SparkPCAModel], Type[PCAModel])
def test_default_cuml_params(caplog: LogCaptureFixture) -> None:
from cuml import PCA as CumlPCA
cuml_params = get_default_cuml_parameters(
[CumlPCA],
[
"copy",
"handle",
"iterated_power",
"output_type",
"random_state",
"tol",
],
)
spark_params = PCA()._get_cuml_params_default()
assert cuml_params == spark_params
# make sure no warning when enabling float64 inputs
pca_float32 = PCA(float32_inputs=False)
assert "float32_inputs to False" not in caplog.text
assert not pca_float32._float32_inputs
def test_fit(gpu_number: int) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
topk = 1
with CleanSparkSession() as spark:
df = (
spark.sparkContext.parallelize(data)
.map(lambda row: (row,))
.toDF(["features"])
)
gpu_pca = (
PCA(num_workers=gpu_number, verbose=6).setInputCol("features").setK(topk)
)
gpu_model = gpu_pca.fit(df)
assert gpu_model.getInputCol() == "features"
assert len(gpu_model.mean_) == 2
assert gpu_model.mean_[0] == pytest.approx(2.0, 0.001)
assert gpu_model.mean_[1] == pytest.approx(2.0, 0.001)
assert len(gpu_model.components_) == 1
assert len(gpu_model.components_[0]) == 2
assert gpu_model.components_[0][0] == pytest.approx(0.707, 0.001)
assert gpu_model.components_[0][1] == pytest.approx(0.707, 0.001)
assert len(gpu_model.explained_variance_ratio_) == 1
assert gpu_model.explained_variance_ratio_[0] == pytest.approx(1.0, 0.001)
def test_fit_rectangle(gpu_number: int) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [[1.0, 1.0], [1.0, 3.0], [5.0, 1.0], [5.0, 3.0]]
topk = 2
with CleanSparkSession() as spark:
df = (
spark.sparkContext.parallelize(data)
.map(lambda row: (row,))
.toDF(["coordinates"])
)
gpu_pca = PCA(num_workers=gpu_number).setInputCol("coordinates").setK(topk)
gpu_model = gpu_pca.fit(df)
assert gpu_model.getInputCol() == "coordinates"
assert len(gpu_model.mean_) == 2
assert gpu_model.mean_[0] == pytest.approx(3.0, 0.001)
assert gpu_model.mean_[1] == pytest.approx(2.0, 0.001)
assert len(gpu_model.components_) == 2
first_pc = gpu_model.components_[0]
assert len(first_pc) == 2
assert first_pc[0] == pytest.approx(1.0, 0.001)
assert first_pc[1] == pytest.approx(0.0, 0.001)
second_pc = gpu_model.components_[1]
assert len(second_pc) == 2
assert second_pc[0] == pytest.approx(0.0, 0.001)
assert second_pc[1] == pytest.approx(1.0, 0.001)
assert len(gpu_model.explained_variance_ratio_) == 2
assert gpu_model.explained_variance_ratio_[0] == pytest.approx(0.8, 0.001)
assert gpu_model.explained_variance_ratio_[1] == pytest.approx(0.2, 0.001)
def test_pca_params(gpu_number: int, tmp_path: str, caplog: LogCaptureFixture) -> None:
# Default constructor
default_spark_params = {
"k": None,
}
default_cuml_params = {
"num_workers": 1,
"n_components": None,
"svd_solver": "auto",
"verbose": False,
"whiten": False,
}
default_pca = PCA()
assert_params(default_pca, default_spark_params, default_cuml_params)
# Spark Params constructor
spark_params = {"k": 2}
spark_pca = PCA(**spark_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(spark_params) # type: ignore
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update({"n_components": 2})
assert_params(spark_pca, expected_spark_params, expected_cuml_params)
# cuml_params constructor
cuml_params = {
"n_components": 5,
"num_workers": 5,
"svd_solver": "jacobi",
"verbose": True,
"whiten": True,
}
cuml_pca = PCA(**cuml_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update({"k": 5}) # type: ignore
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update(cuml_params) # type: ignore
assert_params(cuml_pca, expected_spark_params, expected_cuml_params)
# Estimator persistence
path = tmp_path + "/pca_tests"
estimator_path = f"{path}/pca"
cuml_pca.write().overwrite().save(estimator_path)
custom_pca_loaded = PCA.load(estimator_path)
assert_params(custom_pca_loaded, expected_spark_params, expected_cuml_params)
# Conflicting params
conflicting_params = {
"k": 1,
"n_components": 2,
}
with pytest.raises(ValueError, match="set one or the other"):
conflicting_pca = PCA(**conflicting_params)
def test_pca_basic(gpu_number: int, tmp_path: str) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
# Train a PCA model
data = [[1.0, 1.0, 1.0], [1.0, 3.0, 2.0], [5.0, 1.0, 3.9], [5.0, 3.0, 2.9]]
topk = 2
path = tmp_path + "/pca_tests"
with CleanSparkSession() as spark:
df = (
spark.sparkContext.parallelize(data)
.map(lambda row: (row,))
.toDF(["coordinates"])
)
gpu_pca = PCA(num_workers=gpu_number).setInputCol("coordinates").setK(topk)
pca_model: PCAModel = gpu_pca.fit(df)
assert isinstance(pca_model.cpu(), SparkPCAModel)
model_path = f"{path}/pca_model"
pca_model.write().overwrite().save(model_path)
pca_model_loaded = PCAModel.load(model_path)
assert isinstance(pca_model_loaded.cpu(), SparkPCAModel)
def assert_cuml_model(model: PCAModel, loaded_model: PCAModel) -> None:
"""
            Expect the model attributes to be the same
"""
assert model.mean_ == loaded_model.mean_
assert model.singular_values_ == loaded_model.singular_values_
assert (
model.explained_variance_ratio_
== loaded_model.explained_variance_ratio_
)
assert model.components_ == loaded_model.components_
assert (
model.cuml_params["n_components"]
== loaded_model.cuml_params["n_components"]
)
assert model.dtype == loaded_model.dtype
            assert model.n_cols == loaded_model.n_cols
assert model.n_cols == 3
assert model.dtype == "float32"
assert_cuml_model(pca_model, pca_model_loaded)
def assert_cuml_spark_model(
model: PCAModel, spark_model: SparkPCAModel
) -> None:
"""
            Expect the model attributes to be the same
"""
assert model.pc == spark_model.pc
assert model.explainedVariance == spark_model.explainedVariance
assert model.getK() == spark_model.getK()
assert model.getInputCol() == spark_model.getInputCol()
assert model.getInputCol() == "coordinates"
assert model.getOutputCol() == spark_model.getOutputCol()
assert_cuml_spark_model(pca_model, pca_model.cpu())
assert_cuml_spark_model(pca_model, pca_model_loaded.cpu())
# cpu model transform without raising any exception
pca_model.cpu().transform(
df.select(array_to_vector(col("coordinates")).alias("coordinates"))
).collect()
@pytest.mark.parametrize("data_type", ["byte", "short", "int", "long"])
def test_pca_numeric_type(gpu_number: int, data_type: str) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
[1, 4, 4, 4, 0],
[2, 2, 2, 2, 1],
[3, 3, 3, 2, 2],
[3, 3, 3, 2, 3],
[5, 2, 1, 3, 4],
]
with CleanSparkSession() as spark:
feature_cols = ["c1", "c2", "c3", "c4", "c5"]
schema = ", ".join([f"{c} {data_type}" for c in feature_cols])
df = spark.createDataFrame(data, schema=schema)
pca = PCA(num_workers=gpu_number, inputCols=feature_cols)
pca.fit(df)
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_shape", [(1000, 20)], ids=idfn)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.slow
def test_pca(
gpu_number: int,
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
) -> None:
X, _ = make_blobs(n_samples=data_shape[0], n_features=data_shape[1], random_state=0)
from cuml import PCA as cuPCA
n_components = 3
cu_pca = cuPCA(n_components=n_components, output_type="numpy", verbose=7)
cu_model = cu_pca.fit(X)
# Spark does not remove the mean from the transformed data
# adding mean to input data to sanity-check the transformed mean approach in main class
cu_result = cu_model.transform(X + np.array(cu_model.mean_, data_type))
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
train_df, features_col, _ = create_pyspark_dataframe(
spark, feature_type, data_type, X, None
)
output_col = "pca_features"
spark_pca = (
PCA(n_components=3).setInputCol(features_col).setOutputCol(output_col)
)
model = spark_pca.fit(train_df)
assert model.getK() == model.cpu().getK()
assert model.getK() == 3
assert model.getOutputCol() == model.cpu().getOutputCol()
assert model.getOutputCol() == "pca_features"
assert array_equal(cu_pca.components_, model.components_, 1e-3, with_sign=False)
assert array_equal(
cu_pca.explained_variance_ratio_, model.explained_variance_ratio_, 1e-3
)
assert array_equal(cu_pca.mean_, model.mean_, 1e-3)
assert array_equal(cu_pca.singular_values_, model.singular_values_, 1e-3)
transform_df = model.transform(train_df)
spark_result = transform_df.collect()
pred_result = [v.pca_features for v in spark_result]
assert array_equal(cu_result, pred_result, 1e-2, with_sign=False)
@pytest.mark.compat
@pytest.mark.parametrize("pca_types", [(SparkPCA, SparkPCAModel), (PCA, PCAModel)])
def test_pca_spark_compat(
pca_types: Tuple[PCAType, PCAModelType],
tmp_path: str,
) -> None:
    # based on https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.PCA.html
_PCA, _PCAModel = pca_types
with CleanSparkSession() as spark:
data = [
(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
(Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
(Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),),
(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
(Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
(Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),),
]
df = spark.createDataFrame(data, ["features"])
pca = _PCA(k=2, inputCol="features")
pca.setOutputCol("pca_features")
assert pca.getInputCol() == "features"
assert pca.getK() == 2
assert pca.getOutputCol() == "pca_features"
model = pca.fit(df)
model.setOutputCol("output")
assert model.getOutputCol() == "output"
output = model.transform(df).collect()[0].output
expected_output = [1.6485728230883814, -4.0132827005162985]
assert array_equal(output, expected_output, with_sign=False)
variance = model.explainedVariance.toArray()
expected_variance = [0.7943932532230531, 0.20560674677694699]
assert array_equal(variance, expected_variance)
pc = model.pc
expected_pc = DenseMatrix(
5,
2,
[
-0.4486,
0.133,
-0.1252,
0.2165,
-0.8477,
-0.2842,
-0.0562,
0.7636,
-0.5653,
-0.1156,
],
False,
)
assert array_equal(pc.toArray(), expected_pc.toArray(), with_sign=False)
pcaPath = tmp_path + "/pca"
pca.save(pcaPath)
loadedPca = _PCA.load(pcaPath)
assert loadedPca.getK() == pca.getK()
modelPath = tmp_path + "/pca-model"
model.save(modelPath)
loadedModel = _PCAModel.load(modelPath)
assert loadedModel.pc == model.pc
assert loadedModel.explainedVariance == model.explainedVariance
assert loadedModel.transform(df).take(1) == model.transform(df).take(1)
| spark-rapids-ml-branch-23.10 | python/tests/test_pca.py |
from typing import Any, Dict, Tuple, Type, TypeVar
import cuml
import numpy as np
import pytest
from _pytest.logging import LogCaptureFixture
from packaging import version
from pyspark.ml.classification import LogisticRegression as SparkLogisticRegression
from pyspark.ml.classification import (
LogisticRegressionModel as SparkLogisticRegressionModel,
)
from pyspark.ml.functions import array_to_vector
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import Row
from pyspark.sql.functions import array, col
if version.parse(cuml.__version__) < version.parse("23.08.00"):
raise ValueError(
"Logistic Regression requires cuml 23.08.00 or above. Try upgrading cuml or ignoring this file in testing"
)
import warnings
from spark_rapids_ml.classification import LogisticRegression, LogisticRegressionModel
from .sparksession import CleanSparkSession
from .utils import (
array_equal,
assert_params,
create_pyspark_dataframe,
idfn,
make_classification_dataset,
)
def test_toy_example(gpu_number: int, caplog: LogCaptureFixture) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
([1.0, 2.0], 1.0),
([1.0, 3.0], 1.0),
([2.0, 1.0], 0.0),
([3.0, 1.0], 0.0),
]
with CleanSparkSession() as spark:
features_col = "features"
label_col = "label"
probability_col = "probs"
schema = features_col + " array<float>, " + label_col + " float"
df = spark.createDataFrame(data, schema=schema)
lr_estimator = LogisticRegression(regParam=1.0, num_workers=gpu_number)
lr_estimator.setFeaturesCol(features_col)
lr_estimator.setLabelCol(label_col)
lr_estimator.setProbabilityCol(probability_col)
lr_model = lr_estimator.fit(df)
assert lr_model.n_cols == 2
assert lr_model.dtype == "float32"
assert len(lr_model.coef_) == 1
assert lr_model.coef_[0] == pytest.approx([-0.287264, 0.287264], abs=1e-6)
assert lr_model.intercept_ == pytest.approx([0], abs=1e-6)
assert lr_model.coefficients.toArray() == pytest.approx(
[-0.287264, 0.287264], abs=1e-6
)
assert lr_model.intercept == pytest.approx(0, abs=1e-6)
def assert_transform(model: LogisticRegressionModel) -> None:
preds_df_local = model.transform(df).collect()
preds = [row["prediction"] for row in preds_df_local]
assert preds == [1.0, 1.0, 0.0, 0.0]
probs = [row["probs"] for row in preds_df_local]
assert len(probs) == len(preds)
assert [p[1] > 0.5 for p in probs] == [True, True, False, False]
assert_transform(lr_model)
# test with regParam set to 0
caplog.clear()
lr_regParam_zero = LogisticRegression(
regParam=0.0,
)
assert "no regularization is not supported yet" in caplog.text
lr_regParam_zero.setProbabilityCol(probability_col)
assert lr_regParam_zero.getRegParam() == 0
assert (
lr_regParam_zero.cuml_params["C"] == 1.0 / np.finfo("float32").tiny.item()
)
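        # regParam == 0.0 is mapped to numpy.finfo("float32").tiny so that
        # C = 1 / regParam stays finite; cuML does not yet support disabling
        # regularization entirely.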
model_regParam_zero = lr_regParam_zero.fit(df)
assert_transform(model_regParam_zero)
lr_regParam_zero.setRegParam(0.1)
assert lr_regParam_zero.getRegParam() == 0.1
assert lr_regParam_zero.cuml_params["C"] == 1.0 / 0.1
caplog.clear()
lr_regParam_zero.setRegParam(0.0)
assert "no regularization is not supported yet" in caplog.text
assert lr_regParam_zero.getRegParam() == 0.0
assert (
lr_regParam_zero.cuml_params["C"] == 1.0 / np.finfo("float32").tiny.item()
)
def test_params(tmp_path: str, caplog: LogCaptureFixture) -> None:
# Default params
default_spark_params = {
"maxIter": 100,
"regParam": 0.0, # will be mapped to numpy.finfo('float32').tiny
"tol": 1e-06,
"fitIntercept": True,
}
default_cuml_params = {
"max_iter": 100,
"C": 1.0
/ np.finfo(
"float32"
).tiny, # TODO: support default value 0.0, i.e. no regularization
"tol": 1e-6,
"fit_intercept": True,
}
default_lr = LogisticRegression()
assert_params(default_lr, default_spark_params, default_cuml_params)
# Spark ML Params
spark_params: Dict[str, Any] = {
"maxIter": 30,
"regParam": 0.5,
"tol": 1e-2,
"fitIntercept": False,
}
spark_lr = LogisticRegression(**spark_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(spark_params)
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update(
{
"max_iter": 30,
"C": 2.0, # C should be equal to 1 / regParam
"tol": 1e-2,
"fit_intercept": False,
}
)
assert_params(spark_lr, expected_spark_params, expected_cuml_params)
# Estimator persistence
path = tmp_path + "/logistic_regression_tests"
estimator_path = f"{path}/logistic_regression"
spark_lr.write().overwrite().save(estimator_path)
loaded_lr = LogisticRegression.load(estimator_path)
assert_params(loaded_lr, expected_spark_params, expected_cuml_params)
# float32_inputs warn, logistic only accepts float32
lr_float32 = LogisticRegression(float32_inputs=False)
assert "float32_inputs to False" in caplog.text
assert lr_float32._float32_inputs
# TODO support float64
# 'vector' will be converted to float32
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("feature_type", ["array", "multi_cols", "vector"])
@pytest.mark.parametrize("data_shape", [(2000, 8)], ids=idfn)
@pytest.mark.parametrize("data_type", [np.float32, np.float64])
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.parametrize("n_classes", [2])
@pytest.mark.slow
def test_classifier(
fit_intercept: bool,
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
n_classes: int,
gpu_number: int,
) -> None:
tolerance = 0.001
reg_param = np.finfo("float32").tiny.item()
X_train, X_test, y_train, y_test = make_classification_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
n_classes=n_classes,
n_informative=8,
n_redundant=0,
n_repeated=0,
)
from cuml import LogisticRegression as cuLR
cu_lr = cuLR(fit_intercept=fit_intercept, C=1 / reg_param)
cu_lr.fit(X_train, y_train)
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
train_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X_train, y_train
)
assert label_col is not None
spark_lr = LogisticRegression(
fitIntercept=fit_intercept,
regParam=reg_param,
num_workers=gpu_number,
)
spark_lr.setFeaturesCol(features_col)
spark_lr.setLabelCol(label_col)
spark_lr_model: LogisticRegressionModel = spark_lr.fit(train_df)
# test coefficients and intercepts
assert spark_lr_model.n_cols == cu_lr.n_cols
assert spark_lr_model.dtype == "float32"
assert array_equal(np.array(spark_lr_model.coef_), cu_lr.coef_, tolerance)
assert array_equal(spark_lr_model.intercept_, cu_lr.intercept_, tolerance)
assert len(spark_lr_model.coef_) == 1
assert len(cu_lr.coef_) == 1
assert array_equal(spark_lr_model.coef_[0], cu_lr.coef_[0], tolerance)
assert array_equal(spark_lr_model.intercept_, cu_lr.intercept_, tolerance)
assert array_equal(
spark_lr_model.coefficients.toArray(), cu_lr.coef_[0], tolerance
)
assert spark_lr_model.intercept == pytest.approx(cu_lr.intercept_[0], tolerance)
# test transform
test_df, _, _ = create_pyspark_dataframe(spark, feature_type, data_type, X_test)
result = spark_lr_model.transform(test_df).collect()
spark_preds = [row["prediction"] for row in result]
cu_preds = cu_lr.predict(X_test)
assert array_equal(cu_preds, spark_preds, 1e-3)
LogisticRegressionType = TypeVar(
"LogisticRegressionType", Type[LogisticRegression], Type[SparkLogisticRegression]
)
LogisticRegressionModelType = TypeVar(
"LogisticRegressionModelType",
Type[LogisticRegressionModel],
Type[SparkLogisticRegressionModel],
)
@pytest.mark.compat
@pytest.mark.parametrize(
"lr_types",
[
(SparkLogisticRegression, SparkLogisticRegressionModel),
(LogisticRegression, LogisticRegressionModel),
],
)
def test_compat(
lr_types: Tuple[LogisticRegressionType, LogisticRegressionModelType],
tmp_path: str,
) -> None:
_LogisticRegression, _LogisticRegressionModel = lr_types
X = np.array(
[
[1.0, 2.0],
[1.0, 3.0],
[2.0, 1.0],
[3.0, 1.0],
]
)
y = np.array(
[
1.0,
1.0,
0.0,
0.0,
]
)
num_rows = len(X)
weight = np.ones([num_rows])
feature_cols = ["c0", "c1"]
schema = ["c0 float, c1 float, weight float, label float"]
with CleanSparkSession() as spark:
np_array = np.concatenate(
(X, weight.reshape(num_rows, 1), y.reshape(num_rows, 1)), axis=1
)
bdf = spark.createDataFrame(
np_array.tolist(),
",".join(schema),
)
bdf = bdf.withColumn("features", array_to_vector(array(*feature_cols))).drop(
*feature_cols
)
assert _LogisticRegression().getRegParam() == 0.0
if lr_types[0] is SparkLogisticRegression:
blor = _LogisticRegression(regParam=0.1, standardization=False)
else:
warnings.warn("spark rapids ml does not accept standardization")
blor = _LogisticRegression(regParam=0.1)
assert blor.getRegParam() == 0.1
blor.setFeaturesCol("features")
blor.setMaxIter(10)
blor.setRegParam(0.01)
blor.setLabelCol("label")
if isinstance(blor, SparkLogisticRegression):
blor.setWeightCol("weight")
assert blor.getFeaturesCol() == "features"
assert blor.getMaxIter() == 10
assert blor.getRegParam() == 0.01
assert blor.getLabelCol() == "label"
blor.clear(blor.maxIter)
assert blor.getMaxIter() == 100
blor_model = blor.fit(bdf)
blor_model.setFeaturesCol("features")
blor_model.setProbabilityCol("newProbability")
blor_model.setRawPredictionCol("newRawPrediction")
assert blor_model.getRawPredictionCol() == "newRawPrediction"
assert blor_model.getProbabilityCol() == "newProbability"
coefficients = blor_model.coefficients.toArray()
intercept = blor_model.intercept
assert array_equal(coefficients, [-2.42377087, 2.42377087])
assert intercept == pytest.approx(0, abs=1e-6)
example = bdf.head()
if example:
blor_model.predict(example.features)
blor_model.predictRaw(example.features)
blor_model.predictProbability(example.features)
if isinstance(blor_model, SparkLogisticRegressionModel):
assert blor_model.hasSummary
            assert blor_model.evaluate(bdf).accuracy == blor_model.summary.accuracy
else:
assert not blor_model.hasSummary
with pytest.raises(RuntimeError, match="No training summary available"):
blor_model.summary
output_df = blor_model.transform(bdf)
assert isinstance(output_df.schema["features"].dataType, VectorUDT)
output = output_df.head()
assert output.prediction == 1.0
assert array_equal(
output.newProbability.toArray(),
Vectors.dense([0.0814, 0.9186]).toArray(),
)
if isinstance(blor_model, SparkLogisticRegressionModel):
assert array_equal(
output.newRawPrediction.toArray(),
Vectors.dense([-2.4238, 2.4238]).toArray(),
)
else:
warnings.warn(
"transform of spark rapids ml currently does not support rawPredictionCol"
)
blor_path = tmp_path + "/log_reg"
blor.save(blor_path)
blor2 = _LogisticRegression.load(blor_path)
assert blor2.getRegParam() == 0.01
model_path = tmp_path + "/log_reg_model"
blor_model.save(model_path)
model2 = _LogisticRegressionModel.load(model_path)
assert array_equal(
blor_model.coefficients.toArray(), model2.coefficients.toArray()
)
assert blor_model.intercept == model2.intercept
assert blor_model.transform(bdf).take(1) == model2.transform(bdf).take(1)
assert blor_model.numFeatures == 2
| spark-rapids-ml-branch-23.10 | python/tests/test_logistic_regression.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Tuple, Type, TypeVar
import numpy as np
import pytest
from _pytest.logging import LogCaptureFixture
from pyspark.ml.clustering import KMeans as SparkKMeans
from pyspark.ml.clustering import KMeansModel as SparkKMeansModel
from pyspark.ml.functions import array_to_vector
from pyspark.ml.linalg import Vectors
from pyspark.sql.functions import col
from spark_rapids_ml.clustering import KMeans, KMeansModel
from .sparksession import CleanSparkSession
from .utils import (
assert_params,
create_pyspark_dataframe,
cuml_supported_data_types,
feature_types,
get_default_cuml_parameters,
idfn,
pyspark_supported_feature_types,
)
KMeansType = TypeVar("KMeansType", Type[KMeans], Type[SparkKMeans])
KMeansModelType = TypeVar("KMeansModelType", Type[KMeansModel], Type[SparkKMeansModel])
def assert_centers_equal(
a_clusters: List[List[float]], b_clusters: List[List[float]], tolerance: float
) -> None:
assert len(a_clusters) == len(b_clusters)
a_clusters = sorted(a_clusters, key=lambda l: l)
b_clusters = sorted(b_clusters, key=lambda l: l)
for i in range(len(a_clusters)):
a_center = a_clusters[i]
b_center = b_clusters[i]
assert len(a_center) == len(b_center)
assert a_center == pytest.approx(b_center, tolerance)
def test_default_cuml_params() -> None:
from cuml import KMeans as CumlKMeans
cuml_params = get_default_cuml_parameters([CumlKMeans], ["handle", "output_type"])
spark_params = KMeans()._get_cuml_params_default()
assert cuml_params == spark_params
def test_kmeans_params(
gpu_number: int, tmp_path: str, caplog: LogCaptureFixture
) -> None:
# Default constructor
default_spark_params = {
"initMode": "k-means||",
"k": 2,
"maxIter": 20,
}
default_cuml_params = {
"n_clusters": 2,
"max_iter": 20,
"tol": 0.0001,
"verbose": False,
"init": "k-means||",
"oversampling_factor": 2.0,
"max_samples_per_batch": 32768,
"num_workers": 1,
}
default_kmeans = KMeans()
assert_params(default_kmeans, default_spark_params, default_cuml_params)
# Spark Params constructor
spark_params = {"k": 10, "maxIter": 100}
spark_kmeans = KMeans(**spark_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(spark_params)
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update({"n_clusters": 10, "max_iter": 100})
assert_params(spark_kmeans, expected_spark_params, expected_cuml_params)
# cuml_params constructor
cuml_params = {
"n_clusters": 10,
"max_iter": 100,
"tol": 1e-1,
"verbose": True,
"random_state": 5,
"init": "k-means||",
"oversampling_factor": 3,
"max_samples_per_batch": 45678,
}
cuml_kmeans = KMeans(**cuml_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update({"k": 10, "maxIter": 100})
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update(cuml_params)
assert_params(cuml_kmeans, expected_spark_params, expected_cuml_params)
# Estimator persistence
path = tmp_path + "/kmeans_tests"
estimator_path = f"{path}/kmeans"
cuml_kmeans.write().overwrite().save(estimator_path)
loaded_kmeans = KMeans.load(estimator_path)
assert_params(loaded_kmeans, expected_spark_params, expected_cuml_params)
# conflicting params
conflicting_params = {
"k": 2,
"n_clusters": 10,
}
with pytest.raises(ValueError, match="set one or the other"):
conflicting_kmeans = KMeans(**conflicting_params)
# make sure no warning when enabling float64 inputs
kmeans_float32 = KMeans(float32_inputs=False)
assert "float32_inputs to False" not in caplog.text
assert not kmeans_float32._float32_inputs
def test_kmeans_basic(gpu_number: int, tmp_path: str) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [[1.0, 1.0], [1.0, 2.0], [3.0, 2.0], [4.0, 3.0]]
with CleanSparkSession() as spark:
df = (
spark.sparkContext.parallelize(data, gpu_number)
.map(lambda row: (row,))
.toDF(["features"])
)
kmeans = KMeans(num_workers=gpu_number, n_clusters=2).setFeaturesCol("features")
def assert_kmeans_model(model: KMeansModel) -> None:
assert len(model.cluster_centers_) == 2
sorted_centers = sorted(model.cluster_centers_, key=lambda p: p)
assert sorted_centers[0] == pytest.approx([1.0, 1.5], 0.001)
assert sorted_centers[1] == pytest.approx([3.5, 2.5], 0.001)
assert model.dtype == "float32"
assert model.n_cols == 2
def assert_cuml_spark_model(
model: KMeansModel, spark_model: SparkKMeansModel
) -> None:
lhs = model.clusterCenters()
rhs = spark_model.clusterCenters()
assert len(lhs) == len(rhs)
for i in range(len(lhs)):
comp = lhs[i] == rhs[i]
assert comp.all()
kmeans_model = kmeans.fit(df)
assert_kmeans_model(model=kmeans_model)
assert isinstance(kmeans_model.cpu(), SparkKMeansModel)
assert_cuml_spark_model(kmeans_model, kmeans_model.cpu())
# Model persistence
path = tmp_path + "/kmeans_tests"
model_path = f"{path}/kmeans_model"
kmeans_model.write().overwrite().save(model_path)
kmeans_model_loaded = KMeansModel.load(model_path)
assert_kmeans_model(model=kmeans_model_loaded)
assert isinstance(kmeans_model_loaded.cpu(), SparkKMeansModel)
assert_cuml_spark_model(kmeans_model_loaded, kmeans_model_loaded.cpu())
# test transform function
label_df = kmeans_model.transform(df)
assert "features" in label_df.columns
o_col = kmeans_model.getPredictionCol()
labels = [row[o_col] for row in label_df.collect()]
assert len(labels) == 4
assert labels[0] == labels[1]
assert labels[1] != labels[2]
assert labels[2] == labels[3]
# without raising exception for cuml model predict
kmeans_model.predict(Vectors.dense(1.0, 1.0))
# without raising exception for cpu transform
kmeans_model.cpu().transform(
df.select(array_to_vector(col("features")).alias("features"))
).collect()
@pytest.mark.parametrize("data_type", ["byte", "short", "int", "long"])
def test_kmeans_numeric_type(gpu_number: int, data_type: str) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
[1, 4, 4, 4, 0],
[2, 2, 2, 2, 1],
[3, 3, 3, 2, 2],
[3, 3, 3, 2, 3],
[5, 2, 1, 3, 4],
]
with CleanSparkSession() as spark:
feature_cols = ["c1", "c2", "c3", "c4", "c5"]
schema = ", ".join([f"{c} {data_type}" for c in feature_cols])
df = spark.createDataFrame(data, schema=schema)
kmeans = KMeans(num_workers=gpu_number, featuresCols=feature_cols, n_clusters=2)
kmeans.fit(df)
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_shape", [(1000, 20)], ids=idfn)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.slow
def test_kmeans(
gpu_number: int,
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
) -> None:
"""
The dataset of this test case comes from cuml:
https://github.com/rapidsai/cuml/blob/496f1f155676fb4b7d99aeb117cbb456ce628a4b/python/cuml/tests/test_kmeans.py#L39
"""
from cuml.datasets import make_blobs
n_rows = data_shape[0]
n_cols = data_shape[1]
n_clusters = 8
cluster_std = 1.0
tolerance = 0.001
X, _ = make_blobs(
n_rows, n_cols, n_clusters, cluster_std=cluster_std, random_state=0
) # make_blobs creates a random dataset of isotropic gaussian blobs.
from cuml import KMeans as cuKMeans
cuml_kmeans = cuKMeans(
n_clusters=n_clusters, output_type="numpy", tol=1.0e-20, verbose=7
)
import cudf
gdf = cudf.DataFrame(X)
cuml_kmeans.fit(gdf)
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
df, features_col, _ = create_pyspark_dataframe(
spark, feature_type, data_type, X, None
)
kmeans = KMeans(
num_workers=gpu_number, n_clusters=n_clusters, verbose=7
).setFeaturesCol(features_col)
kmeans_model = kmeans.fit(df)
cuml_cluster_centers = cuml_kmeans.cluster_centers_.tolist()
assert_centers_equal(
kmeans_model.cluster_centers_,
cuml_cluster_centers,
tolerance,
)
# test transform function
sid_ordered = sorted(
range(n_clusters), key=lambda idx: kmeans_model.cluster_centers_[idx]
)
cid_ordered = sorted(
range(n_clusters), key=lambda idx: cuml_cluster_centers[idx]
)
s2c = dict(
zip(sid_ordered, cid_ordered)
) # map spark-rapids-ml center id to cuml center id
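        # Cluster ids are arbitrary in both implementations, so centers are aligned
        # by sort order before comparing the per-row labels below.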
labelDf = kmeans_model.transform(df)
o_col = kmeans_model.getPredictionCol()
slabels = [row[o_col] for row in labelDf.collect()]
clabels = cuml_kmeans.predict(gdf).tolist()
assert len(slabels) == len(clabels)
to_clabels = [s2c[v] for v in slabels]
assert to_clabels == clabels
@pytest.mark.compat
@pytest.mark.parametrize(
"kmeans_types", [(SparkKMeans, SparkKMeansModel), (KMeans, KMeansModel)]
)
def test_kmeans_spark_compat(
gpu_number: int,
kmeans_types: Tuple[KMeansType, KMeansModelType],
tmp_path: str,
) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
    # based on https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.clustering.KMeans.html
_KMeans, _KMeansModel = kmeans_types
with CleanSparkSession() as spark:
from pyspark.ml.linalg import Vectors
data = [
(Vectors.dense([0.0, 0.0]), 2.0),
(Vectors.dense([1.0, 1.0]), 2.0),
(Vectors.dense([9.0, 8.0]), 2.0),
(Vectors.dense([8.0, 9.0]), 2.0),
]
df = spark.createDataFrame(data, ["features", "weighCol"])
kmeans = _KMeans(k=2)
kmeans.setSeed(1)
kmeans.setMaxIter(10)
if isinstance(kmeans, SparkKMeans):
kmeans.setWeightCol("weighCol")
else:
with pytest.raises(ValueError):
kmeans.setWeightCol("weighCol")
assert kmeans.getMaxIter() == 10
assert kmeans.getK() == 2
assert kmeans.getSeed() == 1
kmeans.clear(kmeans.maxIter)
assert kmeans.getMaxIter() == 20
model = kmeans.fit(df)
assert model.getDistanceMeasure() == "euclidean"
model.setPredictionCol("newPrediction")
assert model.getPredictionCol() == "newPrediction"
example = df.head()
if example:
model.predict(example.features)
centers = model.clusterCenters()
# [array([0.5, 0.5]), array([8.5, 8.5])]
assert len(centers) == 2
sorted_centers = sorted([x.tolist() for x in centers])
expected_centers = [[0.5, 0.5], [8.5, 8.5]]
assert sorted_centers == expected_centers
transformed = model.transform(df).select("features", "newPrediction")
rows = transformed.collect()
# [Row(features=DenseVector([0.0, 0.0]), newPrediction=0),
# Row(features=DenseVector([1.0, 1.0]), newPrediction=0),
# Row(features=DenseVector([9.0, 8.0]), newPrediction=1),
# Row(features=DenseVector([8.0, 9.0]), newPrediction=1)]
assert rows[0].newPrediction == rows[1].newPrediction
assert rows[2].newPrediction == rows[3].newPrediction
if isinstance(model, SparkKMeansModel):
assert model.hasSummary == True
summary = model.summary
assert summary.k == 2
assert summary.clusterSizes == [2, 2]
assert summary.trainingCost == 4.0
else:
assert model.hasSummary == False
kmeans_path = tmp_path + "/kmeans"
kmeans.save(kmeans_path)
kmeans2 = _KMeans.load(kmeans_path)
assert kmeans2.getK() == 2
model_path = tmp_path + "/kmeans_model"
model.save(model_path)
model2 = _KMeansModel.load(model_path)
assert model2.hasSummary == False
assert all(model.clusterCenters()[0] == model2.clusterCenters()[0])
# array([ True, True], dtype=bool)
assert all(model.clusterCenters()[1] == model2.clusterCenters()[1])
# array([ True, True], dtype=bool)
assert model.transform(df).take(1) == model2.transform(df).take(1)
# True
| spark-rapids-ml-branch-23.10 | python/tests/test_kmeans.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import math
import pickle
from abc import abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast
import numpy as np
import pandas as pd
from pyspark.ml.classification import DecisionTreeClassificationModel
from pyspark.ml.classification import (
RandomForestClassificationModel as SparkRandomForestClassificationModel,
)
from pyspark.ml.linalg import Vector
from pyspark.ml.param.shared import HasFeaturesCol, HasLabelCol
from pyspark.ml.regression import DecisionTreeRegressionModel
from pyspark.ml.regression import (
RandomForestRegressionModel as SparkRandomForestRegressionModel,
)
from pyspark.sql import DataFrame
from pyspark.sql.types import (
ArrayType,
IntegerType,
StringType,
StructField,
StructType,
)
from .core import (
CumlT,
FitInputType,
TransformInputType,
_ConstructFunc,
_CumlEstimatorSupervised,
_CumlModelWithPredictionCol,
_EvaluateFunc,
_TransformFunc,
param_alias,
transform_evaluate,
)
from .params import HasFeaturesCols, P, _CumlClass, _CumlParams
from .utils import (
_concat_and_free,
_get_spark_session,
_str_or_numerical,
java_uid,
translate_trees,
)
class _RandomForestClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {
"maxBins": "n_bins",
"maxDepth": "max_depth",
"numTrees": "n_estimators",
"impurity": "split_criterion",
"featureSubsetStrategy": "max_features",
"bootstrap": "bootstrap",
"seed": "random_state",
"minInstancesPerNode": "min_samples_leaf",
"minInfoGain": "",
"maxMemoryInMB": "",
"cacheNodeIds": "",
"checkpointInterval": "",
"subsamplingRate": "",
"minWeightFractionPerNode": "",
"weightCol": None,
"leafCol": None,
}
@classmethod
def _param_value_mapping(
cls,
) -> Dict[str, Callable[[Any], Union[None, str, float, int]]]:
def _tree_mapping(feature_subset: str) -> Union[None, str, float, int]:
_maybe_numerical = _str_or_numerical(feature_subset)
if isinstance(_maybe_numerical, int) or isinstance(_maybe_numerical, float):
_numerical = _maybe_numerical
return _numerical
else:
_str = _maybe_numerical
_tree_string_mapping: Dict[str, Union[None, str, float, int]] = {
"onethird": 1 / 3.0,
"all": 1.0,
"auto": "auto",
"sqrt": "sqrt",
"log2": "log2",
}
return _tree_string_mapping.get(_str, None)
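        # Note: Spark's featureSubsetStrategy also accepts numeric strings such as
        # "0.3" or "2"; _str_or_numerical passes those through as numbers above,
        # while unrecognized keywords map to None and are treated as unsupported.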
return {
"max_features": _tree_mapping,
}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {
"n_streams": 4,
"n_estimators": 100,
"max_depth": 16,
"max_features": "auto",
"n_bins": 128,
"bootstrap": True,
"verbose": False,
"min_samples_leaf": 1,
"min_samples_split": 2,
"max_samples": 1.0,
"max_leaves": -1,
"min_impurity_decrease": 0.0,
"random_state": None,
"max_batch_size": 4096,
}
class _RandomForestCumlParams(
_CumlParams,
HasFeaturesCol,
HasFeaturesCols,
HasLabelCol,
):
def __init__(self) -> None:
super().__init__()
# restrict default seed to max value of 32-bit signed integer for CuML
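        # hash() can be negative or wider than 32 bits; masking with 0x07FFFFFFF
        # keeps only the low 31 bits, giving a non-negative default seed that fits
        # cuML's 32-bit random_state (setSeed enforces the same bound).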
self._setDefault(seed=hash(type(self).__name__) & 0x07FFFFFFF)
def getFeaturesCol(self) -> Union[str, List[str]]: # type:ignore
"""
Gets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`
"""
if self.isDefined(self.featuresCols):
return self.getFeaturesCols()
elif self.isDefined(self.featuresCol):
return self.getOrDefault("featuresCol")
else:
raise RuntimeError("featuresCol is not set")
def setFeaturesCol(self: P, value: Union[str, List[str]]) -> P:
"""
        Sets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`.
"""
if isinstance(value, str):
self.set_params(featuresCol=value)
else:
self.set_params(featuresCols=value)
return self
def setFeaturesCols(self: P, value: List[str]) -> P:
"""
Sets the value of :py:attr:`featuresCols`.
"""
return self.set_params(featuresCols=value)
def setLabelCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`labelCol`.
"""
self._set(labelCol=value) # type: ignore
return self
def setPredictionCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`predictionCol`.
"""
self._set(predictionCol=value) # type: ignore
return self
class _RandomForestEstimatorParams(_RandomForestCumlParams):
def __init__(self) -> None:
super().__init__()
# include setters used only in estimator classes (classifier and regressor) here
def setBootstrap(self: P, value: bool) -> P:
"""
Sets the value of :py:attr:`bootstrap`.
"""
return self.set_params(bootstrap=value)
def setFeatureSubsetStrategy(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self.set_params(featureSubsetStrategy=value)
def setImpurity(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`impurity`.
"""
return self.set_params(impurity=value) # type: ignore
def setMaxBins(self: P, value: int) -> P:
"""
Sets the value of :py:attr:`maxBins`.
"""
return self.set_params(maxBins=value)
def setMaxDepth(self: P, value: int) -> P:
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self.set_params(maxDepth=value)
def setMinInstancesPerNode(self: P, value: int) -> P:
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self.set_params(minInstancesPerNode=value)
def setNumTrees(self: P, value: int) -> P:
"""
Sets the value of :py:attr:`numTrees`.
"""
return self.set_params(numTrees=value)
def setSeed(self: P, value: int) -> P:
"""
Sets the value of :py:attr:`seed`.
"""
if value > 0x07FFFFFFF:
raise ValueError("cuML seed value must be a 32-bit integer.")
return self.set_params(seed=value)
class _RandomForestEstimator(
_CumlEstimatorSupervised,
_RandomForestEstimatorParams,
):
def __init__(self, **kwargs: Any):
super().__init__()
self.set_params(**kwargs)
if "n_streams" not in kwargs:
# cuML will throw exception when running on a node with multi-gpus when n_streams > 0
self._set_cuml_value("n_streams", 1)
@abstractmethod
def _is_classification(self) -> bool:
"""Indicate if it is regression or classification estimator"""
raise NotImplementedError()
def _estimators_per_worker(self, n_estimators: int) -> List[int]:
"""Calculate the number of trees each task should train according to n_estimators"""
n_workers = self.num_workers
if n_estimators < n_workers:
raise ValueError("n_estimators cannot be lower than number of spark tasks.")
n_est_per_worker = math.floor(n_estimators / n_workers)
n_estimators_per_worker = [n_est_per_worker for i in range(n_workers)]
remaining_est = n_estimators - (n_est_per_worker * n_workers)
for i in range(remaining_est):
n_estimators_per_worker[i] = n_estimators_per_worker[i] + 1
return n_estimators_per_worker
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
        # Each element of n_estimators_of_all_params is a list giving the number of
        # trees each worker should train for one parameter combination.
n_estimators_of_all_params: List[List[int]] = []
total_trees: List[int] = []
all_params = [{}] if extra_params is None else extra_params
for params in all_params:
num_trees = (
self.cuml_params["n_estimators"]
if "n_estimators" not in params
else params["n_estimators"]
)
n_estimators_of_all_params.append(self._estimators_per_worker(num_trees))
total_trees.append(num_trees)
is_classification = self._is_classification()
def _rf_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
X_list = [item[0] for item in dfs]
y_list = [item[1] for item in dfs]
if isinstance(X_list[0], pd.DataFrame):
X = pd.concat(X_list)
y = pd.concat(y_list)
else:
# features are either cp or np arrays here
X = _concat_and_free(X_list)
y = _concat_and_free(y_list)
if is_classification:
from cuml import RandomForestClassifier as cuRf
else:
from cuml import RandomForestRegressor as cuRf
from pyspark import BarrierTaskContext
context = BarrierTaskContext.get()
part_id = context.partitionId()
def _single_fit(rf: cuRf) -> Dict[str, Any]:
# Fit a random forest model on the dataset (X, y)
rf.fit(X, y, convert_dtype=False)
# serialized_model is Dictionary type
serialized_model = rf._get_serialized_model()
pickled_model = pickle.dumps(serialized_model)
msg = base64.b64encode(pickled_model).decode("utf-8")
trees = rf.get_json()
data = {"model_bytes": msg, "model_json": trees}
messages = context.allGather(json.dumps(data))
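                # allGather broadcasts each worker's serialized forest to every worker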
                # concatenate the per-worker forests on worker 0
if part_id == 0:
mod_bytes = []
mod_jsons = []
for msg in messages:
data = json.loads(msg)
mod_bytes.append(
pickle.loads(base64.b64decode(data["model_bytes"]))
)
mod_jsons.append(data["model_json"])
all_tl_mod_handles = [
rf._tl_handle_from_bytes(i) for i in mod_bytes
]
rf._concatenate_treelite_handle(all_tl_mod_handles)
from cuml.fil.fil import TreeliteModel
for tl_handle in all_tl_mod_handles:
TreeliteModel.free_treelite_model(tl_handle)
final_model_bytes = pickle.dumps(rf._get_serialized_model())
final_model = base64.b64encode(final_model_bytes).decode("utf-8")
result = {
"treelite_model": final_model,
"dtype": rf.dtype.name,
"n_cols": rf.n_cols,
"model_json": mod_jsons,
}
if is_classification:
result["num_classes"] = rf.num_classes
return result
else:
return {}
rf_params = params[param_alias.cuml_init]
fit_multiple_params = params[param_alias.fit_multiple_params]
if len(fit_multiple_params) == 0:
fit_multiple_params.append({})
models = []
for i in range(len(fit_multiple_params)):
tmp_rf_params = rf_params.copy()
tmp_rf_params.update(fit_multiple_params[i])
tmp_rf_params.pop("n_estimators")
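                # "auto" mirrors Spark's featureSubsetStrategy default: all features
                # for a single tree, otherwise sqrt for classification and one third
                # of the features for regression.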
if tmp_rf_params["max_features"] == "auto":
if total_trees[i] == 1:
tmp_rf_params["max_features"] = 1.0
else:
tmp_rf_params["max_features"] = (
"sqrt" if is_classification else (1 / 3.0)
)
rf = cuRf(
n_estimators=n_estimators_of_all_params[i][part_id],
output_type="cudf",
**tmp_rf_params,
)
models.append(_single_fit(rf))
del rf
models_dict = {}
if part_id == 0:
for k in models[0].keys():
models_dict[k] = [m[k] for m in models]
return models_dict
return _rf_fit
def _out_schema(self) -> Union[StructType, str]:
fields = [
StructField("treelite_model", StringType(), False),
StructField("n_cols", IntegerType(), False),
StructField("dtype", StringType(), False),
StructField("model_json", ArrayType(StringType()), False),
]
if self._is_classification():
fields.append(StructField("num_classes", IntegerType(), False))
return StructType(fields)
def _require_nccl_ucx(self) -> Tuple[bool, bool]:
return False, False
def _enable_fit_multiple_in_single_pass(self) -> bool:
return True
class _RandomForestModel(
_CumlModelWithPredictionCol,
_RandomForestCumlParams,
):
def __init__(
self,
n_cols: int,
dtype: str,
treelite_model: Union[str, List[str]],
model_json: Union[List[str], List[List[str]]] = [], # type: ignore
num_classes: int = -1, # only for classification
):
if self._is_classification():
super().__init__(
dtype=dtype,
n_cols=n_cols,
treelite_model=treelite_model,
num_classes=num_classes,
model_json=model_json,
)
else:
super().__init__(
dtype=dtype,
n_cols=n_cols,
treelite_model=treelite_model,
model_json=model_json,
)
self._num_classes = num_classes
self._model_json = model_json
self._treelite_model = treelite_model
def cpu(
self,
) -> Union[SparkRandomForestRegressionModel, SparkRandomForestClassificationModel]:
raise NotImplementedError()
@property
def featureImportances(self) -> Vector:
"""Estimate the importance of each feature."""
return self.cpu().featureImportances
@property
def getNumTrees(self) -> int:
"""Number of trees in ensemble."""
return self.getOrDefault("numTrees")
@property
def toDebugString(self) -> str:
"""Full description of model."""
return self.cpu().toDebugString
@property
def totalNumNodes(self) -> int:
"""Total number of nodes, summed over all trees in the ensemble."""
return self.cpu().totalNumNodes
@property
def trees(
self,
) -> Union[
List[DecisionTreeRegressionModel], List[DecisionTreeClassificationModel]
]:
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return self.cpu().trees
@property
def treeWeights(self) -> List[float]:
"""Return the weights for each tree."""
return self.cpu().treeWeights
def predict(self, value: Vector) -> float:
"""
Predict label for the given features.
"""
return self.cpu().predict(value)
def predictLeaf(self, value: Vector) -> float:
"""
Predict the indices of the leaves corresponding to the feature vector.
"""
return self.cpu().predictLeaf(value)
@abstractmethod
def _is_classification(self) -> bool:
"""Indicate if it is regression or classification model"""
raise NotImplementedError()
def _convert_to_java_trees(self, impurity: str) -> Tuple[Any, List[Any]]:
"""Convert cuml trees to Java decision tree model"""
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
assert sc._gateway is not None
# This function shouldn't be called for the multiple models scenario.
model_json = cast(List[str], self._model_json)
# Convert cuml trees to Spark trees
trees = [
translate_trees(sc, impurity, trees)
for trees_json in model_json
for trees in json.loads(trees_json)
]
if self._is_classification():
uid = java_uid(sc, "rfc")
java_decision_tree_model_class = (
sc._jvm.org.apache.spark.ml.classification.DecisionTreeClassificationModel
)
# Wrap the trees into Spark DecisionTreeClassificationModel
decision_trees = [
java_decision_tree_model_class(
uid, tree, self.numFeatures, self._num_classes
)
for tree in trees
]
else:
uid = java_uid(sc, "rfr")
java_decision_tree_model_class = (
sc._jvm.org.apache.spark.ml.regression.DecisionTreeRegressionModel
)
            # Wrap the trees into Spark DecisionTreeRegressionModel
decision_trees = [
java_decision_tree_model_class(uid, tree, self.numFeatures)
for tree in trees
]
java_trees = sc._gateway.new_array(
java_decision_tree_model_class, len(decision_trees)
)
for i in range(len(decision_trees)):
java_trees[i] = decision_trees[i]
return uid, java_trees
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
treelite_model = self._treelite_model
is_classification = self._is_classification()
def _construct_rf() -> CumlT:
if is_classification:
from cuml import RandomForestClassifier as cuRf
else:
from cuml import RandomForestRegressor as cuRf
rfs = []
treelite_models = (
treelite_model if isinstance(treelite_model, list) else [treelite_model]
)
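            # Rebuild a cuML forest on the executor from each base64-encoded,
            # pickled treelite payload produced at fit time.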
for m in treelite_models:
model = pickle.loads(base64.b64decode(m))
rf = cuRf()
rf._concatenate_treelite_handle([rf._tl_handle_from_bytes(model)])
rfs.append(rf)
return rfs
def _predict(rf: CumlT, pdf: TransformInputType) -> pd.Series:
rf.update_labels = False
ret = rf.predict(pdf)
return pd.Series(ret)
        # TBD: figure out why the RF algo warns regardless of what np array order is set
return _construct_rf, _predict, None
def _transform(self, dataset: DataFrame) -> DataFrame:
df = super()._transform(dataset)
return df.withColumn(
self.getPredictionCol(), df[self.getPredictionCol()].cast("double")
)
@classmethod
def _combine(
cls: Type["_RandomForestModel"], models: List["_RandomForestModel"] # type: ignore
) -> "_RandomForestModel":
assert len(models) > 0 and all(isinstance(model, cls) for model in models)
first_model = models[0]
treelite_models = [model._treelite_model for model in models]
model_jsons = [model._model_json for model in models]
attrs = first_model.get_model_attributes()
assert attrs is not None
attrs["treelite_model"] = treelite_models
attrs["model_json"] = model_jsons
rf_model = cls(**attrs)
first_model._copyValues(rf_model)
first_model._copy_cuml_params(rf_model)
return rf_model
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/tree.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
TypeVar,
Union,
)
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.sql import SparkSession
from .utils import _get_spark_session, _is_local, get_logger
if TYPE_CHECKING:
from pyspark.ml._typing import ParamMap
P = TypeVar("P", bound="_CumlParams")
class HasFeaturesCols(Params):
"""
Mixin for param featuresCols: features column names for multi-column input.
"""
featuresCols = Param(
Params._dummy(), # type: ignore
"featuresCols",
"features column names for multi-column input.",
TypeConverters.toListString,
)
def __init__(self) -> None:
super(HasFeaturesCols, self).__init__()
def getFeaturesCols(self) -> List[str]:
"""
Gets the value of featuresCols or its default value.
"""
return self.getOrDefault(self.featuresCols)
class _CumlClass(object):
"""
    Base class for all _CumlEstimator and _CumlModel implementations.
Defines helper methods for mapping Spark ML Params to cuML class parameters.
"""
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
"""
        Return a mapping of Spark ML Param names to cuML parameter names, which is used to maintain
associations from Spark params to cuML parameters.
If the Spark Param has no equivalent cuML parameter, the cuML name can be set to:
- empty string, if a defined Spark Param should just be silently ignored, or
- None, if a defined Spark Param should raise an error.
        Note: standard Spark column Params, e.g. inputCol, featuresCol, etc., should not be listed
in this mapping, since they are handled differently.
Example
-------
        .. code-block:: python
# For KMeans
return {
"distanceMeasure": "",
"k": "n_clusters",
"initSteps": "",
"maxIter": "max_iter",
"seed": "random_state",
"tol": "tol",
"weightCol": None,
}
"""
return {}
@classmethod
def _param_value_mapping(
cls,
) -> Dict[str, Callable[[Any], Union[None, str, float, int]]]:
"""
Return a dictionary of cuML parameter names and a function mapping their Spark ML Param
values to cuML values of either str, float, or int type.
The mapped function should return None for any unmapped input values.
If it is desired that a cuML value be accepted as a valid input, it must be explicitly mapped to
itself in the function (see "squared_loss" and "eig" in example below).
Example
-------
.. code-block:: python
# For LinearRegression
return {
"loss": lambda x: {
"squaredError": "squared_loss",
"huber": None,
"squared_loss": "squared_loss",
}.get(x, None),
"solver": lambda x: {
"auto": "eig",
"normal": "eig",
"l-bfgs": None,
"eig": "eig",
}.get(x, None),
}
"""
return {}
@abstractmethod
def _get_cuml_params_default(self) -> Dict[str, Any]:
"""Return a dictionary of parameter names and their default values.
Note, please don't import cuml class and inspect the signatures to
get the parameters, since it may break the rule that spark-rapids-ml should
run on the driver side without rapids dependencies"""
raise NotImplementedError()
class _CumlParams(_CumlClass, Params):
"""
    Mix-in to handle common parameters for all Spark Rapids ML algorithms, along with utilities
for synchronizing between Spark ML Params and cuML class parameters.
"""
_cuml_params: Dict[str, Any] = {}
_num_workers: Optional[int] = None
_float32_inputs: bool = True
@property
def cuml_params(self) -> Dict[str, Any]:
"""
Returns the dictionary of parameters intended for the underlying cuML class.
"""
return self._cuml_params
@property
def num_workers(self) -> int:
"""
Number of cuML workers, where each cuML worker corresponds to one Spark task
running on one GPU.
"""
inferred_workers = self._infer_num_workers()
if self._num_workers is not None:
# user sets the num_workers explicitly
sc = _get_spark_session().sparkContext
if _is_local(sc):
default_parallelism = sc.defaultParallelism
if default_parallelism < self._num_workers:
raise ValueError(
f"The num_workers ({self._num_workers}) should be less than "
f"or equal to spark default parallelism ({default_parallelism})"
)
elif inferred_workers < self._num_workers:
raise ValueError(
f"The num_workers ({self._num_workers}) should be less than "
f"or equal to total GPUs ({inferred_workers})"
)
elif inferred_workers < self._num_workers:
get_logger(self.__class__).warning(
f"Spark cluster may not have enough executors. "
f"Found {inferred_workers} < {self._num_workers}"
)
return self._num_workers
return inferred_workers
@num_workers.setter
def num_workers(self, value: int) -> None:
self._num_workers = value
def copy(self: P, extra: Optional["ParamMap"] = None) -> P:
# override this function to update cuml_params if possible
instance: P = super().copy(extra)
cuml_params = instance.cuml_params.copy()
if isinstance(extra, dict):
for param, value in extra.items():
if isinstance(param, Param):
name = instance._get_cuml_param(param.name, silent=False)
if name is not None:
cuml_params[name] = instance._get_cuml_mapping_value(
name, value
)
else:
raise TypeError(
"Expecting a valid instance of Param, but received: {}".format(
param
)
)
instance._cuml_params = cuml_params
return instance
def initialize_cuml_params(self) -> None:
"""
Set the default values of cuML parameters to match their Spark equivalents.
"""
# initialize cuml_params with defaults from cuML
self._cuml_params = self._get_cuml_params_default()
# update default values from Spark ML Param equivalents
param_map = self._param_mapping()
for spark_param in param_map.keys():
if self.hasDefault(spark_param):
self._set_cuml_param(spark_param, self.getOrDefault(spark_param))
def set_params(self: P, **kwargs: Any) -> P:
"""
Set the kwargs as Spark ML Params and/or cuML parameters, while maintaining parameter
and value mappings defined by the _CumlClass.
"""
param_map = self._param_mapping()
# raise error if setting both sides of a param mapping
for spark_param, cuml_param in param_map.items():
if (
spark_param != cuml_param
and spark_param in kwargs
and cuml_param in kwargs
):
raise ValueError(
f"'{cuml_param}' is an alias of '{spark_param}', set one or the other."
)
for k, v in kwargs.items():
if self.hasParam(k):
# standard Spark ML Param
self._set(**{str(k): v}) # type: ignore
self._set_cuml_param(k, v, silent=False)
elif k in self.cuml_params:
# cuml param
self._cuml_params[k] = v
for spark_param, cuml_param in param_map.items():
if k == cuml_param:
# also set matching Spark Param, if exists
# TODO: map cuml values back to Spark equivalents?
try:
self._set(**{str(spark_param): v})
except TypeError:
# Spark params have a converter, which may not work
# as expected. Eg, it can't convert float back to
# str param.
# TypeError: Invalid param value given for param "featureSubsetStrategy".
# Could not convert <class 'float'> to string type
pass
elif k == "num_workers":
# special case, since not a Spark or cuML param
self._num_workers = v
elif k == "float32_inputs":
self._float32_inputs = v
else:
raise ValueError(f"Unsupported param '{k}'.")
return self
def clear(self, param: Param) -> None:
"""
Reset a Spark ML Param to its default value, setting matching cuML parameter, if exists.
"""
super().clear(param)
param_map = self._param_mapping()
if param.name in param_map:
cuml_param = param_map[param.name]
if cuml_param:
self._cuml_params[cuml_param] = self.getOrDefault(param.name)
def _copy_cuml_params(self: P, to: P) -> P:
"""
Copy this instance's cuml_params values into another instance, only setting parameters
which already exist in the other instance. This is intended to mirror the behavior of
:py:func:`Params._copyValues()`.
Parameters
----------
to : :py:class:`_CumlParams`
Other instance to copy parameter values into.
Returns
-------
:py:class:`_CumlParams`
Other instance.
"""
for k, v in self._cuml_params.items():
if k in to._cuml_params:
to._cuml_params[k] = v
return to
def _get_input_columns(self) -> Tuple[Optional[str], Optional[List[str]]]:
"""
Get input column(s) from any of inputCol, inputCols, featuresCol, or featuresCols.
Single-column setters, e.g. `setInputCol`, should allow either a single col name,
or a list of col names (to transparently support multi-column inputs), while storing
values in the appropriate underlying params, e.g. `inputCol` or `inputCols`.
Returns
-------
Tuple[Optional[str], Optional[List[str]]]
tuple of either a single column name or a list of multiple column names.
Raises
------
ValueError
if none of the four supported input column params are set.
"""
input_col = None
input_cols = None
# Note: order is significant if multiple params are set, e.g. defaults vs. overrides
if self.hasParam("inputCols") and self.isDefined("inputCols"):
input_cols = self.getOrDefault("inputCols")
elif self.hasParam("inputCol") and self.isDefined("inputCol"):
input_col = self.getOrDefault("inputCol")
elif self.hasParam("featuresCols") and self.isDefined("featuresCols"):
input_cols = self.getOrDefault("featuresCols")
elif self.hasParam("featuresCol") and self.isDefined("featuresCol"):
input_col = self.getOrDefault("featuresCol")
else:
raise ValueError("Please set inputCol(s) or featuresCol(s)")
return input_col, input_cols
def _infer_num_workers(self) -> int:
"""
Try to infer the number of cuML workers (i.e. GPUs in cluster) from the Spark environment.
"""
num_workers = 1
try:
spark = SparkSession.getActiveSession()
if spark:
sc = spark.sparkContext
if _is_local(sc):
# assume using all local GPUs for Spark local mode
# TODO suggest using more CPUs (e.g. local[*]) if number of GPUs > number of CPUs
import cupy
num_workers = cupy.cuda.runtime.getDeviceCount()
else:
num_executors = int(
spark.conf.get("spark.executor.instances", "-1") # type: ignore
)
if num_executors == -1:
jsc = spark.sparkContext._jsc.sc()
num_executors = len(jsc.statusTracker().getExecutorInfos()) - 1
gpus_per_executor = float(
spark.conf.get("spark.executor.resource.gpu.amount", "1") # type: ignore
)
num_workers = max(int(num_executors * gpus_per_executor), 1)
except Exception as e:
# ignore any exceptions and just use default value
print(e)
return num_workers
def _get_cuml_param(self, spark_param: str, silent: bool = True) -> Optional[str]:
param_map = self._param_mapping()
if spark_param in param_map:
cuml_param = param_map[spark_param]
if cuml_param is None:
if not silent:
# if Spark Param is mapped to None, raise error
raise ValueError(
f"Spark Param '{spark_param}' is not supported by cuML."
)
elif cuml_param == "":
# if Spark Param is mapped to empty string, warn and continue
if not silent:
print(f"WARNING: Spark Param '{spark_param}' is not used by cuML.")
cuml_param = None
return cuml_param
else:
return None
def _set_cuml_param(
self, spark_param: str, spark_value: Any, silent: bool = True
) -> None:
"""Set a cuml_params parameter for a given Spark Param and value.
Parameters
----------
spark_param : str
Spark ML Param name.
spark_value : Any
Value associated with the Spark ML Param.
silent: bool
Don't warn or raise errors, default=True.
Raises
------
ValueError
If the Spark Param is explicitly not supported.
"""
cuml_param = self._get_cuml_param(spark_param, silent)
if cuml_param is not None:
# if Spark Param is mapped to cuML parameter, set cuml_params
self._set_cuml_value(cuml_param, spark_value)
def _get_cuml_mapping_value(self, k: str, v: Any) -> Any:
value_map = self._param_value_mapping()
if k not in value_map:
# no value mapping required
return v
else:
# value map exists
mapped_v = value_map[k](v)
if mapped_v is not None:
return mapped_v
else:
raise ValueError(f"Value '{v}' for '{k}' param is unsupported")
def _set_cuml_value(self, k: str, v: Any) -> None:
"""
Set a cuml_params parameter with a (mapped) value.
If the value originated from a Spark ML Param, and a value mapping exists, the parameter
will be set to the mapped value. Generally, this is only useful for string/enum types.
Parameters
----------
k : str
Name of a cuml_param parameter.
v : Any
Value to assign to the cuml_param parameter, which may be mapped to another value.
Raises
------
ValueError
If a value mapping exists, but the mapped value is None, this means that there is
no equivalent value for the cuML side, so an exception is raised.
"""
mapped_value = self._get_cuml_mapping_value(k, v)
self._cuml_params[k] = mapped_value
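# A minimal sketch of the worker-count arithmetic used by _infer_num_workers() above for
# non-local clusters: the inferred number of cuML workers is the executor count times the
# GPUs requested per executor, with a floor of 1. The default argument values are
# illustrative only.
def _example_infer_num_workers(num_executors: int = 2, gpus_per_executor: float = 1.0) -> int:
    return max(int(num_executors * gpus_per_executor), 1)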
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/params.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import pandas as pd
from pyspark import Row, TaskContext, keyword_only
from pyspark.ml.common import _py2java
from pyspark.ml.evaluation import Evaluator, RegressionEvaluator
from pyspark.ml.linalg import Vector, Vectors, _convert_to_vector
from pyspark.ml.regression import LinearRegressionModel as SparkLinearRegressionModel
from pyspark.ml.regression import LinearRegressionSummary
from pyspark.ml.regression import (
RandomForestRegressionModel as SparkRandomForestRegressionModel,
)
from pyspark.ml.regression import _LinearRegressionParams, _RandomForestRegressorParams
from pyspark.sql import Column, DataFrame
from pyspark.sql.types import (
ArrayType,
DoubleType,
FloatType,
IntegerType,
StringType,
StructField,
StructType,
)
from .core import (
CumlT,
FitInputType,
TransformInputType,
_ConstructFunc,
_CumlEstimatorSupervised,
_CumlModel,
_CumlModelWithPredictionCol,
_EvaluateFunc,
_TransformFunc,
alias,
param_alias,
pred,
transform_evaluate,
)
from .metrics.RegressionMetrics import RegressionMetrics, reg_metrics
from .params import HasFeaturesCols, P, _CumlClass, _CumlParams
from .tree import (
_RandomForestClass,
_RandomForestCumlParams,
_RandomForestEstimator,
_RandomForestModel,
)
from .utils import PartitionDescriptor, _get_spark_session, cudf_to_cuml_array, java_uid
if TYPE_CHECKING:
from pyspark.ml._typing import ParamMap
T = TypeVar("T")
class _RegressionModelEvaluationMixIn:
# https://github.com/python/mypy/issues/5868#issuecomment-437690894 to bypass mypy checking
_this_model: Union["RandomForestRegressionModel", "LinearRegressionModel"]
def _transform_evaluate(
self,
dataset: DataFrame,
evaluator: Evaluator,
num_models: int,
params: Optional["ParamMap"] = None,
) -> List[float]:
"""
Transforms and evaluates the input dataset with optional parameters in a single pass.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
a dataset that contains labels/observations and predictions
evaluator: :py:class:`pyspark.ml.evaluation.Evaluator`
an evaluator user intends to use
num_models: how many models are used to perform transform and evaluation in a single pass
params : dict, optional
an optional param map that overrides embedded params
Returns
-------
list of float
metrics
"""
if not isinstance(evaluator, RegressionEvaluator):
raise NotImplementedError(f"{evaluator} is not supported yet.")
if self._this_model.getLabelCol() not in dataset.schema.names:
raise RuntimeError("Label column does not exist in the dataset.")
dataset = dataset.withColumnRenamed(self._this_model.getLabelCol(), alias.label)
schema = StructType(
[
StructField(pred.model_index, IntegerType()),
StructField(reg_metrics.mean, ArrayType(FloatType())),
StructField(reg_metrics.m2n, ArrayType(FloatType())),
StructField(reg_metrics.m2, ArrayType(FloatType())),
StructField(reg_metrics.l1, ArrayType(FloatType())),
StructField(reg_metrics.total_count, IntegerType()),
]
)
rows = self._this_model._transform_evaluate_internal(dataset, schema).collect()
metrics = RegressionMetrics.from_rows(num_models, rows)
return [metric.evaluate(evaluator) for metric in metrics]
@staticmethod
def calculate_regression_metrics(
input: TransformInputType,
transformed: TransformInputType,
) -> pd.DataFrame:
"""calculate the metrics: mean/m2n/m2/l1 ...
input must have `alias.label` column"""
comb = pd.DataFrame(
{
"label": input[alias.label],
"prediction": transformed,
}
)
comb.insert(1, "label-prediction", comb["label"] - comb["prediction"])
total_cnt = comb.shape[0]
return pd.DataFrame(
data={
reg_metrics.mean: [comb.mean().to_list()],
reg_metrics.m2n: [(comb.var(ddof=0) * total_cnt).to_list()],
reg_metrics.m2: [comb.pow(2).sum().to_list()],
reg_metrics.l1: [comb.abs().sum().to_list()],
reg_metrics.total_count: total_cnt,
}
)
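# A small, CPU-only sketch of the per-partition statistics produced by
# calculate_regression_metrics() above: column-wise mean, m2n (population variance * count),
# m2 (sum of squares), and l1 (sum of absolute values) over label, label-prediction, and
# prediction. The toy label/prediction values below are illustrative only.
def _example_partition_regression_stats() -> dict:
    comb = pd.DataFrame({"label": [1.0, 2.0], "prediction": [1.5, 1.5]})
    comb.insert(1, "label-prediction", comb["label"] - comb["prediction"])
    n = comb.shape[0]
    return {
        "mean": comb.mean().to_list(),
        "m2n": (comb.var(ddof=0) * n).to_list(),
        "m2": comb.pow(2).sum().to_list(),
        "l1": comb.abs().sum().to_list(),
        "total_count": n,
    }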
class LinearRegressionClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {
"aggregationDepth": "",
"elasticNetParam": "l1_ratio",
"epsilon": "",
"fitIntercept": "fit_intercept",
"loss": "loss",
"maxBlockSizeInMB": "",
"maxIter": "max_iter",
"regParam": "alpha",
"solver": "solver",
"standardization": "normalize",
"tol": "tol",
"weightCol": None,
}
@classmethod
def _param_value_mapping(
cls,
) -> Dict[str, Callable[[Any], Union[None, str, float, int]]]:
return {
"loss": lambda x: {
"squaredError": "squared_loss",
"huber": None,
"squared_loss": "squared_loss",
}.get(x, None),
"solver": lambda x: {
"auto": "eig",
"normal": "eig",
"l-bfgs": None,
"eig": "eig",
}.get(x, None),
}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {
"algorithm": "eig",
"fit_intercept": True,
"copy_X": None,
"normalize": False,
"verbose": False,
"alpha": 0.0001,
"solver": "eig",
"loss": "squared_loss",
"l1_ratio": 0.15,
"max_iter": 1000,
"tol": 0.001,
"shuffle": True,
}
class _LinearRegressionCumlParams(
_CumlParams, _LinearRegressionParams, HasFeaturesCols
):
"""
Shared Spark Params for LinearRegression and LinearRegressionModel.
"""
def getFeaturesCol(self) -> Union[str, List[str]]: # type:ignore
"""
Gets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`
"""
if self.isDefined(self.featuresCols):
return self.getFeaturesCols()
elif self.isDefined(self.featuresCol):
return self.getOrDefault("featuresCol")
else:
raise RuntimeError("featuresCol is not set")
def setFeaturesCol(self: P, value: Union[str, List[str]]) -> P:
"""
Sets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`.
"""
if isinstance(value, str):
self.set_params(featuresCol=value)
else:
self.set_params(featuresCols=value)
return self
def setFeaturesCols(self: P, value: List[str]) -> P:
"""
Sets the value of :py:attr:`featuresCols`.
"""
return self.set_params(featuresCols=value)
def setLabelCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`labelCol`.
"""
return self.set_params(labelCol=value)
def setPredictionCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self.set_params(predictionCol=value)
class LinearRegression(
LinearRegressionClass,
_CumlEstimatorSupervised,
_LinearRegressionCumlParams,
):
"""LinearRegression is a machine learning model where the response y is modeled
by a linear combination of the predictors in X. It implements cuML's GPU accelerated
LinearRegression algorithm based on the cuML Python library, and it can be used in
PySpark Pipeline and PySpark ML meta algorithms like
:py:class:`~pyspark.ml.tuning.CrossValidator`/
:py:class:`~pyspark.ml.tuning.TrainValidationSplit`/
:py:class:`~pyspark.ml.classification.OneVsRest`
This supports multiple types of regularization:
* none (a.k.a. ordinary least squares)
* L2 (ridge regression)
* L1 (Lasso)
* L2 + L1 (elastic net)
LinearRegression automatically supports most of the parameters from both
:py:class:`~pyspark.ml.regression.LinearRegression`,
:py:class:`cuml.LinearRegression`, :py:class:`cuml.Ridge`, :py:class:`cuml.Lasso`
and :py:class:`cuml.ElasticNet`. And it will automatically map pyspark parameters
to cuML parameters.
Notes
-----
Results for Spark ML and Spark Rapids ML fit() will currently match in all regularization
cases only if features and labels are standardized in the input dataframe. Otherwise,
they will match only if regParam = 0 or elasticNetParam = 1.0 (i.e. Lasso).
Parameters
----------
featuresCol:
The feature column names; spark-rapids-ml supports vector, array and columnar as the input.\n
* When the value is a string, the feature columns must be assembled into 1 column with vector or array type.
* When the value is a list of strings, the feature columns must be numeric types.
labelCol:
The label column name.
predictionCol:
The prediction column name.
maxIter:
Max number of iterations (>= 0).
regParam:
Regularization parameter (>= 0)
elasticNetParam:
The ElasticNet mixing parameter, in range [0, 1]. For alpha = 0,
the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
tol:
The convergence tolerance for iterative algorithms (>= 0).
fitIntercept:
whether to fit an intercept term.
standardization:
Whether to standardize the training features before fitting the model.
solver:
The solver algorithm for optimization. If this is not set or empty, default value is 'auto'.\n
The supported options: 'auto', 'normal' and 'eig', all of them will be mapped to 'eig' in cuML.
loss:
The loss function to be optimized.
The supported options: 'squaredError'
num_workers:
Number of cuML workers, where each cuML worker corresponds to one Spark task
running on one GPU. If not set, spark-rapids-ml tries to infer the number of
cuML workers (i.e. GPUs in cluster) from the Spark environment.
verbose:
Logging level.
* ``0`` - Disables all log messages.
* ``1`` - Enables only critical messages.
* ``2`` - Enables all messages up to and including errors.
* ``3`` - Enables all messages up to and including warnings.
* ``4 or False`` - Enables all messages up to and including information messages.
* ``5 or True`` - Enables all messages up to and including debug messages.
* ``6`` - Enables all messages up to and including trace messages.
Examples
--------
>>> from spark_rapids_ml.regression import LinearRegression, LinearRegressionModel
>>> from pyspark.ml.linalg import Vectors
>>>
>>> df = spark.createDataFrame([
... (6.5, Vectors.dense(1.0, 2.0)),
... (3.5, Vectors.sparse(2, {1: 2}))], ["label", "features"])
>>>
>>> lr = LinearRegression(regParam=0.0, solver="normal")
>>> lr.setMaxIter(5)
LinearRegression...
>>> model = lr.fit(df)
>>> model.setFeaturesCol("features")
LinearRegressionModel...
>>> model.setPredictionCol("newPrediction")
LinearRegressionModel...
>>> model.getMaxIter()
5
>>> model.coefficients
[3.000000000000001, 0.0]
>>> model.intercept
3.4999999999999996
>>> model.transform(df).show()
+-----+----------+------------------+
|label| features| newPrediction|
+-----+----------+------------------+
| 6.5|[1.0, 2.0]| 6.5|
| 3.5|[0.0, 2.0]|3.4999999999999996|
+-----+----------+------------------+
>>> lr_path = temp_path + "/rl"
>>> lr.save(lr_path)
>>> lr2 = LinearRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> model.save(model_path)
>>> model2 = LinearRegressionModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.numFeatures
2
>>> model2.transform(df).show()
+-----+----------+------------------+
|label| features| newPrediction|
+-----+----------+------------------+
| 6.5|[1.0, 2.0]| 6.5|
| 3.5|[0.0, 2.0]|3.4999999999999996|
+-----+----------+------------------+
"""
@keyword_only
def __init__(
self,
*,
featuresCol: Union[str, List[str]] = "features",
labelCol: str = "label",
predictionCol: str = "prediction",
maxIter: int = 100,
regParam: float = 0.0,
elasticNetParam: float = 0.0,
tol: float = 1e-6,
fitIntercept: bool = True,
standardization: bool = True,
solver: str = "auto",
loss: str = "squaredError",
num_workers: Optional[int] = None,
verbose: Union[int, bool] = False,
**kwargs: Any,
):
super().__init__()
self.set_params(**self._input_kwargs)
def setMaxIter(self, value: int) -> "LinearRegression":
"""
Sets the value of :py:attr:`maxIter`.
"""
return self.set_params(maxIter=value)
def setRegParam(self, value: float) -> "LinearRegression":
"""
Sets the value of :py:attr:`regParam`.
"""
return self.set_params(regParam=value)
def setElasticNetParam(self, value: float) -> "LinearRegression":
"""
Sets the value of :py:attr:`elasticNetParam`.
"""
return self.set_params(elasticNetParam=value)
def setLoss(self, value: str) -> "LinearRegression":
"""
Sets the value of :py:attr:`loss`.
"""
return self.set_params(loss=value)
def setStandardization(self, value: bool) -> "LinearRegression":
"""
Sets the value of :py:attr:`standardization`.
"""
return self.set_params(standardization=value)
def setTol(self, value: float) -> "LinearRegression":
"""
Sets the value of :py:attr:`tol`.
"""
return self.set_params(tol=value)
def _pre_process_data(
self, dataset: DataFrame
) -> Tuple[
List[Column], Optional[List[str]], int, Union[Type[FloatType], Type[DoubleType]]
]:
(
select_cols,
multi_col_names,
dimension,
feature_type,
) = super()._pre_process_data(dataset)
# Ridge and LinearRegression can't train on a dataset that has only 1 feature
if dimension == 1 and (
self.cuml_params["alpha"] == 0 or self.cuml_params["l1_ratio"] == 0
):
raise RuntimeError(
"LinearRegression doesn't support training data with 1 column"
)
return select_cols, multi_col_names, dimension, feature_type
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
def _linear_regression_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
# Step 1, get the PartitionDescriptor
pdesc = PartitionDescriptor.build(
params[param_alias.part_sizes], params[param_alias.num_cols]
)
def _single_fit(init_parameters: Dict[str, Any]) -> Dict[str, Any]:
if init_parameters["alpha"] == 0:
# LR
from cuml.linear_model.linear_regression_mg import (
LinearRegressionMG as CumlLinearRegression,
)
supported_params = [
"algorithm",
"fit_intercept",
"normalize",
"verbose",
"copy_X",
]
else:
if init_parameters["l1_ratio"] == 0:
# LR + L2
from cuml.linear_model.ridge_mg import (
RidgeMG as CumlLinearRegression,
)
supported_params = [
"alpha",
"solver",
"fit_intercept",
"normalize",
"verbose",
]
# spark ML normalizes sample portion of objective by the number of examples
# but cuml does not for RidgeRegression (l1_ratio=0). Induce similar behavior
# to spark ml by scaling up the reg parameter by the number of examples.
# With this, spark ML and spark rapids ML results match closely when features
# and label columns are all standardized.
init_parameters = init_parameters.copy()
if "alpha" in init_parameters.keys():
init_parameters["alpha"] *= (float)(pdesc.m)
else:
# LR + L1, or LR + L1 + L2
# Cuml uses Coordinate Descent algorithm to implement Lasso and ElasticNet
# So combine Lasso and ElasticNet here.
from cuml.solvers.cd_mg import CDMG as CumlLinearRegression
# in this case, both spark ML and cuml CD normalize sample portion of
# objective by the number of training examples, so no need to adjust
# reg params
supported_params = [
"loss",
"alpha",
"l1_ratio",
"fit_intercept",
"max_iter",
"normalize",
"tol",
"shuffle",
"verbose",
]
# filter only supported params
final_init_parameters = {
k: v for k, v in init_parameters.items() if k in supported_params
}
# cuml adds copy_X argument since 23.08
if "copy_X" in final_init_parameters:
final_init_parameters["copy_X"] = False
linear_regression = CumlLinearRegression(
handle=params[param_alias.handle],
output_type="cudf",
**final_init_parameters,
)
linear_regression.fit(
dfs,
pdesc.m,
pdesc.n,
pdesc.parts_rank_size,
pdesc.rank,
)
return {
"coef_": linear_regression.coef_.to_numpy().tolist(),
"intercept_": linear_regression.intercept_,
"dtype": linear_regression.dtype.name,
"n_cols": linear_regression.n_cols,
}
init_parameters = params[param_alias.cuml_init]
fit_multiple_params = params[param_alias.fit_multiple_params]
if len(fit_multiple_params) == 0:
fit_multiple_params.append({})
models = []
for i in range(len(fit_multiple_params)):
tmp_params = init_parameters.copy()
tmp_params.update(fit_multiple_params[i])
models.append(_single_fit(tmp_params))
models_dict = {}
tc = TaskContext.get()
assert tc is not None
if tc.partitionId() == 0:
for k in models[0].keys():
models_dict[k] = [m[k] for m in models]
return models_dict
return _linear_regression_fit
def _out_schema(self) -> Union[StructType, str]:
return StructType(
[
StructField("coef_", ArrayType(DoubleType(), False), False),
StructField("intercept_", DoubleType(), False),
StructField("n_cols", IntegerType(), False),
StructField("dtype", StringType(), False),
]
)
def _create_pyspark_model(self, result: Row) -> "LinearRegressionModel":
return LinearRegressionModel.from_row(result)
def _enable_fit_multiple_in_single_pass(self) -> bool:
return True
def _supportsTransformEvaluate(self, evaluator: Evaluator) -> bool:
return True if isinstance(evaluator, RegressionEvaluator) else False
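# A hypothetical, CPU-only sketch of the regularization-based dispatch inside
# LinearRegression._get_cuml_fit_func() above: plain OLS when alpha == 0, Ridge when
# l1_ratio == 0 (with alpha rescaled by the row count to align with Spark ML's per-row
# objective), and coordinate descent (Lasso/ElasticNet) otherwise. The returned names are
# illustrative labels, not a promise about the cuML classes actually instantiated.
def _example_select_cuml_solver(alpha: float, l1_ratio: float, num_rows: int) -> tuple:
    if alpha == 0:
        return ("LinearRegressionMG", alpha)           # ordinary least squares
    if l1_ratio == 0:
        return ("RidgeMG", alpha * float(num_rows))    # L2 only; alpha scaled by row count
    return ("CDMG", alpha)                             # L1 or L1 + L2 via coordinate descent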
class LinearRegressionModel(
LinearRegressionClass,
_CumlModelWithPredictionCol,
_LinearRegressionCumlParams,
_RegressionModelEvaluationMixIn,
):
"""Model fitted by :class:`LinearRegression`."""
def __init__(
self,
coef_: Union[List[float], List[List[float]]],
intercept_: Union[float, List[float]],
n_cols: int,
dtype: str,
) -> None:
super().__init__(dtype=dtype, n_cols=n_cols, coef_=coef_, intercept_=intercept_)
self.coef_ = coef_
self.intercept_ = intercept_
self._lr_ml_model: Optional[SparkLinearRegressionModel] = None
self._this_model = self
def cpu(self) -> SparkLinearRegressionModel:
"""Return the PySpark ML LinearRegressionModel"""
if self._lr_ml_model is None:
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
coef = _convert_to_vector(self.coefficients)
java_model = sc._jvm.org.apache.spark.ml.regression.LinearRegressionModel(
java_uid(sc, "linReg"), _py2java(sc, coef), self.intercept, self.scale
)
self._lr_ml_model = SparkLinearRegressionModel(java_model)
self._copyValues(self._lr_ml_model)
return self._lr_ml_model
@property
def coefficients(self) -> Vector:
"""
Model coefficients.
"""
# TBD: for large enough dimension, SparseVector is returned. Need to find out how to match
assert not isinstance(self.coef_[0], list)
return Vectors.dense(cast(list, self.coef_))
@property
def hasSummary(self) -> bool:
"""
Indicates whether a training summary exists for this model instance.
"""
return False
@property
def intercept(self) -> float:
"""
Model intercept.
"""
assert not isinstance(self.intercept_, list)
return self.intercept_
@property
def scale(self) -> float:
"""
Since "huber" loss is not supported by cuML, just returns the value 1.0 for API compatibility.
"""
return 1.0
def predict(self, value: T) -> float:
"""cuML doesn't support predicting 1 single sample.
Fall back to PySpark ML LinearRegressionModel"""
return self.cpu().predict(value)
def evaluate(self, dataset: DataFrame) -> LinearRegressionSummary:
"""cuML doesn't support evaluating.
Fall back to PySpark ML LinearRegressionModel"""
return self.cpu().evaluate(dataset)
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
coef_ = self.coef_
intercept_ = self.intercept_
n_cols = self.n_cols
dtype = self.dtype
def _construct_lr() -> CumlT:
from cuml.linear_model.linear_regression_mg import LinearRegressionMG
lrs = []
coefs = coef_ if isinstance(intercept_, list) else [coef_]
intercepts = intercept_ if isinstance(intercept_, list) else [intercept_]
for i in range(len(coefs)):
lr = LinearRegressionMG(output_type="numpy", copy_X=False)
lr.coef_ = cudf_to_cuml_array(
np.array(coefs[i], order="F").astype(dtype)
)
lr.intercept_ = intercepts[i]
lr.n_cols = n_cols
lr.dtype = np.dtype(dtype)
lrs.append(lr)
return lrs
def _predict(lr: CumlT, pdf: TransformInputType) -> pd.Series:
ret = lr.predict(pdf)
return pd.Series(ret)
return _construct_lr, _predict, self.calculate_regression_metrics
@classmethod
def _combine(
cls: Type["LinearRegressionModel"], models: List["LinearRegressionModel"] # type: ignore
) -> "LinearRegressionModel":
assert len(models) > 0 and all(isinstance(model, cls) for model in models)
first_model = models[0]
# Combine coef and intercepts
coefs = cast(list, [model.coef_ for model in models])
intercepts = cast(list, [model.intercept_ for model in models])
assert first_model.n_cols is not None
assert first_model.dtype is not None
lr_model = cls(
n_cols=first_model.n_cols,
dtype=first_model.dtype,
coef_=coefs,
intercept_=intercepts,
)
first_model._copyValues(lr_model)
first_model._copy_cuml_params(lr_model)
return lr_model
def _transformEvaluate(
self,
dataset: DataFrame,
evaluator: Evaluator,
params: Optional["ParamMap"] = None,
) -> List[float]:
num_models = len(self.intercept_) if isinstance(self.intercept_, list) else 1
return self._transform_evaluate(
dataset=dataset, evaluator=evaluator, num_models=num_models, params=params
)
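# A minimal sketch of the shape change performed by LinearRegressionModel._combine() above:
# the coefficients and intercepts of several single models are stacked into lists so that one
# combined model can transform and evaluate all of them in a single pass over the data.
# The toy coefficient/intercept values below are illustrative only.
def _example_combine_linear_models() -> tuple:
    fitted = [([1.0, 2.0], 0.5), ([0.5, 1.5], 0.1)]   # (coef_, intercept_) per fitted model
    coefs = [coef for coef, _ in fitted]
    intercepts = [intercept for _, intercept in fitted]
    return coefs, intercepts                          # ([[1.0, 2.0], [0.5, 1.5]], [0.5, 0.1])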
class _RandomForestRegressorClass(_RandomForestClass):
@classmethod
def _param_value_mapping(
cls,
) -> Dict[str, Callable[[Any], Union[None, str, float, int]]]:
mapping = super()._param_value_mapping()
mapping["split_criterion"] = lambda x: {"variance": "mse", "mse": "mse"}.get(
x, None
)
return mapping
class RandomForestRegressor(
_RandomForestRegressorClass,
_RandomForestEstimator,
_RandomForestCumlParams,
_RandomForestRegressorParams,
):
"""RandomForestRegressor implements a Random Forest regressor model which
fits multiple decision trees in an ensemble. It implements cuML's
GPU accelerated RandomForestRegressor algorithm based on the cuML Python library,
and it can be used in PySpark Pipeline and PySpark ML meta algorithms like
:py:class:`~pyspark.ml.tuning.CrossValidator`,
:py:class:`~pyspark.ml.tuning.TrainValidationSplit`,
:py:class:`~pyspark.ml.classification.OneVsRest`
The distributed algorithm uses an *embarrassingly-parallel* approach. For a
forest with `N` trees being built on `w` workers, each worker simply builds `N/w`
trees on the data it has available locally. In many cases, partitioning the
data so that each worker builds trees on a subset of the total dataset works
well, but it generally requires the data to be well-shuffled in advance.
RandomForestRegressor automatically supports most of the parameters from both
:py:class:`~pyspark.ml.regression.RandomForestRegressor` and
:py:class:`cuml.ensemble.RandomForestRegressor`. And it can automatically map
pyspark parameters to cuML parameters.
Parameters
----------
featuresCol:
The feature column names; spark-rapids-ml supports vector, array and columnar as the input.\n
* When the value is a string, the feature columns must be assembled into 1 column with vector or array type.
* When the value is a list of strings, the feature columns must be numeric types.
labelCol:
The label column name.
predictionCol:
The prediction column name.
maxDepth:
Maximum tree depth. Must be greater than 0.
maxBins:
Maximum number of bins used by the split algorithm per feature.
minInstancesPerNode:
The minimum number of samples (rows) in each leaf node.
impurity: str = "variance",
The criterion used to split nodes.
numTrees:
Total number of trees in the forest.
featureSubsetStrategy:
Ratio of number of features (columns) to consider per node split.\n
The supported options:\n
``'auto'``: if numTrees == 1, set to 'all'; if numTrees > 1 (forest), set to 'onethird'\n
``'all'``: use all features\n
``'onethird'``: use 1/3 of the features\n
``'sqrt'``: use sqrt(number of features)\n
``'log2'``: log2(number of features)\n
``'n'``: when n is in the range (0, 1.0], use n * number of features. When n
is in the range (1, number of features), use n features.
seed:
Seed for the random number generator.
bootstrap:
Control bootstrapping.\n
* If ``True``, each tree in the forest is built on a bootstrapped
sample with replacement.
* If ``False``, the whole dataset is used to build each tree.
num_workers:
Number of cuML workers, where each cuML worker corresponds to one Spark task
running on one GPU. If not set, spark-rapids-ml tries to infer the number of
cuML workers (i.e. GPUs in cluster) from the Spark environment.
verbose:
Logging level.
* ``0`` - Disables all log messages.
* ``1`` - Enables only critical messages.
* ``2`` - Enables all messages up to and including errors.
* ``3`` - Enables all messages up to and including warnings.
* ``4 or False`` - Enables all messages up to and including information messages.
* ``5 or True`` - Enables all messages up to and including debug messages.
* ``6`` - Enables all messages up to and including trace messages.
n_streams:
Number of parallel streams used for forest building.
Please note that there is a bug running spark-rapids-ml on a node with multi-gpus
when n_streams > 1. See https://github.com/rapidsai/cuml/issues/5402.
min_samples_split:
The minimum number of samples required to split an internal node.\n
* If type ``int``, then ``min_samples_split`` represents the minimum
number.
* If type ``float``, then ``min_samples_split`` represents a fraction
and ``ceil(min_samples_split * n_rows)`` is the minimum number of
samples for each split.
max_samples:
Ratio of dataset rows used while fitting each tree.
max_leaves:
Maximum leaf nodes per tree. Soft constraint. Unlimited, if -1.
min_impurity_decrease:
Minimum decrease in impurity required for node to be split.
max_batch_size:
Maximum number of nodes that can be processed in a given batch.
Examples
--------
>>> from spark_rapids_ml.regression import RandomForestRegressor, RandomForestRegressionModel
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> rf = RandomForestRegressor(numTrees=2, maxDepth=2)
>>> rf.setSeed(42)
RandomForestRegressor_...
>>> model = rf.fit(df)
>>> model.getBootstrap()
True
>>> model.getSeed()
42
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> model.numFeatures
1
>>> model.getNumTrees
2
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> rfr_path = temp_path + "/rfr"
>>> rf.save(rfr_path)
>>> rf2 = RandomForestRegressor.load(rfr_path)
>>> rf2.getNumTrees()
2
>>> model_path = temp_path + "/rfr_model"
>>> model.save(model_path)
>>> model2 = RandomForestRegressionModel.load(model_path)
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(
self,
*,
featuresCol: Union[str, List[str]] = "features",
labelCol: str = "label",
predictionCol: str = "prediction",
maxDepth: int = 5,
maxBins: int = 32,
minInstancesPerNode: int = 1,
impurity: str = "variance",
numTrees: int = 20,
featureSubsetStrategy: str = "auto",
seed: Optional[int] = None,
bootstrap: Optional[bool] = True,
num_workers: Optional[int] = None,
verbose: Union[int, bool] = False,
n_streams: int = 1,
min_samples_split: Union[int, float] = 2,
max_samples: float = 1.0,
max_leaves: int = -1,
min_impurity_decrease: float = 0.0,
max_batch_size: int = 4096,
**kwargs: Any,
):
super().__init__(**self._input_kwargs)
def _is_classification(self) -> bool:
return False
def _create_pyspark_model(self, result: Row) -> "RandomForestRegressionModel":
return RandomForestRegressionModel.from_row(result)
def _supportsTransformEvaluate(self, evaluator: Evaluator) -> bool:
return True if isinstance(evaluator, RegressionEvaluator) else False
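# A minimal sketch of the embarrassingly-parallel split described in the RandomForestRegressor
# docstring above: N total trees built by w workers means each worker builds roughly N / w
# trees on its local data. The exact remainder handling here is illustrative only, not
# necessarily what cuML does internally.
def _example_trees_per_worker(num_trees: int = 20, num_workers: int = 4) -> list:
    base, remainder = divmod(num_trees, num_workers)
    return [base + (1 if i < remainder else 0) for i in range(num_workers)]   # e.g. [5, 5, 5, 5]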
class RandomForestRegressionModel(
_RandomForestRegressorClass,
_RandomForestModel,
_RandomForestCumlParams,
_RandomForestRegressorParams,
_RegressionModelEvaluationMixIn,
):
"""
Model fitted by :class:`RandomForestRegressor`.
"""
def __init__(
self,
n_cols: int,
dtype: str,
treelite_model: Union[str, List[str]],
model_json: Union[List[str], List[List[str]]] = [], # type: ignore
):
super().__init__(
dtype=dtype,
n_cols=n_cols,
treelite_model=treelite_model,
model_json=model_json,
)
self._rf_spark_model: Optional[SparkRandomForestRegressionModel] = None
self._this_model = self
def cpu(self) -> SparkRandomForestRegressionModel:
"""Return the PySpark ML RandomForestRegressionModel"""
if self._rf_spark_model is None:
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
uid, java_trees = self._convert_to_java_trees(self.getImpurity())
# Create the Spark RandomForestClassificationModel
java_rf_model = (
sc._jvm.org.apache.spark.ml.regression.RandomForestRegressionModel(
uid,
java_trees,
self.numFeatures,
)
)
self._rf_spark_model = SparkRandomForestRegressionModel(java_rf_model)
self._copyValues(self._rf_spark_model)
return self._rf_spark_model
def _is_classification(self) -> bool:
return False
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
_construct_rf, _predict, _ = super()._get_cuml_transform_func(dataset, category)
return _construct_rf, _predict, self.calculate_regression_metrics
def _transformEvaluate(
self,
dataset: DataFrame,
evaluator: Evaluator,
params: Optional["ParamMap"] = None,
) -> List[float]:
"""
Transforms and evaluates the input dataset with optional parameters in a single pass.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
a dataset that contains labels/observations and predictions
evaluator: :py:class:`pyspark.ml.evaluation.Evaluator`
an evaluator user intends to use
params : dict, optional
an optional param map that overrides embedded params
Returns
-------
list of float
metrics
"""
num_models = (
len(self._treelite_model) if isinstance(self._treelite_model, list) else 1
)
return self._transform_evaluate(
dataset=dataset, evaluator=evaluator, num_models=num_models, params=params
)
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/regression.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import pandas as pd
from pyspark.ml.clustering import KMeansModel as SparkKMeansModel
from pyspark.ml.clustering import _KMeansParams
from pyspark.ml.linalg import Vector
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import (
ArrayType,
DoubleType,
IntegerType,
Row,
StringType,
StructField,
StructType,
)
from .core import (
CumlT,
FitInputType,
_ConstructFunc,
_CumlEstimator,
_CumlModelWithPredictionCol,
_EvaluateFunc,
_TransformFunc,
param_alias,
transform_evaluate,
)
from .params import HasFeaturesCols, P, _CumlClass, _CumlParams
from .utils import (
_ArrayOrder,
_concat_and_free,
_get_spark_session,
get_logger,
java_uid,
)
class KMeansClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {
"distanceMeasure": None,
"initMode": "init",
"k": "n_clusters",
"initSteps": "",
"maxIter": "max_iter",
"seed": "random_state",
"tol": "tol",
"weightCol": None,
}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {
"n_clusters": 8,
"max_iter": 300,
"tol": 0.0001,
"verbose": False,
"random_state": 1,
"init": "scalable-k-means++",
"n_init": 1,
"oversampling_factor": 2.0,
"max_samples_per_batch": 32768,
}
class _KMeansCumlParams(_CumlParams, _KMeansParams, HasFeaturesCols):
"""
Shared Spark Params for KMeans and KMeansModel.
"""
def __init__(self) -> None:
super().__init__()
# restrict default seed to max value of 32-bit signed integer for cuML
self._setDefault(seed=hash(type(self).__name__) & 0x07FFFFFFF)
def getFeaturesCol(self) -> Union[str, List[str]]: # type: ignore
"""
Gets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`
"""
if self.isDefined(self.featuresCols):
return self.getFeaturesCols()
elif self.isDefined(self.featuresCol):
return self.getOrDefault("featuresCol")
else:
raise RuntimeError("featuresCol is not set")
def setFeaturesCol(self: P, value: Union[str, List[str]]) -> P:
"""
Sets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`. Used when input vectors are stored in a single column.
"""
if isinstance(value, str):
self.set_params(featuresCol=value)
else:
self.set_params(featuresCols=value)
return self
def setFeaturesCols(self: P, value: List[str]) -> P:
"""
Sets the value of :py:attr:`featuresCols`. Used when input vectors are stored as multiple feature columns.
"""
return self.set_params(featuresCols=value)
def setPredictionCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`predictionCol`.
"""
self.set_params(predictionCol=value)
return self
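# A minimal sketch of the seed restriction used by _KMeansCumlParams.__init__() and checked in
# KMeans.setSeed() above: cuML expects a seed that fits a 32-bit signed integer, so the default
# derived from the class name is masked into [0, 2**31 - 1]. The class name argument is
# illustrative only.
def _example_default_seed(class_name: str = "KMeans") -> int:
    return hash(class_name) & 0x07FFFFFFF   # always non-negative and <= 2**31 - 1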
class KMeans(KMeansClass, _CumlEstimator, _KMeansCumlParams):
"""
KMeans algorithm partitions data points into a fixed number (denoted as k) of clusters.
The algorithm initializes a set of k random centers then runs in iterations.
In each iteration, KMeans assigns every point to its nearest center,
then calculates a new set of k centers. KMeans is often applied to large datasets, and
this class provides GPU acceleration for distributed PySpark KMeans.
Parameters
----------
k: int (default = 8)
the number of centers. Set this parameter to enable KMeans to learn k centers from input vectors.
maxIter: int (default = 300)
the maximum iterations the algorithm will run to learn the k centers.
More iterations help generate more accurate centers.
seed: int (default = 1)
the random seed used by the algorithm to initialize a set of k random centers to start with.
tol: float (default = 1e-4)
early stopping criterion if centers do not change much after an iteration.
featuresCol: str
the name of the column that contains input vectors. featuresCol should be set when input vectors are stored in a single column of a dataframe.
featuresCols: List[str]
the names of feature columns that form input vectors. featuresCols should be set when input vectors are stored as multiple feature columns of a dataframe.
predictionCol: str
the name of the column that stores cluster indices of input vectors. predictionCol should be set when users expect to apply the transform function of a learned model.
Examples
--------
>>> from spark_rapids_ml.clustering import KMeans
>>> data = [([0.0, 0.0],),
... ([1.0, 1.0],),
... ([9.0, 8.0],),
... ([8.0, 9.0],),]
>>> df = spark.createDataFrame(data, ["features"])
>>> df.show()
+----------+
| features|
+----------+
|[0.0, 0.0]|
|[1.0, 1.0]|
|[9.0, 8.0]|
|[8.0, 9.0]|
+----------+
>>> gpu_kmeans = KMeans(k=2).setFeaturesCol("features")
>>> gpu_kmeans.setMaxIter(10)
KMeans_5606dff6b4fa
>>> gpu_model = gpu_kmeans.fit(df)
>>> gpu_model.setPredictionCol("prediction")
>>> gpu_model.clusterCenters()
[[0.5, 0.5], [8.5, 8.5]]
>>> transformed = gpu_model.transform(df)
>>> transformed.show()
+----------+----------+
| features|prediction|
+----------+----------+
|[0.0, 0.0]| 0|
|[1.0, 1.0]| 0|
|[9.0, 8.0]| 1|
|[8.0, 9.0]| 1|
+----------+----------+
>>> gpu_kmeans.save("/tmp/kmeans")
>>> gpu_model.save("/tmp/kmeans_model")
>>> # vector column input
>>> from spark_rapids_ml.clustering import KMeans
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),),
... (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),),
... (Vectors.dense([8.0, 9.0]),),]
>>> df = spark.createDataFrame(data, ["features"])
>>> gpu_kmeans = KMeans(k=2).setFeaturesCol("features")
>>> gpu_kmeans.getFeaturesCol()
'features'
>>> gpu_model = gpu_kmeans.fit(df)
>>> # multi-column input
>>> data = [(0.0, 0.0),
... (1.0, 1.0),
... (9.0, 8.0),
... (8.0, 9.0),]
>>> df = spark.createDataFrame(data, ["f1", "f2"])
>>> gpu_kmeans = KMeans(k=2).setFeaturesCols(["f1", "f2"])
>>> gpu_kmeans.getFeaturesCols()
['f1', 'f2']
>>> gpu_kmeans = gpu_kmeans.fit(df)
"""
def __init__(self, **kwargs: Any) -> None:
super().__init__()
self.set_params(**kwargs)
def setK(self, value: int) -> "KMeans":
"""
Sets the value of :py:attr:`k`.
"""
return self.set_params(k=value)
def setMaxIter(self, value: int) -> "KMeans":
"""
Sets the value of :py:attr:`maxIter`.
"""
return self.set_params(maxIter=value)
def setSeed(self, value: int) -> "KMeans":
"""
Sets the value of :py:attr:`seed`.
"""
if value > 0x07FFFFFFF:
raise ValueError("cuML seed value must be a 32-bit integer.")
return self.set_params(seed=value)
def setTol(self, value: float) -> "KMeans":
"""
Sets the value of :py:attr:`tol`.
"""
return self.set_params(tol=value)
def setWeightCol(self, value: str) -> "KMeans":
"""
Sets the value of :py:attr:`weightCol`.
"""
raise ValueError("'weightCol' is not supported by cuML.")
def _fit_array_order(self) -> _ArrayOrder:
return "C"
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
cls = self.__class__
array_order = self._fit_array_order()
def _cuml_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
import cupy as cp
from cuml.cluster.kmeans_mg import KMeansMG as CumlKMeansMG
kmeans_object = CumlKMeansMG(
handle=params[param_alias.handle],
output_type="cudf",
**params[param_alias.cuml_init],
)
df_list = [x for (x, _, _) in dfs]
if isinstance(df_list[0], pd.DataFrame):
concated = pd.concat(df_list)
else:
# features are either cp or np arrays here
concated = _concat_and_free(df_list, order=array_order)
kmeans_object.fit(
concated,
sample_weight=None,
)
logger = get_logger(cls)
# TBD: inertia is always 0 for some reason
logger.info(
f"iterations: {kmeans_object.n_iter_}, inertia: {kmeans_object.inertia_}"
)
return {
"cluster_centers_": [
kmeans_object.cluster_centers_.to_numpy().tolist()
],
"n_cols": params[param_alias.num_cols],
"dtype": str(kmeans_object.dtype.name),
}
return _cuml_fit
def _out_schema(self) -> Union[StructType, str]:
return StructType(
[
StructField(
"cluster_centers_", ArrayType(ArrayType(DoubleType()), False), False
),
StructField("n_cols", IntegerType(), False),
StructField("dtype", StringType(), False),
]
)
def _create_pyspark_model(self, result: Row) -> "KMeansModel":
return KMeansModel.from_row(result)
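# A small, CPU-only sketch of the per-worker input handling inside KMeans._get_cuml_fit_func()
# above: the data chunks delivered to a worker are concatenated into a single array (or pandas
# DataFrame) before one fit call is made. The toy data below is illustrative only.
def _example_concat_chunks() -> np.ndarray:
    chunks = [np.array([[0.0, 0.0], [1.0, 1.0]]), np.array([[9.0, 8.0], [8.0, 9.0]])]
    return np.concatenate(chunks)   # shape (4, 2), passed to a single fit() call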
class KMeansModel(KMeansClass, _CumlModelWithPredictionCol, _KMeansCumlParams):
"""
KMeans gpu model for clustering input vectors to learned k centers.
Refer to the KMeans class for learning the k centers.
"""
def __init__(
self,
cluster_centers_: List[List[float]],
n_cols: int,
dtype: str,
):
super(KMeansModel, self).__init__(
n_cols=n_cols, dtype=dtype, cluster_centers_=cluster_centers_
)
self.cluster_centers_ = cluster_centers_
self._kmeans_spark_model: Optional[SparkKMeansModel] = None
def cpu(self) -> SparkKMeansModel:
"""Return the PySpark ML KMeansModel"""
if self._kmeans_spark_model is None:
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
from pyspark.mllib.common import _py2java
from pyspark.mllib.linalg import _convert_to_vector
java_centers = _py2java(
sc, [_convert_to_vector(c) for c in self.cluster_centers_]
)
java_mllib_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(
java_centers
)
java_model = sc._jvm.org.apache.spark.ml.clustering.KMeansModel(
java_uid(sc, "kmeans"), java_mllib_model
)
self._kmeans_spark_model = SparkKMeansModel(java_model)
return self._kmeans_spark_model
def clusterCenters(self) -> List[np.ndarray]:
"""Returns the list of cluster centers."""
return [np.array(x) for x in self.cluster_centers_]
@property
def hasSummary(self) -> bool:
"""Indicates whether a training summary exists for this model instance."""
return False
def predict(self, value: Vector) -> int:
"""Predict label for the given features.
cuML doesn't support predicting a single sample.
Fall back to PySpark ML KMeansModel"""
return self.cpu().predict(value)
def _out_schema(self, input_schema: StructType) -> Union[StructType, str]:
ret_schema = "int"
return ret_schema
def _transform_array_order(self) -> _ArrayOrder:
return "C"
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
cuml_alg_params = self.cuml_params.copy()
cluster_centers_ = self.cluster_centers_
dtype = self.dtype
n_cols = self.n_cols
array_order = self._transform_array_order()
def _construct_kmeans() -> CumlT:
from cuml.cluster.kmeans_mg import KMeansMG as CumlKMeansMG
kmeans = CumlKMeansMG(output_type="cudf", **cuml_alg_params)
from spark_rapids_ml.utils import cudf_to_cuml_array
kmeans.n_cols = n_cols
kmeans.dtype = np.dtype(dtype)
kmeans.cluster_centers_ = cudf_to_cuml_array(
np.array(cluster_centers_).astype(dtype), order=array_order
)
return kmeans
def _transform_internal(
kmeans: CumlT, df: Union[pd.DataFrame, np.ndarray]
) -> pd.Series:
res = list(kmeans.predict(df, normalize_weights=False).to_numpy())
return pd.Series(res)
return _construct_kmeans, _transform_internal, None
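# A minimal NumPy sketch of the assignment KMeansModel.transform() produces via cuML predict():
# each input row gets the index of its nearest center (Euclidean distance), matching the
# description in the KMeans docstring. The centers and points below are illustrative only.
def _example_assign_clusters() -> list:
    centers = np.array([[0.5, 0.5], [8.5, 8.5]])
    points = np.array([[0.0, 0.0], [9.0, 8.0]])
    distances = np.linalg.norm(points[:, None, :] - centers[None, :, :], axis=2)
    return distances.argmin(axis=1).tolist()   # [0, 1]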
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/clustering.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from pyspark.ml.common import _py2java
from pyspark.ml.evaluation import Evaluator, MulticlassClassificationEvaluator
from .metrics.MulticlassMetrics import MulticlassMetrics
if TYPE_CHECKING:
from pyspark.ml._typing import ParamMap
import numpy as np
import pandas as pd
from pyspark import Row, keyword_only
from pyspark.ml.classification import BinaryRandomForestClassificationSummary
from pyspark.ml.classification import (
LogisticRegressionModel as SparkLogisticRegressionModel,
)
from pyspark.ml.classification import (
LogisticRegressionSummary,
LogisticRegressionTrainingSummary,
)
from pyspark.ml.classification import (
RandomForestClassificationModel as SparkRandomForestClassificationModel,
)
from pyspark.ml.classification import (
RandomForestClassificationSummary,
_LogisticRegressionParams,
_RandomForestClassifierParams,
)
from pyspark.ml.linalg import DenseMatrix, Vector, Vectors
from pyspark.ml.param.shared import HasProbabilityCol, HasRawPredictionCol
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import col
from pyspark.sql.types import (
ArrayType,
DoubleType,
FloatType,
IntegerType,
IntegralType,
StringType,
StructField,
StructType,
)
from .core import (
CumlT,
FitInputType,
TransformInputType,
_ConstructFunc,
_CumlEstimatorSupervised,
_CumlModelWithPredictionCol,
_EvaluateFunc,
_TransformFunc,
alias,
param_alias,
pred,
transform_evaluate,
)
from .params import HasFeaturesCols, _CumlClass, _CumlParams
from .tree import (
_RandomForestClass,
_RandomForestCumlParams,
_RandomForestEstimator,
_RandomForestModel,
)
from .utils import (
PartitionDescriptor,
_ArrayOrder,
_concat_and_free,
_get_spark_session,
get_logger,
java_uid,
)
T = TypeVar("T")
class _RFClassifierParams(
_RandomForestClassifierParams, HasProbabilityCol, HasRawPredictionCol
):
def __init__(self, *args: Any):
super().__init__(*args)
def setProbabilityCol(
self: "_RFClassifierParams", value: str
) -> "_RFClassifierParams":
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
def setRawPredictionCol(
self: "_RFClassifierParams", value: str
) -> "_RFClassifierParams":
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
class _RandomForestClassifierClass(_RandomForestClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
mapping = super()._param_mapping()
mapping["rawPredictionCol"] = ""
return mapping
class RandomForestClassifier(
_RandomForestClassifierClass,
_RandomForestEstimator,
_RandomForestCumlParams,
_RFClassifierParams,
):
"""RandomForestClassifier implements a Random Forest classifier model which
fits multiple decision tree classifiers in an ensemble. It supports both
binary and multiclass labels. It implements cuML's GPU accelerated
RandomForestClassifier algorithm based on the cuML Python library,
and it can be used in PySpark Pipeline and PySpark ML meta algorithms like
:py:class:`~pyspark.ml.tuning.CrossValidator`,
:py:class:`~pyspark.ml.tuning.TrainValidationSplit`,
:py:class:`~pyspark.ml.classification.OneVsRest`.
The distributed algorithm uses an *embarrassingly-parallel* approach. For a
forest with `N` trees being built on `w` workers, each worker simply builds `N/w`
trees on the data it has available locally. In many cases, partitioning the
data so that each worker builds trees on a subset of the total dataset works
well, but it generally requires the data to be well-shuffled in advance.
RandomForestClassifier automatically supports most of the parameters from both
:py:class:`~pyspark.ml.classification.RandomForestClassifier`
and :py:class:`cuml.ensemble.RandomForestClassifier`. And it can automatically
map pyspark parameters to cuML parameters.
Parameters
----------
featuresCol:
The feature column names; spark-rapids-ml supports vector, array and columnar as the input.\n
* When the value is a string, the feature columns must be assembled into 1 column with vector or array type.
* When the value is a list of strings, the feature columns must be numeric types.
labelCol:
The label column name.
predictionCol:
The prediction column name.
probabilityCol:
The column name for predicted class conditional probabilities.
maxDepth:
Maximum tree depth. Must be greater than 0.
maxBins:
Maximum number of bins used by the split algorithm per feature.
minInstancesPerNode:
The minimum number of samples (rows) in each leaf node.
impurity: str = "gini",
The criterion used to split nodes.\n
* ``'gini'`` for gini impurity
* ``'entropy'`` for information gain (entropy)
numTrees:
Total number of trees in the forest.
featureSubsetStrategy:
Ratio of number of features (columns) to consider per node split.\n
The supported options:\n
``'auto'``: if numTrees == 1, set to 'all'; if numTrees > 1 (forest), set to 'sqrt'\n
``'all'``: use all features\n
``'onethird'``: use 1/3 of the features\n
``'sqrt'``: use sqrt(number of features)\n
``'log2'``: log2(number of features)\n
``'n'``: when n is in the range (0, 1.0], use n * number of features. When n
is in the range (1, number of features), use n features.
seed:
Seed for the random number generator.
bootstrap:
Control bootstrapping.\n
* If ``True``, each tree in the forest is built on a bootstrapped
sample with replacement.
* If ``False``, the whole dataset is used to build each tree.
num_workers:
Number of cuML workers, where each cuML worker corresponds to one Spark task
running on one GPU. If not set, spark-rapids-ml tries to infer the number of
cuML workers (i.e. GPUs in cluster) from the Spark environment.
verbose:
Logging level.
* ``0`` - Disables all log messages.
* ``1`` - Enables only critical messages.
* ``2`` - Enables all messages up to and including errors.
* ``3`` - Enables all messages up to and including warnings.
* ``4 or False`` - Enables all messages up to and including information messages.
* ``5 or True`` - Enables all messages up to and including debug messages.
* ``6`` - Enables all messages up to and including trace messages.
n_streams:
Number of parallel streams used for forest building.
Please note that there is a bug running spark-rapids-ml on a node with multi-gpus
when n_streams > 1. See https://github.com/rapidsai/cuml/issues/5402.
min_samples_split:
The minimum number of samples required to split an internal node.\n
* If type ``int``, then ``min_samples_split`` represents the minimum
number.
* If type ``float``, then ``min_samples_split`` represents a fraction
and ``ceil(min_samples_split * n_rows)`` is the minimum number of
samples for each split.
max_samples:
Ratio of dataset rows used while fitting each tree.
max_leaves:
Maximum leaf nodes per tree. Soft constraint. Unlimited, if -1.
min_impurity_decrease:
Minimum decrease in impurity required for node to be split.
max_batch_size:
Maximum number of nodes that can be processed in a given batch.
Examples
--------
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> from spark_rapids_ml.classification import RandomForestClassifier, RandomForestClassificationModel
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42)
>>> model = rf.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
RandomForestClassificationModel_...
>>> model.getBootstrap()
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>>
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model2.getNumTrees
3
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(
self,
*,
featuresCol: Union[str, List[str]] = "features",
labelCol: str = "label",
predictionCol: str = "prediction",
probabilityCol: str = "probability",
maxDepth: int = 5,
maxBins: int = 32,
minInstancesPerNode: int = 1,
impurity: str = "gini",
numTrees: int = 20,
featureSubsetStrategy: str = "auto",
seed: Optional[int] = None,
bootstrap: Optional[bool] = True,
num_workers: Optional[int] = None,
verbose: Union[int, bool] = False,
n_streams: int = 1,
min_samples_split: Union[int, float] = 2,
max_samples: float = 1.0,
max_leaves: int = -1,
min_impurity_decrease: float = 0.0,
max_batch_size: int = 4096,
**kwargs: Any,
):
super().__init__(**self._input_kwargs)
def _pre_process_label(
self, dataset: DataFrame, feature_type: Union[Type[FloatType], Type[DoubleType]]
) -> Column:
"""Cuml RandomForestClassifier requires the int32 type of label column"""
label_name = self.getLabelCol()
label_datatype = dataset.schema[label_name].dataType
if isinstance(label_datatype, (IntegralType, FloatType, DoubleType)):
label_col = col(label_name).cast(IntegerType()).alias(alias.label)
else:
raise ValueError(
"Label column must be integral types or float/double types."
)
return label_col
def _create_pyspark_model(self, result: Row) -> "RandomForestClassificationModel":
return RandomForestClassificationModel.from_row(result)
def _is_classification(self) -> bool:
return True
def _supportsTransformEvaluate(self, evaluator: Evaluator) -> bool:
if (
isinstance(evaluator, MulticlassClassificationEvaluator)
and evaluator.getMetricName()
in MulticlassMetrics.SUPPORTED_MULTI_CLASS_METRIC_NAMES
):
return True
return False
class RandomForestClassificationModel(
_RandomForestClassifierClass,
_RandomForestModel,
_RandomForestCumlParams,
_RFClassifierParams,
):
"""
Model fitted by :class:`RandomForestClassifier`.
"""
def __init__(
self,
n_cols: int,
dtype: str,
treelite_model: Union[str, List[str]],
model_json: Union[List[str], List[List[str]]],
num_classes: int,
):
super().__init__(
dtype=dtype,
n_cols=n_cols,
treelite_model=treelite_model,
model_json=model_json,
num_classes=num_classes,
)
self._num_classes = num_classes
self._model_json = model_json
self._rf_spark_model: Optional[SparkRandomForestClassificationModel] = None
def cpu(self) -> SparkRandomForestClassificationModel:
"""Return the PySpark ML RandomForestClassificationModel"""
if self._rf_spark_model is None:
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
uid, java_trees = self._convert_to_java_trees(self.getImpurity())
# Create the Spark RandomForestClassificationModel
java_rf_model = sc._jvm.org.apache.spark.ml.classification.RandomForestClassificationModel(
uid,
java_trees,
self.numFeatures,
self._num_classes,
)
self._rf_spark_model = SparkRandomForestClassificationModel(java_rf_model)
self._copyValues(self._rf_spark_model)
return self._rf_spark_model
def _is_classification(self) -> bool:
return True
@property
def hasSummary(self) -> bool:
"""Indicates whether a training summary exists for this model instance."""
return False
@property
def numClasses(self) -> int:
"""Number of classes (values which the label can take)."""
return self._num_classes
def predictRaw(self, value: Vector) -> Vector:
"""
Raw prediction for each possible label.
"""
return self.cpu().predictRaw(value)
def predictProbability(self, value: Vector) -> Vector:
"""
Predict the probability of each class given the features.
"""
return self.cpu().predictProbability(value)
def evaluate(
self, dataset: DataFrame
) -> Union[
BinaryRandomForestClassificationSummary, RandomForestClassificationSummary
]:
"""
Evaluates the model on a test dataset.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
return self.cpu().evaluate(dataset)
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
_construct_rf, _, _ = super()._get_cuml_transform_func(dataset)
def _predict(rf: CumlT, pdf: TransformInputType) -> pd.Series:
data = {}
rf.update_labels = False
data[pred.prediction] = rf.predict(pdf)
if category == transform_evaluate.transform:
# Only compute probabilities for plain transform; transform_evaluate (e.g. f1 score) doesn't need them.
probs = rf.predict_proba(pdf)
if isinstance(probs, pd.DataFrame):
# For cuML 23.02, when the input has multiple columns, the output is a pandas DataFrame.
data[pred.probability] = pd.Series(probs.values.tolist())
else:
# should be np.ndarray
data[pred.probability] = pd.Series(list(probs))
return pd.DataFrame(data)
def _evaluate(
input: TransformInputType,
transformed: TransformInputType,
) -> pd.DataFrame:
# calculate the count of (label, prediction)
comb = pd.DataFrame(
{
"label": input[alias.label],
"prediction": transformed[pred.prediction],
}
)
confusion = (
comb.groupby(["label", "prediction"]).size().reset_index(name="total")
)
return confusion
return _construct_rf, _predict, _evaluate
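# Illustrative shape of the confusion DataFrame returned by _evaluate above (counts are made up):
#   label  prediction  total
#     0.0         0.0     40
#     0.0         1.0      3
#     1.0         1.0     57
# Each row counts how many samples with a given label received a given prediction.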
def _transformEvaluate(
self,
dataset: DataFrame,
evaluator: Evaluator,
params: Optional["ParamMap"] = None,
) -> List[float]:
"""
Transforms and evaluates the input dataset with optional parameters in a single pass.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
a dataset that contains labels/observations and predictions
evaluator: :py:class:`pyspark.ml.evaluation.Evaluator`
an evaluator user intends to use
params : dict, optional
an optional param map that overrides embedded params
Returns
-------
list of float
metrics
"""
if not isinstance(evaluator, MulticlassClassificationEvaluator):
raise NotImplementedError(f"{evaluator} is unsupported yet.")
if (
evaluator.getMetricName()
not in MulticlassMetrics.SUPPORTED_MULTI_CLASS_METRIC_NAMES
):
raise NotImplementedError(
f"{evaluator.getMetricName()} is not supported yet."
)
if self.getLabelCol() not in dataset.schema.names:
raise RuntimeError("Label column is not existing.")
dataset = dataset.withColumnRenamed(self.getLabelCol(), alias.label)
schema = StructType(
[
StructField(pred.model_index, IntegerType()),
StructField("label", FloatType()),
StructField("prediction", FloatType()),
StructField("total", FloatType()),
]
)
rows = super()._transform_evaluate_internal(dataset, schema).collect()
num_models = (
len(self._treelite_model) if isinstance(self._treelite_model, list) else 1
)
tp_by_class: List[Dict[float, float]] = [{} for _ in range(num_models)]
fp_by_class: List[Dict[float, float]] = [{} for _ in range(num_models)]
label_count_by_class: List[Dict[float, float]] = [{} for _ in range(num_models)]
label_count = [0 for _ in range(num_models)]
for i in range(num_models):
for j in range(self._num_classes):
tp_by_class[i][float(j)] = 0.0
label_count_by_class[i][float(j)] = 0.0
fp_by_class[i][float(j)] = 0.0
for row in rows:
label_count[row.model_index] += row.total
label_count_by_class[row.model_index][row.label] += row.total
if row.label == row.prediction:
tp_by_class[row.model_index][row.label] += row.total
else:
fp_by_class[row.model_index][row.prediction] += row.total
scores = []
for i in range(num_models):
metrics = MulticlassMetrics(
tp=tp_by_class[i],
fp=fp_by_class[i],
label=label_count_by_class[i],
label_count=label_count[i],
)
scores.append(metrics.evaluate(evaluator))
return scores
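# A minimal sketch (not library code) of how an accuracy-style score falls out of the counts
# aggregated above for model i; MulticlassMetrics.evaluate performs the real metric selection:
#   accuracy_i = sum(tp_by_class[i].values()) / label_count[i]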
class LogisticRegressionClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {
"maxIter": "max_iter",
"regParam": "C", # regParam = 1/C
"tol": "tol",
"fitIntercept": "fit_intercept",
"elasticNetParam": None,
"threshold": None,
"thresholds": None,
"standardization": "", # Set to "" instead of None because cuml defaults to standardization = False
"weightCol": None,
"aggregationDepth": None,
"family": "", # family can be 'auto', 'binomial' or 'multinomial', cuml automatically detects num_classes
"lowerBoundsOnCoefficients": None,
"upperBoundsOnCoefficients": None,
"lowerBoundsOnIntercepts": None,
"upperBoundsOnIntercepts": None,
"maxBlockSizeInMB": None,
"rawPredictionCol": "",
}
@classmethod
def _param_value_mapping(
cls,
) -> Dict[str, Callable[[Any], Union[None, str, float, int]]]:
def regParam_value_mapper(x: float) -> float:
# TODO: remove this check and pass regParam of 0.0 through once cuML supports disabling regularization
if x == 0.0:
logger = get_logger(cls)
logger.warning(
"no regularization is not supported yet. if regParam is set to 0,"
+ "it will be mapped to smallest positive float, i.e. numpy.finfo('float32').tiny"
)
return 1.0 / np.finfo("float32").tiny.item()
else:
return 1.0 / x
return {"C": lambda x: regParam_value_mapper(x)}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {
"fit_intercept": True,
"verbose": False,
"C": 1.0,
"max_iter": 1000,
"tol": 0.0001,
}
class _LogisticRegressionCumlParams(
_CumlParams,
_LogisticRegressionParams,
HasFeaturesCols,
HasProbabilityCol,
HasRawPredictionCol,
):
def getFeaturesCol(self) -> Union[str, List[str]]: # type:ignore
"""
Gets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`
"""
if self.isDefined(self.featuresCols):
return self.getFeaturesCols()
elif self.isDefined(self.featuresCol):
return self.getOrDefault("featuresCol")
else:
raise RuntimeError("featuresCol is not set")
def setFeaturesCol(
self: "_LogisticRegressionCumlParams", value: Union[str, List[str]]
) -> "_LogisticRegressionCumlParams":
"""
Sets the value of :py:attr:`featuresCol` or :py:attr:`featureCols`.
"""
if isinstance(value, str):
self.set_params(featuresCol=value)
else:
self.set_params(featuresCols=value)
return self
def setFeaturesCols(
self: "_LogisticRegressionCumlParams", value: List[str]
) -> "_LogisticRegressionCumlParams":
"""
Sets the value of :py:attr:`featuresCols`.
"""
return self.set_params(featuresCols=value)
def setLabelCol(
self: "_LogisticRegressionCumlParams", value: str
) -> "_LogisticRegressionCumlParams":
"""
Sets the value of :py:attr:`labelCol`.
"""
return self.set_params(labelCol=value)
def setPredictionCol(
self: "_LogisticRegressionCumlParams", value: str
) -> "_LogisticRegressionCumlParams":
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self.set_params(predictionCol=value)
def setProbabilityCol(
self: "_LogisticRegressionCumlParams", value: str
) -> "_LogisticRegressionCumlParams":
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self.set_params(probabilityCol=value)
def setRawPredictionCol(
self: "_LogisticRegressionCumlParams", value: str
) -> "_LogisticRegressionCumlParams":
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
class LogisticRegression(
LogisticRegressionClass,
_CumlEstimatorSupervised,
_LogisticRegressionCumlParams,
):
"""LogisticRegression is a machine learning model where the response y is modeled
by the sigmoid (or softmax for more than 2 classes) function applied to a linear
combination of the features in X. It implements cuML's GPU accelerated
LogisticRegression algorithm based on cuML python library, and it can be used in
PySpark Pipeline and PySpark ML meta algorithms like
:py:class:`~pyspark.ml.tuning.CrossValidator`/
:py:class:`~pyspark.ml.tuning.TrainValidationSplit`/
:py:class:`~pyspark.ml.classification.OneVsRest`
This currently supports the regularization options:
* none
* L2 (ridge regression)
and two classes.
LogisticRegression automatically supports most of the parameters from both
:py:class:`~pyspark.ml.classification.LogisticRegression`.
And it will automatically map pyspark parameters
to cuML parameters.
Parameters
----------
featuresCol:
The feature column names, spark-rapids-ml supports vector, array and columnar as the input.\n
* When the value is a string, the feature columns must be assembled into 1 column with vector or array type.
* When the value is a list of strings, the feature columns must be numeric types.
labelCol:
The label column name.
predictionCol:
The class prediction column name.
probabilityCol:
The probability prediction column name.
maxIter:
The maximum number of iterations of the underlying L-BFGS algorithm.
regParam:
The regularization parameter.
tol:
The convergence tolerance.
fitIntercept:
Whether to fit an intercept term.
num_workers:
Number of cuML workers, where each cuML worker corresponds to one Spark task
running on one GPU. If not set, spark-rapids-ml tries to infer the number of
cuML workers (i.e. GPUs in cluster) from the Spark environment.
verbose:
Logging level.
* ``0`` - Disables all log messages.
* ``1`` - Enables only critical messages.
* ``2`` - Enables all messages up to and including errors.
* ``3`` - Enables all messages up to and including warnings.
* ``4 or False`` - Enables all messages up to and including information messages.
* ``5 or True`` - Enables all messages up to and including debug messages.
* ``6`` - Enables all messages up to and including trace messages.
Examples
--------
>>> from spark_rapids_ml.classification import LogisticRegression
>>> data = [
... ([1.0, 2.0], 1.0),
... ([1.0, 3.0], 1.0),
... ([2.0, 1.0], 0.0),
... ([3.0, 1.0], 0.0),
... ]
>>> schema = "features array<float>, label float"
>>> df = spark.createDataFrame(data, schema=schema)
>>> df.show()
+----------+-----+
| features|label|
+----------+-----+
|[1.0, 2.0]| 1.0|
|[1.0, 3.0]| 1.0|
|[2.0, 1.0]| 0.0|
|[3.0, 1.0]| 0.0|
+----------+-----+
>>> lr_estimator = LogisticRegression()
>>> lr_estimator.setFeaturesCol("features")
LogisticRegression_a757215437b0
>>> lr_estimator.setLabelCol("label")
LogisticRegression_a757215437b0
>>> lr_model = lr_estimator.fit(df)
>>> lr_model.coefficients
DenseVector([-0.7148, 0.7148])
>>> lr_model.intercept
-8.543887375367376e-09
"""
@keyword_only
def __init__(
self,
*,
featuresCol: Union[str, List[str]] = "features",
labelCol: str = "label",
predictionCol: str = "prediction",
probabilityCol: str = "probability",
maxIter: int = 100,
regParam: float = 0.0, # NOTE: the default value of regParam is actually mapped to sys.float_info.min on GPU
tol: float = 1e-6,
fitIntercept: bool = True,
num_workers: Optional[int] = None,
verbose: Union[int, bool] = False,
**kwargs: Any,
):
if not self._input_kwargs.get("float32_inputs", True):
get_logger(self.__class__).warning(
"This estimator does not support double precision inputs. Setting float32_inputs to False will be ignored."
)
self._input_kwargs.pop("float32_inputs")
super().__init__()
self.set_params(**self._input_kwargs)
def _fit_array_order(self) -> _ArrayOrder:
return "C"
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
array_order = self._fit_array_order()
def _logistic_regression_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
init_parameters = params[param_alias.cuml_init]
from cuml.linear_model.logistic_regression_mg import LogisticRegressionMG
logistic_regression = LogisticRegressionMG(
handle=params[param_alias.handle],
**init_parameters,
)
logistic_regression.penalty_normalized = False
logistic_regression.lbfgs_memory = 10
X_list = [x for (x, _, _) in dfs]
y_list = [y for (_, y, _) in dfs]
if isinstance(X_list[0], pd.DataFrame):
concated = pd.concat(X_list)
concated_y = pd.concat(y_list)
else:
# features are either cp or np arrays here
concated = _concat_and_free(X_list, order=array_order)
concated_y = _concat_and_free(y_list, order=array_order)
pdesc = PartitionDescriptor.build(
[concated.shape[0]], params[param_alias.num_cols]
)
logistic_regression.fit(
[(concated, concated_y)],
pdesc.m,
pdesc.n,
pdesc.parts_rank_size,
pdesc.rank,
)
return {
"coef_": [logistic_regression.coef_.tolist()],
"intercept_": [logistic_regression.intercept_.tolist()],
"n_cols": [logistic_regression.n_cols],
"dtype": [logistic_regression.dtype.name],
}
return _logistic_regression_fit
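# Illustrative only: the dictionary returned by _logistic_regression_fit becomes one row of the
# driver-side fit result, e.g. (numbers are made up)
#   {"coef_": [[[-0.71, 0.71]]], "intercept_": [[-8.5e-09]], "n_cols": [2], "dtype": ["float32"]}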
def _pre_process_data(
self, dataset: DataFrame
) -> Tuple[
List[Column], Optional[List[str]], int, Union[Type[FloatType], Type[DoubleType]]
]:
(
select_cols,
multi_col_names,
dimension,
feature_type,
) = super()._pre_process_data(dataset)
return select_cols, multi_col_names, dimension, feature_type
def _out_schema(self) -> Union[StructType, str]:
return StructType(
[
StructField("coef_", ArrayType(ArrayType(DoubleType()), False), False),
StructField("intercept_", ArrayType(DoubleType()), False),
StructField("n_cols", IntegerType(), False),
StructField("dtype", StringType(), False),
]
)
def _create_pyspark_model(self, result: Row) -> "LogisticRegressionModel":
return LogisticRegressionModel.from_row(result)
def setMaxIter(self, value: int) -> "LogisticRegression":
"""
Sets the value of :py:attr:`maxIter`.
"""
return self.set_params(maxIter=value)
def setRegParam(self, value: float) -> "LogisticRegression":
"""
Sets the value of :py:attr:`regParam`.
"""
return self.set_params(regParam=value)
def setTol(self, value: float) -> "LogisticRegression":
"""
Sets the value of :py:attr:`tol`.
"""
return self.set_params(tol=value)
def setFitIntercept(self, value: bool) -> "LogisticRegression":
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self.set_params(fitIntercept=value)
class LogisticRegressionModel(
LogisticRegressionClass,
_CumlModelWithPredictionCol,
_LogisticRegressionCumlParams,
):
"""Model fitted by :class:`LogisticRegression`."""
def __init__(
self,
coef_: List[List[float]],
intercept_: List[float],
n_cols: int,
dtype: str,
) -> None:
super().__init__(dtype=dtype, n_cols=n_cols, coef_=coef_, intercept_=intercept_)
self.coef_ = coef_
self.intercept_ = intercept_
self._lr_spark_model: Optional[SparkLogisticRegressionModel] = None
def cpu(self) -> SparkLogisticRegressionModel:
"""Return the PySpark ML LogisticRegressionModel"""
if self._lr_spark_model is None:
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
# TODO Multinomial is not supported yet.
num_classes = 2
is_multinomial = False
num_coefficient_sets = 1
coefficients = self.coef_[0]
assert self.n_cols is not None
coefficients_dmatrix = DenseMatrix(
num_coefficient_sets, self.n_cols, list(coefficients), True
)
intercepts = Vectors.dense(self.intercept)
java_model = (
sc._jvm.org.apache.spark.ml.classification.LogisticRegressionModel(
java_uid(sc, "logreg"),
_py2java(sc, coefficients_dmatrix),
_py2java(sc, intercepts),
num_classes,
is_multinomial,
)
)
self._lr_spark_model = SparkLogisticRegressionModel(java_model)
self._copyValues(self._lr_spark_model)
return self._lr_spark_model
@property
def coefficients(self) -> Vector:
"""
Model coefficients.
"""
assert len(self.coef_) == 1, "multiple classes are not supported yet"
return Vectors.dense(cast(list, self.coef_[0]))
@property
def intercept(self) -> float:
"""
Model intercept.
"""
assert len(self.intercept_) == 1, "multiple classes are not supported yet"
return self.intercept_[0]
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
coef_ = self.coef_
intercept_ = self.intercept_
n_cols = self.n_cols
dtype = self.dtype
def _construct_lr() -> CumlT:
import numpy as np
from cuml.internals.input_utils import input_to_cuml_array
from cuml.linear_model.logistic_regression_mg import LogisticRegressionMG
lr = LogisticRegressionMG(output_type="numpy")
lr.n_cols = n_cols
lr.dtype = np.dtype(dtype)
lr.intercept_ = input_to_cuml_array(
np.array(intercept_, order="C").astype(dtype)
).array
lr.coef_ = input_to_cuml_array(
np.array(coef_, order="C").astype(dtype)
).array
# TBD: infer class indices from data for > 2 classes
# needed for predict_proba
lr.classes_ = input_to_cuml_array(
np.array([0, 1], order="F").astype(dtype)
).array
return lr
def _predict(lr: CumlT, pdf: TransformInputType) -> pd.DataFrame:
data = {}
data[pred.prediction] = lr.predict(pdf)
probs = lr.predict_proba(pdf)
if isinstance(probs, pd.DataFrame):
data[pred.probability] = pd.Series(probs.values.tolist())
else:
# should be np.ndarray
data[pred.probability] = pd.Series(list(probs))
return pd.DataFrame(data)
return _construct_lr, _predict, None
@property
def hasSummary(self) -> bool:
"""
Indicates whether a training summary exists for this model
instance.
"""
return False
@property
def summary(self) -> "LogisticRegressionTrainingSummary":
"""
Gets summary (accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
raise RuntimeError(
"No training summary available for this %s" % self.__class__.__name__
)
def predict(self, value: Vector) -> float:
"""cuML doesn't support predicting 1 single sample.
Fall back to PySpark ML LogisticRegressionModel"""
return self.cpu().predict(value)
def evaluate(self, dataset: DataFrame) -> LogisticRegressionSummary:
"""cuML doesn't support evaluating.
Fall back to PySpark ML LogisticRegressionModel"""
return self.cpu().evaluate(dataset)
def predictRaw(self, value: Vector) -> Vector:
"""
Raw prediction for each possible label.
Fall back to PySpark ML LogisticRegressionModel
"""
return self.cpu().predictRaw(value)
def predictProbability(self, value: Vector) -> Vector:
"""
Predict the probability of each class given the features.
Fall back to PySpark ML LogisticRegressionModel
"""
return self.cpu().predictProbability(value)
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/classification.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = "23.8.0"
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/__init__.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import threading
from abc import abstractmethod
from collections import namedtuple
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generic,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
from pyspark import RDD, TaskContext
from pyspark.ml import Estimator, Model
from pyspark.ml.evaluation import Evaluator
from pyspark.ml.functions import array_to_vector, vector_to_array
from pyspark.ml.linalg import VectorUDT
from pyspark.ml.param.shared import (
HasLabelCol,
HasOutputCol,
HasPredictionCol,
HasProbabilityCol,
)
from pyspark.ml.util import (
DefaultParamsReader,
DefaultParamsWriter,
MLReadable,
MLReader,
MLWritable,
MLWriter,
)
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import col, struct
from pyspark.sql.pandas.functions import pandas_udf
from pyspark.sql.types import (
ArrayType,
DoubleType,
FloatType,
IntegralType,
Row,
StructType,
)
from .common.cuml_context import CumlContext
from .params import _CumlParams
from .utils import (
_ArrayOrder,
_get_gpu_id,
_get_spark_session,
_is_local,
_is_standalone_or_localcluster,
dtype_to_pyspark_type,
get_logger,
)
if TYPE_CHECKING:
import cudf
from pyspark.ml._typing import ParamMap
CumlT = Any
_SinglePdDataFrameBatchType = Tuple[
pd.DataFrame, Optional[pd.DataFrame], Optional[pd.DataFrame]
]
_SingleNpArrayBatchType = Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray]]
# FitInputType is type of [(feature, label), ...]
FitInputType = Union[List[_SinglePdDataFrameBatchType], List[_SingleNpArrayBatchType]]
# TransformInput type
TransformInputType = Union["cudf.DataFrame", np.ndarray]
# Function to construct cuml instances on the executor side
_ConstructFunc = Callable[..., Union[CumlT, List[CumlT]]]
# Function to do the inference using cuml instance constructed by _ConstructFunc
_TransformFunc = Callable[[CumlT, TransformInputType], pd.DataFrame]
# Function to do evaluation based on the prediction result got from _TransformFunc
_EvaluateFunc = Callable[
[
TransformInputType, # input dataset with label column
TransformInputType, # inferred dataset with prediction column
],
pd.DataFrame,
]
# Global constant for defining column alias
Alias = namedtuple("Alias", ("data", "label", "row_number"))
alias = Alias("cuml_values", "cuml_label", "unique_id")
# Global prediction names
Pred = namedtuple("Pred", ("prediction", "probability", "model_index"))
pred = Pred("prediction", "probability", "model_index")
# Global parameter alias used by core and subclasses.
ParamAlias = namedtuple(
"ParamAlias",
("cuml_init", "handle", "num_cols", "part_sizes", "loop", "fit_multiple_params"),
)
param_alias = ParamAlias(
"cuml_init", "handle", "num_cols", "part_sizes", "loop", "fit_multiple_params"
)
CumlModel = TypeVar("CumlModel", bound="_CumlModel")
# Global parameter used by core and subclasses.
TransformEvaluate = namedtuple("TransformEvaluate", ("transform", "transform_evaluate"))
transform_evaluate = TransformEvaluate("transform", "transform_evaluate")
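# Illustrative only: alias.data == "cuml_values" and pred.prediction == "prediction", so a transformed
# pandas batch produced by a _TransformFunc typically looks like
#   pd.DataFrame({pred.prediction: [...], pred.probability: [...]})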
class _CumlEstimatorWriter(MLWriter):
"""
Write the parameters of _CumlEstimator to the file
"""
def __init__(self, instance: "_CumlEstimator") -> None:
super().__init__()
self.instance = instance
def saveImpl(self, path: str) -> None:
DefaultParamsWriter.saveMetadata(
self.instance,
path,
self.sc,
extraMetadata={
"_cuml_params": self.instance._cuml_params,
"_num_workers": self.instance._num_workers,
"_float32_inputs": self.instance._float32_inputs,
},
) # type: ignore
class _CumlEstimatorReader(MLReader):
"""
Instantiate the _CumlEstimator from the file.
"""
def __init__(self, cls: Type) -> None:
super().__init__()
self.estimator_cls = cls
def load(self, path: str) -> "_CumlEstimator":
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
cuml_estimator = self.estimator_cls()
cuml_estimator._resetUid(metadata["uid"])
DefaultParamsReader.getAndSetParams(cuml_estimator, metadata)
cuml_estimator._cuml_params = metadata["_cuml_params"]
cuml_estimator._num_workers = metadata["_num_workers"]
cuml_estimator._float32_inputs = metadata["_float32_inputs"]
return cuml_estimator
class _CumlModelWriter(MLWriter):
"""
Write the parameters of _CumlModel to the file
"""
def __init__(self, instance: "_CumlModel") -> None:
super().__init__()
self.instance: "_CumlModel" = instance
def saveImpl(self, path: str) -> None:
DefaultParamsWriter.saveMetadata(
self.instance,
path,
self.sc,
extraMetadata={
"_cuml_params": self.instance._cuml_params,
"_num_workers": self.instance._num_workers,
"_float32_inputs": self.instance._float32_inputs,
},
)
data_path = os.path.join(path, "data")
model_attributes = self.instance.get_model_attributes()
model_attributes_str = json.dumps(model_attributes)
self.sc.parallelize([model_attributes_str], 1).saveAsTextFile(data_path)
class _CumlModelReader(MLReader):
"""
Instantiate the _CumlModel from the file.
"""
def __init__(self, cls: Type) -> None:
super().__init__()
self.model_cls = cls
def load(self, path: str) -> "_CumlEstimator":
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
data_path = os.path.join(path, "data")
model_attr_str = self.sc.textFile(data_path).collect()[0]
model_attr_dict = json.loads(model_attr_str)
instance = self.model_cls(**model_attr_dict)
DefaultParamsReader.getAndSetParams(instance, metadata)
instance._cuml_params = metadata["_cuml_params"]
instance._num_workers = metadata["_num_workers"]
instance._float32_inputs = metadata["_float32_inputs"]
return instance
class _CumlCommon(MLWritable, MLReadable):
def __init__(self) -> None:
super().__init__()
@staticmethod
def set_gpu_device(
context: Optional[TaskContext], is_local: bool, is_transform: bool = False
) -> None:
"""
Set gpu device according to the spark task resources.
In local mode, we use the partition id as the gpu id for training
and (partition id) % num_gpus for transform.
"""
# Get the GPU ID from resources
assert context is not None
import cupy
if is_local:
partition_id = context.partitionId()
if is_transform:
# For transform in local mode, default the gpu_id to (partition id) % num_gpus.
total_gpus = cupy.cuda.runtime.getDeviceCount()
gpu_id = partition_id % total_gpus
else:
gpu_id = partition_id
else:
gpu_id = _get_gpu_id(context)
cupy.cuda.Device(gpu_id).use()
@staticmethod
def initialize_cuml_logging(verbose: Optional[Union[bool, int]]) -> None:
"""Initializes the logger for cuML.
Parameters
----------
verbose : Optional[Union[bool, int]]
If True, sets the log_level to 5. If integer value, sets the log_level to the value.
"""
if verbose is not None:
from cuml.common import logger as cuml_logger
# below is from https://docs.rapids.ai/api/cuml/stable/api.html#verbosity-levels
if isinstance(verbose, bool):
if verbose:
log_level = 5
else:
log_level = 4
elif isinstance(verbose, int):
log_level = verbose
else:
raise ValueError(f"invalid value for verbose parameter: {verbose}")
cuml_logger.set_level(log_level)
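# Examples of the mapping implemented above: verbose=True -> level 5 (debug), verbose=False -> level 4
# (info), verbose=2 -> level 2 (errors and critical messages only), verbose=None -> leave cuML's default.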
class _CumlCaller(_CumlParams, _CumlCommon):
"""
This class is responsible for calling a cuml function (e.g. fit or kneighbors) on a pyspark dataframe,
to run a multi-node multi-gpu algorithm on the dataframe. The function usually comes from a multi-gpu cuml class,
such as cuml.decomposition.pca_mg.PCAMG or cuml.neighbors.nearest_neighbors_mg.NearestNeighborsMG.
This class converts the dataframe into the cuml input type, and leverages NCCL or UCX for the communicator. To use this class,
developers can override the key methods, including _out_schema(...) and _get_cuml_fit_func(...). Examples can be found in
spark_rapids_ml.clustering.KMeans and spark_rapids_ml.regression.LinearRegression.
"""
def __init__(self) -> None:
super().__init__()
self.initialize_cuml_params()
@abstractmethod
def _out_schema(self) -> Union[StructType, str]:
"""
The output schema of the estimator, which will be used to
construct the returning pandas dataframe
"""
raise NotImplementedError()
def _repartition_dataset(self, dataset: DataFrame) -> DataFrame:
"""
Repartition the dataset to the desired number of workers.
"""
return dataset.repartition(self.num_workers)
def _pre_process_data(
self, dataset: DataFrame
) -> Tuple[
List[Column],
Optional[List[str]],
int,
Union[Type[FloatType], Type[DoubleType]],
]:
select_cols = []
# label column will be cast to feature type if needed.
feature_type: Union[Type[FloatType], Type[DoubleType]] = FloatType
input_col, input_cols = self._get_input_columns()
if input_col is not None:
# Single Column
input_datatype = dataset.schema[input_col].dataType
if isinstance(input_datatype, ArrayType):
# Array type
if (
isinstance(input_datatype.elementType, DoubleType)
and not self._float32_inputs
):
select_cols.append(col(input_col).alias(alias.data))
feature_type = DoubleType
elif (
isinstance(input_datatype.elementType, DoubleType)
and self._float32_inputs
):
select_cols.append(
col(input_col).cast(ArrayType(feature_type())).alias(alias.data)
)
else:
# FloatType array
select_cols.append(col(input_col).alias(alias.data))
elif isinstance(input_datatype, VectorUDT):
# Vector type
vector_element_type = "float32" if self._float32_inputs else "float64"
select_cols.append(
vector_to_array(col(input_col), vector_element_type).alias(alias.data) # type: ignore
)
if not self._float32_inputs:
feature_type = DoubleType
else:
raise ValueError("Unsupported input type.")
dimension = len(dataset.first()[input_col]) # type: ignore
elif input_cols is not None:
# if self._float32_inputs is False and if any columns are double type, convert all to double type
# otherwise convert all to float type
any_double_types = any(
[isinstance(dataset.schema[c].dataType, DoubleType) for c in input_cols]
)
if not self._float32_inputs and any_double_types:
feature_type = DoubleType
dimension = len(input_cols)
for c in input_cols:
col_type = dataset.schema[c].dataType
if (
isinstance(col_type, IntegralType)
or isinstance(col_type, FloatType)
or isinstance(col_type, DoubleType)
):
if not isinstance(col_type, feature_type):
select_cols.append(col(c).cast(feature_type()).alias(c))
else:
select_cols.append(col(c))
else:
raise ValueError(
"All columns must be integral types or float/double types."
)
else:
# should never get here
raise Exception("Unable to determine input column(s).")
return select_cols, input_cols, dimension, feature_type
def _require_nccl_ucx(self) -> Tuple[bool, bool]:
"""
Whether to enable the communication layers (NCCL and/or UCX).
Return (False, False) if no communication layer is required.
Return (True, False) if only NCCL is required.
Return (True, True) if UCX is required. Cuml UCX backend currently also requires NCCL.
"""
return (True, False)
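# Hypothetical override sketch (illustrative only) for a subclass whose cuml counterpart also needs UCX:
#   def _require_nccl_ucx(self) -> Tuple[bool, bool]:
#       return (True, True)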
@abstractmethod
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
"""
Subclass must implement this function to return a cuml fit function that will be
sent to executor to run.
Eg,
def _get_cuml_fit_func(self, dataset: DataFrame, extra_params: Optional[List[Dict[str, Any]]] = None):
...
def _cuml_fit(df: CumlInputType, params: Dict[str, Any]) -> Dict[str, Any]:
"" "
df: a sequence of (X, Y)
params: a series of parameters stored in dictionary,
especially, the parameters of __init__ is stored in params[param_alias.init]
"" "
...
...
return _cuml_fit
_get_cuml_fit_func itself runs on the driver side, while the returned _cuml_fit will
run on the executor side.
"""
raise NotImplementedError()
def _call_cuml_fit_func(
self,
dataset: DataFrame,
partially_collect: bool = True,
paramMaps: Optional[Sequence["ParamMap"]] = None,
) -> RDD:
"""
Fits a model to the input dataset. This is called by the default implementation of fit.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
Returns
-------
:class:`Transformer`
fitted model
"""
cls = self.__class__
select_cols, multi_col_names, dimension, _ = self._pre_process_data(dataset)
num_workers = self.num_workers
dataset = dataset.select(*select_cols)
if dataset.rdd.getNumPartitions() != num_workers:
dataset = self._repartition_dataset(dataset)
is_local = _is_local(_get_spark_session().sparkContext)
cuda_managed_mem_enabled = (
_get_spark_session().conf.get("spark.rapids.ml.uvm.enabled", "false")
== "true"
)
if cuda_managed_mem_enabled:
get_logger(cls).info("CUDA managed memory enabled.")
# parameters passed to subclass
params: Dict[str, Any] = {
param_alias.cuml_init: self.cuml_params,
}
# Convert the paramMaps into cuml paramMaps
fit_multiple_params = []
if paramMaps is not None:
for paramMap in paramMaps:
tmp_fit_multiple_params = {}
for k, v in paramMap.items():
name = self._get_cuml_param(k.name, False)
assert name is not None
tmp_fit_multiple_params[name] = self._get_cuml_mapping_value(
name, v
)
fit_multiple_params.append(tmp_fit_multiple_params)
params[param_alias.fit_multiple_params] = fit_multiple_params
cuml_fit_func = self._get_cuml_fit_func(
dataset, None if len(fit_multiple_params) == 0 else fit_multiple_params
)
array_order = self._fit_array_order()
cuml_verbose = self.cuml_params.get("verbose", False)
(enable_nccl, require_ucx) = self._require_nccl_ucx()
def _train_udf(pdf_iter: Iterator[pd.DataFrame]) -> pd.DataFrame:
from pyspark import BarrierTaskContext
logger = get_logger(cls)
logger.info("Initializing cuml context")
import cupy as cp
if cuda_managed_mem_enabled:
import rmm
from rmm.allocators.cupy import rmm_cupy_allocator
rmm.reinitialize(managed_memory=True)
cp.cuda.set_allocator(rmm_cupy_allocator)
_CumlCommon.initialize_cuml_logging(cuml_verbose)
context = BarrierTaskContext.get()
partition_id = context.partitionId()
# set gpu device
_CumlCommon.set_gpu_device(context, is_local)
with CumlContext(
partition_id, num_workers, context, enable_nccl, require_ucx
) as cc:
# handle the input
# inputs = [(X, Optional(y)), (X, Optional(y))]
logger.info("Loading data into python worker memory")
inputs = []
sizes = []
for pdf in pdf_iter:
sizes.append(pdf.shape[0])
if multi_col_names:
features = np.array(pdf[multi_col_names], order=array_order)
else:
features = np.array(list(pdf[alias.data]), order=array_order)
# experiments indicate it is faster to convert to numpy array and then to cupy array than directly
# invoking cupy array on the list
if cuda_managed_mem_enabled:
features = cp.array(features)
label = pdf[alias.label] if alias.label in pdf.columns else None
row_number = (
pdf[alias.row_number]
if alias.row_number in pdf.columns
else None
)
inputs.append((features, label, row_number))
if len(sizes) == 0 or all(sz == 0 for sz in sizes):
raise RuntimeError(
"A python worker received no data. Please increase amount of data or use fewer workers."
)
params[param_alias.handle] = cc.handle
params[param_alias.part_sizes] = sizes
params[param_alias.num_cols] = dimension
params[param_alias.loop] = cc._loop
logger.info("Invoking cuml fit")
# call the cuml fit function
# *note*: cuml_fit_func may delete components of inputs to free
# memory. do not rely on inputs after this call.
result = cuml_fit_func(inputs, params)
logger.info("Cuml fit complete")
if partially_collect == True:
if enable_nccl:
context.barrier()
if context.partitionId() == 0:
yield pd.DataFrame(data=result)
else:
yield pd.DataFrame(data=result)
pipelined_rdd = (
dataset.mapInPandas(_train_udf, schema=self._out_schema()) # type: ignore
.rdd.barrier()
.mapPartitions(lambda x: x)
)
return pipelined_rdd
def _fit_array_order(self) -> _ArrayOrder:
"""
preferred array order for converting single column array type to numpy arrays: "C" or "F"
"""
return "F"
class _FitMultipleIterator(Generic[CumlModel]):
"""
Used by default implementation of Estimator.fitMultiple to produce models in a thread safe
iterator. This class handles the gpu version of fitMultiple where all param maps should be
fit in a single pass.
Parameters
----------
fitMultipleModels : function
Callable[[], CumlModel] which fits multiple models to a dataset in a single pass.
numModels : int
Number of models this iterator should produce.
Notes
-----
See :py:meth:`Estimator.fitMultiple` for more info.
"""
def __init__(
self, fitMultipleModels: Callable[[], List[CumlModel]], numModels: int
):
self.fitMultipleModels = fitMultipleModels
self.numModels = numModels
self.counter = 0
self.lock = threading.Lock()
self.models: List[CumlModel] = []
def __iter__(self) -> Iterator[Tuple[int, CumlModel]]:
return self
def __next__(self) -> Tuple[int, CumlModel]:
with self.lock:
index = self.counter
if index >= self.numModels:
raise StopIteration("No models remaining.")
if index == 0:
self.models = self.fitMultipleModels()
assert len(self.models) == self.numModels
self.counter += 1
return index, self.models[index]
def next(self) -> Tuple[int, CumlModel]:
return self.__next__()
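# Hedged usage sketch (names are illustrative, not library code): a tuning loop can consume the
# iterator returned by Estimator.fitMultiple like
#   for index, model in estimator.fitMultiple(df, paramMaps):
#       metrics[index] = evaluator.evaluate(model.transform(df))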
class _CumlEstimator(Estimator, _CumlCaller):
"""
The common estimator to handle the fit callback (_fit). It should:
1. set the default parameters
2. validate the parameters
3. prepare the dataset
4. train and return CUML model
5. create the pyspark model
"""
# used by keywords_only
_input_kwargs: Dict[str, Any]
def __init__(self) -> None:
super().__init__()
self.logger = get_logger(self.__class__)
@abstractmethod
def _create_pyspark_model(self, result: Row) -> "_CumlModel":
"""
Create the model according to the collected Row
"""
raise NotImplementedError()
def _enable_fit_multiple_in_single_pass(self) -> bool:
"""flag to indicate if fitMultiple in a single pass is supported.
If not, fallback to super().fitMultiple"""
return False
def fitMultiple(
self, dataset: DataFrame, paramMaps: Sequence["ParamMap"]
) -> Iterator[Tuple[int, "_CumlModel"]]:
"""
Fits multiple models to the input dataset for all param maps in a single pass.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset.
paramMaps : :py:class:`collections.abc.Sequence`
A Sequence of param maps.
Returns
-------
:py:class:`_FitMultipleIterator`
A thread safe iterable which contains one model for each param map. Each
call to `next(modelIterator)` will return `(index, model)` where model was fit
using `paramMaps[index]`. `index` values may not be sequential.
"""
if self._enable_fit_multiple_in_single_pass():
estimator = self.copy()
def fitMultipleModels() -> List["_CumlModel"]:
return estimator._fit_internal(dataset, paramMaps)
return _FitMultipleIterator(fitMultipleModels, len(paramMaps))
else:
return super().fitMultiple(dataset, paramMaps)
def _try_stage_level_scheduling(self, rdd: RDD) -> RDD:
ss = _get_spark_session()
sc = ss.sparkContext
if ss.version < "3.4.0":
self.logger.warning(
"Stage level scheduling in spark-rapids-ml requires spark version 3.4.0+"
)
return rdd
elif not _is_standalone_or_localcluster(sc):
# Only standalone or local-cluster supports stage-level scheduling with dynamic
# allocation disabled.
self.logger.warning(
"Stage level scheduling in spark-rapids-ml only works on spark standalone or "
"local cluster mode"
)
return rdd
executor_cores = sc.getConf().get("spark.executor.cores")
executor_gpu_amount = sc.getConf().get("spark.executor.resource.gpu.amount")
if executor_cores is None or executor_gpu_amount is None:
self.logger.warning(
"Stage level scheduling in spark-rapids-ml requires spark.executor.cores, "
"spark.executor.resource.gpu.amount to be set "
)
return rdd
if int(executor_gpu_amount) > 1:
# For spark.executor.resource.gpu.amount>1, we suppose user knows how to configure
# to make spark-rapids-ml run successfully.
#
self.logger.warning(
"Stage level scheduling in spark-rapids-ml will not work "
"when spark.executor.resource.gpu.amount>1"
)
return rdd
task_gpu_amount = sc.getConf().get("spark.task.resource.gpu.amount")
if task_gpu_amount is None:
# if spark.task.resource.gpu.amount is not set, the default concurrent tasks
# with gpu requirement will be 1, which means 2 training tasks will never
# be scheduled into the same executor.
return rdd
if float(task_gpu_amount) == float(executor_gpu_amount):
self.logger.warning(
f"The configuration of cores (exec = {executor_gpu_amount} task = {task_gpu_amount}, "
f"runnable tasks = 1) will result in wasted resources due to resource gpu limiting"
f"the number of runnable tasks per executor to: 1. Please adjust your configuration."
)
return rdd
from pyspark.resource.profile import ResourceProfileBuilder
from pyspark.resource.requests import TaskResourceRequests
# Each training task requires cpu cores > total executor cores / 2, which ensures
# each training task is sent to a different executor.
#
# Please note that we can't set task_cores to a value smaller than total executor cores / 2,
# because task_gpus alone can't ensure the tasks are sent to different executors, even with task_gpus=1.0.
#
# If the spark-rapids plugin is enabled, we don't allow other ETL tasks to run alongside the training task, to avoid OOM.
spark_plugins = ss.conf.get("spark.plugins", " ")
assert spark_plugins is not None
spark_rapids_sql_enabled = ss.conf.get("spark.rapids.sql.enabled", "true")
task_cores = (
int(executor_cores)
if "com.nvidia.spark.SQLPlugin" in spark_plugins
and "true" == spark_rapids_sql_enabled
else (int(executor_cores) // 2) + 1
)
# task_gpus means how many slots per gpu address the task requires;
# it does not mean how many gpus the task requires, so it can be any value in (0, 0.5] or 1.
task_gpus = 1.0
treqs = TaskResourceRequests().cpus(task_cores).resource("gpu", task_gpus)
rp = ResourceProfileBuilder().require(treqs).build
self.logger.info(
f"Training tasks require the resource(cores={task_cores}, gpu={task_gpus})"
)
return rdd.withResources(rp)
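# Illustrative Spark configuration (example values, not requirements) under which the resource profile
# built above takes effect on a standalone cluster with Spark 3.4+:
#   --conf spark.executor.cores=12 --conf spark.executor.resource.gpu.amount=1 \
#   --conf spark.task.resource.gpu.amount=0.08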
def _fit_internal(
self, dataset: DataFrame, paramMaps: Optional[Sequence["ParamMap"]]
) -> List["_CumlModel"]:
"""Fit multiple models according to the parameters maps"""
pipelined_rdd = self._call_cuml_fit_func(
dataset=dataset,
partially_collect=True,
paramMaps=paramMaps,
)
pipelined_rdd = self._try_stage_level_scheduling(pipelined_rdd)
self.logger.info(
f"Training spark-rapids-ml with {self.num_workers} worker(s) ..."
)
rows = pipelined_rdd.collect()
self.logger.info("Finished training")
models: List["_CumlModel"] = [None] # type: ignore
if paramMaps is not None:
models = [None] * len(paramMaps) # type: ignore
for index in range(len(models)):
model = self._create_pyspark_model(rows[index])
model._num_workers = self._num_workers
model._float32_inputs = self._float32_inputs
if paramMaps is not None:
self._copyValues(model, paramMaps[index])
else:
self._copyValues(model)
self._copy_cuml_params(model) # type: ignore
models[index] = model # type: ignore
return models
def _fit(self, dataset: DataFrame) -> "_CumlModel":
"""fit only 1 model"""
return self._fit_internal(dataset, None)[0]
def write(self) -> MLWriter:
return _CumlEstimatorWriter(self)
@classmethod
def read(cls) -> MLReader:
return _CumlEstimatorReader(cls)
def _supportsTransformEvaluate(self, evaluator: Evaluator) -> bool:
"""If supporting _transformEvaluate in a single pass based on the evaluator
Please note that this function should only be used in CrossValidator for quick
fallback if unsupported."""
return False
class _CumlEstimatorSupervised(_CumlEstimator, HasLabelCol):
"""
Base class for Cuml Supervised machine learning.
"""
def _pre_process_label(
self, dataset: DataFrame, feature_type: Union[Type[FloatType], Type[DoubleType]]
) -> Column:
"""Convert label according to feature type by default"""
label_name = self.getLabelCol()
label_datatype = dataset.schema[label_name].dataType
if isinstance(label_datatype, (IntegralType, FloatType, DoubleType)):
if isinstance(label_datatype, IntegralType) or not isinstance(
label_datatype, feature_type
):
label_col = col(label_name).cast(feature_type()).alias(alias.label)
else:
label_col = col(label_name).alias(alias.label)
else:
raise ValueError(
"Label column must be integral types or float/double types."
)
return label_col
def _pre_process_data(
self, dataset: DataFrame
) -> Tuple[
List[Column], Optional[List[str]], int, Union[Type[FloatType], Type[DoubleType]]
]:
(
select_cols,
multi_col_names,
dimension,
feature_type,
) = super()._pre_process_data(dataset)
select_cols.append(self._pre_process_label(dataset, feature_type))
return select_cols, multi_col_names, dimension, feature_type
class _CumlModel(Model, _CumlParams, _CumlCommon):
"""
Abstract class for spark-rapids-ml models that are fitted by spark-rapids-ml estimators.
"""
def __init__(
self,
*,
dtype: Optional[str] = None,
n_cols: Optional[int] = None,
**model_attributes: Any,
) -> None:
"""
Subclass must pass the model attributes which will be saved in model persistence.
"""
super().__init__()
self.initialize_cuml_params()
# model_data is the native data which will be saved for model persistence
self._model_attributes = model_attributes
self._model_attributes["dtype"] = dtype
self._model_attributes["n_cols"] = n_cols
self.dtype = dtype
self.n_cols = n_cols
def cpu(self) -> Model:
"""Return the equivalent PySpark CPU model"""
raise NotImplementedError()
def get_model_attributes(self) -> Optional[Dict[str, Any]]:
"""Return model attributes as a dictionary."""
return self._model_attributes
@classmethod
def from_row(cls, model_attributes: Row): # type: ignore
"""
By default, passes all of the model's attributes to the model constructor,
so please make sure the constructor can accept all of them.
"""
attr_dict = model_attributes.asDict()
return cls(**attr_dict)
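# Illustrative only: Row(dtype="float32", n_cols=2, coef_=[[0.1, -0.2]], intercept_=[0.0]) would be
# expanded into cls(dtype="float32", n_cols=2, coef_=[[0.1, -0.2]], intercept_=[0.0]) for a model
# class whose constructor accepts those attributes.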
@abstractmethod
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
"""
Subclass must implement this function to return three functions,
1. a function to construct cuml counterpart instance
2. a function to transform the dataset
3. an optional function to evaluate.
Eg,
def _get_cuml_transform_func(self, dataset: DataFrame):
...
def _construct_cuml_object() -> CumlT
...
def _cuml_transform(cuml_obj: CumlT, df: Union[pd.DataFrame, np.ndarray]) ->pd.DataFrame:
...
def _evaluate(input_df: Union[pd.DataFrame, np.ndarray], transformed_df: Union[pd.DataFrame, np.ndarray]) -> pd.DataFrame:
...
...
# please note that if category is transform, the evaluate function will be ignored.
return _construct_cuml_object, _cuml_transform, _evaluate
_get_cuml_transform_func itself runs on the driver side, while the returned
_construct_cuml_object and _cuml_transform, _evaluate will run on the executor side.
"""
raise NotImplementedError()
def _transform_array_order(self) -> _ArrayOrder:
"""
preferred array order for converting single column array type to numpy arrays: "C" or "F"
"""
return "F"
@abstractmethod
def _out_schema(self, input_schema: StructType) -> Union[StructType, str]:
"""
The output schema of the model, which will be used to
construct the returning pandas dataframe
"""
raise NotImplementedError()
def _pre_process_data(
self, dataset: DataFrame
) -> Tuple[DataFrame, List[str], bool, List[str]]:
"""Pre-handle the dataset before transform.
Please note that, this function just transforms the input column if necessary, and
it will keep the unused columns.
return (dataset, list of feature names, bool value to indicate if it is multi-columns input, list of temporary columns to be dropped)
"""
select_cols = []
tmp_cols = []
input_is_multi_cols = True
input_col, input_cols = self._get_input_columns()
if input_col is not None:
input_col_type = dataset.schema[input_col].dataType
tmp_input_col = f"{alias.data}_c3BhcmstcmFwaWRzLW1sCg=="
if isinstance(input_col_type, VectorUDT):
# Vector type
# Avoid same naming. `echo spark-rapids-ml | base64` = c3BhcmstcmFwaWRzLW1sCg==
vector_element_type = "float32" if self._float32_inputs else "float64"
dataset = dataset.withColumn(
tmp_input_col, vector_to_array(col(input_col), vector_element_type)
)
select_cols.append(tmp_input_col)
tmp_cols.append(tmp_input_col)
elif isinstance(input_col_type, ArrayType):
if (
isinstance(input_col_type.elementType, DoubleType)
and not self._float32_inputs
):
select_cols.append(input_col)
elif (
isinstance(input_col_type.elementType, DoubleType)
and self._float32_inputs
):
dataset = dataset.withColumn(
tmp_input_col, col(input_col).cast(ArrayType(FloatType()))
)
select_cols.append(tmp_input_col)
tmp_cols.append(tmp_input_col)
else:
# FloatType array
select_cols.append(input_col)
elif not isinstance(input_col_type, ArrayType):
# not Array type
raise ValueError("Unsupported input type.")
input_is_multi_cols = False
elif input_cols is not None:
any_double_types = any(
[isinstance(dataset.schema[c].dataType, DoubleType) for c in input_cols]
)
feature_type: Union[Type[FloatType], Type[DoubleType]] = FloatType
if not self._float32_inputs and any_double_types:
feature_type = DoubleType
for c in input_cols:
col_type = dataset.schema[c].dataType
if (
isinstance(col_type, IntegralType)
or isinstance(col_type, FloatType)
or isinstance(col_type, DoubleType)
):
if not isinstance(col_type, feature_type):
tmp_input_col = f"{c}_c3BhcmstcmFwaWRzLW1sCg=="
dataset = dataset.withColumn(
tmp_input_col, col(c).cast(feature_type())
)
select_cols.append(tmp_input_col)
tmp_cols.append(tmp_input_col)
else:
select_cols.append(c)
else:
raise ValueError(
"All columns must be integral types or float/double types."
)
else:
# should never get here
raise Exception("Unable to determine input column(s).")
return dataset, select_cols, input_is_multi_cols, tmp_cols
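# Illustrative only: for a single VectorUDT column "features" with _float32_inputs=True, this returns
# (dataset_with_tmp_col, ["cuml_values_c3BhcmstcmFwaWRzLW1sCg=="], False,
#  ["cuml_values_c3BhcmstcmFwaWRzLW1sCg=="]).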
def _transform_evaluate_internal(
self, dataset: DataFrame, schema: Union[StructType, str]
) -> DataFrame:
"""Internal API to support transform and evaluation in a single pass"""
dataset, select_cols, input_is_multi_cols, _ = self._pre_process_data(dataset)
is_local = _is_local(_get_spark_session().sparkContext)
# Get the functions which will be passed into executor to run.
(
construct_cuml_object_func,
cuml_transform_func,
evaluate_func,
) = self._get_cuml_transform_func(
dataset, transform_evaluate.transform_evaluate
)
if evaluate_func:
dataset = dataset.select(alias.label, *select_cols)
else:
dataset = dataset.select(*select_cols)
array_order = self._transform_array_order()
def _transform_udf(pdf_iter: Iterator[pd.DataFrame]) -> pd.DataFrame:
from pyspark import TaskContext
context = TaskContext.get()
_CumlCommon.set_gpu_device(context, is_local, True)
# Construct the cuml counterpart object
cuml_instance = construct_cuml_object_func()
cuml_objects = (
cuml_instance if isinstance(cuml_instance, list) else [cuml_instance]
)
# TODO try to concatenate all the data and do the transform.
for pdf in pdf_iter:
for index, cuml_object in enumerate(cuml_objects):
# Transform the dataset
if input_is_multi_cols:
data = cuml_transform_func(cuml_object, pdf[select_cols])
else:
nparray = np.array(list(pdf[select_cols[0]]), order=array_order)
data = cuml_transform_func(cuml_object, nparray)
# Evaluate the dataset if necessary.
if evaluate_func is not None:
data = evaluate_func(pdf, data)
data[pred.model_index] = index
yield data
return dataset.mapInPandas(_transform_udf, schema=schema) # type: ignore
def _transform(self, dataset: DataFrame) -> DataFrame:
"""
Transforms the input dataset.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
transformed dataset
"""
return self._transform_evaluate_internal(
dataset, schema=self._out_schema(dataset.schema)
)
def write(self) -> MLWriter:
return _CumlModelWriter(self)
@classmethod
def read(cls) -> MLReader:
return _CumlModelReader(cls)
def _transformEvaluate(
self,
dataset: DataFrame,
evaluator: Evaluator,
params: Optional["ParamMap"] = None,
) -> List[float]:
"""
Transforms and evaluates the input dataset with optional parameters in a single pass.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
a dataset that contains labels/observations and predictions
evaluator: :py:class:`pyspark.ml.evaluation.Evaluator`
an evaluator user intends to use
params : dict, optional
an optional param map that overrides embedded params
Returns
-------
list of float
metrics
"""
raise NotImplementedError()
@classmethod
def _combine(cls: Type["_CumlModel"], models: List["_CumlModel"]) -> "_CumlModel":
"""Combine a list of same type models into a model"""
raise NotImplementedError()
class _CumlModelWithColumns(_CumlModel):
"""Cuml base model for generating extra predicted columns"""
def _is_single_pred(self, input_schema: StructType) -> bool:
"""Indicate if the transform is only predicting 1 column"""
schema = self._out_schema(input_schema)
if isinstance(schema, str):
return False if "," in schema else True
elif isinstance(schema, StructType):
return False if len(schema.names) > 1 else True
def _has_probability_col(self) -> bool:
"""This API is needed and can be overwritten by subclass which
hasn't implemented predict probability yet"""
return True if isinstance(self, HasProbabilityCol) else False
def _get_prediction_name(self) -> str:
"""Different algos have different prediction names,
eg, PCA: value of outputCol param, RF/LR/Kmeans: value of predictionCol name"""
if isinstance(self, HasPredictionCol):
return self.getPredictionCol()
elif isinstance(self, HasOutputCol):
return self.getOutputCol()
else:
raise ValueError("Please set predictionCol or outputCol")
def _transform(self, dataset: DataFrame) -> DataFrame:
"""This version of transform is directly adding extra columns to the dataset"""
dataset, select_cols, input_is_multi_cols, tmp_cols = self._pre_process_data(
dataset
)
is_local = _is_local(_get_spark_session().sparkContext)
# Get the functions which will be passed into executor to run.
(
construct_cuml_object_func,
cuml_transform_func,
_,
) = self._get_cuml_transform_func(dataset)
array_order = self._transform_array_order()
@pandas_udf(self._out_schema(dataset.schema)) # type: ignore
def predict_udf(iterator: Iterator[pd.DataFrame]) -> Iterator[pd.Series]:
from pyspark import TaskContext
context = TaskContext.get()
_CumlCommon.set_gpu_device(context, is_local, True)
cuml_objects = construct_cuml_object_func()
cuml_object = (
cuml_objects[0] if isinstance(cuml_objects, list) else cuml_objects
)
for pdf in iterator:
if not input_is_multi_cols:
data = np.array(list(pdf[select_cols[0]]), order=array_order)
else:
data = pdf[select_cols]
# for normal transform, we don't allow multiple models.
res = cuml_transform_func(cuml_object, data)
del data
yield res
pred_name = self._get_prediction_name()
pred_col = predict_udf(struct(*select_cols))
if self._is_single_pred(dataset.schema):
return dataset.withColumn(pred_name, pred_col)
else:
# Avoid same naming. `echo sparkcuml | base64` = c3BhcmtjdW1sCg==
pred_struct_col_name = "_prediction_struct_c3BhcmtjdW1sCg=="
dataset = dataset.withColumn(pred_struct_col_name, pred_col)
# 1. Add prediction in the base class
dataset = dataset.withColumn(
pred_name, getattr(col(pred_struct_col_name), pred.prediction)
)
# 2. Handle probability columns
if self._has_probability_col():
probability_col = self.getOrDefault("probabilityCol")
dataset = dataset.withColumn(
probability_col,
array_to_vector(
getattr(col(pred_struct_col_name), pred.probability)
),
)
# 3. Drop the unused column
dataset = dataset.drop(pred_struct_col_name)
return dataset.drop(*tmp_cols)
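    # Sketch of the multi-output path above (column/field names assumed for illustration):
    # the UDF returns a struct such as {prediction: 1.0, probability: [0.2, 0.8]}; it is
    # attached once as a temporary struct column, its fields are copied out into the
    # user-facing prediction/probability columns, and the temporary column is dropped.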
    def _out_schema(self, input_schema: StructType) -> Union[StructType, str]:
        assert self.dtype is not None
        if self._has_probability_col():
            schema = f"{pred.prediction} double, {pred.probability} array<double>"
        else:
            schema = "double"
        return schema
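    # Illustrative note (field names assumed to be "prediction"/"probability"): with a
    # probability column the schema string above becomes
    #     "prediction double, probability array<double>"
    # so the UDF yields a struct with those two fields; otherwise it is just "double",
    # i.e. a single unnamed prediction column.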
class _CumlModelWithPredictionCol(_CumlModelWithColumns, HasPredictionCol):
"""Cuml base model with prediction col"""
@property # type: ignore[misc]
def numFeatures(self) -> int:
"""
Returns the number of features the model was trained on. If unknown, returns -1
"""
num_features = self.n_cols if self.n_cols else -1
return num_features
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/core.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pyspark.ml.common import _py2java
from pyspark.ml.feature import PCAModel as SparkPCAModel
from pyspark.ml.feature import _PCAParams
from pyspark.ml.linalg import DenseMatrix, DenseVector
from pyspark.ml.param.shared import HasInputCols
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import (
ArrayType,
DoubleType,
IntegerType,
Row,
StringType,
StructField,
StructType,
)
from .core import (
CumlT,
FitInputType,
_ConstructFunc,
_CumlEstimator,
_CumlModelWithColumns,
_EvaluateFunc,
_TransformFunc,
param_alias,
transform_evaluate,
)
from .params import P, _CumlClass, _CumlParams
from .utils import (
PartitionDescriptor,
_get_spark_session,
dtype_to_pyspark_type,
java_uid,
)
class PCAClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {"k": "n_components"}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {
"n_components": None,
"svd_solver": "auto",
"verbose": False,
"whiten": False,
}
class _PCACumlParams(_CumlParams, _PCAParams, HasInputCols):
"""
Shared Spark Params for PCA and PCAModel.
"""
def setInputCol(self: P, value: Union[str, List[str]]) -> P:
"""
        Sets the value of :py:attr:`inputCol` or :py:attr:`inputCols`. A single string sets :py:attr:`inputCol` (input vectors stored in one column); a list of strings sets :py:attr:`inputCols` (one feature per column).
"""
if isinstance(value, str):
self.set_params(inputCol=value)
else:
self.set_params(inputCols=value)
return self
def setInputCols(self: P, value: List[str]) -> P:
"""
Sets the value of :py:attr:`inputCols`. Used when input vectors are stored as multiple feature columns.
"""
return self.set_params(inputCols=value)
def setOutputCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`outputCol`
"""
return self.set_params(outputCol=value)
class PCA(PCAClass, _CumlEstimator, _PCACumlParams):
"""
    The PCA algorithm learns principal component vectors that project high-dimensional
    vectors into low-dimensional vectors while preserving the similarity of the vectors.
    PCA has been used for dimensionality reduction, clustering, and data visualization
    on large datasets. This class provides GPU acceleration for distributed pyspark PCA.
Parameters
----------
k: int
the number of components, or equivalently the dimension that all vectors will be projected to.
inputCol: str
the name of the column that contains input vectors. inputCol should be set when input vectors
are stored in a single column of a dataframe.
inputCols: List[str]
the names of feature columns that form input vectors. inputCols should be set when input vectors
are stored as multiple feature columns of a dataframe.
outputCol: str
the name of the column that stores output vectors. outputCol should be set when users expect to
store output vectors in a single column.
Examples
--------
>>> from spark_rapids_ml.feature import PCA
>>> data = [([1.0, 1.0],),
... ([2.0, 2.0],),
... ([3.0, 3.0],),]
>>> df = spark.createDataFrame(data, ["features"])
>>> gpu_pca = PCA(k=1, inputCol="features")
>>> gpu_pca.setOutputCol("pca_features")
PCA...
>>> gpu_model = gpu_pca.fit(df)
>>> gpu_model.getK()
1
>>> print(gpu_model.mean)
[2.0, 2.0]
>>> print(gpu_model.pc)
DenseMatrix([[0.70710678],
[0.70710678]])
>>> print(gpu_model.explained_variance)
[1.0]
>>> gpu_pca.save("/tmp/pca")
>>> # vector column input
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([2.0, 2.0]),),
... (Vectors.dense([3.0, 3.0]),),]
>>> df = spark.createDataFrame(data, ["features"])
>>> gpu_pca = PCA(k=1).setInputCol("features")
>>> gpu_pca.getInputCol()
'features'
>>> gpu_model = gpu_pca.fit(df)
>>> # multi-column input
>>> data = [(1.0, 1.0),
... (2.0, 2.0),
... (3.0, 3.0),]
>>> df = spark.createDataFrame(data, ["f1", "f2"])
>>> gpu_pca = PCA(k=1).setInputCols(["f1", "f2"])
>>> gpu_pca.getInputCols()
['f1', 'f2']
>>> gpu_model = gpu_pca.fit(df)
"""
def __init__(self, **kwargs: Any) -> None:
super().__init__()
self.set_params(**kwargs)
def setK(self, value: int) -> "PCA":
"""
Sets the value of :py:attr:`k`.
"""
return self.set_params(k=value)
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
def _cuml_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
from cuml.decomposition.pca_mg import PCAMG as CumlPCAMG
pca_object = CumlPCAMG(
handle=params[param_alias.handle],
output_type="cudf",
**params[param_alias.cuml_init],
)
pdesc = PartitionDescriptor.build(
params[param_alias.part_sizes], params[param_alias.num_cols]
)
data_arrays = [x for x, _, _ in dfs]
# reverse list order to compensate for cuda managed memory LRU eviction
stride = -1
pca_object.fit(
data_arrays[::stride],
pdesc.m,
pdesc.n,
pdesc.parts_rank_size[::stride],
pdesc.rank,
_transform=False,
)
cpu_mean = pca_object.mean_.to_arrow().to_pylist()
cpu_pc = pca_object.components_.to_numpy().tolist()
cpu_explained_variance = (
pca_object.explained_variance_ratio_.to_numpy().tolist()
)
cpu_singular_values = pca_object.singular_values_.to_numpy().tolist()
return {
"mean_": [cpu_mean],
"components_": [cpu_pc],
"explained_variance_ratio_": [cpu_explained_variance],
"singular_values_": [cpu_singular_values],
"n_cols": params[param_alias.num_cols],
"dtype": pca_object.dtype.name,
}
return _cuml_fit
def _out_schema(self) -> Union[StructType, str]:
return StructType(
[
StructField("mean_", ArrayType(DoubleType(), False), False),
StructField(
"components_", ArrayType(ArrayType(DoubleType()), False), False
),
StructField(
"explained_variance_ratio_", ArrayType(DoubleType(), False), False
),
StructField("singular_values_", ArrayType(DoubleType(), False), False),
StructField("n_cols", IntegerType(), False),
StructField("dtype", StringType(), False),
]
)
def _create_pyspark_model(self, result: Row) -> "PCAModel":
return PCAModel.from_row(result)
class PCAModel(PCAClass, _CumlModelWithColumns, _PCACumlParams):
"""Applies dimensionality reduction on an input DataFrame.
    Note: Input vectors must be zero-centered to ensure PCA works properly.
    Spark PCA does not automatically remove the mean of the input data, so use the
    :py:class:`~pyspark.ml.feature.StandardScaler` to center the input data before
invoking transform.
Examples
--------
>>> from spark_rapids_ml.feature import PCA
>>> data = [([-1.0, -1.0],),
... ([0.0, 0.0],),
... ([1.0, 1.0],),]
>>> df = spark.createDataFrame(data, ["features"])
>>> gpu_pca = PCA(k=1).setInputCol("features").setOutputCol("pca_features")
>>> gpu_model = gpu_pca.fit(df)
>>> reduced_df = gpu_model.transform(df)
>>> reduced_df.show()
+---------------------+
| pca_features|
+---------------------+
| [-1.414213562373095]|
| [0.0]|
| [1.414213562373095]|
+---------------------+
"""
def __init__(
self,
mean_: List[float],
components_: List[List[float]],
explained_variance_ratio_: List[float],
singular_values_: List[float],
n_cols: int,
dtype: str,
):
super().__init__(
n_cols=n_cols,
dtype=dtype,
mean_=mean_,
components_=components_,
explained_variance_ratio_=explained_variance_ratio_,
singular_values_=singular_values_,
)
self.mean_ = mean_
self.components_ = components_
self.explained_variance_ratio_ = explained_variance_ratio_
self.singular_values_ = singular_values_
self._pca_ml_model: Optional[SparkPCAModel] = None
self.set_params(n_components=len(components_))
@property
def mean(self) -> List[float]:
"""
Returns the mean of the input vectors.
"""
return self.mean_
@property
def pc(self) -> DenseMatrix:
"""
Returns a principal components Matrix.
Each column is one principal component.
"""
num_rows = len(self.components_)
num_cols = self.n_cols
values = list(itertools.chain.from_iterable(self.components_))
# DenseMatrix is column major, so flip rows/cols
return DenseMatrix(num_cols, num_rows, values, False) # type: ignore
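    # Sketch of the column-major packing above (values assumed for illustration): with
    # components_ == [[a, b, c]] (k=1 component over n_cols=3 features), the flattened
    # values are [a, b, c] and DenseMatrix(3, 1, values, False) is a 3x1 matrix whose
    # single column is that principal component, matching Spark's convention that each
    # column of `pc` holds one component.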
@property
def explainedVariance(self) -> DenseVector:
"""
Returns a vector of proportions of variance
explained by each principal component.
"""
return DenseVector(self.explained_variance_ratio_)
def cpu(self) -> SparkPCAModel:
"""Return the PySpark ML PCAModel"""
if self._pca_ml_model is None:
sc = _get_spark_session().sparkContext
assert sc._jvm is not None
java_pc = _py2java(sc, self.pc)
java_explainedVariance = _py2java(sc, self.explainedVariance)
java_model = sc._jvm.org.apache.spark.ml.feature.PCAModel(
java_uid(sc, "pca"), java_pc, java_explainedVariance
)
self._pca_ml_model = SparkPCAModel(java_model)
self._copyValues(self._pca_ml_model)
return self._pca_ml_model
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
cuml_alg_params = self.cuml_params.copy()
n_cols = self.n_cols
        dtype = self.dtype
components = self.components_
mean = self.mean_
singular_values = self.singular_values_
def _construct_pca() -> CumlT:
"""
Returns the instance of PCAMG which will be passed to _transform_internal
to do the transform.
"""
from cuml.decomposition.pca_mg import PCAMG as CumlPCAMG
pca = CumlPCAMG(output_type="numpy", **cuml_alg_params)
# Compatible with older cuml versions (before 23.02)
pca._n_components = pca.n_components
pca.n_components_ = pca.n_components
from spark_rapids_ml.utils import cudf_to_cuml_array
pca.n_cols = n_cols
            pca.dtype = np.dtype(dtype)
# TBD: figure out why PCA warns regardless of array order here and for singular values
pca.components_ = cudf_to_cuml_array(
np.array(components, order="F").astype(pca.dtype)
)
pca.mean_ = cudf_to_cuml_array(np.array(mean, order="F").astype(pca.dtype))
pca.singular_values_ = cudf_to_cuml_array(
np.array(singular_values, order="F").astype(pca.dtype)
)
return pca
transformed_mean = np.matmul(
np.array(self.mean_, self.dtype),
np.array(self.components_, self.dtype).T,
)
def _transform_internal(
pca_object: CumlT, df: Union[pd.DataFrame, np.ndarray]
) -> pd.DataFrame:
res = pca_object.transform(df)
# Spark does not remove the mean from the transformed data,
# but cuML does, so need to add the mean back to match Spark results
res += transformed_mean
return pd.Series(list(res))
return _construct_pca, _transform_internal, None
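    # Why the mean is added back in _transform_internal (sketch of the algebra): Spark
    # computes X @ P.T on uncentered data, whereas cuML computes (X - mean) @ P.T
    # = X @ P.T - mean @ P.T. Adding transformed_mean = mean @ P.T to cuML's output
    # therefore reproduces Spark's result.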
def _out_schema(self, input_schema: StructType) -> Union[StructType, str]:
assert self.dtype is not None
pyspark_type = dtype_to_pyspark_type(self.dtype)
return f"array<{pyspark_type}>"
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/feature.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import numpy as np
import pandas as pd
import pyspark
from pandas import DataFrame as PandasDataFrame
from pyspark.ml.param.shared import (
HasFeaturesCol,
HasLabelCol,
HasOutputCol,
Param,
Params,
TypeConverters,
)
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, MLReader, MLWriter
from pyspark.sql import Column, DataFrame
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import (
ArrayType,
DoubleType,
FloatType,
Row,
StructField,
StructType,
)
from spark_rapids_ml.core import FitInputType, _CumlModel
from .core import (
CumlT,
FitInputType,
_ConstructFunc,
_CumlCommon,
_CumlEstimator,
_CumlEstimatorSupervised,
_CumlModel,
_CumlModelReader,
_CumlModelWriter,
_EvaluateFunc,
_TransformFunc,
alias,
param_alias,
transform_evaluate,
)
from .params import HasFeaturesCols, P, _CumlClass, _CumlParams
from .utils import (
_ArrayOrder,
_concat_and_free,
_get_spark_session,
_is_local,
get_logger,
)
if TYPE_CHECKING:
import cudf
from pyspark.ml._typing import ParamMap
class UMAPClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {
"n_neighbors": 15,
"n_components": 2,
"metric": "euclidean",
"n_epochs": None,
"learning_rate": 1.0,
"init": "spectral",
"min_dist": 0.1,
"spread": 1.0,
"set_op_mix_ratio": 1.0,
"local_connectivity": 1.0,
"repulsion_strength": 1.0,
"negative_sample_rate": 5,
"transform_queue_size": 4.0,
"a": None,
"b": None,
"precomputed_knn": None,
"random_state": None,
"verbose": False,
}
class _UMAPCumlParams(
_CumlParams, HasFeaturesCol, HasFeaturesCols, HasLabelCol, HasOutputCol
):
def __init__(self) -> None:
super().__init__()
self._setDefault(
n_neighbors=15,
n_components=2,
metric="euclidean",
n_epochs=None,
learning_rate=1.0,
init="spectral",
min_dist=0.1,
spread=1.0,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
repulsion_strength=1.0,
negative_sample_rate=5,
transform_queue_size=4.0,
a=None,
b=None,
precomputed_knn=None,
random_state=None,
sample_fraction=1.0,
outputCol="embedding",
)
n_neighbors = Param(
Params._dummy(),
"n_neighbors",
(
f"The size of local neighborhood (in terms of number of neighboring sample points) used for manifold approximation."
f" Larger values result in more global views of the manifold, while smaller values result in more local data being"
f" preserved. In general values should be in the range 2 to 100."
),
typeConverter=TypeConverters.toFloat,
)
n_components = Param(
Params._dummy(),
"n_components",
(
f"The dimension of the space to embed into. This defaults to 2 to provide easy visualization, but can reasonably"
f" be set to any integer value in the range 2 to 100."
),
typeConverter=TypeConverters.toInt,
)
metric = Param(
Params._dummy(),
"metric",
(
f"Distance metric to use. Supported distances are ['l1', 'cityblock', 'taxicab', 'manhattan', 'euclidean', 'l2',"
f" 'sqeuclidean', 'canberra', 'minkowski', 'chebyshev', 'linf', 'cosine', 'correlation', 'hellinger', 'hamming',"
f" 'jaccard']. Metrics that take arguments (such as minkowski) can have arguments passed via the metric_kwds"
f" dictionary."
),
typeConverter=TypeConverters.toString,
)
n_epochs = Param(
Params._dummy(),
"n_epochs",
(
f"The number of training epochs to be used in optimizing the low dimensional embedding. Larger values result in"
f" more accurate embeddings. If None is specified a value will be selected based on the size of the input dataset"
f" (200 for large datasets, 500 for small)."
),
typeConverter=TypeConverters.toInt,
)
learning_rate = Param(
Params._dummy(),
"learning_rate",
"The initial learning rate for the embedding optimization.",
typeConverter=TypeConverters.toFloat,
)
init = Param(
Params._dummy(),
"init",
(
f"How to initialize the low dimensional embedding. Options are: 'spectral': use a spectral embedding of the fuzzy"
f" 1-skeleton, 'random': assign initial embedding positions at random."
),
typeConverter=TypeConverters.toString,
)
min_dist = Param(
Params._dummy(),
"min_dist",
(
f"The effective minimum distance between embedded points. Smaller values will result in a more clustered/clumped"
f" embedding where nearby points on the manifold are drawn closer together, while larger values will result in a"
f" more even dispersal of points. The value should be set relative to the ``spread`` value, which determines the"
f" scale at which embedded points will be spread out."
),
typeConverter=TypeConverters.toFloat,
)
spread = Param(
Params._dummy(),
"spread",
(
f"The effective scale of embedded points. In combination with ``min_dist`` this determines how clustered/clumped"
f" the embedded points are."
),
typeConverter=TypeConverters.toFloat,
)
set_op_mix_ratio = Param(
Params._dummy(),
"set_op_mix_ratio",
(
f"Interpolate between (fuzzy) union and intersection as the set operation used to combine local fuzzy simplicial"
f" sets to obtain a global fuzzy simplicial sets. Both fuzzy set operations use the product t-norm. The value of"
f" this parameter should be between 0.0 and 1.0; a value of 1.0 will use a pure fuzzy union, while 0.0 will use a"
f" pure fuzzy intersection."
),
typeConverter=TypeConverters.toFloat,
)
local_connectivity = Param(
Params._dummy(),
"local_connectivity",
(
f"The local connectivity required -- i.e. the number of nearest neighbors that should be assumed to be connected"
f" at a local level. The higher this value the more connected the manifold becomes locally. In practice this should"
f" be not more than the local intrinsic dimension of the manifold."
),
typeConverter=TypeConverters.toFloat,
)
repulsion_strength = Param(
Params._dummy(),
"repulsion_strength",
(
f"Weighting applied to negative samples in low dimensional embedding optimization. Values higher than one will"
f" result in greater weight being given to negative samples."
),
typeConverter=TypeConverters.toFloat,
)
negative_sample_rate = Param(
Params._dummy(),
"negative_sample_rate",
(
f"The number of negative samples to select per positive sample in the optimization process. Increasing this value"
f" will result in greater repulsive force being applied, greater optimization cost, but slightly more accuracy."
),
typeConverter=TypeConverters.toInt,
)
transform_queue_size = Param(
Params._dummy(),
"transform_queue_size",
(
f"For transform operations (embedding new points using a trained model), this will control how aggressively to"
f" search for nearest neighbors. Larger values will result in slower performance but more accurate nearest neighbor"
f" evaluation."
),
typeConverter=TypeConverters.toFloat,
)
a = Param(
Params._dummy(),
"a",
(
f"More specific parameters controlling the embedding. If None these values are set automatically as determined by"
f" ``min_dist`` and ``spread``."
),
typeConverter=TypeConverters.toFloat,
)
b = Param(
Params._dummy(),
"b",
(
f"More specific parameters controlling the embedding. If None these values are set automatically as determined by"
f" ``min_dist`` and ``spread``."
),
typeConverter=TypeConverters.toFloat,
)
precomputed_knn = Param(
Params._dummy(),
"precomputed_knn",
(
f"Either one of a tuple (indices, distances) of arrays of shape (n_samples, n_neighbors), a pairwise distances"
f" dense array of shape (n_samples, n_samples) or a KNN graph sparse array (preferably CSR/COO). This feature"
f" allows the precomputation of the KNN outside of UMAP and also allows the use of a custom distance function."
f" This function should match the metric used to train the UMAP embeedings."
),
typeConverter=TypeConverters.toListListFloat,
)
random_state = Param(
Params._dummy(),
"random_state",
(
f"The seed used by the random number generator during embedding initialization and during sampling used by the"
f" optimizer. Unfortunately, achieving a high amount of parallelism during the optimization stage often comes at"
f" the expense of determinism, since many floating-point additions are being made in parallel without a"
f" deterministic ordering. This causes slightly different results across training sessions, even when the same"
f" seed is used for random number generation. Setting a random_state will enable consistency of trained embeddings,"
f" allowing for reproducible results to 3 digits of precision, but will do so at the expense of training time and"
f" memory usage."
),
typeConverter=TypeConverters.toInt,
)
sample_fraction = Param(
Params._dummy(),
"sample_fraction",
(
f"The fraction of the dataset to be used for fitting the model. Since fitting is done on a single node, very large"
f" datasets must be subsampled to fit within the node's memory and execute in a reasonable time. Smaller fractions"
f" will result in faster training, but may result in sub-optimal embeddings."
),
typeConverter=TypeConverters.toFloat,
)
def getNNeighbors(self) -> float:
"""
Gets the value of `n_neighbors`.
"""
return self.getOrDefault("n_neighbors")
def setNNeighbors(self: P, value: float) -> P:
"""
Sets the value of `n_neighbors`.
"""
return self.set_params(n_neighbors=value)
def getNComponents(self) -> int:
"""
Gets the value of `n_components`.
"""
return self.getOrDefault("n_components")
def setNComponents(self: P, value: int) -> P:
"""
Sets the value of `n_components`.
"""
return self.set_params(n_components=value)
def getMetric(self) -> str:
"""
Gets the value of `metric`.
"""
return self.getOrDefault("metric")
def setMetric(self: P, value: str) -> P:
"""
Sets the value of `metric`.
"""
return self.set_params(metric=value)
def getNEpochs(self) -> int:
"""
Gets the value of `n_epochs`.
"""
return self.getOrDefault("n_epochs")
def setNEpochs(self: P, value: int) -> P:
"""
Sets the value of `n_epochs`.
"""
return self.set_params(n_epochs=value)
def getLearningRate(self) -> float:
"""
Gets the value of `learning_rate`.
"""
return self.getOrDefault("learning_rate")
def setLearningRate(self: P, value: float) -> P:
"""
Sets the value of `learning_rate`.
"""
return self.set_params(learning_rate=value)
def getInit(self) -> str:
"""
Gets the value of `init`.
"""
return self.getOrDefault("init")
def setInit(self: P, value: str) -> P:
"""
Sets the value of `init`.
"""
return self.set_params(init=value)
def getMinDist(self) -> float:
"""
Gets the value of `min_dist`.
"""
return self.getOrDefault("min_dist")
def setMinDist(self: P, value: float) -> P:
"""
Sets the value of `min_dist`.
"""
return self.set_params(min_dist=value)
def getSpread(self) -> float:
"""
Gets the value of `spread`.
"""
return self.getOrDefault("spread")
def setSpread(self: P, value: float) -> P:
"""
Sets the value of `spread`.
"""
return self.set_params(spread=value)
def getSetOpMixRatio(self) -> float:
"""
Gets the value of `set_op_mix_ratio`.
"""
return self.getOrDefault("set_op_mix_ratio")
def setSetOpMixRatio(self: P, value: float) -> P:
"""
Sets the value of `set_op_mix_ratio`.
"""
return self.set_params(set_op_mix_ratio=value)
def getLocalConnectivity(self) -> float:
"""
Gets the value of `local_connectivity`.
"""
return self.getOrDefault("local_connectivity")
def setLocalConnectivity(self: P, value: float) -> P:
"""
Sets the value of `local_connectivity`.
"""
return self.set_params(local_connectivity=value)
def getRepulsionStrength(self) -> float:
"""
Gets the value of `repulsion_strength`.
"""
return self.getOrDefault("repulsion_strength")
def setRepulsionStrength(self: P, value: float) -> P:
"""
Sets the value of `repulsion_strength`.
"""
return self.set_params(repulsion_strength=value)
def getNegativeSampleRate(self) -> int:
"""
Gets the value of `negative_sample_rate`.
"""
return self.getOrDefault("negative_sample_rate")
def setNegativeSampleRate(self: P, value: int) -> P:
"""
Sets the value of `negative_sample_rate`.
"""
return self.set_params(negative_sample_rate=value)
def getTransformQueueSize(self) -> float:
"""
Gets the value of `transform_queue_size`.
"""
return self.getOrDefault("transform_queue_size")
def setTransformQueueSize(self: P, value: float) -> P:
"""
Sets the value of `transform_queue_size`.
"""
return self.set_params(transform_queue_size=value)
def getA(self) -> float:
"""
Gets the value of `a`.
"""
return self.getOrDefault("a")
def setA(self: P, value: float) -> P:
"""
Sets the value of `a`.
"""
return self.set_params(a=value)
def getB(self) -> float:
"""
Gets the value of `b`.
"""
return self.getOrDefault("b")
def setB(self: P, value: float) -> P:
"""
Sets the value of `b`.
"""
return self.set_params(b=value)
def getPrecomputedKNN(self) -> List[List[float]]:
"""
Gets the value of `precomputed_knn`.
"""
return self.getOrDefault("precomputed_knn")
def setPrecomputedKNN(self: P, value: List[List[float]]) -> P:
"""
Sets the value of `precomputed_knn`.
"""
return self.set_params(precomputed_knn=value)
def getRandomState(self) -> int:
"""
Gets the value of `random_state`.
"""
return self.getOrDefault("random_state")
def setRandomState(self: P, value: int) -> P:
"""
Sets the value of `random_state`.
"""
return self.set_params(random_state=value)
def getSampleFraction(self) -> float:
"""
Gets the value of `sample_fraction`.
"""
return self.getOrDefault("sample_fraction")
def setSampleFraction(self: P, value: float) -> P:
"""
Sets the value of `sample_fraction`.
"""
return self.set_params(sample_fraction=value)
def getFeaturesCol(self) -> Union[str, List[str]]: # type: ignore
"""
Gets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`
"""
if self.isDefined(self.featuresCols):
return self.getFeaturesCols()
elif self.isDefined(self.featuresCol):
return self.getOrDefault("featuresCol")
else:
raise RuntimeError("featuresCol is not set")
def setFeaturesCol(self: P, value: Union[str, List[str]]) -> P:
"""
        Sets the value of :py:attr:`featuresCol` or :py:attr:`featuresCols`. A single string sets :py:attr:`featuresCol` (input vectors stored in one column); a list of strings sets :py:attr:`featuresCols` (one feature per column).
"""
if isinstance(value, str):
self.set_params(featuresCol=value)
else:
self.set_params(featuresCols=value)
return self
def setFeaturesCols(self: P, value: List[str]) -> P:
"""
Sets the value of :py:attr:`featuresCols`. Used when input vectors are stored as multiple feature columns.
"""
return self.set_params(featuresCols=value)
def setLabelCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`labelCol`.
"""
return self.set_params(labelCol=value)
def getOutputCol(self) -> str:
"""
Gets the value of :py:attr:`outputCol`. Contains the embeddings of the input data.
"""
return self.getOrDefault("outputCol")
def setOutputCol(self: P, value: str) -> P:
"""
Sets the value of :py:attr:`outputCol`. Contains the embeddings of the input data.
"""
return self.set_params(outputCol=value)
class UMAP(UMAPClass, _CumlEstimatorSupervised, _UMAPCumlParams):
"""
Uniform Manifold Approximation and Projection (UMAP) is a dimension reduction technique
used for low-dimensional data visualization and general non-linear dimension reduction.
The algorithm finds a low dimensional embedding of the data that approximates an underlying manifold.
The fit() method constructs a KNN-graph representation of an input dataset and then optimizes a
low dimensional embedding, and is performed on a single node. The transform() method transforms an input dataset
    into the optimized embedding space, and is performed in a distributed manner across the workers.
Parameters
----------
n_neighbors : float (optional, default=15)
The size of local neighborhood (in terms of number of neighboring sample points) used for
manifold approximation. Larger values result in more global views of the manifold, while
smaller values result in more local data being preserved. In general values should be in the range 2 to 100.
n_components : int (optional, default=2)
The dimension of the space to embed into. This defaults to 2 to provide easy visualization,
but can reasonably be set to any integer value in the range 2 to 100.
metric : str (optional, default='euclidean')
Distance metric to use. Supported distances are ['l1', 'cityblock', 'taxicab', 'manhattan', 'euclidean',
'l2', 'sqeuclidean', 'canberra', 'minkowski', 'chebyshev', 'linf', 'cosine', 'correlation', 'hellinger',
'hamming', 'jaccard']. Metrics that take arguments (such as minkowski) can have arguments passed via the
metric_kwds dictionary.
n_epochs : int (optional, default=None)
The number of training epochs to be used in optimizing the low dimensional embedding. Larger values result
in more accurate embeddings. If None is specified a value will be selected based on the size of the input dataset
(200 for large datasets, 500 for small).
learning_rate : float (optional, default=1.0)
The initial learning rate for the embedding optimization.
init : str (optional, default='spectral')
How to initialize the low dimensional embedding. Options are:
'spectral': use a spectral embedding of the fuzzy 1-skeleton
'random': assign initial embedding positions at random.
min_dist : float (optional, default=0.1)
The effective minimum distance between embedded points. Smaller values will result in a more clustered/clumped
embedding where nearby points on the manifold are drawn closer together, while larger values will result in a
more even dispersal of points. The value should be set relative to the ``spread`` value, which determines the
scale at which embedded points will be spread out.
spread : float (optional, default=1.0)
The effective scale of embedded points. In combination with ``min_dist`` this determines how clustered/clumped
the embedded points are.
set_op_mix_ratio : float (optional, default=1.0)
Interpolate between (fuzzy) union and intersection as the set operation used to combine local fuzzy simplicial
sets to obtain a global fuzzy simplicial sets. Both fuzzy set operations use the product t-norm. The value of
this parameter should be between 0.0 and 1.0; a value of 1.0 will use a pure fuzzy union, while 0.0 will use a
pure fuzzy intersection.
local_connectivity : int (optional, default=1)
The local connectivity required -- i.e. the number of nearest neighbors that should be assumed to be connected
at a local level. The higher this value the more connected the manifold becomes locally. In practice this should
be not more than the local intrinsic dimension of the manifold.
repulsion_strength : float (optional, default=1.0)
Weighting applied to negative samples in low dimensional embedding optimization. Values higher than one will
result in greater weight being given to negative samples.
negative_sample_rate : int (optional, default=5)
The number of negative samples to select per positive sample in the optimization process. Increasing this value
will result in greater repulsive force being applied, greater optimization cost, but slightly more accuracy.
transform_queue_size : float (optional, default=4.0)
For transform operations (embedding new points using a trained model), this will control how aggressively to
search for nearest neighbors. Larger values will result in slower performance but more accurate nearest neighbor
evaluation.
a : float (optional, default=None)
More specific parameters controlling the embedding. If None these values are set automatically as determined
by ``min_dist`` and ``spread``.
b : float (optional, default=None)
More specific parameters controlling the embedding. If None these values are set automatically as determined
by ``min_dist`` and ``spread``.
precomputed_knn : array / sparse array / tuple - device or host (optional, default=None)
Either one of a tuple (indices, distances) of arrays of shape (n_samples, n_neighbors), a pairwise distances
dense array of shape (n_samples, n_samples) or a KNN graph sparse array (preferably CSR/COO). This feature
allows the precomputation of the KNN outside of UMAP and also allows the use of a custom distance function.
        This function should match the metric used to train the UMAP embeddings.
random_state : int, RandomState instance (optional, default=None)
The seed used by the random number generator during embedding initialization and during sampling used by the
optimizer. Unfortunately, achieving a high amount of parallelism during the optimization stage often comes at
the expense of determinism, since many floating-point additions are being made in parallel without a deterministic
ordering. This causes slightly different results across training sessions, even when the same seed is used for
random number generation. Setting a random_state will enable consistency of trained embeddings, allowing for
reproducible results to 3 digits of precision, but will do so at the expense of training time and memory usage.
verbose :
Logging level.
* ``0`` - Disables all log messages.
* ``1`` - Enables only critical messages.
* ``2`` - Enables all messages up to and including errors.
* ``3`` - Enables all messages up to and including warnings.
* ``4 or False`` - Enables all messages up to and including information messages.
* ``5 or True`` - Enables all messages up to and including debug messages.
* ``6`` - Enables all messages up to and including trace messages.
sample_fraction : float (optional, default=1.0)
The fraction of the dataset to be used for fitting the model. Since fitting is done on a single node, very large
datasets must be subsampled to fit within the node's memory and execute in a reasonable time. Smaller fractions
will result in faster training, but may result in sub-optimal embeddings.
featuresCol: str
The name of the column that contains input vectors. featuresCol should be set when input vectors are stored
in a single column of a dataframe.
featuresCols: List[str]
The names of the columns that contain input vectors. featuresCols should be set when input vectors are stored
in multiple columns of a dataframe.
labelCol: str (optional)
The name of the column that contains labels. If provided, supervised fitting will be performed, where labels
will be taken into account when optimizing the embedding.
outputCol: str (optional)
The name of the column that contains embeddings. If not provided, the default name of "embedding" will be used.
Examples
--------
>>> from spark_rapids_ml.umap import UMAP
>>> from cuml.datasets import make_blobs
>>> import cupy as cp
>>> X, _ = make_blobs(500, 5, centers=42, cluster_std=0.1, dtype=np.float32, random_state=10)
>>> feature_cols = [f"c{i}" for i in range(X.shape[1])]
>>> schema = [f"{c} {"float"}" for c in feature_cols]
>>> df = spark.createDataFrame(X.tolist(), ",".join(schema))
>>> df = df.withColumn("features", array(*feature_cols)).drop(*feature_cols)
>>> df.show(10, False)
+--------------------------------------------------------+
|features |
+--------------------------------------------------------+
|[1.5578103, -9.300072, 9.220654, 4.5838223, -3.2613218] |
|[9.295866, 1.3326015, -4.6483326, 4.43685, 6.906736] |
|[1.1148645, 0.9800974, -9.67569, -8.020592, -3.748023] |
|[-4.6454153, -8.095899, -4.9839406, 7.954683, -8.15784] |
|[-6.5075264, -5.538241, -6.740191, 3.0490158, 4.1693997]|
|[7.9449835, 4.142317, 6.207676, 3.202615, 7.1319785] |
|[-0.3837125, 6.826891, -4.35618, -9.582829, -1.5456663] |
|[2.5012932, 4.2080708, 3.5172815, 2.5741744, -6.291008] |
|[9.317718, 1.3419528, -4.832837, 4.5362573, 6.9357944] |
|[-6.65039, -5.438729, -6.858565, 2.9733503, 3.99863] |
+--------------------------------------------------------+
only showing top 10 rows
>>> umap_estimator = UMAP(sample_fraction=0.5, num_workers=3).setFeaturesCol("features")
>>> umap_model = umap_estimator.fit(df)
>>> output = umap_model.transform(df).toPandas()
>>> embedding = cp.asarray(output["embedding"].to_list())
>>> print("First 10 embeddings:")
>>> print(embedding[:10])
First 10 embeddings:
[[ 5.378397 6.504756 ]
[ 12.531521 13.946098 ]
[ 11.990916 6.049594 ]
[-14.175631 7.4849815]
[ 7.065363 -16.75355 ]
[ 1.8876278 1.0889664]
[ 0.6557462 17.965862 ]
[-16.220764 -6.4817486]
[ 12.476492 13.80965 ]
[ 6.823325 -16.71719 ]]
"""
def __init__(self, **kwargs: Any) -> None:
super().__init__()
if not kwargs.get("float32_inputs", True):
get_logger(self.__class__).warning(
"This estimator does not support double precision inputs. Setting float32_inputs to False will be ignored."
)
kwargs.pop("float32_inputs")
self.set_params(**kwargs)
max_records_per_batch_str = _get_spark_session().conf.get(
"spark.sql.execution.arrow.maxRecordsPerBatch", "10000"
)
assert max_records_per_batch_str is not None
self.max_records_per_batch = int(max_records_per_batch_str)
self.BROADCAST_LIMIT = 8 << 30
def _create_pyspark_model(self, result: Row) -> _CumlModel:
raise NotImplementedError("UMAP does not support model creation from Row")
def _fit(self, dataset: DataFrame) -> "UMAPModel":
if self.getSampleFraction() < 1.0:
data_subset = dataset.sample(
withReplacement=False,
fraction=self.getSampleFraction(),
seed=self.cuml_params["random_state"],
)
else:
data_subset = dataset
input_num_workers = self.num_workers
# Force to single partition, single worker
self._num_workers = 1
if data_subset.rdd.getNumPartitions() != 1:
data_subset = data_subset.coalesce(1)
df_output = self._call_cuml_fit_func_dataframe(
dataset=data_subset,
partially_collect=False,
paramMaps=None,
)
pdf_output: PandasDataFrame = df_output.toPandas()
# Collect and concatenate row-by-row fit results
embeddings = np.array(
list(
pd.concat(
[pd.Series(x) for x in pdf_output["embedding_"]], ignore_index=True
)
),
dtype=np.float32,
)
raw_data = np.array(
list(
pd.concat(
[pd.Series(x) for x in pdf_output["raw_data_"]], ignore_index=True
)
),
dtype=np.float32,
)
del pdf_output
def _chunk_arr(
arr: np.ndarray, BROADCAST_LIMIT: int = self.BROADCAST_LIMIT
) -> List[np.ndarray]:
"""Chunk an array, if oversized, into smaller arrays that can be broadcasted."""
if arr.nbytes <= BROADCAST_LIMIT:
return [arr]
rows_per_chunk = BROADCAST_LIMIT // (arr.nbytes // arr.shape[0])
num_chunks = (arr.shape[0] + rows_per_chunk - 1) // rows_per_chunk
chunks = [
arr[i * rows_per_chunk : (i + 1) * rows_per_chunk]
for i in range(num_chunks)
]
return chunks
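        # Sketch of the chunk sizing above (shapes assumed for illustration): for a
        # float32 array of shape (1_000_000, 256), arr.nbytes // arr.shape[0] is the
        # per-row size of 1024 bytes, so rows_per_chunk = BROADCAST_LIMIT // 1024 and
        # every chunk stays under the 8 GiB broadcast limit set in __init__.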
spark = _get_spark_session()
broadcast_embeddings = [
spark.sparkContext.broadcast(chunk) for chunk in _chunk_arr(embeddings)
]
broadcast_raw_data = [
spark.sparkContext.broadcast(chunk) for chunk in _chunk_arr(raw_data)
]
model = UMAPModel(
embedding_=broadcast_embeddings,
raw_data_=broadcast_raw_data,
n_cols=len(raw_data[0]),
dtype=type(raw_data[0][0]).__name__,
)
model._num_workers = input_num_workers
self._copyValues(model)
self._copy_cuml_params(model) # type: ignore
return model
def _fit_array_order(self) -> _ArrayOrder:
return "C"
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
array_order = self._fit_array_order()
def _cuml_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
from cuml.manifold import UMAP as CumlUMAP
umap_object = CumlUMAP(
**params[param_alias.cuml_init],
)
df_list = [x for (x, _, _) in dfs]
if isinstance(df_list[0], pd.DataFrame):
concated = pd.concat(df_list)
else:
concated = _concat_and_free(df_list, order=array_order)
if dfs[0][1] is not None:
# If labels are provided, call supervised fit
label_list = [x for (_, x, _) in dfs]
if isinstance(label_list[0], pd.DataFrame):
labels = pd.concat(label_list)
else:
labels = _concat_and_free(label_list, order=array_order)
umap_model = umap_object.fit(concated, y=labels)
else:
# Call unsupervised fit
umap_model = umap_object.fit(concated)
embedding = umap_model.embedding_
del umap_model
return {"embedding": embedding, "raw_data": concated}
return _cuml_fit
def _call_cuml_fit_func_dataframe(
self,
dataset: DataFrame,
partially_collect: bool = True,
paramMaps: Optional[Sequence["ParamMap"]] = None,
) -> DataFrame:
"""
Fits a model to the input dataset. This replaces _call_cuml_fit_func() to omit barrier stages and return a dataframe
rather than an RDD.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
Returns
-------
output : :py:class:`pyspark.sql.DataFrame`
fitted model attributes
"""
cls = self.__class__
select_cols, multi_col_names, _, _ = self._pre_process_data(dataset)
dataset = dataset.select(*select_cols)
is_local = _is_local(_get_spark_session().sparkContext)
cuda_managed_mem_enabled = (
_get_spark_session().conf.get("spark.rapids.ml.uvm.enabled", "false")
== "true"
)
if cuda_managed_mem_enabled:
get_logger(cls).info("CUDA managed memory enabled.")
# parameters passed to subclass
params: Dict[str, Any] = {
param_alias.cuml_init: self.cuml_params,
}
params[param_alias.fit_multiple_params] = []
cuml_fit_func = self._get_cuml_fit_func(dataset, None)
array_order = self._fit_array_order()
cuml_verbose = self.cuml_params.get("verbose", False)
chunk_size = self.max_records_per_batch
def _train_udf(pdf_iter: Iterable[pd.DataFrame]) -> Iterable[pd.DataFrame]:
from pyspark import TaskContext
logger = get_logger(cls)
logger.info("Initializing cuml context")
import cupy as cp
if cuda_managed_mem_enabled:
import rmm
from rmm.allocators.cupy import rmm_cupy_allocator
rmm.reinitialize(managed_memory=True)
cp.cuda.set_allocator(rmm_cupy_allocator)
_CumlCommon.initialize_cuml_logging(cuml_verbose)
context = TaskContext.get()
# set gpu device
_CumlCommon.set_gpu_device(context, is_local)
# handle the input
# inputs = [(X, Optional(y)), (X, Optional(y))]
logger.info("Loading data into python worker memory")
inputs = []
sizes = []
for pdf in pdf_iter:
sizes.append(pdf.shape[0])
if multi_col_names:
features = np.array(pdf[multi_col_names], order=array_order)
else:
features = np.array(list(pdf[alias.data]), order=array_order)
# experiments indicate it is faster to convert to numpy array and then to cupy array than directly
# invoking cupy array on the list
if cuda_managed_mem_enabled:
features = cp.array(features)
label = pdf[alias.label] if alias.label in pdf.columns else None
row_number = (
pdf[alias.row_number] if alias.row_number in pdf.columns else None
)
inputs.append((features, label, row_number))
# call the cuml fit function
# *note*: cuml_fit_func may delete components of inputs to free
# memory. do not rely on inputs after this call.
embedding, raw_data = cuml_fit_func(inputs, params).values()
logger.info("Cuml fit complete")
num_sections = (len(embedding) + chunk_size - 1) // chunk_size
for i in range(num_sections):
start = i * chunk_size
end = min((i + 1) * chunk_size, len(embedding))
yield pd.DataFrame(
data=[
{
"embedding_": embedding[start:end].tolist(),
"raw_data_": raw_data[start:end].tolist(),
}
]
)
output_df = dataset.mapInPandas(_train_udf, schema=self._out_schema())
return output_df
def _require_nccl_ucx(self) -> Tuple[bool, bool]:
return (False, False)
def _out_schema(self) -> Union[StructType, str]:
return StructType(
[
StructField(
"embedding_",
ArrayType(ArrayType(FloatType(), False), False),
False,
),
StructField(
"raw_data_",
ArrayType(ArrayType(FloatType(), False), False),
False,
),
]
)
def _pre_process_data(
self, dataset: DataFrame
) -> Tuple[
List[Column], Optional[List[str]], int, Union[Type[FloatType], Type[DoubleType]]
]:
(
select_cols,
multi_col_names,
dimension,
feature_type,
) = super(
_CumlEstimatorSupervised, self
)._pre_process_data(dataset)
if self.getLabelCol() in dataset.schema.names:
select_cols.append(self._pre_process_label(dataset, feature_type))
return select_cols, multi_col_names, dimension, feature_type
class UMAPModel(_CumlModel, UMAPClass, _UMAPCumlParams):
def __init__(
self,
embedding_: List[pyspark.broadcast.Broadcast],
raw_data_: List[pyspark.broadcast.Broadcast],
n_cols: int,
dtype: str,
) -> None:
super(UMAPModel, self).__init__(
embedding_=embedding_,
raw_data_=raw_data_,
n_cols=n_cols,
dtype=dtype,
)
self.embedding_ = embedding_
self.raw_data_ = raw_data_
@property
def embedding(self) -> List[List[float]]:
res = []
for chunk in self.embedding_:
res.extend(chunk.value.tolist())
return res
@property
def raw_data(self) -> List[List[float]]:
res = []
for chunk in self.raw_data_:
res.extend(chunk.value.tolist())
return res
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
cuml_alg_params = self.cuml_params
driver_embedding = self.embedding_
driver_raw_data = self.raw_data_
outputCol = self.getOutputCol()
def _construct_umap() -> CumlT:
import cupy as cp
from cuml.common import SparseCumlArray
from cuml.common.sparse_utils import is_sparse
from cuml.manifold import UMAP as CumlUMAP
from .utils import cudf_to_cuml_array
nonlocal driver_embedding, driver_raw_data
embedding = (
driver_embedding[0].value
if len(driver_embedding) == 1
else np.concatenate([chunk.value for chunk in driver_embedding])
)
raw_data = (
driver_raw_data[0].value
if len(driver_raw_data) == 1
else np.concatenate([chunk.value for chunk in driver_raw_data])
)
del driver_embedding
del driver_raw_data
if embedding.dtype != np.float32:
embedding = embedding.astype(np.float32)
raw_data = raw_data.astype(np.float32)
if is_sparse(raw_data):
raw_data_cuml = SparseCumlArray(raw_data, convert_format=False)
else:
raw_data_cuml = cudf_to_cuml_array(
raw_data,
order="C",
)
internal_model = CumlUMAP(**cuml_alg_params)
internal_model.embedding_ = cp.array(embedding).data
internal_model._raw_data = raw_data_cuml
return internal_model
def _transform_internal(
umap: CumlT,
df: Union[pd.DataFrame, np.ndarray],
) -> pd.Series:
embedding = umap.transform(df)
is_df_np = isinstance(df, np.ndarray)
is_emb_np = isinstance(embedding, np.ndarray)
# Input is either numpy array or pandas dataframe
input_list = [
df[i, :] if is_df_np else df.iloc[i, :] for i in range(df.shape[0]) # type: ignore
]
emb_list = [
embedding[i, :] if is_emb_np else embedding.iloc[i, :]
for i in range(embedding.shape[0])
]
result = pd.DataFrame(
{
"features": input_list,
outputCol: emb_list,
}
)
return result
return _construct_umap, _transform_internal, None
def _require_nccl_ucx(self) -> Tuple[bool, bool]:
return (False, False)
def _out_schema(self, input_schema: StructType) -> Union[StructType, str]:
return StructType(
[
StructField("features", ArrayType(FloatType(), False), False),
StructField(self.getOutputCol(), ArrayType(FloatType(), False), False),
]
)
def get_model_attributes(self) -> Optional[Dict[str, Any]]:
"""
Override parent method to bring broadcast variables to driver before JSON serialization.
"""
self._model_attributes["embedding_"] = [
chunk.value for chunk in self.embedding_
]
self._model_attributes["raw_data_"] = [chunk.value for chunk in self.raw_data_]
return self._model_attributes
def write(self) -> MLWriter:
return _CumlModelWriterNumpy(self)
@classmethod
def read(cls) -> MLReader:
return _CumlModelReaderNumpy(cls)
class _CumlModelWriterNumpy(_CumlModelWriter):
"""
Override parent writer to save numpy objects of _CumlModel to the file
"""
def saveImpl(self, path: str) -> None:
DefaultParamsWriter.saveMetadata(
self.instance,
path,
self.sc,
extraMetadata={
"_cuml_params": self.instance._cuml_params,
"_num_workers": self.instance._num_workers,
"_float32_inputs": self.instance._float32_inputs,
},
)
data_path = os.path.join(path, "data")
model_attributes = self.instance.get_model_attributes()
if not os.path.exists(data_path):
os.makedirs(data_path)
assert model_attributes is not None
for key, value in model_attributes.items():
if isinstance(value, list) and isinstance(value[0], np.ndarray):
paths = []
for idx, chunk in enumerate(value):
array_path = os.path.join(data_path, f"{key}_{idx}.npy")
np.save(array_path, chunk)
paths.append(array_path)
model_attributes[key] = paths
metadata_file_path = os.path.join(data_path, "metadata.json")
model_attributes_str = json.dumps(model_attributes)
self.sc.parallelize([model_attributes_str], 1).saveAsTextFile(
metadata_file_path
)
class _CumlModelReaderNumpy(_CumlModelReader):
"""
Override parent reader to instantiate numpy objects of _CumlModel from file
"""
def load(self, path: str) -> "_CumlEstimator":
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
data_path = os.path.join(path, "data")
metadata_file_path = os.path.join(data_path, "metadata.json")
model_attr_str = self.sc.textFile(metadata_file_path).collect()[0]
model_attr_dict = json.loads(model_attr_str)
for key, value in model_attr_dict.items():
if isinstance(value, list) and value[0].endswith(".npy"):
arrays = []
spark = _get_spark_session()
for array_path in value:
array = np.load(array_path)
arrays.append(spark.sparkContext.broadcast(array))
model_attr_dict[key] = arrays
instance = self.model_cls(**model_attr_dict)
DefaultParamsReader.getAndSetParams(instance, metadata)
instance._cuml_params = metadata["_cuml_params"]
instance._num_workers = metadata["_num_workers"]
instance._float32_inputs = metadata["_float32_inputs"]
return instance
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/umap.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import logging
import sys
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Set, Tuple, Union
if TYPE_CHECKING:
import cudf
import cupy as cp
import numpy as np
from pyspark import BarrierTaskContext, SparkContext, TaskContext
from pyspark.sql import SparkSession
_ArrayOrder = Literal["C", "F"]
def _method_names_from_param(spark_param_name: str) -> List[str]:
"""
Returns getter and setter method names, per Spark ML conventions, for passed in attribute.
"""
cap = spark_param_name[0].upper() + spark_param_name[1:]
getter = f"get{cap}"
setter = f"set{cap}"
return [getter, setter]
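# Illustrative example (assumed usage):
# >>> _method_names_from_param("inputCol")
# ['getInputCol', 'setInputCol']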
def _unsupported_methods_attributes(clazz: Any) -> Set[str]:
"""
Returns set of methods and attributes not supported by spark-rapids-ml for passed in class
as determined from empty values in the dictionary returned by _param_mapping() invoked on the class.
"""
if "_param_mapping" in [
member_name for member_name, _ in inspect.getmembers(clazz, inspect.ismethod)
]:
param_map = clazz._param_mapping()
_unsupported_params = [k for k, v in param_map.items() if not v]
_unsupported_methods: List[str] = sum(
[_method_names_from_param(k) for k in _unsupported_params], []
)
return set(_unsupported_params + _unsupported_methods)
else:
return set()
def _get_spark_session() -> SparkSession:
"""Get or create spark session.
Note: This function can only be invoked from driver side."""
if TaskContext.get() is not None:
# safety check.
raise RuntimeError(
"_get_spark_session should not be invoked from executor side."
)
return SparkSession.builder.getOrCreate()
def _is_local(sc: SparkContext) -> bool:
"""Whether it is Spark local mode"""
return sc._jsc.sc().isLocal() # type: ignore
def _is_standalone_or_localcluster(sc: SparkContext) -> bool:
master = sc.getConf().get("spark.master")
return master is not None and (
master.startswith("spark://") or master.startswith("local-cluster")
)
def _str_or_numerical(x: str) -> Union[str, float, int]:
"""
Convert to int if x is str representation of integer,
otherwise float if x is representation of float, otherwise return input string.
"""
try:
_x: Union[str, int, float] = int(x)
except:
try:
_x = float(x)
except:
_x = x
return _x
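# Illustrative examples (assumed): _str_or_numerical("3") -> 3,
# _str_or_numerical("3.5") -> 3.5, _str_or_numerical("auto") -> "auto".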
def _get_gpu_id(task_context: TaskContext) -> int:
"""Get the gpu id from the task resources"""
import os
if "CUDA_VISIBLE_DEVICES" in os.environ:
if os.environ["CUDA_VISIBLE_DEVICES"]:
num_assigned = len(os.environ["CUDA_VISIBLE_DEVICES"].split(","))
# when CUDA_VISIBLE_DEVICES is set and non-empty, use 0-th index entry
gpu_id = 0
else:
raise RuntimeError(
"Couldn't get gpu id since CUDA_VISIBLE_DEVICES is set to an empty string. Please check the GPU resource configuration."
)
else:
if task_context is None:
# safety check.
raise RuntimeError("_get_gpu_id should not be invoked from driver side.")
resources = task_context.resources()
if "gpu" not in resources:
raise RuntimeError(
"Couldn't get the gpu id, Please check the GPU resource configuration."
)
num_assigned = len(resources["gpu"].addresses)
# return the first gpu id.
gpu_id = int(resources["gpu"].addresses[0].strip())
if num_assigned > 1:
logger = get_logger(_get_gpu_id)
logger.warning(
f"Task got assigned {num_assigned} GPUs but using only 1. This could be a waste of GPU resources."
)
return gpu_id
def _get_default_params_from_func(
func: Callable, unsupported_set: List[str] = []
) -> Dict[str, Any]:
"""
Returns a dictionary of parameters and their default value of function fn.
Only the parameters with a default value will be included.
"""
sig = inspect.signature(func)
filtered_params_dict = {}
for parameter in sig.parameters.values():
# Remove parameters without a default value and those in the unsupported_set
if (
parameter.default is not parameter.empty
and parameter.name not in unsupported_set
):
filtered_params_dict[parameter.name] = parameter.default
return filtered_params_dict
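# Illustrative sketch (hypothetical function, not part of this module):
#   def fn(a, b=1, verbose=False): ...
# _get_default_params_from_func(fn, ["verbose"]) returns {"b": 1}: parameters without a
# default ("a") and those in the unsupported set ("verbose") are filtered out.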
def _get_class_or_callable_name(cls_or_callable: Union[type, Callable]) -> str:
"""
Return the class name.
"""
return f"{cls_or_callable.__module__}.{cls_or_callable.__name__}"
class PartitionDescriptor:
"""
Partition descriptor
m: total number of rows across all workers
n: total number of cols
    parts_rank_size: a sequence of (rank, rows per partition) pairs
rank: rank to be mapped
"""
def __init__(
self, m: int, n: int, rank: int, parts_rank_size: List[Tuple[int, int]]
) -> None:
super().__init__()
self.m = m
self.n = n
self.rank = rank
self.parts_rank_size = parts_rank_size
@classmethod
def build(cls, partition_rows: List[int], total_cols: int) -> "PartitionDescriptor":
context = BarrierTaskContext.get()
if context is None:
# safety check.
raise RuntimeError("build should not be invoked from driver side.")
rank = context.partitionId()
# prepare (parts, rank)
import json
rank_size = [(rank, size) for size in partition_rows]
messages = context.allGather(message=json.dumps(rank_size))
parts_rank_size = [item for pair in messages for item in json.loads(pair)]
total_rows = sum(pair[1] for pair in parts_rank_size)
return cls(total_rows, total_cols, rank, parts_rank_size)
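# Sketch of PartitionDescriptor.build's metadata exchange (values assumed for
# illustration): with rank 0 holding one partition of 100 rows and rank 1 holding one of
# 150 rows, allGather yields parts_rank_size == [[0, 100], [1, 150]] on every worker
# (tuples become lists after the JSON round trip) and m == 250, so each rank knows the
# global row count as well as every peer's share.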
def _concat_and_free(
array_list: Union[List["cp.ndarray"], List[np.ndarray]], order: _ArrayOrder = "F"
) -> Union["cp.ndarray", np.ndarray]:
"""
concatenates a list of compatible numpy arrays into a 'order' ordered output array,
in a memory efficient way.
Note: frees list elements so do not reuse after calling.
"""
import cupy as cp
array_module = cp if isinstance(array_list[0], cp.ndarray) else np
rows = sum(arr.shape[0] for arr in array_list)
if len(array_list[0].shape) > 1:
cols = array_list[0].shape[1]
concat_shape: Tuple[int, ...] = (rows, cols)
else:
concat_shape = (rows,)
d_type = array_list[0].dtype
concated = array_module.empty(shape=concat_shape, order=order, dtype=d_type)
array_module.concatenate(array_list, out=concated)
del array_list[:]
return concated
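# Minimal usage sketch (arrays assumed for illustration):
#   a = np.ones((2, 3), dtype=np.float32); b = np.zeros((1, 3), dtype=np.float32)
#   out = _concat_and_free([a, b], order="F")  # shape (3, 3), Fortran-ordered
# Note that the input list is emptied in place, so the caller must not reuse it.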
def cudf_to_cuml_array(gdf: Union["cudf.DataFrame", "cudf.Series"], order: str = "F"): # type: ignore
try:
# Compatible with older cuml version (before 23.02)
from cuml.common.input_utils import input_to_cuml_array
except ImportError:
from cuml.common import input_to_cuml_array
cumlarray, _, _, _ = input_to_cuml_array(gdf, order=order)
return cumlarray
def dtype_to_pyspark_type(dtype: Union[np.dtype, str]) -> str:
"""Convert np.dtype to the corresponding pyspark type"""
dtype = np.dtype(dtype)
if dtype == np.float32:
return "float"
elif dtype == np.float64:
return "double"
elif dtype == np.int32:
return "int"
elif dtype == np.int16:
return "short"
else:
raise RuntimeError("Unsupported dtype, found ", dtype)
# similar to https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/spark/utils.py
def get_logger(
cls_or_callable: Union[type, Callable], level: str = "INFO"
) -> logging.Logger:
"""Gets a logger by name, or creates and configures it for the first time."""
name = _get_class_or_callable_name(cls_or_callable)
logger = logging.getLogger(name)
logger.setLevel(level)
# If the logger is configured, skip the configure
if not logger.handlers and not logging.getLogger().handlers:
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
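# Illustrative sketch (editor addition): typical use of get_logger within this package.
#
#     logger = get_logger(PartitionDescriptor, level="DEBUG")
#     logger.debug("building partition descriptor")
#
# The logger name becomes "spark_rapids_ml.utils.PartitionDescriptor"; a stderr handler is
# attached only if neither this logger nor the root logger already has handlers.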
def java_uid(sc: SparkContext, prefix: str) -> str:
"""Returns a random UID that concatenates the given prefix, "_", and 12 random hex chars."""
assert sc._jvm is not None
return sc._jvm.org.apache.spark.ml.util.Identifiable.randomUID(prefix)
def _fake_java_impurity_calc(sc: SparkContext, count: int = 3): # type: ignore
"""Fake a java ImpurityCalculator"""
assert sc._jvm is not None
assert sc._gateway is not None
object_class = sc._jvm.double
fake_python_impurity_calc = [0 for _ in range(count)]
fake_java_impurity_calc = sc._gateway.new_array(
object_class, len(fake_python_impurity_calc)
)
for i in range(len(fake_python_impurity_calc)):
        fake_java_impurity_calc[i] = float(fake_python_impurity_calc[i])
return fake_java_impurity_calc
def _create_internal_node(sc: SparkContext, impurity: str, model: Dict[str, Any], left, right): # type: ignore
"""Return a Java InternalNode"""
assert sc._jvm is not None
assert sc._gateway is not None
java_split = sc._jvm.org.apache.spark.ml.tree.ContinuousSplit(
int(model["split_feature"]), float(model["split_threshold"])
)
fake_java_impurity_calc = _fake_java_impurity_calc(sc)
if impurity == "gini":
java_impurity_cal = sc._jvm.org.apache.spark.mllib.tree.impurity.GiniCalculator(
fake_java_impurity_calc, int(model["instance_count"])
)
elif impurity == "entropy":
java_impurity_cal = (
sc._jvm.org.apache.spark.mllib.tree.impurity.EntropyCalculator(
fake_java_impurity_calc, int(model["instance_count"])
)
)
elif impurity == "variance":
java_impurity_cal = (
sc._jvm.org.apache.spark.mllib.tree.impurity.VarianceCalculator(
fake_java_impurity_calc, int(model["instance_count"])
)
)
else:
# never reach here
raise ValueError("Unsupported impurity! ", impurity)
java_internal_node = sc._jvm.org.apache.spark.ml.tree.InternalNode(
0.0, # prediction value is nonsense for internal node, just fake it
0.0, # impurity value is nonsense for internal node. just fake it
float(model["gain"]),
left,
right,
java_split,
java_impurity_cal,
)
return java_internal_node
def _create_leaf_node(sc: SparkContext, impurity: str, model: Dict[str, Any]): # type: ignore
"""Return a Java LeaftNode
Please note that, cuml trees uses probs as the leaf values while spark uses
the stats (how many counts this node has for each label), but they are behave
the same purpose when doing prediction
"""
assert sc._jvm is not None
assert sc._gateway is not None
leaf_values = model["leaf_value"]
if impurity == "gini" or impurity == "entropy":
object_class = sc._jvm.double
java_probs = sc._gateway.new_array(object_class, len(leaf_values))
for i in range(len(leaf_values)):
java_probs[i] = float(leaf_values[i])
java_impurity_cal = (
sc._jvm.org.apache.spark.mllib.tree.impurity.GiniCalculator(
java_probs, int(model["instance_count"])
)
if impurity == "gini"
else sc._jvm.org.apache.spark.mllib.tree.impurity.EntropyCalculator(
java_probs, int(model["instance_count"])
)
)
prediction = np.argmax(np.asarray(leaf_values))
elif impurity == "variance":
fake_java_impurity_calc = _fake_java_impurity_calc(sc, 3)
java_impurity_cal = (
sc._jvm.org.apache.spark.mllib.tree.impurity.VarianceCalculator(
fake_java_impurity_calc, int(model["instance_count"])
)
)
prediction = leaf_values[0]
else:
# never reach here
raise ValueError("Unsupported impurity! ", impurity)
java_leaf_node = sc._jvm.org.apache.spark.ml.tree.LeafNode(
float(prediction),
0.0, # TODO calculate the impurity according to leaf value, prediction doesn't require it.
java_impurity_cal,
)
return java_leaf_node
def translate_trees(sc: SparkContext, impurity: str, model: Dict[str, Any]): # type: ignore
"""Translate Cuml RandomForest trees to PySpark trees
Cuml trees
[
{
"nodeid": 0,
"split_feature": 3,
"split_threshold": 0.827687974732221,
"gain": 0.41999999999999998,
"instance_count": 10,
"yes": 1,
"no": 2,
"children": [
{
"nodeid": 1,
"leaf_value": [
1,
0
],
"instance_count": 7
},
{
"nodeid": 2,
"leaf_value": [
0,
1
],
"instance_count": 3
}
]
}
]
Spark trees,
InternalNode {split{featureIndex=3, threshold=0.827687974732221}, gain = 0.41999999999999998}
/ \
left right
/ \
LeafNode LeafNode
"""
if "split_feature" in model:
left_child_id = model["yes"]
right_child_id = model["no"]
for child in model["children"]:
if child["nodeid"] == left_child_id:
left_child = child
elif child["nodeid"] == right_child_id:
right_child = child
else:
raise ValueError("Unexpected node id")
return _create_internal_node(
sc,
impurity,
model,
translate_trees(sc, impurity, left_child),
translate_trees(sc, impurity, right_child),
)
elif "leaf_value" in model:
return _create_leaf_node(sc, impurity, model)
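# Illustrative sketch (editor addition): translating the cuML JSON tree shown in the
# docstring above. `sc` is assumed to be a live SparkContext with JVM access.
#
#     cuml_tree = {
#         "nodeid": 0, "split_feature": 3, "split_threshold": 0.8277, "gain": 0.42,
#         "instance_count": 10, "yes": 1, "no": 2,
#         "children": [
#             {"nodeid": 1, "leaf_value": [1, 0], "instance_count": 7},
#             {"nodeid": 2, "leaf_value": [0, 1], "instance_count": 3},
#         ],
#     }
#     root = translate_trees(sc, "gini", cuml_tree)
#     # root is a Java InternalNode whose left/right children are Java LeafNodes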
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/utils.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from multiprocessing.pool import ThreadPool
from typing import Any, Dict, List, Optional, Tuple, cast
import numpy as np
from pyspark import inheritable_thread_target
from pyspark.ml import Model
from pyspark.ml.tuning import CrossValidator as SparkCrossValidator
from pyspark.ml.tuning import CrossValidatorModel
from pyspark.ml.util import DefaultParamsReader
from pyspark.sql import DataFrame
from .core import _CumlEstimator, _CumlModel
def _gen_avg_and_std_metrics_(
metrics_all: List[List[float]],
) -> Tuple[List[float], List[float]]:
avg_metrics = np.mean(metrics_all, axis=0)
std_metrics = np.std(metrics_all, axis=0)
return list(avg_metrics), list(std_metrics)
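# Illustrative sketch (editor addition): with 3 folds and 2 candidate models, metrics_all
# might look like [[0.9, 0.7], [0.8, 0.6], [1.0, 0.8]]; the helper above then returns the
# per-model mean and standard deviation across folds:
#
#     avg, std = _gen_avg_and_std_metrics_([[0.9, 0.7], [0.8, 0.6], [1.0, 0.8]])
#     # avg -> [0.9, 0.7], std -> approximately [0.082, 0.082]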
class CrossValidator(SparkCrossValidator):
"""K-fold cross validation performs model selection by splitting the dataset into a set of
non-overlapping randomly partitioned folds which are used as separate training and test datasets
e.g., with k=3 folds, K-fold cross validation will generate 3 (training, test) dataset pairs,
each of which uses 2/3 of the data for training and 1/3 for testing. Each fold is used as the
test set exactly once.
It is the gpu version CrossValidator which fits multiple models in a single pass for a single
training dataset and transforms/evaluates in a single pass for multiple models.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.tuning import ParamGridBuilder, CrossValidatorModel
>>> from pyspark.ml.evaluation import MulticlassClassificationEvaluator
>>> from spark_rapids_ml.tuning import CrossValidator
>>> from spark_rapids_ml.classification import RandomForestClassifier
>>> import tempfile
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 2.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> rfc = RandomForestClassifier()
>>> grid = ParamGridBuilder().addGrid(rfc.maxBins, [8, 16]).build()
>>> evaluator = MulticlassClassificationEvaluator()
>>> cv = CrossValidator(estimator=rfc, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> cvModel = cv.fit(dataset)
...
>>> cvModel.getNumFolds()
3
>>> cvModel.avgMetrics[0]
1.0
>>> evaluator.evaluate(cvModel.transform(dataset))
1.0
>>> path = tempfile.mkdtemp()
>>> model_path = path + "/model"
>>> cvModel.write().save(model_path)
>>> cvModelRead = CrossValidatorModel.read().load(model_path)
>>> cvModelRead.avgMetrics
[1.0, 1.0]
>>> evaluator.evaluate(cvModel.transform(dataset))
1.0
>>> evaluator.evaluate(cvModelRead.transform(dataset))
1.0
"""
def _fit(self, dataset: DataFrame) -> "CrossValidatorModel":
est = self.getOrDefault(self.estimator)
eva = self.getOrDefault(self.evaluator)
# fallback at very early time.
if not (
isinstance(est, _CumlEstimator) and est._supportsTransformEvaluate(eva)
):
return super()._fit(dataset)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
nFolds = self.getOrDefault(self.numFolds)
metrics_all = [[0.0] * numModels for i in range(nFolds)]
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
subModels = None
collectSubModelsParam = self.getCollectSubModels()
if collectSubModelsParam:
subModels = [[None for j in range(numModels)] for i in range(nFolds)]
datasets = self._kFold(dataset)
        def singlePassTask(
fold: int,
) -> Tuple[int, List[float], Optional[List[_CumlModel]]]:
index_models = list(est.fitMultiple(datasets[fold][0], epm))
models = [model for _, model in index_models]
model = models[0]._combine(models)
metrics = model._transformEvaluate(datasets[fold][1], eva)
return fold, metrics, models if collectSubModelsParam else None
        for fold, metrics, fold_sub_models in pool.imap_unordered(
            inheritable_thread_target(singlePassTask), range(nFolds)
        ):
            metrics_all[fold] = metrics
            if collectSubModelsParam:
                assert subModels is not None
                subModels[fold] = fold_sub_models
metrics, std_metrics = _gen_avg_and_std_metrics_(metrics_all)
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
try:
model = CrossValidatorModel(
bestModel, metrics, cast(List[List[Model]], subModels), std_metrics
)
        except TypeError:
            # older pyspark versions do not accept std_metrics in CrossValidatorModel
            model = CrossValidatorModel(
                bestModel, metrics, cast(List[List[Model]], subModels)
            )
return self._copyValues(model)
@staticmethod
def _is_python_params_instance(metadata: Dict[str, Any]) -> bool:
        # If the class is not recognized as a Python Params instance, pyspark will try to load
        # spark_rapids_ml.tuning.CrossValidator from the JVM package. So we need to hack here.
return metadata["class"].startswith(("pyspark.ml.", "spark_rapids_ml."))
@classmethod
def load(cls, path: str) -> "CrossValidator":
orig_is_python_params_instance = DefaultParamsReader.isPythonParamsInstance
try:
# Replace isPythonParamsInstance
setattr(
DefaultParamsReader,
"isPythonParamsInstance",
CrossValidator._is_python_params_instance,
)
cv_pyspark = super().load(path)
cv = cls()
cv_pyspark._copyValues(cv)
finally:
# Must restore to the original isPythonParamsInstance
setattr(
DefaultParamsReader,
"isPythonParamsInstance",
orig_is_python_params_instance,
)
return cv
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/tuning.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import pandas as pd
from pyspark.ml.functions import vector_to_array
from pyspark.ml.linalg import VectorUDT
from pyspark.ml.param.shared import (
HasInputCol,
HasInputCols,
HasLabelCol,
Param,
Params,
TypeConverters,
)
from pyspark.ml.util import MLReader, MLWriter
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import col, lit, monotonically_increasing_id
from pyspark.sql.types import (
ArrayType,
DoubleType,
FloatType,
LongType,
Row,
StructField,
StructType,
)
from .core import (
CumlT,
FitInputType,
_ConstructFunc,
_CumlCaller,
_CumlEstimatorSupervised,
_CumlModel,
_EvaluateFunc,
_TransformFunc,
alias,
param_alias,
transform_evaluate,
)
from .params import P, _CumlClass, _CumlParams
from .utils import _concat_and_free, get_logger
class NearestNeighborsClass(_CumlClass):
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {"k": "n_neighbors"}
def _get_cuml_params_default(self) -> Dict[str, Any]:
return {"n_neighbors": 5, "verbose": False, "batch_size": 2000000}
class _NearestNeighborsCumlParams(_CumlParams, HasInputCol, HasLabelCol, HasInputCols):
"""
Shared Spark Params for NearestNeighbor and NearestNeighborModel.
"""
def __init__(self) -> None:
super().__init__()
self._setDefault(id_col=alias.row_number)
k = Param(
Params._dummy(),
"k",
"The number nearest neighbors to retrieve. Must be >= 1.",
typeConverter=TypeConverters.toInt,
)
id_col = Param(
Params._dummy(),
"id_col",
"id column name.",
typeConverter=TypeConverters.toString,
)
def setK(self: P, value: int) -> P:
"""
Sets the value of `k`.
"""
self.set_params(k=value)
return self
def setInputCol(self: P, value: Union[str, List[str]]) -> P:
"""
Sets the value of :py:attr:`inputCol` or :py:attr:`inputCols`. Used when input vectors are stored in a single column.
"""
if isinstance(value, str):
self.set_params(inputCol=value)
else:
self.set_params(inputCols=value)
return self
def setInputCols(self: P, value: List[str]) -> P:
"""
Sets the value of :py:attr:`inputCols`. Used when input vectors are stored as multiple feature columns.
"""
return self.set_params(inputCols=value)
def setIdCol(self: P, value: str) -> P:
"""
Sets the value of `id_col`. If not set, an id column will be added with column name `unique_id`. The id column is used to specify nearest neighbor vectors by associated id value.
"""
self.set_params(id_col=value)
return self
def getIdCol(self) -> str:
"""
Gets the value of `id_col`.
"""
return self.getOrDefault(self.id_col)
def _ensureIdCol(self, df: DataFrame) -> DataFrame:
"""
Ensure an id column exists in the input dataframe. Add the column if not exists.
"""
if not self.isSet("id_col") and self.getIdCol() in df.columns:
raise ValueError(
f"Cannot create a default id column since a column with the default name '{self.getIdCol()}' already exists."
+ "Please specify an id column"
)
id_col_name = self.getIdCol()
df_withid = (
df
if self.isSet("id_col")
else df.select(monotonically_increasing_id().alias(id_col_name), "*")
)
return df_withid
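# Illustrative sketch (editor addition): behavior of _ensureIdCol above, assuming `spark`
# is an active SparkSession. With no id column configured, a monotonically increasing
# default id column is prepended; when idCol is set, the dataframe is returned unchanged.
#
#     df = spark.createDataFrame([([1.0, 1.0],), ([2.0, 2.0],)], ["features"])
#     NearestNeighbors()._ensureIdCol(df).columns   # -> ['unique_id', 'features']
#     NearestNeighbors().setIdCol("id")             # an existing "id" column would be reused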
class NearestNeighbors(
NearestNeighborsClass, _CumlEstimatorSupervised, _NearestNeighborsCumlParams
):
"""
    NearestNeighbors retrieves the exact k nearest neighbors in the item vectors for each
    query vector. The main methods accept distributed CPU dataframes as inputs,
    leverage GPUs to accelerate computation, and take care of communication and
    aggregation automatically. Note, however, that only the Euclidean distance (also
    known as L2 distance) is supported in the current implementation, and the feature
    data type must be float; all other data types will be converted into float during
    computation.
Parameters
----------
k: int (default = 5)
        the default number of nearest neighbors to retrieve for each query.
inputCol: str
the name of the column that contains input vectors. inputCol should be set when feature vectors
are stored in a single column of a dataframe.
inputCols: List[str]
the names of feature columns that form input vectors. inputCols should be set when input vectors
are stored as multiple feature columns of a dataframe.
idCol: str
the name of the column in a dataframe that uniquely identifies each vector. idCol should be set
if such a column exists in the dataframe. If idCol is not set, a column with the name `unique_id`
will be automatically added to the dataframe and used as unique identifier for each vector.
Examples
--------
>>> from spark_rapids_ml.knn import NearestNeighbors
>>> data = [(0, [1.0, 1.0]),
... (1, [2.0, 2.0]),
... (2, [3.0, 3.0]),]
>>> data_df = spark.createDataFrame(data, schema="id int, features array<float>")
>>> query = [(3, [1.0, 1.0]),
... (4, [3.0, 3.0]),]
>>> query_df = spark.createDataFrame(query, schema="id int, features array<float>")
>>> topk = 2
>>> gpu_knn = NearestNeighbors().setInputCol("features").setIdCol("id").setK(topk)
>>> gpu_model = gpu_knn.fit(data_df)
>>> (data_df, query_df, knn_df) = gpu_model.kneighbors(query_df)
>>> knn_df.show()
+--------+-------+----------------+
|query_id|indices| distances|
+--------+-------+----------------+
| 3| [0, 1]|[0.0, 1.4142135]|
| 4| [2, 1]|[0.0, 1.4142135]|
+--------+-------+----------------+
>>> data_df.show()
+---+----------+
| id| features|
+---+----------+
| 0|[1.0, 1.0]|
| 1|[2.0, 2.0]|
| 2|[3.0, 3.0]|
+---+----------+
>>> query_df.show()
+---+----------+
| id| features|
+---+----------+
| 3|[1.0, 1.0]|
| 4|[3.0, 3.0]|
+---+----------+
>>> knnjoin_df = gpu_model.exactNearestNeighborsJoin(query_df, distCol="EuclideanDistance")
>>> knnjoin_df.show()
+---------------+---------------+-----------------+
| item_df| query_df|EuclideanDistance|
+---------------+---------------+-----------------+
|{1, [2.0, 2.0]}|{3, [1.0, 1.0]}| 1.4142135|
|{0, [1.0, 1.0]}|{3, [1.0, 1.0]}| 0.0|
|{2, [3.0, 3.0]}|{4, [3.0, 3.0]}| 0.0|
|{1, [2.0, 2.0]}|{4, [3.0, 3.0]}| 1.4142135|
+---------------+---------------+-----------------+
>>> # vector column input
>>> from spark_rapids_ml.knn import NearestNeighbors
>>> from pyspark.ml.linalg import Vectors
>>> data = [(0, Vectors.dense([1.0, 1.0]),),
... (1, Vectors.dense([2.0, 2.0]),),
... (2, Vectors.dense([3.0, 3.0]),)]
>>> data_df = spark.createDataFrame(data, ["id", "features"])
>>> query = [(3, Vectors.dense([1.0, 1.0]),),
... (4, Vectors.dense([3.0, 3.0]),)]
>>> query_df = spark.createDataFrame(query, ["id", "features"])
>>> topk = 2
>>> gpu_knn = NearestNeighbors().setInputCol("features").setIdCol("id").setK(topk)
>>> gpu_model = gpu_knn.fit(data_df)
>>> # multi-column input
>>> from spark_rapids_ml.knn import NearestNeighbors
>>> data = [(0, 1.0, 1.0),
... (1, 2.0, 2.0),
... (2, 3.0, 3.0),]
>>> data_df = spark.createDataFrame(data, schema="id int, f1 float, f2 float")
>>> query = [(3, 1.0, 1.0),
... (4, 3.0, 3.0),]
>>> query_df = spark.createDataFrame(query, schema="id int, f1 float, f2 float")
>>> topk = 2
>>> gpu_knn = NearestNeighbors().setInputCols(["f1", "f2"]).setIdCol("id").setK(topk)
>>> gpu_model = gpu_knn.fit(data_df)
"""
def __init__(self, **kwargs: Any) -> None:
if not kwargs.get("float32_inputs", True):
get_logger(self.__class__).warning(
"This estimator does not support double precision inputs. Setting float32_inputs to False will be ignored."
)
kwargs.pop("float32_inputs")
super().__init__()
self.set_params(**kwargs)
self._label_isdata = 0
self._label_isquery = 1
self.set_params(labelCol=alias.label)
def _create_pyspark_model(self, result: Row) -> "NearestNeighborsModel":
return NearestNeighborsModel.from_row(result)
def _fit(self, item_df: DataFrame) -> "NearestNeighborsModel":
self._item_df_withid = self._ensureIdCol(item_df)
self._processed_item_df = self._item_df_withid.withColumn(
alias.label, lit(self._label_isdata)
)
# TODO: should test this at scale to see if/when we hit limits
model = self._create_pyspark_model(
Row(
item_df_withid=self._item_df_withid,
processed_item_df=self._processed_item_df,
label_isdata=self._label_isdata,
label_isquery=self._label_isquery,
)
)
model._num_workers = self._num_workers
model._float32_inputs = self._float32_inputs
self._copyValues(model)
self._copy_cuml_params(model) # type: ignore
return model
def _out_schema(self) -> Union[StructType, str]: # type: ignore
"""
This class overrides _fit and will not call _out_schema.
"""
pass
def _get_cuml_fit_func( # type: ignore
self, dataset: DataFrame
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
"""
This class overrides _fit and will not call _get_cuml_fit_func.
"""
pass
def write(self) -> MLWriter:
raise NotImplementedError(
"NearestNeighbors does not support saving/loading, just re-create the estimator."
)
@classmethod
def read(cls) -> MLReader:
raise NotImplementedError(
"NearestNeighbors does not support saving/loading, just re-create the estimator."
)
class NearestNeighborsModel(
_CumlCaller, _CumlModel, NearestNeighborsClass, _NearestNeighborsCumlParams
):
def __init__(
self,
item_df_withid: DataFrame,
processed_item_df: DataFrame,
label_isdata: int,
label_isquery: int,
):
super().__init__()
self._item_df_withid = item_df_withid
self._processed_item_df = processed_item_df
self._label_isdata = label_isdata
self._label_isquery = label_isquery
def _out_schema(self) -> Union[StructType, str]: # type: ignore
return StructType(
[
StructField(
f"query_{self.getIdCol()}", ArrayType(LongType(), False), False
),
StructField(
"indices", ArrayType(ArrayType(LongType(), False), False), False
),
StructField(
"distances", ArrayType(ArrayType(DoubleType(), False), False), False
),
]
)
def _require_nccl_ucx(self) -> Tuple[bool, bool]:
"""Enable ucx over NCCL"""
return (True, True)
def _pre_process_data( # type: ignore
self, dataset: DataFrame
) -> Tuple[
List[Column], Optional[List[str]], int, Union[Type[FloatType], Type[DoubleType]]
]:
(
select_cols,
multi_col_names,
dimension,
feature_type,
) = super()._pre_process_data(dataset)
# if input format is vectorUDT, convert data type from float64 to float32
input_col, _ = self._get_input_columns()
if input_col is not None and isinstance(
dataset.schema[input_col].dataType, VectorUDT
):
select_cols[0] = vector_to_array(col(input_col), dtype="float32").alias(
alias.data
)
select_cols.append(col(alias.label))
if self.hasParam("id_col") and self.isDefined("id_col"):
id_col_name = self.getOrDefault("id_col")
select_cols.append(col(id_col_name).alias(alias.row_number))
else:
select_cols.append(col(alias.row_number))
return select_cols, multi_col_names, dimension, feature_type
def kneighbors(self, query_df: DataFrame) -> Tuple[DataFrame, DataFrame, DataFrame]:
"""Return the exact nearest neighbors for each query in query_df. The data
vectors (or equivalently item vectors) should be provided through the fit
function (see Examples in the spark_rapids_ml.knn.NearestNeighbors). The
distance measure here is euclidean distance and the number of target exact
nearest neighbors can be set through setK(). The function currently only
supports float32 type and will convert other data types into float32.
Parameters
----------
query_df: pyspark.sql.DataFrame
query vectors where each row corresponds to one query. The query_df can be in the
format of a single array column, a single vector column, or multiple float columns.
Returns
-------
query_df: pyspark.sql.DataFrame
the query_df itself if it has an id column set through setIdCol(). If not,
a monotonically increasing id column will be added.
item_df: pyspark.sql.DataFrame
the item_df (or equivalently data_df) itself if it has an id column set
through setIdCol(). If not, a monotonically increasing id column will be added.
knn_df: pyspark.sql.DataFrame
the result k nearest neighbors (knn) dataframe that has three
columns (id, indices, distances). Each row of knn_df corresponds to the knn
result of a query vector, identified by the id column. The indices/distances
column stores the ids/distances of knn item_df vectors.
"""
query_default_num_partitions = query_df.rdd.getNumPartitions()
query_df_withid = self._ensureIdCol(query_df)
processed_query_df = query_df_withid.withColumn(
alias.label, lit(self._label_isquery)
)
union_df = self._processed_item_df.union(processed_query_df)
pipelinedrdd = self._call_cuml_fit_func(union_df, partially_collect=False)
pipelinedrdd = pipelinedrdd.repartition(query_default_num_partitions) # type: ignore
query_id_col_name = f"query_{self.getIdCol()}"
id_col_type = dict(union_df.dtypes)[self.getIdCol()]
knn_rdd = pipelinedrdd.flatMap(
lambda row: list(
zip(row[query_id_col_name], row["indices"], row["distances"])
)
)
knn_df = knn_rdd.toDF(
schema=f"{query_id_col_name} {id_col_type}, indices array<{id_col_type}>, distances array<float>"
).sort(query_id_col_name)
return (self._item_df_withid, query_df_withid, knn_df)
def _get_cuml_fit_func(
self,
dataset: DataFrame,
extra_params: Optional[List[Dict[str, Any]]] = None,
) -> Callable[[FitInputType, Dict[str, Any]], Dict[str, Any],]:
label_isdata = self._label_isdata
label_isquery = self._label_isquery
id_col_name = self.getIdCol()
def _cuml_fit(
dfs: FitInputType,
params: Dict[str, Any],
) -> Dict[str, Any]:
from pyspark import BarrierTaskContext
context = BarrierTaskContext.get()
rank = context.partitionId()
from cuml.neighbors.nearest_neighbors_mg import NearestNeighborsMG as cumlNN
nn_object = cumlNN(
handle=params[param_alias.handle],
n_neighbors=params[param_alias.cuml_init]["n_neighbors"],
output_type="numpy",
verbose=params[param_alias.cuml_init]["verbose"],
)
item_list = []
query_list = []
item_row_number = []
query_row_number = []
for x_array, label_array, row_number_array in dfs:
item_filter = label_array == label_isdata
query_filter = label_array == label_isquery
item_list.append(x_array[item_filter])
query_list.append(x_array[query_filter])
item_row_number += row_number_array[item_filter].tolist() # type: ignore
query_row_number += row_number_array[query_filter].tolist() # type: ignore
if isinstance(item_list[0], pd.DataFrame):
item = [pd.concat(item_list)]
query = [pd.concat(query_list)]
else:
# do not use item_list or query_list after this, as elements are freed
item = [_concat_and_free(item_list)]
query = [_concat_and_free(query_list)]
item_row_number = [item_row_number]
query_row_number = [query_row_number]
item_size: List[int] = [len(chunk) for chunk in item]
query_size: List[int] = [len(chunk) for chunk in query]
assert len(item_size) == len(query_size)
import json
async def do_allGather() -> List[str]:
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(
None,
context.allGather,
json.dumps((rank, item_size, query_size, item_row_number)),
)
return result
messages = params[param_alias.loop].run_until_complete(
asyncio.ensure_future(do_allGather())
)
rank_stats = [json.loads(msg) for msg in messages]
item_parts_to_ranks = []
query_parts_to_ranks = []
for m_rank, m_item_size, m_query_size, _ in rank_stats:
item_parts_to_ranks += [(m_rank, size) for size in m_item_size]
query_parts_to_ranks += [(m_rank, size) for size in m_query_size]
item_nrows = sum(pair[1] for pair in item_parts_to_ranks)
query_nrows = sum(pair[1] for pair in query_parts_to_ranks)
res_tuple: Tuple[List[np.ndarray], List[np.ndarray]] = nn_object.kneighbors(
index=item,
index_parts_to_ranks=item_parts_to_ranks,
index_nrows=item_nrows,
query=query,
query_parts_to_ranks=query_parts_to_ranks,
query_nrows=query_nrows,
ncols=params[param_alias.num_cols],
rank=rank,
n_neighbors=params[param_alias.cuml_init]["n_neighbors"],
convert_dtype=False, # only np.float32 is supported in cuml. Should set to True for all other types
)
distances: List[np.ndarray] = res_tuple[0]
indices: List[np.ndarray] = res_tuple[1]
distances = [ary.tolist() for ary in distances]
indices = [ary.tolist() for ary in indices]
# id mapping
id2row: Dict[int, int] = {}
count = 0
for _, _, _, m_item_row_number in rank_stats:
for chunk in m_item_row_number:
chunk_id2row = [(count + i, chunk[i]) for i in range(len(chunk))]
id2row.update(chunk_id2row)
count += len(chunk)
transformed_indices = []
for two_d in indices:
res = []
for row in two_d:
res.append([id2row[cuid] for cuid in row])
transformed_indices.append(res)
return {
f"query_{id_col_name}": query_row_number,
"indices": transformed_indices,
"distances": distances,
}
return _cuml_fit
def _transform(self, dataset: DataFrame) -> DataFrame:
raise NotImplementedError(
"NearestNeighborsModel does not provide a transform function. Use 'kneighbors' instead."
)
def _get_cuml_transform_func(
self, dataset: DataFrame, category: str = transform_evaluate.transform
) -> Tuple[_ConstructFunc, _TransformFunc, Optional[_EvaluateFunc],]:
raise NotImplementedError(
"'_CumlModel._get_cuml_transform_func' method is not implemented. Use 'kneighbors' instead."
)
def exactNearestNeighborsJoin(
self,
query_df: DataFrame,
distCol: str = "distCol",
) -> DataFrame:
"""
This function returns the k exact nearest neighbors (knn) in item_df of each query vector in query_df.
item_df is the dataframe passed to the fit function of the NearestNeighbors estimator.
        Note that the knn relationship is asymmetric with respect to the input datasets (e.g.,
        if x is a knn of y, y is not necessarily a knn of x).
Parameters
----------
query_df: pyspark.sql.DataFrame
the query_df dataframe. Each row represents a query vector.
distCol: str
the name of the output distance column
Returns
-------
knnjoin_df: pyspark.sql.DataFrame
the result dataframe that has three columns (item_df, query_df, distCol).
item_df column is of struct type that includes as fields all the columns of input item dataframe.
Similarly, query_df column is of struct type that includes as fields all the columns of input query dataframe.
distCol is the distance column. A row in knnjoin_df is in the format (v1, v2, dist(v1, v2)),
where item_vector v1 is one of the k nearest neighbors of query_vector v2 and their distance is dist(v1, v2).
"""
id_col_name = self.getIdCol()
# call kneighbors then prepare return results
(item_df_withid, query_df_withid, knn_df) = self.kneighbors(query_df)
from pyspark.sql.functions import arrays_zip, col, explode, struct
knn_pair_df = knn_df.select(
f"query_{id_col_name}",
explode(arrays_zip("indices", "distances")).alias("zipped"),
).select(
f"query_{id_col_name}",
col("zipped.indices").alias(f"item_{id_col_name}"),
col("zipped.distances").alias(distCol),
)
item_df_struct = item_df_withid.select(struct("*").alias("item_df"))
query_df_struct = query_df_withid.select(struct("*").alias("query_df"))
knnjoin_df = item_df_struct.join(
knn_pair_df,
item_df_struct[f"item_df.{id_col_name}"]
== knn_pair_df[f"item_{id_col_name}"],
)
knnjoin_df = knnjoin_df.join(
query_df_struct,
knnjoin_df[f"query_{id_col_name}"]
== query_df_struct[f"query_df.{id_col_name}"],
)
if self.isSet(self.id_col):
knnjoin_df = knnjoin_df.select("item_df", "query_df", distCol)
else:
knnjoin_df = knnjoin_df.select(
knnjoin_df["item_df"].dropFields(id_col_name).alias("item_df"),
knnjoin_df["query_df"].dropFields(id_col_name).alias("query_df"),
distCol,
)
return knnjoin_df
def write(self) -> MLWriter:
raise NotImplementedError(
"NearestNeighborsModel does not support saving/loading, just re-fit the estimator to re-create a model."
)
@classmethod
def read(cls) -> MLReader:
raise NotImplementedError(
"NearestNeighborsModel does not support loading/loading, just re-fit the estimator to re-create a model."
)
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/knn.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/metrics/__init__.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
class MulticlassMetrics:
"""Metrics for multiclass classification."""
SUPPORTED_MULTI_CLASS_METRIC_NAMES = [
"f1",
"accuracy",
"weightedPrecision",
"weightedRecall",
"weightedTruePositiveRate",
"weightedFalsePositiveRate",
"weightedFMeasure",
"truePositiveRateByLabel",
"falsePositiveRateByLabel",
"precisionByLabel",
"recallByLabel",
"fMeasureByLabel",
"hammingLoss",
]
# This class is aligning with MulticlassMetrics scala version.
def __init__(
self,
tp: Dict[float, float],
fp: Dict[float, float],
label: Dict[float, float],
label_count: int,
) -> None:
self._tp_by_class = tp
self._fp_by_class = fp
self._label_count_by_class = label
self._label_count = label_count
def _precision(self, label: float) -> float:
"""Returns precision for a given label (category)"""
tp = self._tp_by_class[label]
fp = self._fp_by_class[label]
return 0.0 if (tp + fp == 0) else tp / (tp + fp)
def _recall(self, label: float) -> float:
"""Returns recall for a given label (category)"""
return self._tp_by_class[label] / self._label_count_by_class[label]
def _f_measure(self, label: float, beta: float = 1.0) -> float:
"""Returns f-measure for a given label (category)"""
p = self._precision(label)
r = self._recall(label)
beta_sqrd = beta * beta
return 0.0 if (p + r == 0) else (1 + beta_sqrd) * p * r / (beta_sqrd * p + r)
def false_positive_rate(self, label: float) -> float:
"""Returns false positive rate for a given label (category)"""
fp = self._fp_by_class[label]
return fp / (self._label_count - self._label_count_by_class[label])
def weighted_fmeasure(self, beta: float = 1.0) -> float:
"""Returns weighted averaged f1-measure"""
sum = 0.0
for k, v in self._label_count_by_class.items():
sum += self._f_measure(k, beta) * v / self._label_count
return sum
def accuracy(self) -> float:
"""Returns accuracy (equals to the total number of correctly classified instances
out of the total number of instances.)"""
return sum(self._tp_by_class.values()) / self._label_count
def weighted_precision(self) -> float:
"""Returns weighted averaged precision"""
return sum(
[
self._precision(category) * count / self._label_count
for category, count in self._label_count_by_class.items()
]
)
def weighted_recall(self) -> float:
"""Returns weighted averaged recall (equals to precision, recall and f-measure)"""
return sum(
[
self._recall(category) * count / self._label_count
for category, count in self._label_count_by_class.items()
]
)
def weighted_true_positive_rate(self) -> float:
"""Returns weighted true positive rate. (equals to precision, recall and f-measure)"""
return self.weighted_recall()
def weighted_false_positive_rate(self) -> float:
"""Returns weighted false positive rate"""
return sum(
[
self.false_positive_rate(category) * count / self._label_count
for category, count in self._label_count_by_class.items()
]
)
def true_positive_rate_by_label(self, label: float) -> float:
"""Returns true positive rate for a given label (category)"""
return self._recall(label)
def hamming_loss(self) -> float:
"""Returns Hamming-loss"""
numerator = sum(self._fp_by_class.values())
denominator = self._label_count
return numerator / denominator
def evaluate(self, evaluator: MulticlassClassificationEvaluator) -> float:
metric_name = evaluator.getMetricName()
if metric_name == "f1":
return self.weighted_fmeasure()
elif metric_name == "accuracy":
return self.accuracy()
elif metric_name == "weightedPrecision":
return self.weighted_precision()
elif metric_name == "weightedRecall":
return self.weighted_recall()
elif metric_name == "weightedTruePositiveRate":
return self.weighted_true_positive_rate()
elif metric_name == "weightedFalsePositiveRate":
return self.weighted_false_positive_rate()
elif metric_name == "weightedFMeasure":
return self.weighted_fmeasure(evaluator.getBeta())
elif metric_name == "truePositiveRateByLabel":
return self.true_positive_rate_by_label(evaluator.getMetricLabel())
elif metric_name == "falsePositiveRateByLabel":
return self.false_positive_rate(evaluator.getMetricLabel())
elif metric_name == "precisionByLabel":
return self._precision(evaluator.getMetricLabel())
elif metric_name == "recallByLabel":
return self._recall(evaluator.getMetricLabel())
elif metric_name == "fMeasureByLabel":
return self._f_measure(evaluator.getMetricLabel(), evaluator.getBeta())
elif metric_name == "hammingLoss":
return self.hamming_loss()
else:
raise ValueError(f"Unsupported metric name, found {metric_name}")
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/metrics/MulticlassMetrics.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from collections import namedtuple
from typing import List, Optional, cast
from pyspark import Row
from pyspark.ml.evaluation import RegressionEvaluator
from spark_rapids_ml.core import pred
RegMetrics = namedtuple("RegMetrics", ("m2n", "m2", "l1", "mean", "total_count"))
reg_metrics = RegMetrics("m2n", "m2", "l1", "mean", "total_count")
# This class is aligning with Spark SummarizerBuffer scala version
class _SummarizerBuffer:
def __init__(
self,
mean: List[float],
m2n: List[float],
m2: List[float],
l1: List[float],
total_cnt: int,
):
"""All of the mean/m2n/m2/l1 have the same length which must be equal to 3,
and the order of their values is [label, label-prediction, prediction]
mean = 1/N * \sum_{i=1}^{N}(x_i)
m2n = variance * N
m2 = \sum_{i=1}^{N}(x_i)^{2}
l1 (norm) = \sum_{i=1}^{N}|x_i|
"""
self._curr_mean = mean
self._curr_m2n = m2n
self._curr_m2 = m2
self._curr_l1 = l1
self._num_cols = len(mean)
self._total_cnt = total_cnt
# spark-rapids-ml doesn't support weight col, so default to 1 for each sample.
self._total_weight_sum = total_cnt
# weight_square = weight * weight (weight defaults to 1)
self._weight_square_sum = total_cnt
        # The Scala version uses _curr_weight_sum to represent the accumulated weight sum of
        # the values that have been processed. spark-rapids-ml doesn't need to iterate
        # row by row; instead, it computes the metrics in a columnar way.
        # So default it to the total count, which is aligned with the Scala version.
self._curr_weight_sum = [total_cnt] * self._num_cols
def merge(self, other: "_SummarizerBuffer") -> "_SummarizerBuffer":
"""Merge the other into self and return a new SummarizerBuffer"""
self._total_cnt += other._total_cnt
self._total_weight_sum += other._total_weight_sum
self._weight_square_sum += other._weight_square_sum
for i in range(self._num_cols):
this_weight_sum = self._curr_weight_sum[i]
other_weight_sum = other._curr_weight_sum[i]
total_weight_sum = this_weight_sum + other_weight_sum
if total_weight_sum != 0.0:
delta_mean = other._curr_mean[i] - self._curr_mean[i]
# merge mean together
self._curr_mean[i] += delta_mean * other_weight_sum / total_weight_sum
# merge m2n together
self._curr_m2n[i] += (
other._curr_m2n[i]
+ delta_mean
* delta_mean
* this_weight_sum
* other_weight_sum
/ total_weight_sum
)
self._curr_weight_sum[i] = total_weight_sum
self._curr_m2[i] += other._curr_m2[i]
self._curr_l1[i] += other._curr_l1[i]
return _SummarizerBuffer(
self._curr_mean,
self._curr_m2n,
self._curr_m2,
self._curr_l1,
self._total_cnt,
)
@property
def total_count(self) -> int:
return self._total_cnt
@property
def m2(self) -> List[float]:
"""\sum_{i=1}^{N}(x_i)^{2} of each dimension"""
return self._curr_m2
@property
def norm_l2(self) -> List[float]:
"""L2 (Euclidean) norm of each dimension."""
real_magnitude = [math.sqrt(m2) for m2 in self._curr_m2]
return real_magnitude
@property
def norm_l1(self) -> List[float]:
"""L1 norm of each dimension."""
return self._curr_l1
@property
def mean(self) -> List[float]:
"""mean of each dimension."""
real_mean = [
self._curr_mean[i] * (self._curr_weight_sum[i] / self._total_weight_sum)
for i in range(self._num_cols)
]
return real_mean
def _compute_variance(self) -> List[float]:
denominator = self._total_weight_sum - (
self._weight_square_sum / self._total_weight_sum
)
if denominator > 0.0:
real_variance = [
max(self._curr_m2n[i] / denominator, 0.0) for i in range(self._num_cols)
]
else:
real_variance = [0] * self._num_cols
return real_variance
@property
def weight_sum(self) -> int:
"""Sum of weights."""
return self._total_weight_sum
@property
def variance(self) -> List[float]:
"""Unbiased estimate of sample variance of each dimension."""
return self._compute_variance()
# This class is aligning with Spark RegressionMetrics scala version.
class RegressionMetrics:
"""Metrics for regression case."""
def __init__(self, summary: _SummarizerBuffer):
self._summary = summary
@staticmethod
def create(
mean: List[float],
m2n: List[float],
m2: List[float],
l1: List[float],
total_cnt: int,
) -> "RegressionMetrics":
return RegressionMetrics(_SummarizerBuffer(mean, m2n, m2, l1, total_cnt))
@classmethod
def from_rows(cls, num_models: int, rows: List[Row]) -> List["RegressionMetrics"]:
"""The rows must contain pred.model_index, and mean/m2n/m2/l1/total_count"""
metrics: List[Optional["RegressionMetrics"]] = [None] * num_models
for row in rows:
index = row[pred.model_index]
metric = RegressionMetrics.create(
mean=row[reg_metrics.mean],
m2n=row[reg_metrics.m2n],
m2=row[reg_metrics.m2],
l1=row[reg_metrics.l1],
total_cnt=row[reg_metrics.total_count],
)
old_metric = metrics[index]
metrics[index] = (
old_metric.merge(metric) if old_metric is not None else metric
)
return cast(List["RegressionMetrics"], metrics)
def merge(self, other: "RegressionMetrics") -> "RegressionMetrics":
"""Merge other to self and return a new RegressionMetrics"""
summary = self._summary.merge(other._summary)
return RegressionMetrics(summary)
@property
def _ss_y(self) -> float:
"""sum of squares for label"""
return self._summary.m2[0]
@property
def _ss_err(self) -> float:
"""sum of squares for 'label-prediction'"""
return self._summary.m2[1]
@property
def _ss_tot(self) -> float:
"""total sum of squares"""
return self._summary.variance[0] * (self._summary.weight_sum - 1)
@property
def _ss_reg(self) -> float:
return (
self._summary.m2[2]
+ math.pow(self._summary.mean[0], 2) * self._summary.weight_sum
- 2
* self._summary.mean[0]
* self._summary.mean[2]
* self._summary.weight_sum
)
@property
def mean_squared_error(self) -> float:
"""Returns the mean squared error, which is a risk function corresponding to the
expected value of the squared error loss or quadratic loss."""
return self._ss_err / self._summary.weight_sum
@property
def root_mean_squared_error(self) -> float:
"""Returns the root mean squared error, which is defined as the square root of
the mean squared error."""
return math.sqrt(self.mean_squared_error)
def r2(self, through_origin: bool) -> float:
"""Returns R^2^, the unadjusted coefficient of determination."""
return (
(1 - self._ss_err / self._ss_y)
if through_origin
else (1 - self._ss_err / self._ss_tot)
)
@property
def mean_absolute_error(self) -> float:
"""Returns the mean absolute error, which is a risk function corresponding to the
expected value of the absolute error loss or l1-norm loss."""
return self._summary.norm_l1[1] / self._summary.weight_sum
@property
def explained_variance(self) -> float:
"""Returns the variance explained by regression.
explained_variance = $\sum_i (\hat{y_i} - \bar{y})^2^ / n$"""
return self._ss_reg / self._summary.weight_sum
def evaluate(self, evaluator: RegressionEvaluator) -> float:
metric_name = evaluator.getMetricName()
if metric_name == "rmse":
return self.root_mean_squared_error
elif metric_name == "mse":
return self.mean_squared_error
elif metric_name == "r2":
return self.r2(evaluator.getThroughOrigin())
elif metric_name == "mae":
return self.mean_absolute_error
elif metric_name == "var":
return self.explained_variance
else:
raise ValueError(f"Unsupported metric name, found {metric_name}")
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/metrics/RegressionMetrics.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/common/__init__.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import json
import os
from asyncio import AbstractEventLoop
from typing import TYPE_CHECKING, Any, List, Optional, Tuple
import psutil
if TYPE_CHECKING:
# need this first to load shared ucx shared libraries from ucx-py instead of raft-dask
from ucp import Endpoint
from pylibraft.common import Handle # isort: split
from raft_dask.common import UCX
from raft_dask.common.nccl import nccl
from pyspark import BarrierTaskContext
class CumlContext:
def __init__(
self,
rank: int,
nranks: int,
context: BarrierTaskContext,
enable: bool,
require_ucx: bool = False,
) -> None:
"""
        Initialize the NCCL unique id for the workers:
        1. generate the NCCL unique id on worker 0
        2. allGather so that all workers receive the NCCL unique id
        3. if require_ucx is true, initialize UCX and inject it together with NCCL into a handle
"""
# need this first to load shared ucx shared libraries from ucx-py instead of raft-dask
from ucp import Endpoint
from pylibraft.common import Handle # isort: split
from raft_dask.common import UCX
from raft_dask.common.nccl import nccl
self.enable = enable
self._handle: Optional["Handle"] = None
self._loop: Optional[AbstractEventLoop] = None
if not enable:
return
self._rank = rank
self._nranks = nranks
self._require_ucx = require_ucx
self._handle = Handle(n_streams=0)
self._nccl_comm: Optional["nccl"] = None
self._nccl_unique_id = None
self._ucx: Optional["UCX"] = None
self._ucx_port = None
self._ucx_eps = None
nccl_uid = ""
if context.partitionId() == 0:
nccl_uid = base64.b64encode(nccl.get_unique_id()).decode("utf-8")
if self._require_ucx is False:
nccl_uids = context.allGather(nccl_uid)
self._nccl_unique_id = base64.b64decode(nccl_uids[0])
else:
tasks = context.getTaskInfos()
self._ips = [task.address.split(":")[0] for task in tasks]
            # set environment variables according to https://github.com/rapidsai/ucx-py
            # the code occasionally fails to run without setting these variables
# TODO: will have to figure how to make this more flexible to take advantage of higher speed interconnects on multi-gpu nodes
my_ip = self._ips[self._rank]
if "UCX_TLS" not in os.environ:
os.environ["UCX_TLS"] = "tcp,cuda_copy,cuda_ipc"
if "UCXPY_IFNAME" not in os.environ:
try:
my_ifname = CumlContext.get_ifname_from_ip(my_ip)
os.environ["UCXPY_IFNAME"] = my_ifname
except ValueError:
pass
self._ucx = UCX.get()
self._ucx_port = self._ucx.listener_port()
msgs = context.allGather(json.dumps((nccl_uid, self._ucx_port)))
self._nccl_unique_id = base64.b64decode(json.loads(msgs[0])[0])
self._ports = [json.loads(msg)[1] for msg in msgs]
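    # Illustrative sketch (editor addition): the allGather exchange above with two workers
    # and require_ucx=True. Each worker sends json.dumps((nccl_uid, ucx_port)); only rank 0
    # fills in a real NCCL unique id, and every worker decodes it from the first message:
    #
    #     msgs = ['["<base64 uid from rank 0>", 55001]', '["", 55002]']
    #     self._nccl_unique_id = base64.b64decode(json.loads(msgs[0])[0])
    #     self._ports = [55001, 55002]
    #
    # The port values shown are hypothetical.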
@property
def handle(self) -> Optional["Handle"]:
return self._handle
def __enter__(self) -> "CumlContext":
if not self.enable:
return self
from raft_dask.common.nccl import nccl
# initialize nccl and inject it to the handle. A GPU must be assigned exclusively before init() is called
self._nccl_comm = nccl()
self._nccl_comm.init(self._nranks, self._nccl_unique_id, self._rank)
if self._require_ucx is False:
from raft_dask.common.comms_utils import inject_comms_on_handle_coll_only
inject_comms_on_handle_coll_only(
self._handle, self._nccl_comm, self._nranks, self._rank, True
)
else:
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
self._ucx_eps = self._loop.run_until_complete(
asyncio.ensure_future(
CumlContext._ucp_create_endpoints(
self._ucx, list(zip(self._ips, self._ports))
)
)
)
from raft_dask.common.comms_utils import inject_comms_on_handle
inject_comms_on_handle(
self._handle,
self._nccl_comm,
self._ucx.get_worker(), # type: ignore
self._ucx_eps,
self._nranks,
self._rank,
True,
)
return self
def __exit__(self, *args: Any) -> None:
if not self.enable:
return
assert self._nccl_comm is not None
self._nccl_comm.destroy()
del self._nccl_comm
del self._handle
if self._loop is not None:
            self._loop.stop()
            self._loop.close()
@staticmethod
def get_ifname_from_ip(target_ip: str) -> str:
if_addrs_dict = psutil.net_if_addrs()
for ifname in if_addrs_dict:
ip = if_addrs_dict[ifname][0].address
if ip == target_ip:
return ifname
raise ValueError(f"target_ip ${target_ip} does not exist")
@staticmethod
async def _ucp_create_endpoints(
ucx_worker: "UCX",
target_ip_ports: List[Tuple[str, int]],
additional_timeout: float = 0.1,
) -> "Endpoint":
"""
        ucp initialization may require a larger additional_timeout in a complex network environment
"""
eps = [None] * len(target_ip_ports)
for i in range(len(eps)):
ip, port = target_ip_ports[i]
ep = await ucx_worker.get_endpoint(ip, port)
eps[i] = ep
await asyncio.sleep(additional_timeout)
return eps
| spark-rapids-ml-branch-23.10 | python/src/spark_rapids_ml/common/cuml_context.py |
from typing import Dict, List, Tuple
import argparse
import os
import subprocess
import sys
from multiprocessing import Pool, cpu_count
from pylint import epylint
# This script is copied from dmlc/xgboost
CURDIR = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
PROJECT_ROOT = os.path.normpath(os.path.join(CURDIR, os.path.pardir))
SRC_PATHS = [
"src/spark_rapids_ml",
"tests",
"benchmark",
]
def run_formatter(rel_paths: List[str]) -> bool:
isort_cmd = ["isort", "--check", "--profile=black"] + rel_paths
black_cmd = ["black", "--check"] + rel_paths
isort_ret = subprocess.run(isort_cmd).returncode
black_ret = subprocess.run(black_cmd).returncode
if isort_ret != 0 or black_ret != 0:
isort_cmd.remove("--check")
black_cmd.remove("--check")
msg = (
"Please run the following command on your machine to address the format"
" errors:\n {}\n {}".format(" ".join(isort_cmd), " ".join(black_cmd))
)
print(msg, file=sys.stdout)
return False
return True
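# Illustrative sketch (editor addition): these checks are normally driven by the CLI entry
# point at the bottom of this file, e.g.
#
#     python ci/lint_python.py --format --type-check --pylint
#
# run_formatter(SRC_PATHS) returns False (after printing the isort/black commands needed to
# fix the formatting) when either tool reports violations.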
def run_mypy(rel_paths: List[str]) -> bool:
ret = subprocess.run(["mypy"] + rel_paths)
return ret.returncode == 0
class PyLint:
"""A helper for running pylint, mostly copied from dmlc-core/scripts."""
def __init__(self) -> None:
self.pypackage_root = PROJECT_ROOT
self.pylint_cats = set(["error", "warning", "convention", "refactor"])
self.pylint_opts = [
"--extension-pkg-whitelist=numpy",
"--rcfile=" + os.path.join(self.pypackage_root, ".pylintrc"),
]
def run(self, path: str) -> Tuple[Dict, str, str]:
(pylint_stdout, pylint_stderr) = epylint.py_run(
" ".join([str(path)] + self.pylint_opts), return_std=True
)
emap = {}
err = pylint_stderr.read()
out = []
for line in pylint_stdout:
out.append(line)
key = line.split(":")[-1].split("(")[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
return {path: emap}, err, "\n".join(out)
def __call__(self) -> bool:
all_errors: Dict[str, Dict[str, int]] = {}
def print_summary_map(result_map: Dict[str, Dict[str, int]]) -> int:
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
ftype = "Python"
npass = sum(1 for x in result_map.values() if len(x) == 0)
print(f"====={npass}/{len(result_map)} {ftype} files passed check=====")
for fname, emap in result_map.items():
if len(emap) == 0:
continue
print(
f"{fname}: {sum(emap.values())} Errors of {len(emap)} Categories map={str(emap)}"
)
return len(result_map) - npass
all_scripts = []
for root, dirs, files in os.walk(self.pypackage_root):
for f in files:
if f.endswith(".py"):
all_scripts.append(os.path.join(root, f))
with Pool(cpu_count()) as pool:
error_maps = pool.map(self.run, all_scripts)
for emap, err, out in error_maps:
print(out)
if len(err) != 0:
print(err)
all_errors.update(emap)
nerr = print_summary_map(all_errors)
return nerr == 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--format", action="store_true", default=False)
parser.add_argument("--type-check", action="store_true", default=False)
parser.add_argument("--pylint", action="store_true", default=False)
args = parser.parse_args()
if args.format:
print("Formatting...")
if not run_formatter(SRC_PATHS):
sys.exit(-1)
if args.type_check:
print("Type checking...")
if not run_mypy(SRC_PATHS):
sys.exit(-1)
if args.pylint:
print("Running PyLint...")
if not PyLint()():
sys.exit(-1)
| spark-rapids-ml-branch-23.10 | ci/lint_python.py |
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'spark-rapids-ml'
copyright = '2023, NVIDIA'
author = 'NVIDIA'
release = '23.8.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'numpydoc',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
]
numpydoc_show_class_members = False
autodoc_inherit_docstrings = False
templates_path = ['_templates']
exclude_patterns = []
intersphinx_mapping = {
'pyspark': ('https://spark.apache.org/docs/latest/api/python', None),
'cuml': ('https://docs.rapids.ai/api/cuml/stable', None),
}
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'pydata_sphinx_theme'
import inspect
from spark_rapids_ml.utils import _unsupported_methods_attributes
_unsupported_by_class = {}
def autodoc_skip_member(app, what, name, obj, skip, options):
    # adapted from https://github.com/sphinx-doc/sphinx/issues/9533#issuecomment-962007846
    doc_class = None
for frame in inspect.stack():
if frame.function == "get_members":
doc_class = frame.frame.f_locals["obj"]
break
exclude = skip
if doc_class:
if doc_class not in _unsupported_by_class:
_unsupported_by_class[doc_class] = _unsupported_methods_attributes(doc_class)
exclude = name in _unsupported_by_class[doc_class]
# return True if (skip or exclude) else None # Can interfere with subsequent skip functions.
return True if exclude or skip else None
def setup(app):
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
app.add_js_file("https://docs.rapids.ai/assets/js/custom.js", loading_method="defer")
app.connect('autodoc-skip-member', autodoc_skip_member)
| spark-rapids-ml-branch-23.10 | docs/source/conf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.30" # assumed parallelism: 3 (A6000 49GB VRAM)
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false" # disable preallocation behavior
import runpy
import subprocess
import sys
from contextlib import contextmanager
from dataclasses import dataclass
from typing import List
from unittest.mock import patch
import numpy as np
import pytest
import webdataset as wds
from absl import logging
MANUAL_MARKERS = {
"integration": "used for integration tests (may be slow)",
"data": "used for dataset utility tests",
"perf": "used for speed tests",
"convergence": "used for tests where a certain loss/accuracy is validated",
"manual": "these are manual tests, e.g., things that require manually standing up a server or require certain datasets to be present",
}
def pytest_configure(config):
for marker_name, marker_desc in MANUAL_MARKERS.items():
config.addinivalue_line("markers", f"{marker_name}: {marker_desc}")
def pytest_addoption(parser):
# TODO(terry): use these options to implement conjunctive/disjunctive selections
for marker_name in MANUAL_MARKERS:
parser.addoption(
f"--{marker_name}",
action="store_true",
default=False,
help=f"Run {marker_name} tests",
)
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(config, items):
# trylast=True, is so we can let pytest filter out marks before we try to skip
if not config.getoption("-m"):
# We will disable integration, convergence and manual tests if markers aren't given
for marker_name in ('integration', 'convergence', 'manual'):
skipper = pytest.mark.skip(reason=f"Only run {marker_name} marked tests if present when specifying in -m <markexpr>")
for item in items:
if marker_name in item.keywords:
item.add_marker(skipper)
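# Illustrative marker selection (example commands, not part of the original file):
#   pytest -m integration            # run only integration-marked tests
#   pytest -m "perf and not manual"  # run perf tests, excluding manual ones
#   pytest                           # integration/convergence/manual tests are skipped by default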
@pytest.fixture
def package_root_dir():
return os.path.dirname(os.path.realpath(__file__))
@pytest.fixture
def rng():
import jax
return jax.random.PRNGKey(0)
@pytest.fixture
def run_subprocess_blocking():
def _run_subprocess(*cmd_and_args, env=None):
pipes = subprocess.Popen(
cmd_and_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
stdout, stderr = pipes.communicate()
return stdout, stderr, pipes.returncode
return _run_subprocess
@pytest.fixture
def run_subprocess_in_background():
def block_until_text_found(process, block_until_seen: str):
for line in iter(process.stdout.readline, b''):
line = line.decode()
logging.info(line.rstrip())
if block_until_seen in line:
break
@contextmanager
def _run_subprocess(*cmd_and_args, block_until_seen: str, env=None):
ON_POSIX = 'posix' in sys.builtin_module_names
process = subprocess.Popen(
cmd_and_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
close_fds=ON_POSIX,
env=env,
)
block_until_text_found(process, block_until_seen)
yield
process.terminate()
process.wait()
return _run_subprocess
# This doesn't work with modules using gin configs relying on the __main__ module
@pytest.fixture
def run_module(capfd):
def _run_module(module: str, argv: List[str] | None = None):
argv = [module] + argv if argv else [module]
with patch('sys.argv', argv):
runpy.run_module(module, run_name='__main__')
stdout, stderr = capfd.readouterr()
return stdout, stderr
return _run_module
@dataclass
class WebdatasetMetadata:
num_examples: int = 20 * 4
batch_size: int = 4
image_size: int = 224
channels: int = 3
seq_len: int = 77
image_key: str = 'jpg'
text_key: str = 'txt'
class_key: str = 'cls'
num_classes: int = 10
path: str | None = None
@pytest.fixture(scope='session')
def dummy_wds_metadata(
tmp_path_factory: pytest.TempPathFactory,
):
# HACK(terry): There is a bug in webdataset/writer.py that imports PIL, but not the module under it so we are doing it here as a WAR
# https://github.com/webdataset/webdataset/issues/198
import PIL.Image # noqa: F401
metadata = WebdatasetMetadata()
out_tar = tmp_path_factory.mktemp('wds_test') / 'dataset.tar'
out_tar_path = out_tar.as_posix()
with wds.TarWriter(out_tar_path) as dst:
for index in range(metadata.num_examples):
dst.write({
"__key__": f"sample{index:06d}",
metadata.image_key: np.full((metadata.image_size, metadata.image_size, metadata.channels), fill_value=1.0/index if index else 0.0, dtype=np.float32),
metadata.class_key: index % metadata.num_classes,
metadata.text_key: f'A random image #{index}',
})
metadata.path = out_tar_path
yield metadata
| JAX-Toolbox-main | rosetta/conftest.py |
import os
import sys
import setuptools
package_path = os.path.join(os.path.dirname(__file__), 'rosetta')
sys.path.append(package_path)
from rosetta import __version__ # noqa: E402
# Get the long description from the README file.
with open('README.md') as fp:
_LONG_DESCRIPTION = fp.read()
setuptools.setup(
name='rosetta',
version=__version__,
description='Rosetta: a Jax project for training LLM/CV/Multimodal models',
long_description=_LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author='NVIDIA',
author_email='[email protected]',
# TODO(terry): license, url
packages=setuptools.find_packages(),
package_data={
'': ['**/*.gin'], # not all subdirectories may have __init__.py.
},
scripts=[],
install_requires=[
'nvidia-dali-cuda120',
'webdataset',
],
extras_require={
'test': [
'pandas',
'pytest',
'pytest-xdist',
'Pillow'
],
'lint': [
'ruff',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: GPU',
'Environment :: GPU :: NVIDIA CUDA',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='machinelearning,multimodal,llm,jax-toolbox,jax,deep learning',
)
| JAX-Toolbox-main | rosetta/setup.py |
import pytest
def test_placeholder():
assert 1 == 1
@pytest.mark.integration
def test_integration_placeholder():
assert 1 == 1
| JAX-Toolbox-main | rosetta/rosetta/test_placeholder.py |
__version__ = '0.0.1.dev0'
| JAX-Toolbox-main | rosetta/rosetta/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections.abc import Callable
import jax
import jax.numpy as jnp
import optax
import pytest
from rosetta.projects.vit import config, models
from rosetta.projects.vit.layers import FlaxGViTForImageClassificationModule
from t5x import optimizers, utils
def small_vit_module(dtype):
conf = config.GoogleViTConfig(
hidden_size = 384,
num_hidden_layers = 12,
num_attention_heads = 6,
intermediate_size = 1536,
hidden_dropout_prob = 0.0,
attention_probs_dropout_prob = 0.0,
patch_size = 16,
encoder_stride = 16,
classifier = 'token',
## single linear layer for fine-tuning
representation_size = None,
num_classes = 1000,
dtype = dtype,
)
return FlaxGViTForImageClassificationModule(conf)
def timeit(fn: Callable, num_iter=1, warmup_iter=1):
"""Calculates avg wall clock time"""
for _ in range(warmup_iter):
fn()
start = time.time()
for _ in range(num_iter):
fn()
elapsed = time.time() - start
return elapsed / num_iter
@pytest.mark.manual
@pytest.mark.perf
def test_vit_bfloat16_speedup(rng):
vit_module_f32 = small_vit_module(dtype=jnp.float32)
vit_module_bf16 = small_vit_module(dtype=jnp.bfloat16)
vit_model_f32 = models.ViTModel(module=vit_module_f32, optimizer_def=None)
vit_model_bf16 = models.ViTModel(module=vit_module_bf16, optimizer_def=None)
state = vit_model_bf16.get_initial_variables(
rng,
input_shapes={
'images': (32, 224, 224, 3),
},
input_types={
'images': jnp.float32,
},
)
params = state['params']
batch = {
'images': jnp.ones((32, 224, 224, 3), jnp.float32),
'labels': jnp.zeros((32, 1000), dtype=jnp.float32),
}
times = {}
baseline_jit = None
for model_name in ('vit_model_f32', 'vit_model_bf16'):
header = f'{model_name}'
model = locals()[model_name]
jitted_call = jax.jit(model.loss_fn)
jit_time = timeit(lambda: jitted_call(params, batch, rng)[0].block_until_ready())
times[f'(jit) {header}'] = jit_time
if model_name == 'vit_model_f32':
baseline_jit = jit_time
print('======================')
print('==== FWD SUMMARY =====')
print('======================')
max_fwd_speedup = float('-inf')
for name, ttime in sorted(times.items(), key=lambda x: x[1]):
speedup = baseline_jit / ttime
max_fwd_speedup = max(max_fwd_speedup, speedup)
print(f'{ttime*1000:7.3f}ms ({speedup:3.1f}x) {name}')
assert max_fwd_speedup > 1.6
## dummy lr schedule
schedule = utils.create_learning_rate_scheduler(
factors='linear_decay',
base_learning_rate=0.0,
warmup_steps=100,
min_learning_rate=0.00001,
decay_factor=1e-6,
)
optimizer = optax.adamw(
learning_rate=schedule, weight_decay=0.02, b1=0.9, b2=0.999, eps=1e-8,
)
OPTIMIZER = optimizers.chain(transformations=[optax.clip_by_global_norm(max_norm=1.0), optimizer])
step_times = {}
step_baseline_jit = None
for model_name in ('vit_model_f32', 'vit_model_bf16'):
header = f'{model_name}'
model = locals()[model_name]
# optax stuff
optax_state = optimizer.init(params)
def loss_fn(params, batch, rng):
return model.loss_fn(params, batch, rng)[0]
def one_step(optax_state, params, batch, rng):
grads = jax.grad(loss_fn)(params, batch, rng)
updates, optax_state = optimizer.update(grads, optax_state, params)
params = optax.apply_updates(params, updates)
return params, updates
###
jitted_call = jax.jit(one_step)
jit_time = timeit(
lambda: jax.block_until_ready(jitted_call(optax_state, params, batch, rng)[0]),
)
step_times[f'(jit) {header}'] = jit_time
if model_name == 'vit_model_f32':
step_baseline_jit = jit_time
print('=======================')
print('==== STEP SUMMARY =====')
print('=======================')
max_step_speedup = float('-inf')
for name, ttime in sorted(step_times.items(), key=lambda x: x[1]):
speedup = step_baseline_jit / ttime
max_step_speedup = max(max_step_speedup, speedup)
print(f'{ttime*1000:7.3f}ms ({speedup:3.1f}x) {name}')
assert max_step_speedup > 1.6
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/models_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from jax import numpy as jnp
class GoogleViTConfig:
r"""
This is the configuration class to store the configuration of a [`ViTModel`]. It is used to instantiate an ViT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ViT
[google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) architecture.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler.
Supported activation functions can be found at https://jax.readthedocs.io/en/latest/jax.nn.html
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to `224`):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to `16`):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to `3`):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
encoder_stride (`int`, `optional`, defaults to 16):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
"""
model_type = 'google-vit'
def __init__(
self,
# parameters from ViTModel
hidden_size=768, # hidden_size
num_hidden_layers=12, # transformer.num_layers
num_attention_heads=12, # transformer.num_heads
intermediate_size=3072, # transformer.mlp_dim
hidden_act='gelu', # HF CLIPFlaxVisionModel uses quick_gelu, but google uses gelu_new (for mlp)
hidden_dropout_prob=0.0, # transformer.dropout_rate
attention_probs_dropout_prob=0.0, # transformer.attention_dropout_rate
initializer_range=0.02,
layer_norm_eps=1e-12,
image_size=224, # not in config, but it's the dimension they fine-tuned imagenet on
patch_size=16, #patches[0] == patches[1]
num_channels=3,
qkv_bias=True,
encoder_stride=16, # embeded in model name, i.e. Vit B/16
# Additional parameters from Google ViT
classifier: str = 'token',
head_bias_init: float = 0.0,
representation_size: int | None = None,
num_classes: int | None = None,
# pre_layernorm=True for parity with (HF's FlaxCLIPVisionModel), pre_layernorm=False for original Google ViT impl
pre_layernorm: bool = False,
dtype: Any = jnp.float32,
**kwargs,
):
if not qkv_bias:
raise NotImplementedError("FlaxViTModel supports this, but currently turning it off isn't supported")
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
# classifier: str = 'token' | 'token_unpooled' | 'unpooled' | 'gap' (token* = use trainable cls token | gap = return mean feature)
self.classifier = classifier
self.head_bias_init = head_bias_init
self.representation_size = representation_size
self.num_classes = num_classes
self.pre_layernorm = pre_layernorm
self.dtype = dtype
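# Illustrative usage sketch (not part of the original file). The defaults above correspond
# to a ViT-B/16-style encoder; `num_classes` must be set to attach the classification head
# defined in layers.py:
#
#   from rosetta.projects.vit.config import GoogleViTConfig
#   from rosetta.projects.vit.layers import FlaxGViTForImageClassificationModule
#
#   config = GoogleViTConfig(num_classes=1000)
#   module = FlaxGViTForImageClassificationModule(config)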
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.types as types
from nvidia.dali import fn, pipeline_def
from nvidia.dali.auto_aug import auto_augment
from rosetta.data.dali import BaseDALIPipeline
from rosetta.data.wds_utils import ModalityConfig
class ViTPipeline(BaseDALIPipeline):
## all pipelines are expected to take wds_config and per_shard_batch_size in their constructor
def __init__(self,
wds_config,
shard_id,
num_shards,
num_classes,
image_shape,
training=True,
):
self.num_classes = num_classes
self.image_shape = image_shape
modalities = [ModalityConfig(name='images',
ftype='jpg',
out_type='float32',
shape=self.image_shape),
ModalityConfig(name='labels',
ftype='cls',
out_type='float32',
shape=(self.num_classes,))]
super().__init__(wds_config=wds_config,
modalities=modalities,
shard_id=shard_id,
num_shards=num_shards,
training=training)
def get_wds_pipeline(self):
@pipeline_def(batch_size=self.per_shard_batch_size, num_threads=1, device_id=None, seed=self.seed)
def wds_vit_pipeline():
## assumes a particular order to the ftypes
img, clss = fn.readers.webdataset(
paths=self.urls,
index_paths=self.index_paths,
ext=[m.ftype for m in self.modalities],
missing_component_behavior='error',
random_shuffle=self.shuffle,
shard_id=self.shard_id,
num_shards=self.num_shards,
pad_last_batch=not self.training)
return img, clss
return wds_vit_pipeline()
def non_image_preprocessing(self, raw_text, num_classes):
""" preprocessing of class labels. """
bs = len(raw_text.shape())
ascii = [np.asarray(raw_text[i]) for i in range(bs)]
one_hot = np.zeros((bs, num_classes))
for i, el in enumerate(ascii):
idx = int(bytes(el).decode('utf-8'))
one_hot[i][idx] = 1
return one_hot
def data_source(self, num_classes):
while True:
preprocessed_img, raw_text = self.pipe.run()
preprocessed_label = self.non_image_preprocessing(raw_text, num_classes)
yield preprocessed_img, preprocessed_label
def get_dali_pipeline(self):
## need to enable conditionals for auto-augment
@pipeline_def(batch_size=self.per_shard_batch_size, num_threads=self.num_workers, device_id=None, enable_conditionals=True, seed=self.seed, prefetch_queue_depth=self.prefetch)
def main_vit_pipeline():
jpegs, labels = fn.external_source(source=self.data_source(self.num_classes), num_outputs=2)
img = fn.decoders.image(jpegs, device='cpu', output_type=types.RGB)
if self.training:
img = fn.random_resized_crop(img, size=self.image_shape[:-1], seed=self.seed)
img = fn.flip(img, depthwise=0, horizontal=fn.random.coin_flip(seed=self.seed))
## color jitter
brightness = fn.random.uniform(range=[0.6,1.4], seed=self.seed)
contrast = fn.random.uniform(range=[0.6,1.4], seed=self.seed)
saturation = fn.random.uniform(range=[0.6,1.4], seed=self.seed)
hue = fn.random.uniform(range=[0.9,1.1], seed=self.seed)
img = fn.color_twist(img,
brightness=brightness,
contrast=contrast,
hue=hue,
saturation=saturation)
## auto-augment
## `shape` controls the magnitude of the translation operations
img = auto_augment.auto_augment_image_net(img, seed=self.seed)
else:
img = fn.resize(img, size=self.image_shape[:-1])
## normalize
## https://github.com/NVIDIA/DALI/issues/4469
mean = np.asarray([0.485, 0.456, 0.406])[None, None, :]
std = np.asarray([0.229, 0.224, 0.225])[None, None, :]
scale = 1 / 255.
img = fn.normalize(img,
mean=mean / scale,
stddev=std,
scale=scale,
dtype=types.FLOAT)
return img, labels
return main_vit_pipeline()
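## Illustrative usage sketch mirroring dali_utils_test.py (paths and sizes are placeholders):
##
##   from rosetta.data import dali, wds_utils
##   config = wds_utils.WebDatasetConfig(urls='/path/to/dataset.tar', batch_size=4,
##                                       shuffle=False, seed=0, num_parallel_processes=1)
##   pipeline = ViTPipeline(config, shard_id=0, num_shards=1,
##                          num_classes=1000, image_shape=(224, 224, 3))
##   batches = iter(dali.DALIIterator(pipeline))
##   batch = next(batches)  # {'images': ..., 'labels': ...}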
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/dali_utils.py |
# Copyright (c) 2023, The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Mapping
from typing import Any, Optional
import clu.metrics as clu_metrics
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from flax.core import scope as flax_scope
from rosetta.projects.vit.layers import FlaxGViTForImageClassificationModule
from t5x import metrics as metrics_lib
from t5x import optimizers
from t5x.models import BaseModel
_ShardedDeviceArray = Any
Array = np.ndarray | jnp.ndarray | _ShardedDeviceArray | tf.Tensor
MetricsMap = metrics_lib.MetricsMap
PyTreeDef = type(jax.tree_util.tree_structure(None))
class ViTModel(BaseModel):
FEATURE_CONVERTER_CLS = None
def __init__(
self,
module: FlaxGViTForImageClassificationModule,
optimizer_def: optimizers.OptimizerDefType,
):
super().__init__(optimizer_def)
self.module = module
def loss_fn(
self, params: PyTreeDef,
batch: Mapping[str, jnp.ndarray],
dropout_rng: jax.random.KeyArray | None,
flax_mutables: Optional[PyTreeDef] = None,
) -> tuple[jnp.ndarray, MetricsMap]:
"""Computes loss and metrics.
Args:
params: model parameters.
batch: a batch of inputs.
dropout_rng: rng to use for dropout, or None for deterministic mode.
Returns:
loss: the loss computed for the given inputs and parameters.
metrics: a mapping of metrics computed for this batch.
"""
assert not flax_mutables, "ViT currently does not support 'flax_mutables'"
if dropout_rng is not None:
dropout_rng = {'dropout': dropout_rng}
logits = self.module.apply(
{'params': params},
rngs=dropout_rng,
pixel_values=batch['images'],
## train == not deterministic
deterministic=(dropout_rng is None))
def cross_entropy_loss(*, logits, labels):
logp = jax.nn.log_softmax(logits)
return -jnp.mean(jnp.sum(logp * labels, axis=1))
labels = batch['labels']
loss = cross_entropy_loss(logits=logits, labels=labels)
labels = jnp.argmax(labels, axis=-1).astype(jnp.int32)
metrics = self._compute_metrics(
logits=logits,
targets=labels,
loss=loss)
return loss, metrics
def eval_fn(
self,
params: PyTreeDef,
batch: Mapping[str, jnp.ndarray],
) -> tuple[jnp.ndarray, MetricsMap]:
return self.loss_fn(params, batch, dropout_rng=None)
def _compute_metrics(
self,
logits: jnp.ndarray,
targets: jnp.ndarray,
loss: jnp.ndarray,
) -> MetricsMap:
return compute_base_metrics_vit(
logits=logits, targets=targets, loss=loss)
def predict_batch_with_aux(
self,
params: PyTreeDef,
batch: Mapping[str, jnp.ndarray],
rng: jax.random.KeyArray | None = None,
flax_mutables: Optional[PyTreeDef] = None,
) -> tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
"""Predict a batch from the modelwith auxiliary outputs.
Args:
params: model parameters.
batch: a batch of inputs.
rng: an optional RNG key to use during prediction (e.g., for decoding).
Returns:
predictions: the model predictions
aux: auxiliary data
"""
assert not flax_mutables, "ViT currently does not support 'flax_mutables'"
logits = self.module.apply(
{'params': params},
rngs=rng,
pixel_values=batch['images'],
## inference is always deterministic
deterministic=True)
predictions = jnp.argmax(logits, axis=-1)
return predictions, None
def score_batch(
self,
params: PyTreeDef,
batch: Mapping[str, jnp.ndarray],
return_intermediates: bool = False,
flax_mutables: Optional[PyTreeDef] = None,
) -> jnp.ndarray:
"""Compute log likelihood score on a batch."""
assert not return_intermediates, '`return_intermediates` is not supported'
assert not flax_mutables, "ViT currently does not support 'flax_mutables'"
logits = self.module.apply(
{'params': params},
rngs=None,
pixel_values=batch['images'],
deterministic=True)
logp = jax.nn.log_softmax(logits)
labels = batch['labels'].astype(jnp.int32)
sequence_scores = jnp.sum(logp * labels, axis=1)
return sequence_scores
def get_metrics_per_batch(
self,
params: PyTreeDef,
batch: Mapping[str, jnp.ndarray],
) -> jnp.ndarray:
"""Computes evaluation metrics for a batch.
Returns: dict mapping metric name to per-example metric value. """
logits = self.module.apply(
{'params': params},
rngs=None,
pixel_values=batch['images'],
deterministic=True)
logp = jax.nn.log_softmax(logits)
labels = batch['labels'].astype(jnp.int32)
loss = -jnp.sum(logp * labels, axis=1)
labels = jnp.argmax(labels, axis=-1).astype(jnp.int32)
accuracy = (jnp.argmax(logits, axis=-1) == labels)
return {'loss': loss, 'accuracy': accuracy}
def get_initial_variables(
self,
rng: jax.random.KeyArray,
input_shapes: Mapping[str, Array],
input_types: Mapping[str, jnp.dtype] | None = None,
flax_mutables: Optional[PyTreeDef] = None,
) -> flax_scope.FrozenVariableDict:
assert not flax_mutables, "ViT currently does not support 'flax_mutables'"
return self.module.init(
rng,
jnp.ones(input_shapes['images'], dtype=input_types['images']),
deterministic=True)
def compute_base_metrics_vit(
logits: jnp.ndarray,
targets: jnp.ndarray,
loss: jnp.ndarray,
) -> MetricsMap:
"""Compute summary metrics.
Args:
logits: [batch, num_classes] float array of model outputs.
targets: [batch] int array of target class indices.
loss: scalar float loss for the batch.
Returns:
Dict of metrics.
"""
num_examples = targets.shape[0]
num_devices = jax.device_count()
metrics = {
'accuracy':
clu_metrics.Accuracy.from_model_output(
logits=logits, labels=targets),
'loss':
metrics_lib.AveragePerStep(total=loss),
'timing/samples_per_second':
metrics_lib.TimeRate.from_model_output(numerator=num_examples),
'timing/steps_per_second':
metrics_lib.StepsPerTime.from_model_output(),
'timing/seconds':
metrics_lib.Time(),
'timing/samples':
metrics_lib.Sum(num_examples),
'timing/samples_per_second_per_core':
metrics_lib.TimeRate.from_model_output(numerator=num_examples /
num_devices),
}
return metrics
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/models.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow_datasets as tfds
from rosetta.data import dali, wds_utils
from rosetta.projects.vit.dali_utils import ViTPipeline
def iter_per_sec(dataset, batch_size: int = 1, num_iter: int | None = None):
"""
Example Stats:
duration num_examples avg
first+lasts 0.169234 15 88.634839
first 0.103241 3 29.058269
lasts 0.065993 12 181.837903
"""
return tfds.benchmark(dataset, num_iter=num_iter, batch_size=batch_size).stats['avg']['lasts']
@pytest.mark.perf
@pytest.mark.data
def test_baseline_dali_iteration_stats(
dummy_wds_metadata,
):
"""Computes dataset stats for a batched raw webdataset with cls/img elements"""
img_shape = (dummy_wds_metadata.image_size, dummy_wds_metadata.image_size, dummy_wds_metadata.channels)
config = wds_utils.WebDatasetConfig(
urls=dummy_wds_metadata.path,
batch_size=dummy_wds_metadata.batch_size,
shuffle=False,
seed=0,
num_parallel_processes=1,
)
ds_shard_id = 0
num_ds_shards = 1
dataset = iter(dali.DALIIterator(ViTPipeline(config,
ds_shard_id,
num_ds_shards,
num_classes=dummy_wds_metadata.num_classes,
image_shape=img_shape)))
bps = iter_per_sec(dataset, batch_size=dummy_wds_metadata.batch_size, num_iter=500)
assert bps > 170
def test_dali_cls_preprocessing(dummy_wds_metadata):
config = wds_utils.WebDatasetConfig(
urls=dummy_wds_metadata.path,
batch_size=dummy_wds_metadata.batch_size,
shuffle=False,
seed=0,
num_parallel_processes=1,
)
img_shape = (dummy_wds_metadata.image_size, dummy_wds_metadata.image_size, dummy_wds_metadata.channels)
ds_shard_id = 0
num_ds_shards = 1
dataset = iter(dali.DALIIterator(
ViTPipeline(config,
ds_shard_id,
num_ds_shards,
num_classes=dummy_wds_metadata.num_classes,
image_shape=img_shape)))
batch = next(dataset)
class_labels = np.argmax(batch['labels'], -1)
assert(np.array_equal(class_labels, np.arange(4)))
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/dali_utils_test.py |
JAX-Toolbox-main | rosetta/rosetta/projects/vit/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from t5x import state_utils
def just_states_transform(checkpoint, opt_state, is_resuming: bool = False):
## unused
del is_resuming
return state_utils.apply_assignment_map(checkpoint, opt_state,
assignment_map=[(r'state.*', None)])
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/utils.py |
# Copyright 2022 Google LLC.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import asdict, dataclass
from typing import Any
import flax.linen as nn
import jax.numpy as jnp
from flax.linen.partitioning import param_with_axes
from rosetta.projects.vit.config import GoogleViTConfig
from t5x.examples.t5.layers import _convert_to_activation_function
Array = Any
PRNGKey = Any
Shape = tuple[int]
Dtype = Any
class IdentityLayer(nn.Module):
"""Identity layer, convenient for giving a name to an array."""
@nn.compact
def __call__(self, x):
return x
class AddPositionEmbs(nn.Module):
"""Adds learned positional embeddings to the inputs.
Attributes:
posemb_init: positional embedding initializer.
"""
posemb_init: Callable[[PRNGKey, Shape, Dtype], Array]
dtype: Dtype = jnp.float32
@nn.compact
def __call__(self, inputs):
"""Applies the AddPositionEmbs module.
Args:
inputs: Inputs to the layer.
Returns:
Output tensor with shape `(bs, timesteps, in_dim)`.
"""
# inputs.shape is (batch_size, seq_len, emb_dim).
assert inputs.ndim == 3, ('Number of dimensions should be 3,'
' but it is: %d' % inputs.ndim)
pos_emb_shape = (inputs.shape[1], inputs.shape[2])
pe = param_with_axes('pos_embedding', self.posemb_init, pos_emb_shape, jnp.float32, axes=('length', 'abspos_buckets'))
pe = jnp.asarray(pe, self.dtype)
return inputs + pe
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block."""
mlp_dim: int
dtype: Dtype = jnp.float32
out_dim: int | None = None
dropout_rate: float = 0.1
kernel_init: Callable[[PRNGKey, Shape, Dtype],
Array] = nn.initializers.xavier_uniform()
bias_init: Callable[[PRNGKey, Shape, Dtype],
Array] = nn.initializers.normal(stddev=1e-6)
hidden_act: str = 'gelu_new'
@nn.compact
def __call__(self, inputs, *, deterministic):
"""Applies Transformer MlpBlock module."""
actual_out_dim = inputs.shape[-1] if self.out_dim is None else self.out_dim
x = nn.Dense(
features=self.mlp_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
kernel_axes=('embed', 'mlp'),
bias_axes=('mlp',))( # pytype: disable=wrong-arg-types
inputs)
x = _convert_to_activation_function(self.hidden_act)(x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
output = nn.Dense(
features=actual_out_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
kernel_axes=('mlp', 'embed'),
bias_axes=('embed',))( # pytype: disable=wrong-arg-types
x)
output = nn.Dropout(
rate=self.dropout_rate)(
output, deterministic=deterministic)
return output
class Encoder1DBlock(nn.Module):
"""Transformer encoder layer.
Attributes:
inputs: input data.
mlp_dim: dimension of the mlp on top of attention block.
dtype: the dtype of the computation (default: float32).
dropout_rate: dropout rate.
attention_dropout_rate: dropout for attention heads.
deterministic: bool, deterministic or not (to apply dropout).
num_heads: Number of heads in nn.MultiHeadDotProductAttention
"""
mlp_dim: int
num_heads: int
dtype: Dtype = jnp.float32
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
hidden_act: str = 'gelu_new'
layer_norm_eps: float = 1e-6
@nn.compact
def __call__(self, inputs, *, deterministic):
"""Applies Encoder1DBlock module.
Args:
inputs: Inputs to the layer.
deterministic: Dropout will not be applied when set to true.
Returns:
output after transformer encoder block.
"""
# Attention block.
assert inputs.ndim == 3, f'Expected (batch, seq, hidden) got {inputs.shape}'
x = nn.LayerNorm(epsilon=self.layer_norm_eps,
dtype=self.dtype,
pjit_axis_name=('embed',))(inputs)
x = nn.MultiHeadDotProductAttention(
dtype=self.dtype,
kernel_init=nn.initializers.xavier_uniform(),
broadcast_dropout=False,
deterministic=deterministic,
dropout_rate=self.attention_dropout_rate,
num_heads=self.num_heads,
in_proj_kernel_axes=('embed', 'heads', 'kv'),
in_proj_bias_axes=('heads', 'kv'),
out_proj_kernel_axes=('heads', 'kv', 'embed'),
out_proj_bias_axes=('embed',),
decode_axes=('batch', 'length', 'heads', 'kv'))(
x, x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(epsilon=self.layer_norm_eps,
dtype=self.dtype,
pjit_axis_name=('embed',))(x)
y = MlpBlock(
mlp_dim=self.mlp_dim, dtype=self.dtype, dropout_rate=self.dropout_rate, hidden_act=self.hidden_act)(
y, deterministic=deterministic)
return x + y
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation.
Attributes:
num_layers: number of layers
mlp_dim: dimension of the mlp on top of attention block
num_heads: Number of heads in nn.MultiHeadDotProductAttention
dropout_rate: dropout rate.
attention_dropout_rate: dropout rate in self attention.
"""
num_layers: int
mlp_dim: int
num_heads: int
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
add_position_embedding: bool = True
dtype: Dtype = jnp.float32
hidden_act: str = 'gelu_new'
pre_layernorm: bool = False
layer_norm_eps: float = 1e-6
@nn.compact
def __call__(self, x, *, train):
"""Applies Transformer model on the inputs.
Args:
x: Inputs to the layer.
train: Set to `True` when training.
Returns:
output of a transformer encoder.
"""
assert x.ndim == 3 # (batch, len, emb)
if self.add_position_embedding:
x = AddPositionEmbs(
posemb_init=nn.initializers.normal(stddev=0.02), # from BERT.
dtype=self.dtype,
name='posembed_input')(
x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train)
if self.pre_layernorm:
x = nn.LayerNorm(epsilon=self.layer_norm_eps,
dtype=self.dtype,
pjit_axis_name=('embed',),
name='pre_layernorm')(x)
# Input Encoder
for lyr in range(self.num_layers):
x = Encoder1DBlock(
mlp_dim=self.mlp_dim,
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
name=f'encoderblock_{lyr}',
num_heads=self.num_heads,
dtype=self.dtype,
hidden_act=self.hidden_act,
layer_norm_eps=self.layer_norm_eps)(
x, deterministic=not train)
encoded = nn.LayerNorm(epsilon=self.layer_norm_eps,
dtype=self.dtype,
pjit_axis_name=('embed',),
name='encoder_norm')(x)
return encoded
@dataclass
class TransformerEncoderSubConfig:
mlp_dim: int
num_heads: int
num_layers: int
attention_dropout_rate: float
dropout_rate: float
hidden_act: str = 'gelu_new'
pre_layernorm: bool = False
layer_norm_eps: float = 1e-6
@dataclass
class ResnetConfig:
width_factor: int | None
num_layers: list[int] | None
@dataclass
class PatchesConfig:
size: tuple[int, ...]
def VisionTransformer(config: GoogleViTConfig, name=None):
transformer = None
if config.intermediate_size or config.num_attention_heads \
or config.num_hidden_layers or config.attention_probs_dropout_prob \
or config.hidden_dropout_prob:
transformer = TransformerEncoderSubConfig(
config.intermediate_size,
config.num_attention_heads,
config.num_hidden_layers,
config.attention_probs_dropout_prob,
config.hidden_dropout_prob,
config.hidden_act,
config.pre_layernorm,
config.layer_norm_eps,
)
return _VisionTransformer(
patches=PatchesConfig((config.patch_size, config.patch_size)),
transformer=transformer,
hidden_size=config.hidden_size,
representation_size=config.representation_size,
classifier=config.classifier,
dtype=config.dtype,
name=name,
)
class _VisionTransformer(nn.Module):
"""VisionTransformer original, without the classification head"""
patches: Any
transformer: Any
hidden_size: int
representation_size: int | None = None
classifier: str = 'token'
encoder: type[nn.Module] = Encoder
dtype: Dtype = jnp.float32
@nn.compact
def __call__(self,
pixel_values,
*,
deterministic,
return_dict=True):
train = not deterministic
x = pixel_values
### convert x to the appropriate dtype
x = jnp.asarray(x, self.dtype)
n, h, w, c = x.shape
# We can merge s2d+emb into a single conv; it's the same.
x = nn.Conv(
features=self.hidden_size,
kernel_size=self.patches.size,
strides=self.patches.size,
dtype=self.dtype,
kernel_axes=('height', 'width', 'input', 'embed'),
bias_axes=('embed', ),
padding='VALID',
name='embedding')(
x)
# Here, x is a grid of embeddings.
# (Possibly partial) Transformer.
if self.transformer is not None:
n, h, w, c = x.shape
x = jnp.reshape(x, [n, h * w, c])
# If we want to add a class token, add it here.
if self.classifier in ['token', 'token_unpooled']:
cls = param_with_axes('cls', nn.initializers.zeros, (c,), jnp.float32, axes=('embed',))
cls = jnp.asarray(cls, self.dtype)
cls = jnp.tile(cls, [n, 1, 1])
x = jnp.concatenate([cls, x], axis=1)
x = Encoder(name='Transformer', dtype=self.dtype, **asdict(self.transformer))(x, train=train)
last_hidden_state = x
if self.classifier == 'token':
x = x[:, 0]
elif self.classifier == 'gap':
x = jnp.mean(x, axis=list(range(1, x.ndim - 1))) # (1,) or (1,2)
elif self.classifier in ['unpooled', 'token_unpooled']:
pass
else:
raise ValueError(f'Invalid classifier={self.classifier}')
if self.representation_size is not None:
x = nn.Dense(features=self.representation_size, dtype=self.dtype, name='pre_logits', kernel_axes=('embed', 'mlp'), bias_axes=('mlp',))(x)
x = nn.tanh(x)
else:
x = IdentityLayer(name='pre_logits')(x)
return last_hidden_state, x
class VisionTransformerForImageClassification(nn.Module):
"""
VisionTransformer with additional mlp(dense+tanh) and a final dense layer for classification
In the original implementation, this was part of VisionTransformer, but it is separated here to be pluggable with CLIP
"""
config: GoogleViTConfig
@nn.compact
def __call__(self,
pixel_values,
*,
deterministic):
outputs = VisionTransformer(self.config, name='VisionTransformer')(
pixel_values,
deterministic=deterministic,
)
x = outputs[1]
if self.config.num_classes:
x = nn.Dense(
features=self.config.num_classes,
name='head',
kernel_init=nn.initializers.zeros,
bias_init=nn.initializers.zeros,
kernel_axes=('mlp', 'vocab'), bias_axes=('vocab',),
dtype=self.config.dtype)(x)
return x
class FlaxGViTModule(nn.Module):
config: GoogleViTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.vision_model = VisionTransformer(self.config)
def __call__(
self,
pixel_values,
deterministic: bool = True,
):
return self.vision_model(
pixel_values=pixel_values,
deterministic=deterministic,
)
class FlaxGViTForImageClassificationModule(nn.Module):
config: GoogleViTConfig
def setup(self):
self.vision_model = VisionTransformerForImageClassification(self.config)
def __call__(
self,
pixel_values,
deterministic: bool = True,
):
return self.vision_model(
pixel_values=pixel_values,
deterministic=deterministic,
)
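# Illustrative usage sketch (placeholder shapes; mirrors how models.ViTModel drives this
# module). `import jax` is assumed in addition to the imports above:
#
#   module = FlaxGViTForImageClassificationModule(GoogleViTConfig(num_classes=1000))
#   pixels = jnp.ones((1, 224, 224, 3), jnp.float32)
#   variables = module.init(jax.random.PRNGKey(0), pixels, deterministic=True)
#   logits = module.apply({'params': variables['params']},
#                         pixel_values=pixels,
#                         deterministic=True)  # shape (1, 1000)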
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pytest
def print_output(stdout, stderr):
def box_print(text):
n_len = len(text)
print('='*(n_len + 4))
print(f'= {text} =')
print('='*(n_len + 4))
box_print('stdout')
print(stdout.decode())
box_print('stderr')
print(stderr.decode())
@pytest.mark.integration
def test_small_vit_train_on_dummy_data(dummy_wds_metadata, run_subprocess_blocking, package_root_dir, tmp_path):
tmp_model_dir = str(tmp_path)
stdout, stderr, returncode = run_subprocess_blocking(
sys.executable, '-m',
't5x.train',
'--gin_file=rosetta/projects/vit/configs/tests/small_pretrain_dummy.gin',
'--gin.TRAIN_STEPS=100',
f'--gin.MIXTURE_OR_TASK_NAME="{dummy_wds_metadata.path}"',
f'--gin.MODEL_DIR="{tmp_model_dir}"',
'--gin.DTYPE="bfloat16"',
'--gin.BATCH_SIZE=4',
'--gin.train.stats_period=100',
'--gin.trainer.Trainer.num_microbatches=0',
'--gin_search_paths=/opt/rosetta',
env={'CUDA_VISIBLE_DEVICES': '0'},
)
print_output(stdout, stderr)
assert returncode == 0
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/integration_test.py |
# Copyright 2022 Google LLC.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections.abc import Sequence
import flax
import jax
import jax.numpy as jnp
import numpy as np
import scipy
from absl import logging
from rosetta.projects.vit import models
from t5x import checkpoints, partitioning, utils
_DEFAULT_GIN_SEARCH_PATHS = [
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
]
#### from https://github.com/google-research/vision_transformer/blob/62a446f1b3bb9e470db5689bfd7407a8d91bae8a/vit_jax/checkpoint.py
def interpolate_posembed(posemb, num_tokens: int, has_class_token: bool):
"""Interpolate given positional embedding parameters into a new shape.
Args:
posemb: positional embedding parameters.
num_tokens: desired number of tokens.
has_class_token: True if the positional embedding parameters contain a
class token.
Returns:
Positional embedding parameters interpolated into the new shape.
"""
assert posemb.shape[0] == 1
if has_class_token:
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
num_tokens -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0, 0:]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(num_tokens))
logging.info('interpolate_posembed: grid-size from %s to %s', gs_old, gs_new)
assert gs_old ** 2 == len(posemb_grid), f'{gs_old ** 2} != {len(posemb_grid)}'
assert gs_new ** 2 == num_tokens, f'{gs_new ** 2} != {num_tokens}'
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = scipy.ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
return jnp.array(np.concatenate([posemb_tok, posemb_grid], axis=1))
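# Illustrative shape example (comments only, not executed): fine-tuning a ViT/16 model at
# 384x384 instead of 224x224 grows the position grid from 14x14 to 24x24 tokens plus the
# class token:
#
#   posemb = jnp.zeros((1, 1 + 14 * 14, 768))
#   new_posemb = interpolate_posembed(posemb, num_tokens=1 + 24 * 24, has_class_token=True)
#   assert new_posemb.shape == (1, 1 + 24 * 24, 768)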
def convert_t5x_finetune_to_pretrain(
pretrain_ckpt_dir: str,
finetune_ckpt_dir: str,
pretrained_model: models.ViTModel,
finetune_resolution: int,
partitioner: partitioning.PjitPartitioner,
):
pt_input_shapes = {'images': (1, pretrained_model.module.config.image_size, pretrained_model.module.config.image_size, 3)}
input_dtypes = {'images': jnp.float32}
pt_train_state_initializer = utils.TrainStateInitializer(
optimizer_def=None, # Do not load optimizer state.
init_fn=pretrained_model.get_initial_variables,
input_shapes=pt_input_shapes,
input_types=input_dtypes,
partitioner=partitioner)
# Start by filling t5x state with initialized model
pt_init_ts = pt_train_state_initializer.from_scratch(jax.random.PRNGKey(0))
pt_checkpointer = checkpoints.Checkpointer(
pt_init_ts,
partitioner,
pretrain_ckpt_dir,
)
restored_state = pt_checkpointer.restore()
ft_input_shapes = {'images': (1, finetune_resolution, finetune_resolution, 3)}
utils.TrainStateInitializer(
optimizer_def=None, # Do not load optimizer state.
init_fn=pretrained_model.get_initial_variables,
input_shapes=ft_input_shapes,
input_types=input_dtypes,
partitioner=partitioner)
ft_init_ts = pt_train_state_initializer.from_scratch(jax.random.PRNGKey(0))
pt_posemb = restored_state.params['vision_model']['VisionTransformer']['Transformer']['posembed_input']['pos_embedding']
pt_posemb = jnp.expand_dims(pt_posemb, 0)
new_shape = 1 + (finetune_resolution // pretrained_model.module.config.patch_size)**2
ft_posemb = interpolate_posembed(
pt_posemb, new_shape, has_class_token=True)
ft_posemb = jnp.squeeze(ft_posemb, 0)
pt_params = restored_state.params.unfreeze()
pt_params['vision_model']['VisionTransformer']['Transformer']['posembed_input']['pos_embedding'] = ft_posemb
## drop head
pt_params['vision_model']['VisionTransformer']['pre_logits'] = {}
pt_params['vision_model']['head'] = ft_init_ts.params['vision_model']['head']
ft_init_ts = ft_init_ts.replace_params(pt_params)
ft_checkpointer = checkpoints.Checkpointer(
ft_init_ts,
partitioner,
finetune_ckpt_dir,
)
ft_checkpointer.save(ft_init_ts)
print(f'Saved to {finetune_ckpt_dir}')
# Verify that the state transition worked
flat_ft_params = flax.traverse_util.flatten_dict(ft_init_ts.params, sep='/')
flat_pt_params = flax.traverse_util.flatten_dict(restored_state.params, sep='/')
for n in flat_ft_params.keys():
if 'posembed_input' in n:
continue
### MLP head is zero-initialized from scratch, so check it against zeros instead of the pretrain params
if 'vision_model/head' in n:
np.testing.assert_allclose(flat_ft_params[n], np.zeros_like(flat_ft_params[n]))
continue
np.testing.assert_allclose(flat_ft_params[n], flat_pt_params[n])
if __name__ == '__main__':
# pylint: disable=g-import-not-at-top
import gin
from absl import app, flags
from t5x import gin_utils
# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS
jax.config.parse_flags_with_absl()
flags.DEFINE_multi_string(
'gin_file',
default=None,
help='Path to gin configuration file. Multiple paths may be passed and '
'will be imported in the given order, with later configurations '
'overriding earlier ones.')
flags.DEFINE_multi_string(
'gin_bindings', default=[], help='Individual gin bindings.')
flags.DEFINE_list(
'gin_search_paths',
default=['.'],
help='Comma-separated list of gin config path prefixes to be prepended '
'to suffixes given via `--gin_file`. If a file appears in. Only the '
'first prefix that produces a valid path for each suffix will be '
'used.')
def main(argv: Sequence[str]):
"""Wrapper for pdb post mortems."""
_main(argv)
def _main(argv: Sequence[str]):
"""True main function."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
save_using_gin = gin.configurable(convert_t5x_finetune_to_pretrain)
gin_utils.parse_gin_flags(
# User-provided gin paths take precedence if relative paths conflict.
FLAGS.gin_search_paths + _DEFAULT_GIN_SEARCH_PATHS,
FLAGS.gin_file,
FLAGS.gin_bindings)
save_using_gin()
jax.effects_barrier()
gin_utils.run(main)
| JAX-Toolbox-main | rosetta/rosetta/projects/vit/scripts/convert_t5x_pretrain_to_finetune_ckpt.py |
JAX-Toolbox-main | rosetta/rosetta/projects/vit/scripts/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.types as types
import pytest
from nvidia.dali import fn, pipeline_def
from rosetta.data import wds_utils
from rosetta.data.dali import BaseDALIPipeline
class DummyPipeline(BaseDALIPipeline):
def __init__(self,
wds_config,
shard_id,
num_shards,
num_classes,
image_shape,
):
modalities = [
wds_utils.ModalityConfig(
name='image',
ftype='jpg',
out_type='float32',
shape=image_shape,
),
wds_utils.ModalityConfig(
name='label',
ftype='cls',
out_type='int',
shape=(num_classes,),
),
]
super().__init__(wds_config=wds_config,
modalities=modalities,
shard_id=shard_id,
num_shards=num_shards,
training=False)
def get_wds_pipeline(self):
@pipeline_def(batch_size=self.per_shard_batch_size, num_threads=1, device_id=None)
def wds_pipeline():
img, clss = fn.readers.webdataset(
paths=self.urls,
index_paths=self.index_paths,
ext=[m.ftype for m in self.modalities],
missing_component_behavior='error',
random_shuffle=self.shuffle,
shard_id=self.shard_id,
num_shards=self.num_shards,
pad_last_batch=False)
return img, clss
return wds_pipeline()
## non-image preprocessing
def class_preproc(self, raw_text):
bs = len(raw_text.shape())
ascii = [np.asarray(raw_text[i]) for i in range(bs)]
labels = np.zeros((bs, ))
for i, el in enumerate(ascii):
idx = int(bytes(el).decode('utf-8'))
labels[i] = idx
return labels
def data_source(self):
while True:
img, clss = self.pipe.run()
clss = self.class_preproc(clss)
yield img, clss
def get_dali_pipeline(self):
@pipeline_def(batch_size=self.per_shard_batch_size, num_threads=self.num_workers, device_id=None)
def main_pipeline():
img, labels = fn.external_source(source=self.data_source(), num_outputs=2)
img = fn.decoders.image(img, device='cpu', output_type=types.RGB)
return img, labels
return main_pipeline()
@pytest.mark.data
def test_baseline_dali_singleprocess_output(
dummy_wds_metadata,
):
img_shape = (dummy_wds_metadata.image_size, dummy_wds_metadata.image_size, dummy_wds_metadata.channels)
config = wds_utils.WebDatasetConfig(
urls=dummy_wds_metadata.path,
batch_size=dummy_wds_metadata.batch_size,
shuffle=False,
seed=0,
)
ds_shard_id = 0
num_ds_shards = 1
pipe = DummyPipeline(config,
ds_shard_id,
num_ds_shards,
dummy_wds_metadata.num_classes,
img_shape).get_dali_pipeline()
pipe.build()
labels = []
for _ in range(2):
img, lab = pipe.run()
labels.extend(lab.as_array())
assert labels == list(range(8))
@pytest.mark.data
def test_baseline_dali_multiprocess_output(
dummy_wds_metadata,
):
img_shape = (dummy_wds_metadata.image_size, dummy_wds_metadata.image_size, dummy_wds_metadata.channels)
config = wds_utils.WebDatasetConfig(
urls=dummy_wds_metadata.path,
batch_size=dummy_wds_metadata.batch_size,
shuffle=False,
seed=0,
)
ds_shard_id = 0
num_ds_shards = 2
first_proc_pipe = DummyPipeline(config,
ds_shard_id,
num_ds_shards,
dummy_wds_metadata.num_classes,
img_shape).get_dali_pipeline()
first_proc_pipe.build()
labels = []
source = []
for _ in range(2):
img, lab = first_proc_pipe.run()
labels.extend(lab.as_array())
source += [l.source_info() for l in img]
assert labels == list(range(4))
assert all(source[i].endswith(f'sample00000{i}.jpg') for i in range(len(source)))
ds_shard_id = 1
second_proc_pipe = DummyPipeline(config,
ds_shard_id,
num_ds_shards,
dummy_wds_metadata.num_classes,
img_shape).get_dali_pipeline()
second_proc_pipe.build()
labels = []
source = []
for _ in range(2):
img, lab = second_proc_pipe.run()
labels.extend(lab.as_array())
source += [l.source_info() for l in img]
assert all(source[i].endswith(f'sample0000{40+i}.jpg') for i in range(len(source)))
| JAX-Toolbox-main | rosetta/rosetta/data/dali_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def test_generated_indices(
dummy_wds_metadata,
run_subprocess_blocking,
tmp_path,
):
index_dir = f'{tmp_path}/indices'
stdout, stderr, returncode = run_subprocess_blocking(
sys.executable, '-m',
'rosetta.data.generate_wds_indices',
f'--archive={dummy_wds_metadata.path}',
f'--index_dir={index_dir}')
files = os.listdir(index_dir)
print(index_dir)
## one index file per wds tar file
assert len(files)==1
with open(os.path.join(index_dir, files[0])) as test_file:
lines = test_file.readlines()
assert len(lines) == dummy_wds_metadata.num_examples+1
first_example = lines[1].split()
### 4 entries per modality, 3 modalities (cls, jpg, txt)
assert len(first_example) == 12
final_example = lines[-1].split()
assert final_example[0] == 'cls'
assert final_example[3] == 'sample000079.cls'
| JAX-Toolbox-main | rosetta/rosetta/data/generate_wds_indices_test.py |
JAX-Toolbox-main | rosetta/rosetta/data/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
from typing import Any
# -----------------------------------------------------------------------------
# Configurations
# -----------------------------------------------------------------------------
@dataclasses.dataclass
class ModalityConfig:
"""Information about a particular modality present in the dataset.
Note that a 1-1 mapping is expected between modalities and features,
such that a single modality maps to a single feature in the dataset.
Attributes:
name: Desired name of the feature to be created using this modality.
ftype: Extension of the file names corresponding to this modality
(e.g. 'jpg', 'png', 'cls', 'txt').
out_type: Expected return type for this feature.
shape: Expected output shape for this feature.
"""
name: str | None
ftype: str | None
out_type: tuple[Any]
shape: tuple[int]
@dataclasses.dataclass
class WebDatasetConfig:
"""Configuration for loading a WebDataset
Attributes:
urls: String with the path to the webdataset tar files. A sequence of tar files
can be specified using braceexpand notation.
batch_size: Global batch size.
seed: Dataloading seed.
index_dir: Path to the index files corresponding to the webdataset. Index files can be
generated using `generate_wds_indices.py`.
prefetch: Prefetch depth of the dataloading pipeline
shuffle: Whether to shuffle the data
num_parallel_processes: Number of CPU threads used for the dataloading pipeline.
"""
urls: str
batch_size: int
seed: int | None
index_dir: str | None = None
prefetch: int = 2 # prefetch buffer size
shuffle: bool = True
num_parallel_processes: int = 16
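# Illustrative sketch (paths are placeholders): a config for a sharded webdataset with
# index files produced by rosetta.data.generate_wds_indices:
#
#   config = WebDatasetConfig(
#       urls='/data/shards-{000000..000009}.tar',  # braceexpand notation
#       batch_size=256,
#       seed=0,
#       index_dir='/data/indices',
#   )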
| JAX-Toolbox-main | rosetta/rosetta/data/wds_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import wds2idx
from absl import logging
from braceexpand import braceexpand
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='A script to generate the webdataset index files to be used during data loading.',
)
parser.add_argument('--archive', help='path to .tar files.')
parser.add_argument(
'--index_dir',
help='location to store index files',
)
args = parser.parse_args()
## make sure to generate train and eval inidces separately
os.makedirs(args.index_dir, exist_ok=True)
urls = list(braceexpand(args.archive))
for (i, url) in enumerate(urls):
creator = wds2idx.IndexCreator(url, os.path.join(args.index_dir, f'idx_{i}.txt'))
creator.create_index()
creator.close()
logging.info(f'Done! Index files written to {args.index_dir}.')
| JAX-Toolbox-main | rosetta/rosetta/data/generate_wds_indices.py |
# Copyright (c) 2023, The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import re
import threading
import time
from typing import Any
import jax
import numpy as np
from braceexpand import braceexpand
from clu import asynclib
from clu.data.dataset_iterator import ArraySpec, Element, ElementSpec
from jax.experimental import multihost_utils
from rosetta.data import wds_utils
from t5x import partitioning, utils
PyTree = Any
def type_proc(dtype: str):
if dtype == 'float32':
return np.float32
elif dtype == 'int':
return np.int32
elif dtype == 'float16':
return np.float16
elif dtype == 'bfloat16':
return jax.numpy.bfloat16
else:
raise ValueError('Could not parse dtype: %s' % dtype)
class BaseDALIPipeline(abc.ABC):
def __init__(self,
wds_config: wds_utils.WebDatasetConfig,
modalities: wds_utils.ModalityConfig,
shard_id: int,
num_shards: int,
training: bool=True):
"""Abstract class for defining a DALI pipeline for t5x models.
Attributes:
wds_config: a WebDatasetConfig instance containing the dataloading configuration. See `wds_utils.py` for more information
modalities: a ModalityConfig instance containing information about the modalities present in the dataset. See `wds_utils.py` for more information
shard_id: dataset shard index
num_shards: number of dataset shards
training: whether data is being loaded in training or evaluation mode.
"""
index_dir = wds_config.index_dir
index_paths = [os.path.join(index_dir, f) for f in os.listdir(index_dir)] if index_dir else None
self.index_paths = sorted(index_paths, key=lambda x: int(re.split('_|\\.', x)[-2])) if index_paths else None
self.urls = list(braceexpand(wds_config.urls))
self.modalities = modalities
self.shard_id = shard_id
self.num_shards = num_shards
self.seed = wds_config.seed
self.per_shard_batch_size = wds_config.batch_size // num_shards
self.shuffle = wds_config.shuffle
self.num_workers = wds_config.num_parallel_processes
self.prefetch = wds_config.prefetch
self.training = training
## set up the wds reader
self.pipe = self.get_wds_pipeline()
self.pipe.build()
## dataset metadata
meta_dict = self.pipe.reader_meta()
assert(len(meta_dict) == 1), 'Pipeline has multiple readers but is expected to have only one'
self.meta = list(meta_dict.values())[0]
@abc.abstractmethod
def get_wds_pipeline(self):
"""Returns the pipeline which loads the wds files.
Expected to have the following format:
@pipeline_def(batch_size=self.per_shard_batch_size, num_threads=1, device_id=None)
def wds_pipeline():
outputs = fn.readers.webdataset(
...)
return outputs
return wds_pipeline()
See ViT's `dali_utils.py` for an example
"""
pass
@abc.abstractmethod
def get_dali_pipeline(self):
"""Returns a DALI pipeline instance
In general, pipelines should have the following structure:
def text_preprocessing(text):
## non-image preprocessing ##
def data_source(num_classes):
while True:
img, text = self.pipe.run()
text = text_preprocessing(text)
yield img, text
@pipeline_def
def main_vit_pipeline():
img, text = fn.external_source(source=data_source(), num_outputs=...)
## image preprocessing ##
return img, text
See ViT's `dali_utils.py` for an example
"""
pass
class DALIIterator:
"""Wrapper around BaseDaliPipeline that makes iterator compatible
with clu's PeekableDatasetIterator API."""
def __init__(self, dali_wrapped_pipeline: BaseDALIPipeline):
self.wrapped_pipeline = dali_wrapped_pipeline
self.pipeline = dali_wrapped_pipeline.get_dali_pipeline()
self.pipeline.build()
self.training = dali_wrapped_pipeline.training
## from clu
self._mutex = threading.Lock()
self._peek: Element | None = None
self._pool = None
self._peek_future = None
## has source info about the current batch
self.num_unique_examples = 0
self.source2idx = {}
self.source_info = None
self.last_source = None
def __iter__(self):
return self
def get_next_element(self):
out = self.pipeline.run()
## stores source information for the current batch
## NOTE: source_info gets lost when using certain preprocessing fns
## but for eval, preprocessing is simple enough that this works
## Update source_info.
## Needed to keep track of padding examples during eval
if not self.training:
if self.source_info:
self.last_source = self.source_info[-1]
self.source_info = []
for ex in out[0]:
info = ex.source_info()
if info not in self.source2idx:
self.source2idx[info] = self.num_unique_examples
self.num_unique_examples += 1
self.source_info.append(self.source2idx[info])
return {m.name: out[i].as_array() for i, m in enumerate(self.wrapped_pipeline.modalities)}
def __next__(self):
with self._mutex:
if self._peek is None:
return self.get_next_element()
peek = self._peek
self._peek = None
return peek
## "peek" and "peek_async" taken from clu
def peek(self):
"""Returns the next element without consuming it.
This will get the next element from the underlying iterator. The element
is stored and return on the next call of __next__().
Returns:
The next element.
"""
if self._peek is None:
self._peek = next(self)
return self._peek
def peek_async(self):
"""Same as peek() but returns the Future of the element.
Users can call this to warm up the iterator.
Returns:
Future with the next element. The element is also kept and returned on the
next call of __next__().
"""
with self._mutex:
if self._peek_future is None:
if self._pool is None:
self._pool = asynclib.Pool(max_workers=1)
self._peek_future = self._pool(self.peek)()
return self._peek_future
@property
def element_spec(self) -> ElementSpec:
batch_size = self.wrapped_pipeline.per_shard_batch_size
return {
m.name: ArraySpec(dtype=type_proc(m.out_type), shape=(batch_size, *m.shape))
for m in self.wrapped_pipeline.modalities
}
def get_dali_dataset(cfg,
ds_shard_id,
num_ds_shards,
feature_converter_cls,
pipeline = None,
):
assert not bool(feature_converter_cls), 'Passing `feature_converter_cls` is not supported'
seed = cfg.seed
if seed is None:
# Use a shared timestamp across devices as the seed.
seed = int(multihost_utils.broadcast_one_to_all(np.int32(time.time())))
cfg.seed = seed
return iter(DALIIterator(pipeline(cfg, ds_shard_id, num_ds_shards)))
def get_dali_eval_dataset(cfg,
ds_shard_id,
num_ds_shards,
feature_converter_cls,
eval_steps = None,
pipeline = None,
):
assert not bool(feature_converter_cls), 'Passing `feature_converter_cls` is not supported'
ds = iter(DALIIterator(pipeline(cfg, ds_shard_id, num_ds_shards)))
datasets = {'validation': ds}
return datasets
class ShardedDatasetIterator:
"""A wrapper iterator that returns sharded arrays."""
def __init__(
self,
iterator: DALIIterator,
partitioner: partitioning.BasePartitioner,
global_shapes: PyTree,
):
self._iterator = iterator
self._global_shapes = global_shapes
self._partitioner = partitioner
def __next__(self):
return utils._create_sharded_array(
self._partitioner, self._global_shapes, next(self._iterator),
)
@property
def element_spec(self):
return self._iterator.element_spec
@property
def is_nonpadding(self):
""" Returns a boolean array indicating which examples in the batch
are not padding examples. """
bs = self._global_shapes[next(iter(self._global_shapes))][0]
source_info = self._iterator.source_info
source_shift_right = [self._iterator.last_source] + source_info[:-1]
is_nonpadding = (1-(np.array(source_info)==np.array(source_shift_right))).astype(bool)
return utils._create_sharded_array(
self._partitioner, {'source': (bs,)}, {'source': np.array(is_nonpadding)},
)['source']
@property
def iterator(self):
return self._iterator
def __iter__(self):
return iter(self._iterator)
def peek(self):
return self._iterator.peek()
def peek_async(self):
return self._iterator.peek_async()
def create_sharded_iterator(train_iter,
partitioner,
checkpoint_cfg,
data_layout):
input_shapes = jax.tree_map(
lambda x: (data_layout.batch_size, *x.shape[1:]), train_iter.element_spec,
)
return ShardedDatasetIterator(train_iter, partitioner, input_shapes)
| JAX-Toolbox-main | rosetta/rosetta/data/dali.py |
import os
import sys
import numpy as np
import json
def main():
if len(sys.argv) < 3:
sys.exit(1)
config = sys.argv[1]
run_dirs = sys.argv[2:]
# Store metrics data as list of dicts
json_fnames = [f"{r}/{config}_metrics.json" for r in run_dirs]
src_data = []
for fname in json_fnames:
with open(fname, "r") as f:
src_data.append(json.load(f))
# Ensure start step, end step, interval equal across runs
for k in ["start_step", "end_step", "step_interval"]:
values = [metrics[k] for metrics in src_data]
print("checking equality for", k)
print(values)
assert all([v == values[0] for v in values])
# Gather metrics across dirs
avg_data = src_data[0].copy() # Use first metrics dict as a template
loss_data = np.array([metrics["loss_values"] for metrics in src_data])
step_times_data = np.array([metrics["step_times"] for metrics in src_data])
mean_step_times_data = np.array([metrics["step_time_avg"] for metrics in src_data])
e2e_time_data = np.array([metrics["e2e_time_seconds"] for metrics in src_data])
# Average
avg_data["loss_values"] = list(np.mean(loss_data, axis=0))
avg_data["step_times"] = list(np.mean(step_times_data, axis=0))
avg_data["step_time_avg"] = np.mean(mean_step_times_data)
avg_data["e2e_time_seconds"] = np.mean(e2e_time_data)
# save to file
fname = config + ".json"
with open(fname, "w") as f:
json.dump(avg_data, f)
if __name__ == "__main__":
main()
| JAX-Toolbox-main | .github/workflows/baselines/average_baselines.py |
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.util import tensor_util
def read_tb_tag(tb_file: str, summary_name: str) -> dict:
ea = event_accumulator.EventAccumulator(tb_file)
ea.Reload()
return {
event.step: tensor_util.make_ndarray(event.tensor_proto).item()
for event in ea.Tensors(summary_name)
}
def read_e2e_time(log_file: str) -> float:
with open(log_file, "r") as log:
for line in log:
if line.startswith("real"):
minutes = line.split()[1].split('m')[0]
seconds = line.split('m')[1].split('s')[0]
return float(minutes) * 60 + float(seconds)
return -100000000
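def _example_read_e2e_time():
    """Hedged self-check of the `time`-output parsing above; the sample log
    contents are made up and only illustrate the expected "real XmY.Ys" line."""
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as tmp:
        tmp.write("user\t0m1.20s\nreal\t2m5.50s\n")
        name = tmp.name
    # 2 minutes and 5.5 seconds -> 125.5 seconds
    assert read_e2e_time(name) == 125.5
    return name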
| JAX-Toolbox-main | .github/workflows/baselines/test_utils.py |
import os
import json
import glob
import sys
from statistics import mean
from test_utils import read_tb_tag, read_e2e_time
def _create_summary(loss, train_time, e2e_time):
steps = list(loss.keys())
intervals = [k2 - k1 for k1, k2 in zip(loss.keys(), steps[1:])]
assert all(i == intervals[0] for i in intervals)
baseline = {
"start_step": steps[0],
"end_step": steps[-1],
"step_interval": intervals[0],
"loss_values": list(loss.values()),
"step_times": list(train_time.values()),
"step_time_avg": mean(list(train_time.values())),
"e2e_time_seconds": e2e_time,
}
return baseline
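def _example_create_summary():
    """Hedged sketch with made-up metric values (not from a real run): loss and
    step-rate dicts keyed by training step yield a baseline dict with evenly
    spaced steps and an averaged step-time entry."""
    loss = {100: 3.2, 200: 2.9, 300: 2.7}
    train_time = {100: 1.5, 200: 1.6, 300: 1.7}
    baseline = _create_summary(loss, train_time, e2e_time=3600.0)
    assert baseline["start_step"] == 100 and baseline["step_interval"] == 100
    return baseline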
def main():
loss_summary_name = "loss"
train_time_summary_name = "Steps/sec"
    if len(sys.argv) > 1:
        test_config = sys.argv[1]
    else:
        sys.exit(1)
try:
event_file = os.path.join(test_config, "summaries/train/events*")
event_file = glob.glob(event_file)[0]
loss = read_tb_tag(event_file, loss_summary_name)
train_time = read_tb_tag(event_file, train_time_summary_name)
e2e_time = read_e2e_time(test_config + ".log")
baseline = _create_summary(loss, train_time, e2e_time)
json_fname = test_config + "_metrics.json"
with open(json_fname, "w") as f:
json.dump(baseline, f)
except KeyError as e:
print(e)
print("Run might have failed, see", test_config)
if __name__ == "__main__":
main()
| JAX-Toolbox-main | .github/workflows/baselines/summarize_metrics.py |
import pytest
import os
import json
import glob
import test_utils
from statistics import mean
STEP_TIME_MULT = {
"1DP1TP1PP": 0.95,
"8DP1TP1PP": 0.95,
"1DP8TP1PP": 0.95,
"2DP1TP4PP": 0.95,
"16DP1TP1PP": 0.95,
"2DP2TP4PP": 0.95,
}
E2E_TIME_MULT = {
"1DP1TP1PP": 0.95,
"8DP1TP1PP": 0.95,
"1DP8TP1PP": 0.95,
"2DP1TP4PP": 0.95,
"16DP1TP1PP": 0.95,
"2DP2TP4PP": 0.95,
}
test_dir = os.path.dirname(os.path.abspath(__file__))
baselines_dir = os.path.join(test_dir, "PAX_MGMN")
results_dir = os.environ.get("RESULTS_DIR")
loss_summary_name = "loss"
step_time_summary_name = "Steps/sec"
@pytest.mark.parametrize("baseline_filename", os.listdir(baselines_dir))
def test_loss(baseline_filename):
baseline_filepath = os.path.join(baselines_dir, baseline_filename)
test_config = baseline_filename.split(".")[0]
event_file = os.path.join(results_dir, test_config, "summaries/train/events*")
event_file = glob.glob(event_file)[0]
with open(baseline_filepath, "r") as baseline_file:
end_step = json.load(baseline_file)["end_step"]
loss_actual = test_utils.read_tb_tag(event_file, loss_summary_name)
assert loss_actual[end_step] == 0, f"Loss at final step: {loss_actual[end_step]}, Expected loss: 0"
@pytest.mark.parametrize("baseline_filename", os.listdir(baselines_dir))
def test_step_time(baseline_filename):
baseline_filepath = os.path.join(baselines_dir, baseline_filename)
test_config = baseline_filename.split(".")[0]
event_file = os.path.join(results_dir, test_config, "summaries/train/events*")
event_file = glob.glob(event_file)[0]
with open(baseline_filepath, "r") as baseline_file:
step_time_avg_expected = json.load(baseline_file)["step_time_avg"]
step_time_values = test_utils.read_tb_tag(event_file, step_time_summary_name).values()
step_time_avg_actual = mean(step_time_values)
assert step_time_avg_actual > step_time_avg_expected * \
STEP_TIME_MULT[test_config], f"Step time values: {step_time_values} (Avg: {step_time_avg_actual}), Expected avg: {step_time_avg_expected}"
@pytest.mark.parametrize("baseline_filename", os.listdir(baselines_dir))
def test_e2e_time(baseline_filename):
baseline_filepath = os.path.join(baselines_dir, baseline_filename)
test_config = baseline_filename.split(".")[0]
run_log = os.path.join(results_dir, test_config + ".log")
with open(baseline_filepath, "r") as baseline_file:
e2e_time_expected = json.load(baseline_file)["e2e_time_seconds"]
e2e_time_actual = test_utils.read_e2e_time(run_log)
assert e2e_time_actual < e2e_time_expected / \
E2E_TIME_MULT[test_config], f"Run E2E time: {e2e_time_actual}, Expected E2E time: {e2e_time_expected}"
| JAX-Toolbox-main | .github/workflows/baselines/test_pax_mgmn_metrics.py |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='odtk',
version='0.2.6',
description='Fast and accurate single shot object detector',
author = 'NVIDIA Corporation',
packages=['odtk', 'odtk.backbones'],
ext_modules=[CUDAExtension('odtk._C',
['csrc/extensions.cpp', 'csrc/engine.cpp', 'csrc/cuda/decode.cu', 'csrc/cuda/decode_rotate.cu', 'csrc/cuda/nms.cu', 'csrc/cuda/nms_iou.cu'],
extra_compile_args={
'cxx': ['-std=c++14', '-O2', '-Wall'],
'nvcc': [
'-std=c++14', '--expt-extended-lambda', '--use_fast_math', '-Xcompiler', '-Wall,-fno-gnu-unique',
'-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61',
'-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72',
'-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_80,code=sm_80',
'-gencode=arch=compute_86,code=sm_86', '-gencode=arch=compute_86,code=compute_86'
],
},
libraries=['nvinfer', 'nvinfer_plugin', 'nvonnxparser', 'opencv_core', 'opencv_imgproc', 'opencv_highgui', 'opencv_imgcodecs'])
],
cmdclass={'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)},
install_requires=[
'torch>=1.0.0a0',
'torchvision',
'apex @ git+https://github.com/NVIDIA/apex',
'pycocotools @ git+https://github.com/nvidia/cocoapi.git#subdirectory=PythonAPI',
'pillow',
'requests',
],
entry_points = {'console_scripts': ['odtk=odtk.main:main']}
)
| retinanet-examples-main | setup.py |
import torch
from ._C import decode as decode_cuda
from ._C import iou as iou_cuda
from ._C import nms as nms_cuda
import numpy as np
from .utils import order_points, rotate_boxes
def generate_anchors(stride, ratio_vals, scales_vals, angles_vals=None):
'Generate anchors coordinates from scales/ratios'
scales = torch.FloatTensor(scales_vals).repeat(len(ratio_vals), 1)
scales = scales.transpose(0, 1).contiguous().view(-1, 1)
ratios = torch.FloatTensor(ratio_vals * len(scales_vals))
wh = torch.FloatTensor([stride]).repeat(len(ratios), 2)
ws = torch.sqrt(wh[:, 0] * wh[:, 1] / ratios)
dwh = torch.stack([ws, ws * ratios], dim=1)
xy1 = 0.5 * (wh - dwh * scales)
xy2 = 0.5 * (wh + dwh * scales)
return torch.cat([xy1, xy2], dim=1)
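def _example_generate_anchors():
    """Hedged usage sketch, not called by the library: with 3 ratios and 3 scales
    (the defaults used in model.py), each feature-map cell gets 9 axis-aligned
    anchors, returned as a (9, 4) tensor of (x1, y1, x2, y2) offsets for the
    given stride."""
    anchors = generate_anchors(stride=8, ratio_vals=[1.0, 2.0, 0.5],
                               scales_vals=[4 * 2 ** (i / 3) for i in range(3)])
    assert anchors.shape == (9, 4)
    return anchors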
def generate_anchors_rotated(stride, ratio_vals, scales_vals, angles_vals):
'Generate anchors coordinates from scales/ratios/angles'
scales = torch.FloatTensor(scales_vals).repeat(len(ratio_vals), 1)
scales = scales.transpose(0, 1).contiguous().view(-1, 1)
ratios = torch.FloatTensor(ratio_vals * len(scales_vals))
wh = torch.FloatTensor([stride]).repeat(len(ratios), 2)
ws = torch.round(torch.sqrt(wh[:, 0] * wh[:, 1] / ratios))
dwh = torch.stack([ws, torch.round(ws * ratios)], dim=1)
xy0 = 0.5 * (wh - dwh * scales)
xy2 = 0.5 * (wh + dwh * scales) - 1
xy1 = xy0 + (xy2 - xy0) * torch.FloatTensor([0,1])
xy3 = xy0 + (xy2 - xy0) * torch.FloatTensor([1,0])
angles = torch.FloatTensor(angles_vals)
theta = angles.repeat(xy0.size(0),1)
theta = theta.transpose(0,1).contiguous().view(-1,1)
xmin_ymin = xy0.repeat(int(theta.size(0)/xy0.size(0)),1)
xmax_ymax = xy2.repeat(int(theta.size(0)/xy2.size(0)),1)
widths_heights = dwh * scales
widths_heights = widths_heights.repeat(int(theta.size(0)/widths_heights.size(0)),1)
u = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1)
l = torch.stack([-torch.sin(angles), torch.cos(angles)], dim=1)
R = torch.stack([u, l], dim=1)
xy0R = torch.matmul(R,xy0.transpose(1,0) - stride/2 + 0.5) + stride/2 - 0.5
xy1R = torch.matmul(R,xy1.transpose(1,0) - stride/2 + 0.5) + stride/2 - 0.5
xy2R = torch.matmul(R,xy2.transpose(1,0) - stride/2 + 0.5) + stride/2 - 0.5
xy3R = torch.matmul(R,xy3.transpose(1,0) - stride/2 + 0.5) + stride/2 - 0.5
xy0R = xy0R.permute(0,2,1).contiguous().view(-1,2)
xy1R = xy1R.permute(0,2,1).contiguous().view(-1,2)
xy2R = xy2R.permute(0,2,1).contiguous().view(-1,2)
xy3R = xy3R.permute(0,2,1).contiguous().view(-1,2)
anchors_axis = torch.cat([xmin_ymin, xmax_ymax], dim=1)
anchors_rotated = order_points(torch.stack([xy0R,xy1R,xy2R,xy3R],dim = 1)).view(-1,8)
return anchors_axis, anchors_rotated
def box2delta(boxes, anchors):
'Convert boxes to deltas from anchors'
anchors_wh = anchors[:, 2:] - anchors[:, :2] + 1
anchors_ctr = anchors[:, :2] + 0.5 * anchors_wh
boxes_wh = boxes[:, 2:] - boxes[:, :2] + 1
boxes_ctr = boxes[:, :2] + 0.5 * boxes_wh
return torch.cat([
(boxes_ctr - anchors_ctr) / anchors_wh,
torch.log(boxes_wh / anchors_wh)
], 1)
def box2delta_rotated(boxes, anchors):
'Convert boxes to deltas from anchors'
anchors_wh = anchors[:, 2:4] - anchors[:, :2] + 1
anchors_ctr = anchors[:, :2] + 0.5 * anchors_wh
boxes_wh = boxes[:, 2:4] - boxes[:, :2] + 1
boxes_ctr = boxes[:, :2] + 0.5 * boxes_wh
boxes_sin = boxes[:, 4]
boxes_cos = boxes[:, 5]
return torch.cat([
(boxes_ctr - anchors_ctr) / anchors_wh,
torch.log(boxes_wh / anchors_wh), boxes_sin[:, None], boxes_cos[:, None]
], 1)
def delta2box(deltas, anchors, size, stride):
'Convert deltas from anchors to boxes'
anchors_wh = anchors[:, 2:] - anchors[:, :2] + 1
ctr = anchors[:, :2] + 0.5 * anchors_wh
pred_ctr = deltas[:, :2] * anchors_wh + ctr
pred_wh = torch.exp(deltas[:, 2:]) * anchors_wh
m = torch.zeros([2], device=deltas.device, dtype=deltas.dtype)
M = (torch.tensor([size], device=deltas.device, dtype=deltas.dtype) * stride - 1)
clamp = lambda t: torch.max(m, torch.min(t, M))
return torch.cat([
clamp(pred_ctr - 0.5 * pred_wh),
clamp(pred_ctr + 0.5 * pred_wh - 1)
], 1)
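def _example_box_delta_roundtrip():
    """Hedged sketch with toy values (not from the library's tests): encoding a
    box against an anchor with box2delta and decoding with delta2box recovers
    the original corners, up to clamping to the feature-map extent."""
    anchors = torch.tensor([[0., 0., 31., 31.]])
    boxes = torch.tensor([[4., 4., 27., 27.]])
    deltas = box2delta(boxes, anchors)
    decoded = delta2box(deltas, anchors, size=[4, 4], stride=8)
    assert torch.allclose(decoded, boxes)
    return decoded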
def delta2box_rotated(deltas, anchors, size, stride):
'Convert deltas from anchors to boxes'
anchors_wh = anchors[:, 2:4] - anchors[:, :2] + 1
ctr = anchors[:, :2] + 0.5 * anchors_wh
pred_ctr = deltas[:, :2] * anchors_wh + ctr
pred_wh = torch.exp(deltas[:, 2:4]) * anchors_wh
pred_sin = deltas[:, 4]
pred_cos = deltas[:, 5]
m = torch.zeros([2], device=deltas.device, dtype=deltas.dtype)
M = (torch.tensor([size], device=deltas.device, dtype=deltas.dtype) * stride - 1)
clamp = lambda t: torch.max(m, torch.min(t, M))
return torch.cat([
clamp(pred_ctr - 0.5 * pred_wh),
clamp(pred_ctr + 0.5 * pred_wh - 1),
torch.atan2(pred_sin, pred_cos)[:, None]
], 1)
def snap_to_anchors(boxes, size, stride, anchors, num_classes, device, anchor_ious):
'Snap target boxes (x, y, w, h) to anchors'
num_anchors = anchors.size()[0] if anchors is not None else 1
width, height = (int(size[0] / stride), int(size[1] / stride))
if boxes.nelement() == 0:
return (torch.zeros([num_anchors, num_classes, height, width], device=device),
torch.zeros([num_anchors, 4, height, width], device=device),
torch.zeros([num_anchors, 1, height, width], device=device))
boxes, classes = boxes.split(4, dim=1)
# Generate anchors
x, y = torch.meshgrid([torch.arange(0, size[i], stride, device=device, dtype=classes.dtype) for i in range(2)])
xyxy = torch.stack((x, y, x, y), 2).unsqueeze(0)
anchors = anchors.view(-1, 1, 1, 4).to(dtype=classes.dtype)
anchors = (xyxy + anchors).contiguous().view(-1, 4)
# Compute overlap between boxes and anchors
boxes = torch.cat([boxes[:, :2], boxes[:, :2] + boxes[:, 2:] - 1], 1)
xy1 = torch.max(anchors[:, None, :2], boxes[:, :2])
xy2 = torch.min(anchors[:, None, 2:], boxes[:, 2:])
inter = torch.prod((xy2 - xy1 + 1).clamp(0), 2)
boxes_area = torch.prod(boxes[:, 2:] - boxes[:, :2] + 1, 1)
anchors_area = torch.prod(anchors[:, 2:] - anchors[:, :2] + 1, 1)
overlap = inter / (anchors_area[:, None] + boxes_area - inter)
# Keep best box per anchor
overlap, indices = overlap.max(1)
box_target = box2delta(boxes[indices], anchors)
box_target = box_target.view(num_anchors, 1, width, height, 4)
box_target = box_target.transpose(1, 4).transpose(2, 3)
box_target = box_target.squeeze().contiguous()
depth = torch.ones_like(overlap) * -1
depth[overlap < anchor_ious[0]] = 0 # background
depth[overlap >= anchor_ious[1]] = classes[indices][overlap >= anchor_ious[1]].squeeze() + 1 # objects
depth = depth.view(num_anchors, width, height).transpose(1, 2).contiguous()
# Generate target classes
cls_target = torch.zeros((anchors.size()[0], num_classes + 1), device=device, dtype=boxes.dtype)
if classes.nelement() == 0:
classes = torch.LongTensor([num_classes], device=device).expand_as(indices)
else:
classes = classes[indices].long()
classes = classes.view(-1, 1)
classes[overlap < anchor_ious[0]] = num_classes # background has no class
cls_target.scatter_(1, classes, 1)
cls_target = cls_target[:, :num_classes].view(-1, 1, width, height, num_classes)
cls_target = cls_target.transpose(1, 4).transpose(2, 3)
cls_target = cls_target.squeeze().contiguous()
return (cls_target.view(num_anchors, num_classes, height, width),
box_target.view(num_anchors, 4, height, width),
depth.view(num_anchors, 1, height, width))
def snap_to_anchors_rotated(boxes, size, stride, anchors, num_classes, device, anchor_ious):
'Snap target boxes (x, y, w, h, a) to anchors'
anchors_axis, anchors_rotated = anchors
num_anchors = anchors_rotated.size()[0] if anchors_rotated is not None else 1
width, height = (int(size[0] / stride), int(size[1] / stride))
if boxes.nelement() == 0:
return (torch.zeros([num_anchors, num_classes, height, width], device=device),
torch.zeros([num_anchors, 6, height, width], device=device),
torch.zeros([num_anchors, 1, height, width], device=device))
boxes, classes = boxes.split(5, dim=1)
boxes_axis, boxes_rotated = rotate_boxes(boxes)
boxes_axis = boxes_axis.to(device)
boxes_rotated = boxes_rotated.to(device)
anchors_axis = anchors_axis.to(device)
anchors_rotated = anchors_rotated.to(device)
# Generate anchors
x, y = torch.meshgrid([torch.arange(0, size[i], stride, device=device, dtype=classes.dtype) for i in range(2)])
xy_2corners = torch.stack((x, y, x, y), 2).unsqueeze(0)
xy_4corners = torch.stack((x, y, x, y, x, y, x, y), 2).unsqueeze(0)
anchors_axis = (xy_2corners.to(torch.float) + anchors_axis.view(-1, 1, 1, 4)).contiguous().view(-1, 4)
anchors_rotated = (xy_4corners.to(torch.float) + anchors_rotated.view(-1, 1, 1, 8)).contiguous().view(-1, 8)
if torch.cuda.is_available():
iou = iou_cuda
overlap = iou(boxes_rotated.contiguous().view(-1), anchors_rotated.contiguous().view(-1))[0]
# Keep best box per anchor
overlap, indices = overlap.max(1)
box_target = box2delta_rotated(boxes_axis[indices], anchors_axis)
box_target = box_target.view(num_anchors, 1, width, height, 6)
box_target = box_target.transpose(1, 4).transpose(2, 3)
box_target = box_target.squeeze().contiguous()
depth = torch.ones_like(overlap, device=device) * -1
depth[overlap < anchor_ious[0]] = 0 # background
depth[overlap >= anchor_ious[1]] = classes[indices][overlap >= anchor_ious[1]].squeeze() + 1 # objects
depth = depth.view(num_anchors, width, height).transpose(1, 2).contiguous()
# Generate target classes
cls_target = torch.zeros((anchors_axis.size()[0], num_classes + 1), device=device, dtype=boxes_axis.dtype)
if classes.nelement() == 0:
classes = torch.LongTensor([num_classes], device=device).expand_as(indices)
else:
classes = classes[indices].long()
classes = classes.view(-1, 1)
classes[overlap < anchor_ious[0]] = num_classes # background has no class
cls_target.scatter_(1, classes, 1)
cls_target = cls_target[:, :num_classes].view(-1, 1, width, height, num_classes)
cls_target = cls_target.transpose(1, 4).transpose(2, 3)
cls_target = cls_target.squeeze().contiguous()
return (cls_target.view(num_anchors, num_classes, height, width),
box_target.view(num_anchors, 6, height, width),
depth.view(num_anchors, 1, height, width))
def decode(all_cls_head, all_box_head, stride=1, threshold=0.05, top_n=1000, anchors=None, rotated=False):
'Box Decoding and Filtering'
if rotated:
anchors = anchors[0]
num_boxes = 4 if not rotated else 6
if torch.cuda.is_available():
return decode_cuda(all_cls_head.float(), all_box_head.float(),
anchors.view(-1).tolist(), stride, threshold, top_n, rotated)
device = all_cls_head.device
anchors = anchors.to(device).type(all_cls_head.type())
num_anchors = anchors.size()[0] if anchors is not None else 1
num_classes = all_cls_head.size()[1] // num_anchors
height, width = all_cls_head.size()[-2:]
batch_size = all_cls_head.size()[0]
out_scores = torch.zeros((batch_size, top_n), device=device)
out_boxes = torch.zeros((batch_size, top_n, num_boxes), device=device)
out_classes = torch.zeros((batch_size, top_n), device=device)
# Per item in batch
for batch in range(batch_size):
cls_head = all_cls_head[batch, :, :, :].contiguous().view(-1)
box_head = all_box_head[batch, :, :, :].contiguous().view(-1, num_boxes)
# Keep scores over threshold
keep = (cls_head >= threshold).nonzero().view(-1)
if keep.nelement() == 0:
continue
# Gather top elements
scores = torch.index_select(cls_head, 0, keep)
scores, indices = torch.topk(scores, min(top_n, keep.size()[0]), dim=0)
indices = torch.index_select(keep, 0, indices).view(-1)
        classes = (indices // width // height) % num_classes
classes = classes.type(all_cls_head.type())
# Infer kept bboxes
x = indices % width
        y = (indices // width) % height
        a = indices // num_classes // height // width
box_head = box_head.view(num_anchors, num_boxes, height, width)
boxes = box_head[a, :, y, x]
if anchors is not None:
grid = torch.stack([x, y, x, y], 1).type(all_cls_head.type()) * stride + anchors[a, :]
boxes = delta2box(boxes, grid, [width, height], stride)
out_scores[batch, :scores.size()[0]] = scores
out_boxes[batch, :boxes.size()[0], :] = boxes
out_classes[batch, :classes.size()[0]] = classes
return out_scores, out_boxes, out_classes
def nms(all_scores, all_boxes, all_classes, nms=0.5, ndetections=100):
'Non Maximum Suppression'
if torch.cuda.is_available():
return nms_cuda(all_scores.float(), all_boxes.float(), all_classes.float(),
nms, ndetections, False)
device = all_scores.device
batch_size = all_scores.size()[0]
out_scores = torch.zeros((batch_size, ndetections), device=device)
out_boxes = torch.zeros((batch_size, ndetections, 4), device=device)
out_classes = torch.zeros((batch_size, ndetections), device=device)
# Per item in batch
for batch in range(batch_size):
# Discard null scores
keep = (all_scores[batch, :].view(-1) > 0).nonzero()
scores = all_scores[batch, keep].view(-1)
boxes = all_boxes[batch, keep, :].view(-1, 4)
classes = all_classes[batch, keep].view(-1)
if scores.nelement() == 0:
continue
# Sort boxes
scores, indices = torch.sort(scores, descending=True)
boxes, classes = boxes[indices], classes[indices]
areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1).view(-1)
keep = torch.ones(scores.nelement(), device=device, dtype=torch.uint8).view(-1)
for i in range(ndetections):
if i >= keep.nonzero().nelement() or i >= scores.nelement():
i -= 1
break
# Find overlapping boxes with lower score
xy1 = torch.max(boxes[:, :2], boxes[i, :2])
xy2 = torch.min(boxes[:, 2:], boxes[i, 2:])
inter = torch.prod((xy2 - xy1 + 1).clamp(0), 1)
criterion = ((scores > scores[i]) |
(inter / (areas + areas[i] - inter) <= nms) |
(classes != classes[i]))
criterion[i] = 1
# Only keep relevant boxes
scores = scores[criterion.nonzero()].view(-1)
boxes = boxes[criterion.nonzero(), :].view(-1, 4)
classes = classes[criterion.nonzero()].view(-1)
areas = areas[criterion.nonzero()].view(-1)
keep[(~criterion).nonzero()] = 0
out_scores[batch, :i + 1] = scores[:i + 1]
out_boxes[batch, :i + 1, :] = boxes[:i + 1, :]
out_classes[batch, :i + 1] = classes[:i + 1]
return out_scores, out_boxes, out_classes
def nms_rotated(all_scores, all_boxes, all_classes, nms=0.5, ndetections=100):
'Non Maximum Suppression'
if torch.cuda.is_available():
return nms_cuda(all_scores.float(), all_boxes.float(), all_classes.float(),
nms, ndetections, True)
device = all_scores.device
batch_size = all_scores.size()[0]
out_scores = torch.zeros((batch_size, ndetections), device=device)
out_boxes = torch.zeros((batch_size, ndetections, 6), device=device)
out_classes = torch.zeros((batch_size, ndetections), device=device)
# Per item in batch
for batch in range(batch_size):
# Discard null scores
keep = (all_scores[batch, :].view(-1) > 0).nonzero()
scores = all_scores[batch, keep].view(-1)
boxes = all_boxes[batch, keep, :].view(-1, 6)
classes = all_classes[batch, keep].view(-1)
theta = torch.atan2(boxes[:, -2], boxes[:, -1])
boxes_theta = torch.cat([boxes[:, :-2], theta[:, None]], dim=1)
if scores.nelement() == 0:
continue
# Sort boxes
scores, indices = torch.sort(scores, descending=True)
boxes, boxes_theta, classes = boxes[indices], boxes_theta[indices], classes[indices]
areas = (boxes_theta[:, 2] - boxes_theta[:, 0] + 1) * (boxes_theta[:, 3] - boxes_theta[:, 1] + 1).view(-1)
keep = torch.ones(scores.nelement(), device=device, dtype=torch.uint8).view(-1)
for i in range(ndetections):
if i >= keep.nonzero().nelement() or i >= scores.nelement():
i -= 1
break
boxes_axis, boxes_rotated = rotate_boxes(boxes_theta, points=True)
            overlap, inter = iou_cuda(boxes_rotated.contiguous().view(-1), boxes_rotated[i, :].contiguous().view(-1))
inter = inter.squeeze()
criterion = ((scores > scores[i]) |
(inter / (areas + areas[i] - inter) <= nms) |
(classes != classes[i]))
criterion[i] = 1
# Only keep relevant boxes
scores = scores[criterion.nonzero()].view(-1)
boxes = boxes[criterion.nonzero(), :].view(-1, 6)
boxes_theta = boxes_theta[criterion.nonzero(), :].view(-1, 5)
classes = classes[criterion.nonzero()].view(-1)
areas = areas[criterion.nonzero()].view(-1)
keep[(~criterion).nonzero()] = 0
out_scores[batch, :i + 1] = scores[:i + 1]
out_boxes[batch, :i + 1, :] = boxes[:i + 1, :]
out_classes[batch, :i + 1] = classes[:i + 1]
return out_scores, out_boxes, out_classes
| retinanet-examples-main | odtk/box.py |
from . import backbones
| retinanet-examples-main | odtk/__init__.py |
import os.path
import io
import numpy as np
import math
import torch
import torch.nn as nn
from . import backbones as backbones_mod
from ._C import Engine
from .box import generate_anchors, snap_to_anchors, decode, nms
from .box import generate_anchors_rotated, snap_to_anchors_rotated, nms_rotated
from .loss import FocalLoss, SmoothL1Loss
class Model(nn.Module):
'RetinaNet - https://arxiv.org/abs/1708.02002'
def __init__(
self,
backbones='ResNet50FPN',
classes=80,
ratios=[1.0, 2.0, 0.5],
scales=[4 * 2 ** (i / 3) for i in range(3)],
angles=None,
rotated_bbox=False,
anchor_ious=[0.4, 0.5],
config={}
):
super().__init__()
if not isinstance(backbones, list):
backbones = [backbones]
self.backbones = nn.ModuleDict({b: getattr(backbones_mod, b)() for b in backbones})
self.name = 'RetinaNet'
self.unused_modules = []
for b in backbones: self.unused_modules.extend(getattr(self.backbones, b).features.unused_modules)
self.exporting = False
self.rotated_bbox = rotated_bbox
self.anchor_ious = anchor_ious
self.ratios = ratios
self.scales = scales
self.angles = angles if angles is not None else \
[-np.pi / 6, 0, np.pi / 6] if self.rotated_bbox else None
self.anchors = {}
self.classes = classes
self.threshold = config.get('threshold', 0.05)
self.top_n = config.get('top_n', 1000)
self.nms = config.get('nms', 0.5)
self.detections = config.get('detections', 100)
self.stride = max([b.stride for _, b in self.backbones.items()])
# classification and box regression heads
def make_head(out_size):
layers = []
for _ in range(4):
layers += [nn.Conv2d(256, 256, 3, padding=1), nn.ReLU()]
layers += [nn.Conv2d(256, out_size, 3, padding=1)]
return nn.Sequential(*layers)
self.num_anchors = len(self.ratios) * len(self.scales)
self.num_anchors = self.num_anchors if not self.rotated_bbox else (self.num_anchors * len(self.angles))
self.cls_head = make_head(classes * self.num_anchors)
self.box_head = make_head(4 * self.num_anchors) if not self.rotated_bbox \
else make_head(6 * self.num_anchors) # theta -> cos(theta), sin(theta)
self.cls_criterion = FocalLoss()
self.box_criterion = SmoothL1Loss(beta=0.11)
def __repr__(self):
return '\n'.join([
' model: {}'.format(self.name),
' backbone: {}'.format(', '.join([k for k, _ in self.backbones.items()])),
' classes: {}, anchors: {}'.format(self.classes, self.num_anchors)
])
def initialize(self, pre_trained):
if pre_trained:
# Initialize using weights from pre-trained model
if not os.path.isfile(pre_trained):
raise ValueError('No checkpoint {}'.format(pre_trained))
print('Fine-tuning weights from {}...'.format(os.path.basename(pre_trained)))
state_dict = self.state_dict()
chk = torch.load(pre_trained, map_location=lambda storage, loc: storage)
ignored = ['cls_head.8.bias', 'cls_head.8.weight']
if self.rotated_bbox:
ignored += ['box_head.8.bias', 'box_head.8.weight']
weights = {k: v for k, v in chk['state_dict'].items() if k not in ignored}
state_dict.update(weights)
self.load_state_dict(state_dict)
del chk, weights
torch.cuda.empty_cache()
else:
# Initialize backbone(s)
for _, backbone in self.backbones.items():
backbone.initialize()
# Initialize heads
def initialize_layer(layer):
if isinstance(layer, nn.Conv2d):
nn.init.normal_(layer.weight, std=0.01)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
self.cls_head.apply(initialize_layer)
self.box_head.apply(initialize_layer)
# Initialize class head prior
def initialize_prior(layer):
pi = 0.01
b = - math.log((1 - pi) / pi)
nn.init.constant_(layer.bias, b)
nn.init.normal_(layer.weight, std=0.01)
self.cls_head[-1].apply(initialize_prior)
if self.rotated_bbox:
self.box_head[-1].apply(initialize_prior)
def forward(self, x, rotated_bbox=None):
if self.training: x, targets = x
# Backbones forward pass
features = []
for _, backbone in self.backbones.items():
features.extend(backbone(x))
# Heads forward pass
cls_heads = [self.cls_head(t) for t in features]
box_heads = [self.box_head(t) for t in features]
if self.training:
return self._compute_loss(x, cls_heads, box_heads, targets.float())
cls_heads = [cls_head.sigmoid() for cls_head in cls_heads]
if self.exporting:
self.strides = [x.shape[-1] // cls_head.shape[-1] for cls_head in cls_heads]
return cls_heads, box_heads
global nms, generate_anchors
if self.rotated_bbox:
nms = nms_rotated
generate_anchors = generate_anchors_rotated
# Inference post-processing
decoded = []
for cls_head, box_head in zip(cls_heads, box_heads):
# Generate level's anchors
stride = x.shape[-1] // cls_head.shape[-1]
if stride not in self.anchors:
self.anchors[stride] = generate_anchors(stride, self.ratios, self.scales, self.angles)
# Decode and filter boxes
decoded.append(decode(cls_head.contiguous(), box_head.contiguous(), stride, self.threshold,
self.top_n, self.anchors[stride], self.rotated_bbox))
# Perform non-maximum suppression
decoded = [torch.cat(tensors, 1) for tensors in zip(*decoded)]
return nms(*decoded, self.nms, self.detections)
def _extract_targets(self, targets, stride, size):
global generate_anchors, snap_to_anchors
if self.rotated_bbox:
generate_anchors = generate_anchors_rotated
snap_to_anchors = snap_to_anchors_rotated
cls_target, box_target, depth = [], [], []
for target in targets:
target = target[target[:, -1] > -1]
if stride not in self.anchors:
self.anchors[stride] = generate_anchors(stride, self.ratios, self.scales, self.angles)
anchors = self.anchors[stride]
if not self.rotated_bbox:
anchors = anchors.to(targets.device)
snapped = snap_to_anchors(target, [s * stride for s in size[::-1]], stride,
anchors, self.classes, targets.device, self.anchor_ious)
for l, s in zip((cls_target, box_target, depth), snapped): l.append(s)
return torch.stack(cls_target), torch.stack(box_target), torch.stack(depth)
def _compute_loss(self, x, cls_heads, box_heads, targets):
cls_losses, box_losses, fg_targets = [], [], []
for cls_head, box_head in zip(cls_heads, box_heads):
size = cls_head.shape[-2:]
stride = x.shape[-1] / cls_head.shape[-1]
cls_target, box_target, depth = self._extract_targets(targets, stride, size)
fg_targets.append((depth > 0).sum().float().clamp(min=1))
cls_head = cls_head.view_as(cls_target).float()
cls_mask = (depth >= 0).expand_as(cls_target).float()
cls_loss = self.cls_criterion(cls_head, cls_target)
cls_loss = cls_mask * cls_loss
cls_losses.append(cls_loss.sum())
box_head = box_head.view_as(box_target).float()
box_mask = (depth > 0).expand_as(box_target).float()
box_loss = self.box_criterion(box_head, box_target)
box_loss = box_mask * box_loss
box_losses.append(box_loss.sum())
fg_targets = torch.stack(fg_targets).sum()
cls_loss = torch.stack(cls_losses).sum() / fg_targets
box_loss = torch.stack(box_losses).sum() / fg_targets
return cls_loss, box_loss
def freeze_unused_params(self):
for n, p in self.named_parameters():
if any(i in n for i in self.unused_modules):
p.requires_grad = False
def save(self, state):
checkpoint = {
'backbone': [k for k, _ in self.backbones.items()],
'classes': self.classes,
'state_dict': self.state_dict(),
'ratios': self.ratios,
'scales': self.scales
}
if self.rotated_bbox and self.angles:
checkpoint['angles'] = self.angles
for key in ('iteration', 'optimizer', 'scheduler'):
if key in state:
checkpoint[key] = state[key]
torch.save(checkpoint, state['path'])
@classmethod
def load(cls, filename, rotated_bbox=False):
if not os.path.isfile(filename):
raise ValueError('No checkpoint {}'.format(filename))
checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)
kwargs = {}
for i in ['ratios', 'scales', 'angles']:
if i in checkpoint:
kwargs[i] = checkpoint[i]
if ('angles' in checkpoint) or rotated_bbox:
kwargs['rotated_bbox'] = True
# Recreate model from checkpoint instead of from individual backbones
model = cls(backbones=checkpoint['backbone'], classes=checkpoint['classes'], **kwargs)
model.load_state_dict(checkpoint['state_dict'])
state = {}
for key in ('iteration', 'optimizer', 'scheduler'):
if key in checkpoint:
state[key] = checkpoint[key]
del checkpoint
torch.cuda.empty_cache()
return model, state
def export(self, size, dynamic_batch_opts, precision, calibration_files, calibration_table, verbose, onnx_only=False):
# import torch.onnx.symbolic_opset11 as onnx_symbolic
# def upsample_nearest2d(g, input, output_size, *args):
# # Currently, TRT 7.1 ONNX Parser does not support all ONNX ops
# # needed to support dynamic upsampling ONNX forumlation
# # Here we hardcode scale=2 as a temporary workaround
# scales = g.op("Constant", value_t=torch.tensor([1., 1., 2., 2.]))
# empty_tensor = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))
# return g.op("Resize", input, empty_tensor, scales, mode_s="nearest", nearest_mode_s="floor")
# onnx_symbolic.upsample_nearest2d = upsample_nearest2d
# Export to ONNX
print('Exporting to ONNX...')
self.exporting = True
onnx_bytes = io.BytesIO()
zero_input = torch.zeros([1, 3, *size]).cuda()
input_names = ['input_1']
output_names = ['score_1', 'score_2', 'score_3', 'score_4', 'score_5',
'box_1', 'box_2', 'box_3', 'box_4', 'box_5']
dynamic_axes = {input_names[0]: {0:'batch'}}
for _, name in enumerate(output_names):
dynamic_axes[name] = dynamic_axes[input_names[0]]
extra_args = {'opset_version': 12, 'verbose': verbose,
'input_names': input_names, 'output_names': output_names,
'dynamic_axes': dynamic_axes}
torch.onnx.export(self.cuda(), zero_input, onnx_bytes, **extra_args)
self.exporting = False
if onnx_only:
return onnx_bytes.getvalue()
# Build TensorRT engine
model_name = '_'.join([k for k, _ in self.backbones.items()])
anchors = []
if not self.rotated_bbox:
anchors = [generate_anchors(stride, self.ratios, self.scales,
self.angles).view(-1).tolist() for stride in self.strides]
else:
anchors = [generate_anchors_rotated(stride, self.ratios, self.scales,
self.angles)[0].view(-1).tolist() for stride in self.strides]
return Engine(onnx_bytes.getvalue(), len(onnx_bytes.getvalue()), dynamic_batch_opts, precision,
self.threshold, self.top_n, anchors, self.rotated_bbox, self.nms, self.detections,
calibration_files, model_name, calibration_table, verbose)
| retinanet-examples-main | odtk/model.py |
from contextlib import redirect_stdout
from math import ceil
import ctypes
import torch
from nvidia.dali import pipeline, ops, types
from pycocotools.coco import COCO
class COCOPipeline(pipeline.Pipeline):
'Dali pipeline for COCO'
def __init__(self, batch_size, num_threads, path, training, annotations, world, device_id, mean, std, resize,
max_size, stride, rotate_augment=False,
augment_brightness=0.0,
augment_contrast=0.0, augment_hue=0.0,
augment_saturation=0.0):
super().__init__(batch_size=batch_size, num_threads=num_threads, device_id=device_id,
prefetch_queue_depth=num_threads, seed=42)
self.path = path
self.training = training
self.stride = stride
self.iter = 0
self.rotate_augment = rotate_augment
self.augment_brightness = augment_brightness
self.augment_contrast = augment_contrast
self.augment_hue = augment_hue
self.augment_saturation = augment_saturation
self.reader = ops.COCOReader(annotations_file=annotations, file_root=path, num_shards=world,
shard_id=torch.cuda.current_device(),
ltrb=True, ratio=True, shuffle_after_epoch=True, save_img_ids=True)
self.decode_train = ops.ImageDecoderSlice(device="mixed", output_type=types.RGB)
self.decode_infer = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.bbox_crop = ops.RandomBBoxCrop(device='cpu', bbox_layout="xyXY", scaling=[0.3, 1.0],
thresholds=[0.1, 0.3, 0.5, 0.7, 0.9])
self.bbox_flip = ops.BbFlip(device='cpu', ltrb=True)
self.img_flip = ops.Flip(device='gpu')
self.coin_flip = ops.CoinFlip(probability=0.5)
self.bc = ops.BrightnessContrast(device='gpu')
self.hsv = ops.Hsv(device='gpu')
# Random number generation for augmentation
self.brightness_dist = ops.NormalDistribution(mean=1.0, stddev=augment_brightness)
self.contrast_dist = ops.NormalDistribution(mean=1.0, stddev=augment_contrast)
self.hue_dist = ops.NormalDistribution(mean=0.0, stddev=augment_hue)
self.saturation_dist = ops.NormalDistribution(mean=1.0, stddev=augment_saturation)
if rotate_augment:
raise RuntimeWarning("--augment-rotate current has no effect when using the DALI data loader.")
if isinstance(resize, list): resize = max(resize)
self.rand_resize = ops.Uniform(range=[resize, float(max_size)])
self.resize_train = ops.Resize(device='gpu', interp_type=types.DALIInterpType.INTERP_CUBIC, save_attrs=True)
self.resize_infer = ops.Resize(device='gpu', interp_type=types.DALIInterpType.INTERP_CUBIC,
resize_longer=max_size, save_attrs=True)
padded_size = max_size + ((self.stride - max_size % self.stride) % self.stride)
self.pad = ops.Paste(device='gpu', fill_value=0, ratio=1.1, min_canvas_size=padded_size, paste_x=0, paste_y=0)
self.normalize = ops.CropMirrorNormalize(device='gpu', mean=mean, std=std, crop=(padded_size, padded_size),
crop_pos_x=0, crop_pos_y=0)
def define_graph(self):
images, bboxes, labels, img_ids = self.reader()
if self.training:
crop_begin, crop_size, bboxes, labels = self.bbox_crop(bboxes, labels)
images = self.decode_train(images, crop_begin, crop_size)
resize = self.rand_resize()
images, attrs = self.resize_train(images, resize_longer=resize)
flip = self.coin_flip()
bboxes = self.bbox_flip(bboxes, horizontal=flip)
images = self.img_flip(images, horizontal=flip)
if self.augment_brightness or self.augment_contrast:
images = self.bc(images, brightness=self.brightness_dist(), contrast=self.contrast_dist())
if self.augment_hue or self.augment_saturation:
images = self.hsv(images, hue=self.hue_dist(), saturation=self.saturation_dist())
else:
images = self.decode_infer(images)
images, attrs = self.resize_infer(images)
resized_images = images
images = self.normalize(self.pad(images))
return images, bboxes, labels, img_ids, attrs, resized_images
class DaliDataIterator():
'Data loader for data parallel using Dali'
def __init__(self, path, resize, max_size, batch_size, stride, world, annotations, training=False,
rotate_augment=False, augment_brightness=0.0,
augment_contrast=0.0, augment_hue=0.0, augment_saturation=0.0):
self.training = training
self.resize = resize
self.max_size = max_size
self.stride = stride
self.batch_size = batch_size // world
self.mean = [255. * x for x in [0.485, 0.456, 0.406]]
self.std = [255. * x for x in [0.229, 0.224, 0.225]]
self.world = world
self.path = path
# Setup COCO
with redirect_stdout(None):
self.coco = COCO(annotations)
self.ids = list(self.coco.imgs.keys())
if 'categories' in self.coco.dataset:
self.categories_inv = {k: i for i, k in enumerate(self.coco.getCatIds())}
self.pipe = COCOPipeline(batch_size=self.batch_size, num_threads=2,
path=path, training=training, annotations=annotations, world=world,
device_id=torch.cuda.current_device(), mean=self.mean, std=self.std, resize=resize,
max_size=max_size, stride=self.stride, rotate_augment=rotate_augment,
augment_brightness=augment_brightness,
augment_contrast=augment_contrast, augment_hue=augment_hue,
augment_saturation=augment_saturation)
self.pipe.build()
def __repr__(self):
return '\n'.join([
' loader: dali',
' resize: {}, max: {}'.format(self.resize, self.max_size),
])
def __len__(self):
return ceil(len(self.ids) // self.world / self.batch_size)
def __iter__(self):
for _ in range(self.__len__()):
data, ratios, ids, num_detections = [], [], [], []
dali_data, dali_boxes, dali_labels, dali_ids, dali_attrs, dali_resize_img = self.pipe.run()
for l in range(len(dali_boxes)):
num_detections.append(dali_boxes.at(l).shape[0])
pyt_targets = -1 * torch.ones([len(dali_boxes), max(max(num_detections), 1), 5])
for batch in range(self.batch_size):
id = int(dali_ids.at(batch)[0])
# Convert dali tensor to pytorch
dali_tensor = dali_data[batch]
tensor_shape = dali_tensor.shape()
datum = torch.zeros(dali_tensor.shape(), dtype=torch.float, device=torch.device('cuda'))
c_type_pointer = ctypes.c_void_p(datum.data_ptr())
dali_tensor.copy_to_external(c_type_pointer)
# Calculate image resize ratio to rescale boxes
prior_size = dali_attrs.as_cpu().at(batch)
resized_size = dali_resize_img[batch].shape()
ratio = max(resized_size) / max(prior_size)
if self.training:
# Rescale boxes
b_arr = dali_boxes.at(batch)
num_dets = b_arr.shape[0]
if num_dets!=0:
pyt_bbox = torch.from_numpy(b_arr).float()
pyt_bbox[:, 0] *= float(prior_size[1])
pyt_bbox[:, 1] *= float(prior_size[0])
pyt_bbox[:, 2] *= float(prior_size[1])
pyt_bbox[:, 3] *= float(prior_size[0])
# (l,t,r,b) -> (x,y,w,h) == (l,r, r-l, b-t)
pyt_bbox[:, 2] -= pyt_bbox[:, 0]
pyt_bbox[:, 3] -= pyt_bbox[:, 1]
pyt_targets[batch, :num_dets, :4] = pyt_bbox * ratio
# Arrange labels in target tensor
l_arr = dali_labels.at(batch)
if num_dets!=0:
pyt_label = torch.from_numpy(l_arr).float()
pyt_label -= 1 # Rescale labels to [0,79] instead of [1,80]
pyt_targets[batch, :num_dets, 4] = pyt_label.squeeze()
ids.append(id)
data.append(datum.unsqueeze(0))
ratios.append(ratio)
data = torch.cat(data, dim=0)
if self.training:
pyt_targets = pyt_targets.cuda(non_blocking=True)
yield data, pyt_targets
else:
ids = torch.Tensor(ids).int().cuda(non_blocking=True)
ratios = torch.Tensor(ratios).cuda(non_blocking=True)
yield data, ids, ratios
| retinanet-examples-main | odtk/dali.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
'Focal Loss - https://arxiv.org/abs/1708.02002'
def __init__(self, alpha=0.25, gamma=2):
super().__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, pred_logits, target):
pred = pred_logits.sigmoid()
ce = F.binary_cross_entropy_with_logits(pred_logits, target, reduction='none')
alpha = target * self.alpha + (1. - target) * (1. - self.alpha)
pt = torch.where(target == 1, pred, 1 - pred)
return alpha * (1. - pt) ** self.gamma * ce
class SmoothL1Loss(nn.Module):
'Smooth L1 Loss'
def __init__(self, beta=0.11):
super().__init__()
self.beta = beta
def forward(self, pred, target):
x = (pred - target).abs()
l1 = x - 0.5 * self.beta
l2 = 0.5 * x ** 2 / self.beta
return torch.where(x >= self.beta, l1, l2)
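def _example_losses():
    """Hedged sketch with toy logits/targets (illustrative only): both criteria
    return elementwise losses with the same shape as their inputs; the caller
    masks and reduces them (see _compute_loss in model.py)."""
    logits = torch.tensor([[2.0, -1.0], [0.5, 0.0]])
    targets = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    cls_loss = FocalLoss()(logits, targets)
    box_loss = SmoothL1Loss()(logits, targets)
    assert cls_loss.shape == logits.shape and box_loss.shape == logits.shape
    return cls_loss, box_loss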
| retinanet-examples-main | odtk/loss.py |
import os.path
import time
import json
import warnings
import signal
from datetime import datetime
from contextlib import contextmanager
from PIL import Image, ImageDraw
import requests
import numpy as np
import math
import torch
def order_points(pts):
pts_reorder = []
for idx, pt in enumerate(pts):
idx = torch.argsort(pt[:, 0])
xSorted = pt[idx, :]
leftMost = xSorted[:2, :]
rightMost = xSorted[2:, :]
leftMost = leftMost[torch.argsort(leftMost[:, 1]), :]
(tl, bl) = leftMost
D = torch.cdist(tl[np.newaxis], rightMost)[0]
(br, tr) = rightMost[torch.argsort(D, descending=True), :]
pts_reorder.append(torch.stack([tl, tr, br, bl]))
return torch.stack([p for p in pts_reorder])
def rotate_boxes(boxes, points=False):
'''
Rotate target bounding boxes
Input:
Target boxes (xmin_ymin, width_height, theta)
Output:
boxes_axis (xmin_ymin, xmax_ymax, theta)
boxes_rotated (xy0, xy1, xy2, xy3)
'''
u = torch.stack([torch.cos(boxes[:,4]), torch.sin(boxes[:,4])], dim=1)
l = torch.stack([-torch.sin(boxes[:,4]), torch.cos(boxes[:,4])], dim=1)
R = torch.stack([u, l], dim=1)
if points:
cents = torch.stack([(boxes[:,0]+boxes[:,2])/2, (boxes[:,1]+boxes[:,3])/2],1).transpose(1,0)
boxes_rotated = torch.stack([boxes[:,0],boxes[:,1],
boxes[:,2], boxes[:,1],
boxes[:,2], boxes[:,3],
boxes[:,0], boxes[:,3],
boxes[:,-2],
boxes[:,-1]],1)
else:
cents = torch.stack([boxes[:,0]+(boxes[:,2])/2, boxes[:,1]+(boxes[:,3])/2],1).transpose(1,0)
boxes_rotated = torch.stack([boxes[:,0],boxes[:,1],
(boxes[:,0]+boxes[:,2]), boxes[:,1],
(boxes[:,0]+boxes[:,2]), (boxes[:,1]+boxes[:,3]),
boxes[:,0], (boxes[:,1]+boxes[:,3]),
boxes[:,-2],
boxes[:,-1]],1)
xy0R = torch.matmul(R,boxes_rotated[:,:2].transpose(1,0) - cents) + cents
xy1R = torch.matmul(R,boxes_rotated[:,2:4].transpose(1,0) - cents) + cents
xy2R = torch.matmul(R,boxes_rotated[:,4:6].transpose(1,0) - cents) + cents
xy3R = torch.matmul(R,boxes_rotated[:,6:8].transpose(1,0) - cents) + cents
xy0R = torch.stack([xy0R[i,:,i] for i in range(xy0R.size(0))])
xy1R = torch.stack([xy1R[i,:,i] for i in range(xy1R.size(0))])
xy2R = torch.stack([xy2R[i,:,i] for i in range(xy2R.size(0))])
xy3R = torch.stack([xy3R[i,:,i] for i in range(xy3R.size(0))])
boxes_axis = torch.cat([boxes[:, :2], boxes[:, :2] + boxes[:, 2:4] - 1,
torch.sin(boxes[:,-1, None]), torch.cos(boxes[:,-1, None])], 1)
boxes_rotated = order_points(torch.stack([xy0R,xy1R,xy2R,xy3R],dim = 1)).view(-1,8)
return boxes_axis, boxes_rotated
def rotate_box(bbox):
xmin, ymin, width, height, theta = bbox
xy1 = xmin, ymin
xy2 = xmin, ymin + height - 1
xy3 = xmin + width - 1, ymin + height - 1
xy4 = xmin + width - 1, ymin
cents = np.array([xmin + (width - 1) / 2, ymin + (height - 1) / 2])
corners = np.stack([xy1, xy2, xy3, xy4])
u = np.stack([np.cos(theta), -np.sin(theta)])
l = np.stack([np.sin(theta), np.cos(theta)])
R = np.vstack([u, l])
corners = np.matmul(R, (corners - cents).transpose(1, 0)).transpose(1, 0) + cents
return corners.reshape(-1).tolist()
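def _example_rotate_box():
    """Hedged sketch with a toy box (illustrative only): for theta = 0 the
    returned 8 values are just the four axis-aligned corners of the
    (xmin, ymin, width, height) box, ordered as in rotate_box above."""
    corners = rotate_box([10.0, 20.0, 4.0, 6.0, 0.0])
    assert len(corners) == 8
    return corners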
def show_detections(detections):
'Show image with drawn detections'
for image, detections in detections.items():
im = Image.open(image).convert('RGBA')
overlay = Image.new('RGBA', im.size, (255, 255, 255, 0))
draw = ImageDraw.Draw(overlay)
detections.sort(key=lambda d: d['score'])
for detection in detections:
box = detection['bbox']
alpha = int(detection['score'] * 255)
draw.rectangle(box, outline=(255, 255, 255, alpha))
draw.text((box[0] + 2, box[1]), '[{}]'.format(detection['class']),
fill=(255, 255, 255, alpha))
draw.text((box[0] + 2, box[1] + 10), '{:.2}'.format(detection['score']),
fill=(255, 255, 255, alpha))
im = Image.alpha_composite(im, overlay)
im.show()
def save_detections(path, detections):
print('Writing detections to {}...'.format(os.path.basename(path)))
with open(path, 'w') as f:
json.dump(detections, f)
@contextmanager
def ignore_sigint():
handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
yield
finally:
signal.signal(signal.SIGINT, handler)
class Profiler(object):
def __init__(self, names=['main']):
self.names = names
self.lasts = {k: 0 for k in names}
self.totals = self.lasts.copy()
self.counts = self.lasts.copy()
self.means = self.lasts.copy()
self.reset()
def reset(self):
last = time.time()
for name in self.names:
self.lasts[name] = last
self.totals[name] = 0
self.counts[name] = 0
self.means[name] = 0
def start(self, name='main'):
self.lasts[name] = time.time()
def stop(self, name='main'):
self.totals[name] += time.time() - self.lasts[name]
self.counts[name] += 1
self.means[name] = self.totals[name] / self.counts[name]
def bump(self, name='main'):
self.stop(name)
self.start(name)
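def _example_profiler():
    """Hedged sketch, not called by the library: Profiler accumulates wall-clock
    time per named section and exposes running means, mirroring how train.py
    wraps its forward and backward passes."""
    prof = Profiler(['fw', 'bw'])
    prof.start('fw')
    prof.stop('fw')
    prof.start('bw')
    prof.stop('bw')
    return prof.means['fw'], prof.means['bw']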
def post_metrics(url, metrics):
try:
for k, v in metrics.items():
requests.post(url,
data={'time': int(datetime.now().timestamp() * 1e9),
'metric': k, 'value': v})
except Exception as e:
warnings.warn('Warning: posting metrics failed: {}'.format(e))
| retinanet-examples-main | odtk/utils.py |
from statistics import mean
from math import isfinite
import torch
from torch.optim import SGD, AdamW
from torch.optim.lr_scheduler import LambdaLR
from apex import amp, optimizers
from apex.parallel import DistributedDataParallel as ADDP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import GradScaler, autocast
from .backbones.layers import convert_fixedbn_model
from .data import DataIterator, RotatedDataIterator
from .dali import DaliDataIterator
from .utils import ignore_sigint, post_metrics, Profiler
from .infer import infer
def train(model, state, path, annotations, val_path, val_annotations, resize, max_size, jitter, batch_size, iterations,
val_iterations, lr, warmup, milestones, gamma, rank=0, world=1, mixed_precision=True, with_apex=False,
use_dali=True, verbose=True, metrics_url=None, logdir=None, rotate_augment=False, augment_brightness=0.0,
augment_contrast=0.0, augment_hue=0.0, augment_saturation=0.0, regularization_l2=0.0001, rotated_bbox=False,
absolute_angle=False):
'Train the model on the given dataset'
# Prepare model
nn_model = model
stride = model.stride
model = convert_fixedbn_model(model)
if torch.cuda.is_available():
model = model.to(memory_format=torch.channels_last).cuda()
# Setup optimizer and schedule
optimizer = SGD(model.parameters(), lr=lr, weight_decay=regularization_l2, momentum=0.9)
is_master = rank==0
if with_apex:
loss_scale = "dynamic" if use_dali else "128.0"
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2' if mixed_precision else 'O0',
keep_batchnorm_fp32=True,
loss_scale=loss_scale,
verbosity=is_master)
if world > 1:
model = DDP(model, device_ids=[rank]) if not with_apex else ADDP(model)
model.train()
if 'optimizer' in state:
optimizer.load_state_dict(state['optimizer'])
def schedule(train_iter):
if warmup and train_iter <= warmup:
return 0.9 * train_iter / warmup + 0.1
return gamma ** len([m for m in milestones if m <= train_iter])
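# Worked example with the CLI defaults (warmup=1000, milestones=[60000, 80000], gamma=0.1):
#   schedule(500)   -> 0.9 * 500 / 1000 + 0.1 = 0.55  (linear warmup from 0.1x to 1.0x lr)
#   schedule(30000) -> 0.1 ** 0 = 1.0                 (no milestone reached yet)
#   schedule(70000) -> 0.1 ** 1 = 0.1                 (first milestone passed)
#   schedule(85000) -> 0.1 ** 2 = 0.01                (both milestones passed)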
scheduler = LambdaLR(optimizer, schedule)
if 'scheduler' in state:
scheduler.load_state_dict(state['scheduler'])
# Prepare dataset
if verbose: print('Preparing dataset...')
if rotated_bbox:
if use_dali: raise NotImplementedError("This repo does not currently support DALI for rotated bbox detections.")
data_iterator = RotatedDataIterator(path, jitter, max_size, batch_size, stride,
world, annotations, training=True, rotate_augment=rotate_augment,
augment_brightness=augment_brightness,
augment_contrast=augment_contrast, augment_hue=augment_hue,
augment_saturation=augment_saturation, absolute_angle=absolute_angle)
else:
data_iterator = (DaliDataIterator if use_dali else DataIterator)(
path, jitter, max_size, batch_size, stride,
world, annotations, training=True, rotate_augment=rotate_augment, augment_brightness=augment_brightness,
augment_contrast=augment_contrast, augment_hue=augment_hue, augment_saturation=augment_saturation)
if verbose: print(data_iterator)
if verbose:
print(' device: {} {}'.format(
world, 'cpu' if not torch.cuda.is_available() else 'GPU' if world == 1 else 'GPUs'))
print(' batch: {}, precision: {}'.format(batch_size, 'mixed' if mixed_precision else 'full'))
print(' BBOX type:', 'rotated' if rotated_bbox else 'axis aligned')
print('Training model for {} iterations...'.format(iterations))
# Create TensorBoard writer
if is_master and logdir is not None:
from torch.utils.tensorboard import SummaryWriter
if verbose:
print('Writing TensorBoard logs to: {}'.format(logdir))
writer = SummaryWriter(log_dir=logdir)
scaler = GradScaler(enabled=mixed_precision)
profiler = Profiler(['train', 'fw', 'bw'])
iteration = state.get('iteration', 0)
while iteration < iterations:
cls_losses, box_losses = [], []
for i, (data, target) in enumerate(data_iterator):
if iteration>=iterations:
break
# Forward pass
profiler.start('fw')
optimizer.zero_grad()
if with_apex:
cls_loss, box_loss = model([data.contiguous(memory_format=torch.channels_last), target])
else:
with autocast(enabled=mixed_precision):
cls_loss, box_loss = model([data.contiguous(memory_format=torch.channels_last), target])
del data
profiler.stop('fw')
# Backward pass
profiler.start('bw')
if with_apex:
with amp.scale_loss(cls_loss + box_loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
else:
scaler.scale(cls_loss + box_loss).backward()
scaler.step(optimizer)
scaler.update()
scheduler.step()
# Reduce all losses
cls_loss, box_loss = cls_loss.mean().clone(), box_loss.mean().clone()
if world > 1:
torch.distributed.all_reduce(cls_loss)
torch.distributed.all_reduce(box_loss)
cls_loss /= world
box_loss /= world
if is_master:
cls_losses.append(cls_loss)
box_losses.append(box_loss)
if is_master and not isfinite(cls_loss + box_loss):
raise RuntimeError('Loss is diverging!\n{}'.format(
'Try lowering the learning rate.'))
del cls_loss, box_loss
profiler.stop('bw')
iteration += 1
profiler.bump('train')
if is_master and (profiler.totals['train'] > 60 or iteration == iterations):
focal_loss = torch.stack(list(cls_losses)).mean().item()
box_loss = torch.stack(list(box_losses)).mean().item()
learning_rate = optimizer.param_groups[0]['lr']
if verbose:
msg = '[{:{len}}/{}]'.format(iteration, iterations, len=len(str(iterations)))
msg += ' focal loss: {:.3f}'.format(focal_loss)
msg += ', box loss: {:.3f}'.format(box_loss)
msg += ', {:.3f}s/{}-batch'.format(profiler.means['train'], batch_size)
msg += ' (fw: {:.3f}s, bw: {:.3f}s)'.format(profiler.means['fw'], profiler.means['bw'])
msg += ', {:.1f} im/s'.format(batch_size / profiler.means['train'])
msg += ', lr: {:.2g}'.format(learning_rate)
print(msg, flush=True)
if is_master and logdir is not None:
writer.add_scalar('focal_loss', focal_loss, iteration)
writer.add_scalar('box_loss', box_loss, iteration)
writer.add_scalar('learning_rate', learning_rate, iteration)
del box_loss, focal_loss
if metrics_url:
post_metrics(metrics_url, {
'focal loss': torch.stack(list(cls_losses)).mean().item(),
'box loss': torch.stack(list(box_losses)).mean().item(),
'im_s': batch_size / profiler.means['train'],
'lr': learning_rate
})
# Save model weights
state.update({
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
})
with ignore_sigint():
nn_model.save(state)
profiler.reset()
del cls_losses[:], box_losses[:]
if val_annotations and (iteration == iterations or iteration % val_iterations == 0):
stats = infer(model, val_path, None, resize, max_size, batch_size, annotations=val_annotations,
mixed_precision=mixed_precision, is_master=is_master, world=world, use_dali=use_dali,
with_apex=with_apex, is_validation=True, verbose=False, rotated_bbox=rotated_bbox)
model.train()
if is_master and logdir is not None and stats is not None:
writer.add_scalar(
'Validation_Precision/mAP', stats[0], iteration)
writer.add_scalar(
'Validation_Precision/mAP@0.5', stats[1], iteration)
writer.add_scalar(
'Validation_Precision/mAP@0.75', stats[2], iteration)
writer.add_scalar(
'Validation_Precision/mAP (small)', stats[3], iteration)
writer.add_scalar(
'Validation_Precision/mAP (medium)', stats[4], iteration)
writer.add_scalar(
'Validation_Precision/mAP (large)', stats[5], iteration)
writer.add_scalar(
'Validation_Recall/mAR (max 1 Dets)', stats[6], iteration)
writer.add_scalar(
'Validation_Recall/mAR (max 10 Dets)', stats[7], iteration)
writer.add_scalar(
'Validation_Recall/mAR (max 100 Dets)', stats[8], iteration)
writer.add_scalar(
'Validation_Recall/mAR (small)', stats[9], iteration)
writer.add_scalar(
'Validation_Recall/mAR (medium)', stats[10], iteration)
writer.add_scalar(
'Validation_Recall/mAR (large)', stats[11], iteration)
if (iteration==iterations and not rotated_bbox) or (iteration>iterations and rotated_bbox):
break
if is_master and logdir is not None:
writer.close()
| retinanet-examples-main | odtk/train.py |
import os
import json
import tempfile
from contextlib import redirect_stdout
import torch
from apex import amp
from apex.parallel import DistributedDataParallel as ADDP
from torch.nn.parallel import DistributedDataParallel
from pycocotools.cocoeval import COCOeval
import numpy as np
from .data import DataIterator, RotatedDataIterator
from .dali import DaliDataIterator
from .model import Model
from .utils import Profiler, rotate_box
def infer(model, path, detections_file, resize, max_size, batch_size, mixed_precision=True, is_master=True, world=0,
annotations=None, with_apex=False, use_dali=True, is_validation=False, verbose=True, rotated_bbox=False):
'Run inference on images from path'
DDP = DistributedDataParallel if not with_apex else ADDP
backend = 'pytorch' if isinstance(model, Model) or isinstance(model, DDP) else 'tensorrt'
stride = model.module.stride if isinstance(model, DDP) else model.stride
# Create annotations if none was provided
if not annotations:
annotations = tempfile.mktemp('.json')
images = [{'id': i, 'file_name': f} for i, f in enumerate(os.listdir(path))]
json.dump({'images': images}, open(annotations, 'w'))
# TensorRT only supports fixed input sizes, so override input size accordingly
if backend == 'tensorrt': max_size = max(model.input_size)
# Prepare dataset
if verbose: print('Preparing dataset...')
if rotated_bbox:
if use_dali: raise NotImplementedError("This repo does not currently support DALI for rotated bbox.")
data_iterator = RotatedDataIterator(path, resize, max_size, batch_size, stride,
world, annotations, training=False)
else:
data_iterator = (DaliDataIterator if use_dali else DataIterator)(
path, resize, max_size, batch_size, stride,
world, annotations, training=False)
if verbose: print(data_iterator)
# Prepare model
if backend == 'pytorch':
# If we are doing validation during training,
# no need to register model with AMP again
if not is_validation:
if torch.cuda.is_available(): model = model.to(memory_format=torch.channels_last).cuda()
if with_apex:
model = amp.initialize(model, None,
opt_level='O2' if mixed_precision else 'O0',
keep_batchnorm_fp32=True,
verbosity=0)
model.eval()
if verbose:
print(' backend: {}'.format(backend))
print(' device: {} {}'.format(
world, 'cpu' if not torch.cuda.is_available() else 'GPU' if world == 1 else 'GPUs'))
print(' batch: {}, precision: {}'.format(batch_size,
'unknown' if backend == 'tensorrt' else 'mixed' if mixed_precision else 'full'))
print(' BBOX type:', 'rotated' if rotated_bbox else 'axis aligned')
print('Running inference...')
results = []
profiler = Profiler(['infer', 'fw'])
with torch.no_grad():
for i, (data, ids, ratios) in enumerate(data_iterator):
# Forward pass
if backend=='pytorch': data = data.contiguous(memory_format=torch.channels_last)
profiler.start('fw')
scores, boxes, classes = model(data, rotated_bbox) #Need to add model size (B, 3, W, H)
profiler.stop('fw')
results.append([scores, boxes, classes, ids, ratios])
profiler.bump('infer')
if verbose and (profiler.totals['infer'] > 60 or i == len(data_iterator) - 1):
size = len(data_iterator.ids)
msg = '[{:{len}}/{}]'.format(min((i + 1) * batch_size,
size), size, len=len(str(size)))
msg += ' {:.3f}s/{}-batch'.format(profiler.means['infer'], batch_size)
msg += ' (fw: {:.3f}s)'.format(profiler.means['fw'])
msg += ', {:.1f} im/s'.format(batch_size / profiler.means['infer'])
print(msg, flush=True)
profiler.reset()
# Gather results from all devices
if verbose: print('Gathering results...')
results = [torch.cat(r, dim=0) for r in zip(*results)]
if world > 1:
for r, result in enumerate(results):
all_result = [torch.ones_like(result, device=result.device) for _ in range(world)]
torch.distributed.all_gather(list(all_result), result)
results[r] = torch.cat(all_result, dim=0)
if is_master:
# Copy buffers back to host
results = [r.cpu() for r in results]
# Collect detections
detections = []
processed_ids = set()
for scores, boxes, classes, image_id, ratios in zip(*results):
image_id = image_id.item()
if image_id in processed_ids:
continue
processed_ids.add(image_id)
keep = (scores > 0).nonzero(as_tuple=False)
scores = scores[keep].view(-1)
if rotated_bbox:
boxes = boxes[keep, :].view(-1, 6)
boxes[:, :4] /= ratios
else:
boxes = boxes[keep, :].view(-1, 4) / ratios
classes = classes[keep].view(-1).int()
for score, box, cat in zip(scores, boxes, classes):
if rotated_bbox:
x1, y1, x2, y2, sin, cos = box.data.tolist()
theta = np.arctan2(sin, cos)
w = x2 - x1 + 1
h = y2 - y1 + 1
seg = rotate_box([x1, y1, w, h, theta])
else:
x1, y1, x2, y2 = box.data.tolist()
cat = cat.item()
if 'annotations' in data_iterator.coco.dataset:
cat = data_iterator.coco.getCatIds()[cat]
this_det = {
'image_id': image_id,
'score': score.item(),
'category_id': cat}
if rotated_bbox:
this_det['bbox'] = [x1, y1, x2 - x1 + 1, y2 - y1 + 1, theta]
this_det['segmentation'] = [seg]
else:
this_det['bbox'] = [x1, y1, x2 - x1 + 1, y2 - y1 + 1]
detections.append(this_det)
if detections:
# Save detections
if detections_file and verbose: print('Writing {}...'.format(detections_file))
detections = {'annotations': detections}
detections['images'] = data_iterator.coco.dataset['images']
if 'categories' in data_iterator.coco.dataset:
detections['categories'] = data_iterator.coco.dataset['categories']
if detections_file:
for d_file in detections_file:
json.dump(detections, open(d_file, 'w'), indent=4)
# Evaluate model on dataset
if 'annotations' in data_iterator.coco.dataset:
if verbose: print('Evaluating model...')
with redirect_stdout(None):
coco_pred = data_iterator.coco.loadRes(detections['annotations'])
if rotated_bbox:
coco_eval = COCOeval(data_iterator.coco, coco_pred, 'segm')
else:
coco_eval = COCOeval(data_iterator.coco, coco_pred, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats # mAP and mAR
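# coco_eval.stats is the standard 12-element COCO summary: indices 0-5 are mAP, mAP@0.5,
# mAP@0.75 and mAP for small/medium/large objects; indices 6-11 are mAR at 1/10/100
# detections and for small/medium/large objects. train.train() logs them to TensorBoard
# as stats[0] .. stats[11].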
else:
print('No detections!')
return None
return 0
| retinanet-examples-main | odtk/infer.py |
#!/usr/bin/env python3
import sys
import os
import argparse
import random
import torch.cuda
import torch.distributed
import torch.multiprocessing
from odtk import infer, train, utils
from odtk.model import Model
from odtk._C import Engine
def parse(args):
parser = argparse.ArgumentParser(description='ODTK: Object Detection Toolkit.')
parser.add_argument('--master', metavar='address:port', type=str, help='Address and port of the master worker',
default='127.0.0.1:29500')
subparsers = parser.add_subparsers(help='sub-command', dest='command')
subparsers.required = True
devcount = max(1, torch.cuda.device_count())
parser_train = subparsers.add_parser('train', help='train a network')
parser_train.add_argument('model', type=str, help='path to output model or checkpoint to resume from')
parser_train.add_argument('--annotations', metavar='path', type=str, help='path to COCO style annotations',
required=True)
parser_train.add_argument('--images', metavar='path', type=str, help='path to images', default='.')
parser_train.add_argument('--backbone', action='store', type=str, nargs='+', help='backbone model (or list of)',
default=['ResNet50FPN'])
parser_train.add_argument('--classes', metavar='num', type=int, help='number of classes', default=80)
parser_train.add_argument('--batch', metavar='size', type=int, help='batch size', default=2 * devcount)
parser_train.add_argument('--resize', metavar='scale', type=int, help='resize to given size', default=800)
parser_train.add_argument('--max-size', metavar='max', type=int, help='maximum resizing size', default=1333)
parser_train.add_argument('--jitter', metavar='min max', type=int, nargs=2, help='jitter size within range',
default=[640, 1024])
parser_train.add_argument('--iters', metavar='number', type=int, help='number of iterations to train for',
default=90000)
parser_train.add_argument('--milestones', action='store', type=int, nargs='*',
help='list of iteration indices where learning rate decays', default=[60000, 80000])
parser_train.add_argument('--schedule', metavar='scale', type=float,
help='scale schedule (affecting iters and milestones)', default=1)
parser_train.add_argument('--full-precision', help='train in full precision', action='store_true')
parser_train.add_argument('--lr', metavar='value', help='learning rate', type=float, default=0.01)
parser_train.add_argument('--warmup', metavar='iterations', help='number of warmup iterations', type=int,
default=1000)
parser_train.add_argument('--gamma', metavar='value', type=float,
help='multiplicative factor of learning rate decay', default=0.1)
parser_train.add_argument('--override', help='override model', action='store_true')
parser_train.add_argument('--val-annotations', metavar='path', type=str,
help='path to COCO style validation annotations')
parser_train.add_argument('--val-images', metavar='path', type=str, help='path to validation images')
parser_train.add_argument('--post-metrics', metavar='url', type=str, help='post metrics to specified url')
parser_train.add_argument('--fine-tune', metavar='path', type=str, help='fine tune a pretrained model')
parser_train.add_argument('--logdir', metavar='logdir', type=str, help='directory where to write logs')
parser_train.add_argument('--val-iters', metavar='number', type=int,
help='number of iterations between each validation', default=8000)
parser_train.add_argument('--with-apex', help='use NVIDIA APEX AMP and DDP', action='store_true')
parser_train.add_argument('--with-dali', help='use dali for data loading', action='store_true')
parser_train.add_argument('--augment-rotate', help='use four-fold rotational augmentation', action='store_true')
parser_train.add_argument('--augment-free-rotate', type=float, metavar='value value', nargs=2, default=[0, 0],
help='rotate images by an arbitrary angle, between min and max (in degrees)')
parser_train.add_argument('--augment-brightness', metavar='value', type=float,
help='adjust the brightness of the image.', default=0.002)
parser_train.add_argument('--augment-contrast', metavar='value', type=float,
help='adjust the contrast of the image.', default=0.002)
parser_train.add_argument('--augment-hue', metavar='value', type=float,
help='adjust the hue of the image.', default=0.0002)
parser_train.add_argument('--augment-saturation', metavar='value', type=float,
help='adjust the saturation of the image.', default=0.002)
parser_train.add_argument('--regularization-l2', metavar='value', type=float, help='L2 regularization for optim',
default=0.0001)
parser_train.add_argument('--rotated-bbox', help='detect rotated bounding boxes [x, y, w, h, theta]',
action='store_true')
parser_train.add_argument('--anchor-ious', metavar='value value', type=float, nargs=2,
help='anchor/bbox overlap threshold', default=[0.4, 0.5])
parser_train.add_argument('--absolute-angle', help='regress absolute angle (rather than -45 to 45 degrees)',
action='store_true')
parser_infer = subparsers.add_parser('infer', help='run inference')
parser_infer.add_argument('model', type=str, help='path to model')
parser_infer.add_argument('--images', metavar='path', type=str, help='path to images', default='.')
parser_infer.add_argument('--annotations', metavar='annotations', type=str,
help='evaluate using provided annotations')
parser_infer.add_argument('--output', metavar='file', type=str, nargs='+',
help='save detections to specified JSON file(s)', default=['detections.json'])
parser_infer.add_argument('--batch', metavar='size', type=int, help='batch size', default=2 * devcount)
parser_infer.add_argument('--resize', metavar='scale', type=int, help='resize to given size', default=800)
parser_infer.add_argument('--max-size', metavar='max', type=int, help='maximum resizing size', default=1333)
parser_infer.add_argument('--with-apex', help='use NVIDIA APEX AMP and DDP', action='store_true')
parser_infer.add_argument('--with-dali', help='use dali for data loading', action='store_true')
parser_infer.add_argument('--full-precision', help='inference in full precision', action='store_true')
parser_infer.add_argument('--rotated-bbox', help='inference using a rotated bounding box model',
action='store_true')
parser_export = subparsers.add_parser('export', help='export a model into a TensorRT engine')
parser_export.add_argument('model', type=str, help='path to model')
parser_export.add_argument('export', type=str, help='path to exported output')
parser_export.add_argument('--size', metavar='height width', type=int, nargs='+',
help='input size (square) or sizes (h w) to use when generating TensorRT engine',
default=[1280])
parser_export.add_argument('--full-precision', help='export in full instead of half precision', action='store_true')
parser_export.add_argument('--int8', help='calibrate model and export in int8 precision', action='store_true')
parser_export.add_argument('--calibration-batches', metavar='size', type=int,
help='number of batches to use for int8 calibration', default=2)
parser_export.add_argument('--calibration-images', metavar='path', type=str,
help='path to calibration images to use for int8 calibration', default="")
parser_export.add_argument('--calibration-table', metavar='path', type=str,
help='path of existing calibration table to load from, or name of new calibration table',
default="")
parser_export.add_argument('--verbose', help='enable verbose logging', action='store_true')
parser_export.add_argument('--rotated-bbox', help='inference using a rotated bounding box model',
action='store_true')
parser_export.add_argument('--dynamic-batch-opts', help='Profile batch sizes for tensorrt engine export (min, opt, max)',
metavar='value value value', type=int, nargs=3, default=[1,8,16])
return parser.parse_args(args)
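# Example invocations (illustrative; the paths are placeholders, flags mirror the parsers above):
#   odtk train model.pth --images /data/coco/train2017 --annotations instances_train2017.json \
#       --backbone ResNet50FPN --classes 80
#   odtk infer model.pth --images /data/coco/val2017 --annotations instances_val2017.json --output detections.json
#   odtk export model.pth engine.plan --size 800 1280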
def load_model(args, verbose=False):
if args.command != 'train' and not os.path.isfile(args.model):
raise RuntimeError('Model file {} does not exist!'.format(args.model))
model = None
state = {}
_, ext = os.path.splitext(args.model)
if args.command == 'train' and (not os.path.exists(args.model) or args.override):
if verbose: print('Initializing model...')
model = Model(backbones=args.backbone, classes=args.classes, rotated_bbox=args.rotated_bbox,
anchor_ious=args.anchor_ious)
model.initialize(args.fine_tune)
# Freeze unused params from training
model.freeze_unused_params()
if verbose: print(model)
elif ext == '.pth' or ext == '.torch':
if verbose: print('Loading model from {}...'.format(os.path.basename(args.model)))
model, state = Model.load(filename=args.model, rotated_bbox=args.rotated_bbox)
# Freeze unused params from training
model.freeze_unused_params()
if verbose: print(model)
elif args.command == 'infer' and ext in ['.engine', '.plan']:
model = None
else:
raise RuntimeError('Invalid model format "{}"!'.format(ext))
state['path'] = args.model
return model, state
def worker(rank, args, world, model, state):
'Per-device distributed worker'
if torch.cuda.is_available():
os.environ.update({
'MASTER_PORT': args.master.split(':')[-1],
'MASTER_ADDR': ':'.join(args.master.split(':')[:-1]),
'WORLD_SIZE': str(world),
'RANK': str(rank),
'CUDA_DEVICE': str(rank)
})
torch.cuda.set_device(rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
if (args.command != 'export') and (args.batch % world != 0):
raise RuntimeError('Batch size should be a multiple of the number of GPUs')
if model and model.angles is not None:
args.rotated_bbox = True
if args.command == 'train':
train.train(model, state, args.images, args.annotations,
args.val_images or args.images, args.val_annotations, args.resize, args.max_size, args.jitter,
args.batch, int(args.iters * args.schedule), args.val_iters, args.lr, args.warmup,
[int(m * args.schedule) for m in args.milestones], args.gamma, rank, world=world,
mixed_precision=not args.full_precision, with_apex=args.with_apex, use_dali=args.with_dali,
metrics_url=args.post_metrics, logdir=args.logdir, verbose=(rank == 0),
rotate_augment=args.augment_rotate, augment_brightness=args.augment_brightness,
augment_contrast=args.augment_contrast, augment_hue=args.augment_hue, augment_saturation=args.augment_saturation,
regularization_l2=args.regularization_l2, rotated_bbox=args.rotated_bbox, absolute_angle=args.absolute_angle)
elif args.command == 'infer':
if model is None:
if rank == 0: print('Loading CUDA engine from {}...'.format(os.path.basename(args.model)))
model = Engine.load(args.model)
infer.infer(model, args.images, args.output, args.resize, args.max_size, args.batch,
annotations=args.annotations, mixed_precision=not args.full_precision,
is_master=(rank == 0), world=world, with_apex=args.with_apex, use_dali=args.with_dali,
verbose=(rank == 0), rotated_bbox=args.rotated_bbox)
elif args.command == 'export':
onnx_only = args.export.split('.')[-1] == 'onnx'
input_size = args.size * 2 if len(args.size) == 1 else args.size
calibration_files = []
if args.int8:
# Get list of images to use for calibration
if os.path.isdir(args.calibration_images):
import glob
file_extensions = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG']
for ex in file_extensions:
calibration_files += glob.glob("{}/*{}".format(args.calibration_images, ex), recursive=True)
# Only need enough images for specified num of calibration batches
if len(calibration_files) >= args.calibration_batches * args.dynamic_batch_opts[1]:
calibration_files = calibration_files[:(args.calibration_batches * args.dynamic_batch_opts[1])]
else:
# Number of images for calibration must be greater than or equal to the kOPT optimization profile
if len(calibration_files) >= args.dynamic_batch_opts[1]:
print('Only found enough images for {} batches. Continuing anyway...'.format(
len(calibration_files) // args.dynamic_batch_opts[1]))
else:
raise RuntimeError('Not enough images found for calibration. ({} < {})'
.format(len(calibration_files), args.dynamic_batch_opts[1]))
random.shuffle(calibration_files)
precision = "FP32"
if args.int8:
precision = "INT8"
elif not args.full_precision:
precision = "FP16"
exported = model.export(input_size, args.dynamic_batch_opts, precision, calibration_files,
args.calibration_table, args.verbose, onnx_only=onnx_only)
if onnx_only:
with open(args.export, 'wb') as out:
out.write(exported)
else:
exported.save(args.export)
def main(args=None):
'Entry point for the odtk command'
args = parse(args or sys.argv[1:])
model, state = load_model(args, verbose=True)
if model: model.share_memory()
world = torch.cuda.device_count()
if args.command == 'export' or world <= 1:
worker(0, args, 1, model, state)
else:
torch.multiprocessing.spawn(worker, args=(args, world, model, state), nprocs=world)
if __name__ == '__main__':
main()
| retinanet-examples-main | odtk/main.py |
import os
import random
from contextlib import redirect_stdout
from PIL import Image
import torch
import torch.nn.functional as F
from torch.utils import data
from pycocotools.coco import COCO
import math
from torchvision.transforms.functional import adjust_brightness, adjust_contrast, adjust_hue, adjust_saturation
class CocoDataset(data.dataset.Dataset):
'Dataset looping through a set of images'
def __init__(self, path, resize, max_size, stride, annotations=None, training=False, rotate_augment=False,
augment_brightness=0.0, augment_contrast=0.0,
augment_hue=0.0, augment_saturation=0.0):
super().__init__()
self.path = os.path.expanduser(path)
self.resize = resize
self.max_size = max_size
self.stride = stride
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.training = training
self.rotate_augment = rotate_augment
self.augment_brightness = augment_brightness
self.augment_contrast = augment_contrast
self.augment_hue = augment_hue
self.augment_saturation = augment_saturation
with redirect_stdout(None):
self.coco = COCO(annotations)
self.ids = list(self.coco.imgs.keys())
if 'categories' in self.coco.dataset:
self.categories_inv = {k: i for i, k in enumerate(self.coco.getCatIds())}
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
' Get sample'
# Load image
id = self.ids[index]
if self.coco:
image = self.coco.loadImgs(id)[0]['file_name']
im = Image.open('{}/{}'.format(self.path, image)).convert("RGB")
# Randomly sample scale for resize during training
resize = self.resize
if isinstance(resize, list):
resize = random.randint(self.resize[0], self.resize[-1])
ratio = resize / min(im.size)
if ratio * max(im.size) > self.max_size:
ratio = self.max_size / max(im.size)
im = im.resize((int(ratio * d) for d in im.size), Image.BILINEAR)
if self.training:
# Get annotations
boxes, categories = self._get_target(id)
boxes *= ratio
# Random rotation, if self.rotate_augment
random_angle = random.randint(0, 3) * 90
if self.rotate_augment and random_angle != 0:
# rotate by random_angle degrees.
im = im.rotate(random_angle)
x, y, w, h = boxes[:, 0].clone(), boxes[:, 1].clone(), boxes[:, 2].clone(), boxes[:, 3].clone()
if random_angle == 90:
boxes[:, 0] = y - im.size[1] / 2 + im.size[0] / 2
boxes[:, 1] = im.size[0] / 2 + im.size[1] / 2 - x - w
boxes[:, 2] = h
boxes[:, 3] = w
elif random_angle == 180:
boxes[:, 0] = im.size[0] - x - w
boxes[:, 1] = im.size[1] - y - h
elif random_angle == 270:
boxes[:, 0] = im.size[0] / 2 + im.size[1] / 2 - y - h
boxes[:, 1] = x - im.size[0] / 2 + im.size[1] / 2
boxes[:, 2] = h
boxes[:, 3] = w
# Random horizontal flip
if random.randint(0, 1):
im = im.transpose(Image.FLIP_LEFT_RIGHT)
boxes[:, 0] = im.size[0] - boxes[:, 0] - boxes[:, 2]
# Apply image brightness, contrast etc augmentation
if self.augment_brightness:
brightness_factor = random.normalvariate(1, self.augment_brightness)
brightness_factor = max(0, brightness_factor)
im = adjust_brightness(im, brightness_factor)
if self.augment_contrast:
contrast_factor = random.normalvariate(1, self.augment_contrast)
contrast_factor = max(0, contrast_factor)
im = adjust_contrast(im, contrast_factor)
if self.augment_hue:
hue_factor = random.normalvariate(0, self.augment_hue)
hue_factor = max(-0.5, hue_factor)
hue_factor = min(0.5, hue_factor)
im = adjust_hue(im, hue_factor)
if self.augment_saturation:
saturation_factor = random.normalvariate(1, self.augment_saturation)
saturation_factor = max(0, saturation_factor)
im = adjust_saturation(im, saturation_factor)
target = torch.cat([boxes, categories], dim=1)
# Convert to tensor and normalize
data = torch.ByteTensor(torch.ByteStorage.from_buffer(im.tobytes()))
data = data.float().div(255).view(*im.size[::-1], len(im.mode))
data = data.permute(2, 0, 1)
for t, mean, std in zip(data, self.mean, self.std):
t.sub_(mean).div_(std)
# Apply padding
pw, ph = ((self.stride - d % self.stride) % self.stride for d in im.size)
data = F.pad(data, (0, pw, 0, ph))
if self.training:
return data, target
return data, id, ratio
def _get_target(self, id):
'Get annotations for sample'
ann_ids = self.coco.getAnnIds(imgIds=id)
annotations = self.coco.loadAnns(ann_ids)
boxes, categories = [], []
for ann in annotations:
if ann['bbox'][2] < 1 and ann['bbox'][3] < 1:
continue
boxes.append(ann['bbox'])
cat = ann['category_id']
if 'categories' in self.coco.dataset:
cat = self.categories_inv[cat]
categories.append(cat)
if boxes:
target = (torch.FloatTensor(boxes),
torch.FloatTensor(categories).unsqueeze(1))
else:
target = (torch.ones([1, 4]), torch.ones([1, 1]) * -1)
return target
def collate_fn(self, batch):
'Create batch from multiple samples'
if self.training:
data, targets = zip(*batch)
max_det = max([t.size()[0] for t in targets])
targets = [torch.cat([t, torch.ones([max_det - t.size()[0], 5]) * -1]) for t in targets]
targets = torch.stack(targets, 0)
else:
data, indices, ratios = zip(*batch)
# Pad data to match max batch dimensions
sizes = [d.size()[-2:] for d in data]
w, h = (max(dim) for dim in zip(*sizes))
data_stack = []
for datum in data:
pw, ph = w - datum.size()[-2], h - datum.size()[-1]
data_stack.append(
F.pad(datum, (0, ph, 0, pw)) if max(ph, pw) > 0 else datum)
data = torch.stack(data_stack)
if self.training:
return data, targets
ratios = torch.FloatTensor(ratios).view(-1, 1, 1)
return data, torch.IntTensor(indices), ratios
class DataIterator():
'Data loader for data parallel'
def __init__(self, path, resize, max_size, batch_size, stride, world, annotations, training=False,
rotate_augment=False, augment_brightness=0.0,
augment_contrast=0.0, augment_hue=0.0, augment_saturation=0.0):
self.resize = resize
self.max_size = max_size
self.dataset = CocoDataset(path, resize=resize, max_size=max_size,
stride=stride, annotations=annotations, training=training,
rotate_augment=rotate_augment,
augment_brightness=augment_brightness,
augment_contrast=augment_contrast, augment_hue=augment_hue,
augment_saturation=augment_saturation)
self.ids = self.dataset.ids
self.coco = self.dataset.coco
self.sampler = data.distributed.DistributedSampler(self.dataset) if world > 1 else None
self.dataloader = data.DataLoader(self.dataset, batch_size=batch_size // world,
sampler=self.sampler, collate_fn=self.dataset.collate_fn, num_workers=2,
pin_memory=True)
def __repr__(self):
return '\n'.join([
' loader: pytorch',
' resize: {}, max: {}'.format(self.resize, self.max_size),
])
def __len__(self):
return len(self.dataloader)
def __iter__(self):
for output in self.dataloader:
if self.dataset.training:
data, target = output
else:
data, ids, ratio = output
if torch.cuda.is_available():
data = data.cuda(non_blocking=True)
if self.dataset.training:
if torch.cuda.is_available():
target = target.cuda(non_blocking=True)
yield data, target
else:
if torch.cuda.is_available():
ids = ids.cuda(non_blocking=True)
ratio = ratio.cuda(non_blocking=True)
yield data, ids, ratio
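# Minimal usage sketch (editor addition; the paths below are placeholders):
#   it = DataIterator('/data/coco/train2017', resize=800, max_size=1333, batch_size=2,
#                     stride=128, world=1, annotations='instances_train2017.json',
#                     training=True)
#   for data, target in it:
#       # data:   (N, 3, H, W) normalized, stride-padded image batch
#       # target: (N, max_det, 5) rows of [x, y, w, h, class], padded with -1
#       ...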
class RotatedCocoDataset(data.dataset.Dataset):
'Dataset looping through a set of images'
def __init__(self, path, resize, max_size, stride, annotations=None, training=False, rotate_augment=False,
augment_brightness=0.0, augment_contrast=0.0,
augment_hue=0.0, augment_saturation=0.0, absolute_angle=False):
super().__init__()
self.path = os.path.expanduser(path)
self.resize = resize
self.max_size = max_size
self.stride = stride
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.training = training
self.rotate_augment = rotate_augment
self.augment_brightness = augment_brightness
self.augment_contrast = augment_contrast
self.augment_hue = augment_hue
self.augment_saturation = augment_saturation
self.absolute_angle=absolute_angle
with redirect_stdout(None):
self.coco = COCO(annotations)
self.ids = list(self.coco.imgs.keys())
if 'categories' in self.coco.dataset:
self.categories_inv = {k: i for i, k in enumerate(self.coco.getCatIds())}
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
' Get sample'
# Load image
id = self.ids[index]
if self.coco:
image = self.coco.loadImgs(id)[0]['file_name']
im = Image.open('{}/{}'.format(self.path, image)).convert("RGB")
# Randomly sample scale for resize during training
resize = self.resize
if isinstance(resize, list):
resize = random.randint(self.resize[0], self.resize[-1])
ratio = resize / min(im.size)
if ratio * max(im.size) > self.max_size:
ratio = self.max_size / max(im.size)
im = im.resize((int(ratio * d) for d in im.size), Image.BILINEAR)
if self.training:
# Get annotations
boxes, categories = self._get_target(id)
# boxes *= ratio
boxes[:, :4] *= ratio
# Random rotation, if self.rotate_augment
random_angle = random.randint(0, 3) * 90
if self.rotate_augment and random_angle != 0:
# rotate by random_angle degrees.
original_size = im.size
im = im.rotate(random_angle, expand=True)
x, y, w, h, t = boxes[:, 0].clone(), boxes[:, 1].clone(), boxes[:, 2].clone(), \
boxes[:, 3].clone(), boxes[:, 4].clone()
if random_angle == 90:
boxes[:, 0] = y
boxes[:, 1] = original_size[0] - x - w
if not self.absolute_angle:
boxes[:, 2] = h
boxes[:, 3] = w
elif random_angle == 180:
boxes[:, 0] = original_size[0] - x - w
boxes[:, 1] = original_size[1] - y - h
elif random_angle == 270:
boxes[:, 0] = original_size[1] - y - h
boxes[:, 1] = x
if not self.absolute_angle:
boxes[:, 2] = h
boxes[:, 3] = w
pass
# Adjust theta
if self.absolute_angle:
# This is only needed in absolute angle mode.
t += math.radians(random_angle)
rem = torch.remainder(torch.abs(t), math.pi)
sign = torch.sign(t)
t = rem * sign
boxes[:, 4] = t
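# (The remainder/sign step above keeps the accumulated angle's magnitude below pi while
# preserving its sign, so the regression target stays in a bounded range.)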
# Random horizontal flip
if random.randint(0, 1):
im = im.transpose(Image.FLIP_LEFT_RIGHT)
boxes[:, 0] = im.size[0] - boxes[:, 0] - boxes[:, 2]
boxes[:, 1] = boxes[:, 1]
boxes[:, 4] = -boxes[:, 4]
# Apply image brightness, contrast etc augmentation
if self.augment_brightness:
brightness_factor = random.normalvariate(1, self.augment_brightness)
brightness_factor = max(0, brightness_factor)
im = adjust_brightness(im, brightness_factor)
if self.augment_contrast:
contrast_factor = random.normalvariate(1, self.augment_contrast)
contrast_factor = max(0, contrast_factor)
im = adjust_contrast(im, contrast_factor)
if self.augment_hue:
hue_factor = random.normalvariate(0, self.augment_hue)
hue_factor = max(-0.5, hue_factor)
hue_factor = min(0.5, hue_factor)
im = adjust_hue(im, hue_factor)
if self.augment_saturation:
saturation_factor = random.normalvariate(1, self.augment_saturation)
saturation_factor = max(0, saturation_factor)
im = adjust_saturation(im, saturation_factor)
target = torch.cat([boxes, categories], dim=1)
# Convert to tensor and normalize
data = torch.ByteTensor(torch.ByteStorage.from_buffer(im.tobytes()))
data = data.float().div(255).view(*im.size[::-1], len(im.mode))
data = data.permute(2, 0, 1)
for t, mean, std in zip(data, self.mean, self.std):
t.sub_(mean).div_(std)
# Apply padding
pw, ph = ((self.stride - d % self.stride) % self.stride for d in im.size)
data = F.pad(data, (0, pw, 0, ph))
if self.training:
return data, target
return data, id, ratio
def _get_target(self, id):
'Get annotations for sample'
ann_ids = self.coco.getAnnIds(imgIds=id)
annotations = self.coco.loadAnns(ann_ids)
boxes, categories = [], []
for ann in annotations:
if ann['bbox'][2] < 1 and ann['bbox'][3] < 1:
continue
final_bbox = ann['bbox']
if len(final_bbox) == 4:
final_bbox.append(0.0) # add theta of zero.
assert len(ann['bbox']) == 5, "Bounding box for id %i does not contain five entries." % id
boxes.append(final_bbox)
cat = ann['category_id']
if 'categories' in self.coco.dataset:
cat = self.categories_inv[cat]
categories.append(cat)
if boxes:
target = (torch.FloatTensor(boxes),
torch.FloatTensor(categories).unsqueeze(1))
else:
target = (torch.ones([1, 5]), torch.ones([1, 1]) * -1)
return target
def collate_fn(self, batch):
'Create batch from multiple samples'
if self.training:
data, targets = zip(*batch)
max_det = max([t.size()[0] for t in targets])
targets = [torch.cat([t, torch.ones([max_det - t.size()[0], 6]) * -1]) for t in targets]
targets = torch.stack(targets, 0)
else:
data, indices, ratios = zip(*batch)
# Pad data to match max batch dimensions
sizes = [d.size()[-2:] for d in data]
w, h = (max(dim) for dim in zip(*sizes))
data_stack = []
for datum in data:
pw, ph = w - datum.size()[-2], h - datum.size()[-1]
data_stack.append(
F.pad(datum, (0, ph, 0, pw)) if max(ph, pw) > 0 else datum)
data = torch.stack(data_stack)
if self.training:
return data, targets
ratios = torch.FloatTensor(ratios).view(-1, 1, 1)
return data, torch.IntTensor(indices), ratios
class RotatedDataIterator():
'Data loader for data parallel'
def __init__(self, path, resize, max_size, batch_size, stride, world, annotations, training=False,
rotate_augment=False, augment_brightness=0.0,
augment_contrast=0.0, augment_hue=0.0, augment_saturation=0.0, absolute_angle=False
):
self.resize = resize
self.max_size = max_size
self.dataset = RotatedCocoDataset(path, resize=resize, max_size=max_size,
stride=stride, annotations=annotations, training=training,
rotate_augment=rotate_augment,
augment_brightness=augment_brightness,
augment_contrast=augment_contrast, augment_hue=augment_hue,
augment_saturation=augment_saturation, absolute_angle=absolute_angle)
self.ids = self.dataset.ids
self.coco = self.dataset.coco
self.sampler = data.distributed.DistributedSampler(self.dataset) if world > 1 else None
self.dataloader = data.DataLoader(self.dataset, batch_size=batch_size // world,
sampler=self.sampler, collate_fn=self.dataset.collate_fn, num_workers=2,
pin_memory=True)
def __repr__(self):
return '\n'.join([
' loader: pytorch',
' resize: {}, max: {}'.format(self.resize, self.max_size),
])
def __len__(self):
return len(self.dataloader)
def __iter__(self):
for output in self.dataloader:
if self.dataset.training:
data, target = output
else:
data, ids, ratio = output
if torch.cuda.is_available():
data = data.cuda(non_blocking=True)
if self.dataset.training:
if torch.cuda.is_available():
target = target.cuda(non_blocking=True)
yield data, target
else:
if torch.cuda.is_available():
ids = ids.cuda(non_blocking=True)
ratio = ratio.cuda(non_blocking=True)
yield data, ids, ratio
| retinanet-examples-main | odtk/data.py |
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet as vrn
from torchvision.models import mobilenet as vmn
from .resnet import ResNet
from .mobilenet import MobileNet
from .utils import register
class FPN(nn.Module):
'Feature Pyramid Network - https://arxiv.org/abs/1612.03144'
def __init__(self, features):
super().__init__()
self.stride = 128
self.features = features
if isinstance(features, ResNet):
is_light = features.bottleneck == vrn.BasicBlock
channels = [128, 256, 512] if is_light else [512, 1024, 2048]
elif isinstance(features, MobileNet):
channels = [32, 96, 320]
self.lateral3 = nn.Conv2d(channels[0], 256, 1)
self.lateral4 = nn.Conv2d(channels[1], 256, 1)
self.lateral5 = nn.Conv2d(channels[2], 256, 1)
self.pyramid6 = nn.Conv2d(channels[2], 256, 3, stride=2, padding=1)
self.pyramid7 = nn.Conv2d(256, 256, 3, stride=2, padding=1)
self.smooth3 = nn.Conv2d(256, 256, 3, padding=1)
self.smooth4 = nn.Conv2d(256, 256, 3, padding=1)
self.smooth5 = nn.Conv2d(256, 256, 3, padding=1)
def initialize(self):
def init_layer(layer):
if isinstance(layer, nn.Conv2d):
nn.init.xavier_uniform_(layer.weight)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
self.apply(init_layer)
self.features.initialize()
def forward(self, x):
c3, c4, c5 = self.features(x)
p5 = self.lateral5(c5)
p4 = self.lateral4(c4)
p4 = F.interpolate(p5, scale_factor=2) + p4
p3 = self.lateral3(c3)
p3 = F.interpolate(p4, scale_factor=2) + p3
p6 = self.pyramid6(c5)
p7 = self.pyramid7(F.relu(p6))
p3 = self.smooth3(p3)
p4 = self.smooth4(p4)
p5 = self.smooth5(p5)
return [p3, p4, p5, p6, p7]
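# Note: p3..p7 have strides 8, 16, 32, 64 and 128 relative to the input image, which is why
# self.stride is 128 above; the data loaders pad inputs up to a multiple of this stride.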
@register
def ResNet18FPN():
return FPN(ResNet(layers=[2, 2, 2, 2], bottleneck=vrn.BasicBlock, outputs=[3, 4, 5], url=vrn.model_urls['resnet18']))
@register
def ResNet34FPN():
return FPN(ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.BasicBlock, outputs=[3, 4, 5], url=vrn.model_urls['resnet34']))
@register
def ResNet50FPN():
return FPN(ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], url=vrn.model_urls['resnet50']))
@register
def ResNet101FPN():
return FPN(ResNet(layers=[3, 4, 23, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], url=vrn.model_urls['resnet101']))
@register
def ResNet152FPN():
return FPN(ResNet(layers=[3, 8, 36, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], url=vrn.model_urls['resnet152']))
@register
def ResNeXt50_32x4dFPN():
return FPN(ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], groups=32, width_per_group=4, url=vrn.model_urls['resnext50_32x4d']))
@register
def ResNeXt101_32x8dFPN():
return FPN(ResNet(layers=[3, 4, 23, 3], bottleneck=vrn.Bottleneck, outputs=[3, 4, 5], groups=32, width_per_group=8, url=vrn.model_urls['resnext101_32x8d']))
@register
def MobileNetV2FPN():
return FPN(MobileNet(outputs=[6, 13, 17], url=vmn.model_urls['mobilenet_v2']))
| retinanet-examples-main | odtk/backbones/fpn.py |
import sys
from .resnet import *
from .mobilenet import *
from .fpn import *
| retinanet-examples-main | odtk/backbones/__init__.py |
import torch.nn as nn
from torchvision.models import mobilenet as vmn
import torch.utils.model_zoo as model_zoo
class MobileNet(vmn.MobileNetV2):
'MobileNetV2: Inverted Residuals and Linear Bottlenecks - https://arxiv.org/abs/1801.04381'
def __init__(self, outputs=[18], url=None):
self.stride = 128
self.url = url
super().__init__()
self.outputs = outputs
self.unused_modules = ['features.18', 'classifier']
def initialize(self):
if self.url:
self.load_state_dict(model_zoo.load_url(self.url))
def forward(self, x):
outputs = []
for indx, feat in enumerate(self.features[:-1]):
x = feat(x)
if indx in self.outputs:
outputs.append(x)
return outputs
| retinanet-examples-main | odtk/backbones/mobilenet.py |
import torchvision
from torchvision.models import resnet as vrn
import torch.utils.model_zoo as model_zoo
from .utils import register
class ResNet(vrn.ResNet):
'Deep Residual Network - https://arxiv.org/abs/1512.03385'
def __init__(self, layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[5], groups=1, width_per_group=64, url=None):
self.stride = 128
self.bottleneck = bottleneck
self.outputs = outputs
self.url = url
kwargs = {'block': bottleneck, 'layers': layers, 'groups': groups, 'width_per_group': width_per_group}
super().__init__(**kwargs)
self.unused_modules = ['fc']
def initialize(self):
if self.url:
self.load_state_dict(model_zoo.load_url(self.url))
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outputs = []
for i, layer in enumerate([self.layer1, self.layer2, self.layer3, self.layer4]):
level = i + 2
if level > max(self.outputs):
break
x = layer(x)
if level in self.outputs:
outputs.append(x)
return outputs
@register
def ResNet18C4():
return ResNet(layers=[2, 2, 2, 2], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet18'])
@register
def ResNet34C4():
return ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet34'])
| retinanet-examples-main | odtk/backbones/resnet.py |
import sys
import torchvision
def register(f):
all = sys.modules[f.__module__].__dict__.setdefault('__all__', [])
if f.__name__ in all:
raise RuntimeError('{} already exist!'.format(f.__name__))
all.append(f.__name__)
return f
| retinanet-examples-main | odtk/backbones/utils.py |
import torch
from torch import nn
import torch.nn.functional as F
class FixedBatchNorm2d(nn.Module):
'BatchNorm2d where the batch statistics and the affine parameters are fixed'
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def forward(self, x):
return F.batch_norm(x, running_mean=self.running_mean, running_var=self.running_var, weight=self.weight, bias=self.bias)
def convert_fixedbn_model(module):
'Convert batch norm layers to fixed'
mod = module
if isinstance(module, nn.BatchNorm2d):
mod = FixedBatchNorm2d(module.num_features)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_fixedbn_model(child))
return mod
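# Editor note: detection models are usually fine-tuned with small per-GPU batches, so
# updating BatchNorm running statistics would be noisy; train.train() therefore freezes
# them by passing the model through convert_fixedbn_model before training.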
| retinanet-examples-main | odtk/backbones/layers.py |
import numpy as np
from odtk.box import generate_anchors, generate_anchors_rotated
# Generates anchors for export.cpp
# ratios = [1.0, 2.0, 0.5]
# scales = [4 * 2 ** (i / 3) for i in range(3)]
ratios = [0.25, 0.5, 1.0, 2.0, 4.0]
scales = [2 * 2**(2 * i/3) for i in range(3)]
angles = [-np.pi / 6, 0, np.pi / 6]
strides = [2**i for i in range(3,8)]
axis = str(np.round([generate_anchors(stride, ratios, scales,
angles).view(-1).tolist() for stride in strides], decimals=2).tolist()
).replace('[', '{').replace(']', '}').replace('}, ', '},\n')
rot = str(np.round([generate_anchors_rotated(stride, ratios, scales,
angles)[0].view(-1).tolist() for stride in strides], decimals=2).tolist()
).replace('[', '{').replace(']', '}').replace('}, ', '},\n')
print("Axis-aligned:\n"+axis+'\n')
print("Rotated:\n"+rot)
| retinanet-examples-main | extras/cppapi/generate_anchors.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from com.nvidia.spark.examples.main import main
main()
| spark-rapids-examples-main | examples/XGBoost-Examples/main.py |
spark-rapids-examples-main | examples/XGBoost-Examples/agaricus/python/com/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/agaricus/python/com/nvidia/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/agaricus/python/com/nvidia/spark/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/agaricus/python/com/nvidia/spark/examples/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/agaricus/python/com/nvidia/spark/examples/agaricus/__init__.py |
|
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.types import *
from com.nvidia.spark.examples.utility.utils import *
from pyspark.sql import SparkSession
from xgboost.spark import SparkXGBClassifier, SparkXGBClassifierModel
label = 'label'
feature_names = ['feature_' + str(i) for i in range(0, 126)]
schema = StructType([StructField(x, FloatType()) for x in [label] + feature_names])
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
train_data, eval_data, trans_data = valid_input_data(spark, args, '', schema)
if args.mode in ['all', 'train']:
if train_data is None:
print('-' * 80)
print('Usage: train data path required when mode is all or train')
print('-' * 80)
exit(1)
train_data, features = transform_data(train_data, label, args.use_gpu)
xgboost_args['features_col'] = features
xgboost_args['label_col'] = label
classifier = SparkXGBClassifier(**xgboost_args)
if eval_data:
# TODO: evaluation datasets are not yet wired into SparkXGBClassifier in this example
pass
model = with_benchmark('Training', lambda: classifier.fit(train_data))
if args.modelPath:
writer = model.write().overwrite() if args.overwrite else model
writer.save(args.modelPath)
else:
model = SparkXGBClassifierModel.load(args.modelPath)
if args.mode in ['all', 'transform']:
if trans_data is None:
print('-' * 80)
print('Usage: trans data path required when mode is all or transform')
print('-' * 80)
exit(1)
trans_data, _ = transform_data(trans_data, label, args.use_gpu)
def transform():
result = model.transform(trans_data).cache()
result.foreachPartition(lambda _: None)
return result
result = with_benchmark('Transformation', transform)
show_sample(args, result, label)
with_benchmark('Evaluation', lambda: check_classification_accuracy(result, label))
spark.stop()
| spark-rapids-examples-main | examples/XGBoost-Examples/agaricus/python/com/nvidia/spark/examples/agaricus/main.py |
spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/__init__.py |
|
#
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.functions import col
def pre_process(data_frame):
processes = [
drop_useless,
encode_categories,
fill_na,
remove_invalid,
convert_datetime,
add_h_distance,
]
for process in processes:
data_frame = process(data_frame)
return data_frame
def drop_useless(data_frame):
return data_frame.drop(
'dropoff_datetime',
'payment_type',
'surcharge',
'mta_tax',
'tip_amount',
'tolls_amount',
'total_amount')
def encode_categories(data_frame):
categories = [ 'vendor_id', 'rate_code', 'store_and_fwd_flag' ]
for category in categories:
data_frame = data_frame.withColumn(category, hash(col(category)))
return data_frame.withColumnRenamed("store_and_fwd_flag", "store_and_fwd")
def fill_na(data_frame):
return data_frame.fillna(-1)
def remove_invalid(data_frame):
conditions = [
( 'fare_amount', 0, 500 ),
( 'passenger_count', 0, 6 ),
( 'pickup_longitude', -75, -73 ),
( 'dropoff_longitude', -75, -73 ),
( 'pickup_latitude', 40, 42 ),
( 'dropoff_latitude', 40, 42 ),
]
for column, min, max in conditions:
data_frame = data_frame.filter('{} > {} and {} < {}'.format(column, min, column, max))
return data_frame
def convert_datetime(data_frame):
datetime = col('pickup_datetime')
return (data_frame
.withColumn('pickup_datetime', to_timestamp(datetime))
.withColumn('year', year(datetime))
.withColumn('month', month(datetime))
.withColumn('day', dayofmonth(datetime))
.withColumn('day_of_week', dayofweek(datetime))
.withColumn(
'is_weekend',
col('day_of_week').isin(1, 7).cast(IntegerType())) # 1: Sunday, 7: Saturday
.withColumn('hour', hour(datetime))
.drop('pickup_datetime'))
def add_h_distance(data_frame):
p = math.pi / 180
lat1 = col('pickup_latitude')
lon1 = col('pickup_longitude')
lat2 = col('dropoff_latitude')
lon2 = col('dropoff_longitude')
internal_value = (0.5
- cos((lat2 - lat1) * p) / 2
+ cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2)
h_distance = 12734 * asin(sqrt(internal_value))
return data_frame.withColumn('h_distance', h_distance)
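# Note: this is the haversine great-circle distance between pickup and dropoff; 12734 is
# roughly twice the mean earth radius in kilometres, so h_distance comes out in km.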
| spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/taxi/pre_process.py |
spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/taxi/__init__.py |
|
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.types import *
label = 'fare_amount'
raw_schema = StructType([
StructField('vendor_id', StringType()),
StructField('pickup_datetime', StringType()),
StructField('dropoff_datetime', StringType()),
StructField('passenger_count', IntegerType()),
StructField('trip_distance', DoubleType()),
StructField('pickup_longitude', DoubleType()),
StructField('pickup_latitude', DoubleType()),
StructField('rate_code', StringType()),
StructField('store_and_fwd_flag', StringType()),
StructField('dropoff_longitude', DoubleType()),
StructField('dropoff_latitude', DoubleType()),
StructField('payment_type', StringType()),
StructField(label, DoubleType()),
StructField('surcharge', DoubleType()),
StructField('mta_tax', DoubleType()),
StructField('tip_amount', DoubleType()),
StructField('tolls_amount', DoubleType()),
StructField('total_amount', DoubleType()),
])
final_schema = StructType([
StructField('vendor_id', FloatType()),
StructField('passenger_count', FloatType()),
StructField('trip_distance', FloatType()),
StructField('pickup_longitude', FloatType()),
StructField('pickup_latitude', FloatType()),
StructField('rate_code', FloatType()),
StructField('store_and_fwd', FloatType()),
StructField('dropoff_longitude', FloatType()),
StructField('dropoff_latitude', FloatType()),
StructField(label, FloatType()),
StructField('hour', FloatType()),
StructField('year', IntegerType()),
StructField('month', IntegerType()),
StructField('day', FloatType()),
StructField('day_of_week', FloatType()),
StructField('is_weekend', FloatType()),
])
| spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/taxi/consts.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .consts import *
from com.nvidia.spark.examples.utility.utils import *
from pyspark.sql import SparkSession
from xgboost.spark import SparkXGBRegressor, SparkXGBRegressorModel
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
train_data, eval_data, trans_data = valid_input_data(spark, args, raw_schema, final_schema)
if args.mode in ['all', 'train']:
if not train_data:
print('-' * 80)
print('Usage: training data path required when mode is all or train')
print('-' * 80)
exit(1)
train_data, features = transform_data(train_data, label, args.use_gpu)
xgboost_args['features_col'] = features
xgboost_args['label_col'] = label
regressor = SparkXGBRegressor(**xgboost_args)
if eval_data:
            # evaluation data is detected but not used by this example
            pass
model = with_benchmark('Training', lambda: regressor.fit(train_data))
if args.modelPath:
writer = model.write().overwrite() if args.overwrite else model
writer.save(args.modelPath)
else:
model = SparkXGBRegressorModel.load(args.modelPath)
if args.mode in ['all', 'transform']:
if not trans_data:
print('-' * 80)
print('Usage: trans data path required when mode is all or transform')
print('-' * 80)
exit(1)
trans_data, _ = transform_data(trans_data, label, args.use_gpu)
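        # cache and fully materialize the predictions so the benchmark measures the transform itself, not lazy evaluation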
def transform():
result = model.transform(trans_data).cache()
result.foreachPartition(lambda _: None)
return result
result = with_benchmark('Transformation', transform)
show_sample(args, result, label)
with_benchmark('Evaluation', lambda: check_regression_accuracy(result, label))
spark.stop()
| spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/taxi/main.py |
#
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .consts import *
from com.nvidia.spark.examples.utility.utils import *
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.sql import SparkSession
from xgboost.spark import SparkXGBRegressor, SparkXGBRegressorModel
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
train_data, eval_data, trans_data = valid_input_data(spark, args, raw_schema, final_schema)
if args.mode in ['all', 'train']:
if train_data is None:
print('-' * 80)
print('Usage: training data path required when mode is all or train')
print('-' * 80)
exit(1)
train_data, features = transform_data(train_data, label, args.use_gpu)
xgboost_args['features_col'] = features
xgboost_args['label_col'] = label
regressor = SparkXGBRegressor(**xgboost_args)
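        # 3-fold cross-validation over max_depth and n_estimators; the best model is kept for transform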
param_grid = (ParamGridBuilder()
.addGrid(regressor.max_depth, [6, 8])
.addGrid(regressor.n_estimators, [20, 40])
.build())
evaluator = (RegressionEvaluator()
.setLabelCol(label))
cross_validator = (CrossValidator()
.setEstimator(regressor)
.setEvaluator(evaluator)
.setEstimatorParamMaps(param_grid)
.setNumFolds(3))
model = with_benchmark('Training', lambda: cross_validator.fit(train_data))
# get the best model to do transform
model = model.bestModel
if args.modelPath:
writer = model.write().overwrite() if args.overwrite else model
writer.save(args.modelPath)
else:
model = SparkXGBRegressorModel.load(args.modelPath)
if args.mode in ['all', 'transform']:
if trans_data is None:
print('-' * 80)
print('Usage: trans data path required when mode is all or transform')
print('-' * 80)
exit(1)
trans_data, _ = transform_data(trans_data, label, args.use_gpu)
def transform():
result = model.transform(trans_data).cache()
result.foreachPartition(lambda _: None)
return result
result = with_benchmark('Transformation', transform)
show_sample(args, result, label)
with_benchmark('Evaluation', lambda: check_regression_accuracy(result, label))
spark.stop()
| spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/taxi/cross_validator_main.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .consts import *
from .pre_process import pre_process
from com.nvidia.spark.examples.utility.utils import *
from pyspark.sql import SparkSession
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
raw_data_path = extract_paths(args.dataPaths, 'raw::')
output_path = extract_paths(args.dataPaths, 'out::')[0]
if not raw_data_path:
print('-' * 80)
print('Usage: raw data path required when ETL')
exit(1)
if not output_path:
print('-' * 80)
print('Usage: output data path required when ETL')
exit(1)
raw_data = prepare_data(spark, args, raw_schema, raw_data_path)
etled_train, etled_eval, etled_trans = pre_process(raw_data).randomSplit(list(map(float, args.splitRatios)))
etled_train.write.mode("overwrite").parquet(output_path + '/train')
etled_eval.write.mode("overwrite").parquet(output_path + '/eval')
etled_trans.write.mode("overwrite").parquet(output_path + '/trans')
| spark-rapids-examples-main | examples/XGBoost-Examples/taxi/python/com/nvidia/spark/examples/taxi/etl_main.py |
spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/__init__.py |
|
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .consts import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
from sys import exit
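# UDFs: derive the quarter tag from the input file name and map raw seller names to standardized values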
get_quarter = udf(lambda path: path.split(r'.')[0].split('/')[-1], StringType())
standardize_name = udf(lambda name: name_mapping.get(name), StringType())
def load_data(spark, paths, schema, args, extra_csv_opts={}):
reader = (spark
.read
.format(args.format)
.option('asFloats', args.asFloats)
.option('maxRowsPerChunk', args.maxRowsPerChunk))
if args.format == 'csv':
(reader
.schema(schema)
.option('delimiter', '|')
.option('header', False))
for k, v in extra_csv_opts.items():
reader.option(k, v)
return reader.load(paths)
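# Load the 'data::'-prefixed input paths into a single raw DataFrame (pipe-delimited CSV options apply when the format is csv)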
def prepare_rawDf(spark, args):
extra_csv_options = {
'nullValue': '',
'parserLib': 'univocity',
}
paths = extract_paths(args.dataPaths, 'data::')
rawDf = load_data(spark, paths, rawSchema, args, extra_csv_options)
return rawDf
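# Project the monthly performance columns, reformat the date fields, and drop rows with a zero current UPB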
def extract_perf_columns(rawDf):
perfDf = rawDf.select(
col("loan_id"),
date_format(to_date(col("monthly_reporting_period"),"MMyyyy"), "MM/dd/yyyy").alias("monthly_reporting_period"),
upper(col("servicer")).alias("servicer"),
col("interest_rate"),
col("current_actual_upb"),
col("loan_age"),
col("remaining_months_to_legal_maturity"),
col("adj_remaining_months_to_maturity"),
date_format(to_date(col("maturity_date"),"MMyyyy"), "MM/yyyy").alias("maturity_date"),
col("msa"),
col("current_loan_delinquency_status"),
col("mod_flag"),
col("zero_balance_code"),
date_format(to_date(col("zero_balance_effective_date"),"MMyyyy"), "MM/yyyy").alias("zero_balance_effective_date"),
date_format(to_date(col("last_paid_installment_date"),"MMyyyy"), "MM/dd/yyyy").alias("last_paid_installment_date"),
date_format(to_date(col("foreclosed_after"),"MMyyyy"), "MM/dd/yyyy").alias("foreclosed_after"),
date_format(to_date(col("disposition_date"),"MMyyyy"), "MM/dd/yyyy").alias("disposition_date"),
col("foreclosure_costs"),
col("prop_preservation_and_repair_costs"),
col("asset_recovery_costs"),
col("misc_holding_expenses"),
col("holding_taxes"),
col("net_sale_proceeds"),
col("credit_enhancement_proceeds"),
col("repurchase_make_whole_proceeds"),
col("other_foreclosure_proceeds"),
col("non_interest_bearing_upb"),
col("principal_forgiveness_upb"),
col("repurchase_make_whole_proceeds_flag"),
col("foreclosure_principal_write_off_amount"),
col("servicing_activity_indicator"))
return perfDf.select("*").filter("current_actual_upb != 0.0")
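# Build per-loan delinquency features: ever_30/90/180 flags and first delinquency dates, then roll the 12-month delinquency status and UPB back onto each monthly record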
def prepare_performance(spark, args, rawDf):
performance = (extract_perf_columns(rawDf)
.withColumn('quarter', get_quarter(input_file_name()))
.withColumn('timestamp', to_date(col('monthly_reporting_period'), 'MM/dd/yyyy'))
.withColumn('timestamp_year', year(col('timestamp')))
.withColumn('timestamp_month', month(col('timestamp'))))
aggregation = (performance
.select(
'quarter',
'loan_id',
'current_loan_delinquency_status',
when(col('current_loan_delinquency_status') >= 1, col('timestamp'))
.alias('delinquency_30'),
when(col('current_loan_delinquency_status') >= 3, col('timestamp'))
.alias('delinquency_90'),
when(col('current_loan_delinquency_status') >= 6, col('timestamp'))
.alias('delinquency_180'))
.groupBy('quarter', 'loan_id')
.agg(
max('current_loan_delinquency_status').alias('delinquency_12'),
min('delinquency_30').alias('delinquency_30'),
min('delinquency_90').alias('delinquency_90'),
min('delinquency_180').alias('delinquency_180'))
.select(
'quarter',
'loan_id',
(col('delinquency_12') >= 1).alias('ever_30'),
(col('delinquency_12') >= 3).alias('ever_90'),
(col('delinquency_12') >= 6).alias('ever_180'),
'delinquency_30',
'delinquency_90',
'delinquency_180'))
months = spark.createDataFrame(range(12), IntegerType()).withColumnRenamed('value', 'month_y')
to_join = (performance
.select(
'quarter',
'loan_id',
'timestamp_year',
'timestamp_month',
col('current_loan_delinquency_status').alias('delinquency_12'),
col('current_actual_upb').alias('upb_12'))
.join(aggregation, ['loan_id', 'quarter'], 'left_outer')
.crossJoin(months)
.select(
'quarter',
floor(
(col('timestamp_year') * 12 + col('timestamp_month') - 24000 - col('month_y')) / 12
).alias('josh_mody_n'),
'ever_30',
'ever_90',
'ever_180',
'delinquency_30',
'delinquency_90',
'delinquency_180',
'loan_id',
'month_y',
'delinquency_12',
'upb_12')
.groupBy(
'quarter',
'loan_id',
'josh_mody_n',
'ever_30',
'ever_90',
'ever_180',
'delinquency_30',
'delinquency_90',
'delinquency_180',
'month_y')
.agg(
max('delinquency_12').alias('delinquency_12'),
min('upb_12').alias('upb_12'))
.withColumn(
'timestamp_year',
floor((24000 + (col('josh_mody_n') * 12) + (col('month_y') - 1)) / 12))
.withColumn(
'timestamp_month_tmp',
(24000 + (col('josh_mody_n') * 12) + col('month_y')) % 12)
.withColumn(
'timestamp_month',
when(col('timestamp_month_tmp') == 0, 12).otherwise(col('timestamp_month_tmp')))
.withColumn(
'delinquency_12',
((col('delinquency_12') > 3).cast('int') + (col('upb_12') == 0).cast('int')))
.drop('timestamp_month_tmp', 'josh_mody_n', 'month_y'))
return (performance
.join(to_join, ['quarter', 'loan_id', 'timestamp_year', 'timestamp_month'], 'left')
.drop('timestamp_year', 'timestamp_month'))
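# Project the acquisition (origination) columns, keeping only the first reporting period per loan via a dense_rank window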
def extract_acq_columns(rawDf):
acqDf = rawDf.select(
col("loan_id"),
col("orig_channel"),
upper(col("seller_name")).alias("seller_name"),
col("orig_interest_rate"),
col("orig_upb"),
col("orig_loan_term"),
date_format(to_date(col("orig_date"),"MMyyyy"), "MM/yyyy").alias("orig_date"),
date_format(to_date(col("first_pay_date"),"MMyyyy"), "MM/yyyy").alias("first_pay_date"),
col("orig_ltv"),
col("orig_cltv"),
col("num_borrowers"),
col("dti"),
col("borrower_credit_score"),
col("first_home_buyer"),
col("loan_purpose"),
col("property_type"),
col("num_units"),
col("occupancy_status"),
col("property_state"),
col("zip"),
col("mortgage_insurance_percent"),
col("product_type"),
col("coborrow_credit_score"),
col("mortgage_insurance_type"),
col("relocation_mortgage_indicator"),
dense_rank().over(Window.partitionBy("loan_id").orderBy(to_date(col("monthly_reporting_period"),"MMyyyy"))).alias("rank")
)
return acqDf.select("*").filter(col("rank")==1)
def prepare_acquisition(spark, args, rawDf):
return (extract_acq_columns(rawDf)
.withColumn('quarter', get_quarter(input_file_name()))
.withColumn('seller_name', standardize_name(col('seller_name'))))
def extract_paths(paths, prefix):
results = [ path[len(prefix):] for path in paths if path.startswith(prefix) ]
if not results:
print('-' * 80)
print('Usage: {} data path required'.format(prefix))
exit(1)
return results
def etl(spark, args):
rawDf = prepare_rawDf(spark, args)
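    # materialize the raw input as parquet at the 'tmp::' path and re-read it before building the performance and acquisition tables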
rawDf.write.parquet(extract_paths(args.dataPaths, 'tmp::')[0], mode='overwrite')
rawDf = spark.read.parquet(extract_paths(args.dataPaths, 'tmp::')[0])
performance = prepare_performance(spark, args, rawDf)
acquisition = prepare_acquisition(spark, args, rawDf)
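    # join monthly performance with acquisition data, hash-encode categorical columns into 100 buckets, binarize the delinquency label, and fill nulls with 0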
return (performance
.join(acquisition, ['loan_id', 'quarter'], 'left_outer')
.select(
[(md5(col(x)) % 100).alias(x) for x in categorical_columns]
+ [col(x) for x in numeric_columns])
.withColumn('delinquency_12', when(col('delinquency_12') > 0, 1).otherwise(0))
.na
.fill(0))
| spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/mortgage/etl.py |
spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/mortgage/__init__.py |
|
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.types import *
label = 'delinquency_12'
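# Schema of the ETL output used for XGBoost training and transform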
schema = StructType([
StructField('orig_channel', FloatType()),
StructField('first_home_buyer', FloatType()),
StructField('loan_purpose', FloatType()),
StructField('property_type', FloatType()),
StructField('occupancy_status', FloatType()),
StructField('property_state', FloatType()),
StructField('product_type', FloatType()),
StructField('relocation_mortgage_indicator', FloatType()),
StructField('seller_name', FloatType()),
StructField('mod_flag', FloatType()),
StructField('orig_interest_rate', FloatType()),
StructField('orig_upb', DoubleType()),
StructField('orig_loan_term', IntegerType()),
StructField('orig_ltv', FloatType()),
StructField('orig_cltv', FloatType()),
StructField('num_borrowers', FloatType()),
StructField('dti', FloatType()),
StructField('borrower_credit_score', FloatType()),
StructField('num_units', IntegerType()),
StructField('zip', IntegerType()),
StructField('mortgage_insurance_percent', FloatType()),
StructField('current_loan_delinquency_status', IntegerType()),
StructField('current_actual_upb', FloatType()),
StructField('interest_rate', FloatType()),
StructField('loan_age', FloatType()),
StructField('msa', FloatType()),
StructField('non_interest_bearing_upb', FloatType()),
StructField(label, IntegerType()),
])
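# Mapping used by the standardize_name UDF to collapse raw seller-name variants into canonical values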
name_mapping = {
'WITMER FUNDING, LLC': 'Witmer',
'WELLS FARGO CREDIT RISK TRANSFER SECURITIES TRUST 2015': 'Wells Fargo',
'WELLS FARGO BANK, NA': 'Wells Fargo',
'WELLS FARGO BANK, N.A.': 'Wells Fargo',
'WELLS FARGO BANK, NA': 'Wells Fargo',
'USAA FEDERAL SAVINGS BANK': 'USAA',
'UNITED SHORE FINANCIAL SERVICES, LLC D\\/B\\/A UNITED WHOLESALE MORTGAGE': 'United Seq(e',
'U.S. BANK N.A.': 'US Bank',
'SUNTRUST MORTGAGE INC.': 'Suntrust',
'STONEGATE MORTGAGE CORPORATION': 'Stonegate Mortgage',
'STEARNS LENDING, LLC': 'Stearns Lending',
'STEARNS LENDING, INC.': 'Stearns Lending',
'SIERRA PACIFIC MORTGAGE COMPANY, INC.': 'Sierra Pacific Mortgage',
'REGIONS BANK': 'Regions',
'RBC MORTGAGE COMPANY': 'RBC',
'QUICKEN LOANS INC.': 'Quicken Loans',
'PULTE MORTGAGE, L.L.C.': 'Pulte Mortgage',
'PROVIDENT FUNDING ASSOCIATES, L.P.': 'Provident Funding',
'PROSPECT MORTGAGE, LLC': 'Prospect Mortgage',
'PRINCIPAL RESIDENTIAL MORTGAGE CAPITAL RESOURCES, LLC': 'Principal Residential',
'PNC BANK, N.A.': 'PNC',
'PMT CREDIT RISK TRANSFER TRUST 2015-2': 'PennyMac',
'PHH MORTGAGE CORPORATION': 'PHH Mortgage',
'PENNYMAC CORP.': 'PennyMac',
'PACIFIC UNION FINANCIAL, LLC': 'Other',
'OTHER': 'Other',
'NYCB MORTGAGE COMPANY, LLC': 'NYCB',
'NEW YORK COMMUNITY BANK': 'NYCB',
'NETBANK FUNDING SERVICES': 'Netbank',
'NATIONSTAR MORTGAGE, LLC': 'Nationstar Mortgage',
'METLIFE BANK, NA': 'Metlife',
'LOANDEPOT.COM, LLC': 'LoanDepot.com',
'J.P. MORGAN MADISON AVENUE SECURITIES TRUST, SERIES 2015-1': 'JP Morgan Chase',
'J.P. MORGAN MADISON AVENUE SECURITIES TRUST, SERIES 2014-1': 'JP Morgan Chase',
'JPMORGAN CHASE BANK, NATIONAL ASSOCIATION': 'JP Morgan Chase',
'JPMORGAN CHASE BANK, NA': 'JP Morgan Chase',
'JP MORGAN CHASE BANK, NA': 'JP Morgan Chase',
'IRWIN MORTGAGE, CORPORATION': 'Irwin Mortgage',
'IMPAC MORTGAGE CORP.': 'Impac Mortgage',
'HSBC BANK USA, NATIONAL ASSOCIATION': 'HSBC',
'HOMEWARD RESIDENTIAL, INC.': 'Homeward Mortgage',
'HOMESTREET BANK': 'Other',
'HOMEBRIDGE FINANCIAL SERVICES, INC.': 'HomeBridge',
'HARWOOD STREET FUNDING I, LLC': 'Harwood Mortgage',
'GUILD MORTGAGE COMPANY': 'Guild Mortgage',
'GMAC MORTGAGE, LLC (USAA FEDERAL SAVINGS BANK)': 'GMAC',
'GMAC MORTGAGE, LLC': 'GMAC',
'GMAC (USAA)': 'GMAC',
'FREMONT BANK': 'Fremont Bank',
'FREEDOM MORTGAGE CORP.': 'Freedom Mortgage',
'FRANKLIN AMERICAN MORTGAGE COMPANY': 'Franklin America',
'FLEET NATIONAL BANK': 'Fleet National',
'FLAGSTAR CAPITAL MARKETS CORPORATION': 'Flagstar Bank',
'FLAGSTAR BANK, FSB': 'Flagstar Bank',
'FIRST TENNESSEE BANK NATIONAL ASSOCIATION': 'Other',
'FIFTH THIRD BANK': 'Fifth Third Bank',
'FEDERAL HOME LOAN BANK OF CHICAGO': 'Fedral Home of Chicago',
'FDIC, RECEIVER, INDYMAC FEDERAL BANK FSB': 'FDIC',
'DOWNEY SAVINGS AND LOAN ASSOCIATION, F.A.': 'Downey Mortgage',
'DITECH FINANCIAL LLC': 'Ditech',
'CITIMORTGAGE, INC.': 'Citi',
'CHICAGO MORTGAGE SOLUTIONS DBA INTERFIRST MORTGAGE COMPANY': 'Chicago Mortgage',
'CHICAGO MORTGAGE SOLUTIONS DBA INTERBANK MORTGAGE COMPANY': 'Chicago Mortgage',
'CHASE HOME FINANCE, LLC': 'JP Morgan Chase',
'CHASE HOME FINANCE FRANKLIN AMERICAN MORTGAGE COMPANY': 'JP Morgan Chase',
'CHASE HOME FINANCE (CIE 1)': 'JP Morgan Chase',
'CHASE HOME FINANCE': 'JP Morgan Chase',
'CASHCALL, INC.': 'CashCall',
'CAPITAL ONE, NATIONAL ASSOCIATION': 'Capital One',
'CALIBER HOME LOANS, INC.': 'Caliber Funding',
'BISHOPS GATE RESIDENTIAL MORTGAGE TRUST': 'Bishops Gate Mortgage',
'BANK OF AMERICA, N.A.': 'Bank of America',
'AMTRUST BANK': 'AmTrust',
'AMERISAVE MORTGAGE CORPORATION': 'Amerisave',
'AMERIHOME MORTGAGE COMPANY, LLC': 'AmeriHome Mortgage',
'ALLY BANK': 'Ally Bank',
'ACADEMY MORTGAGE CORPORATION': 'Academy Mortgage',
'NO CASH-OUT REFINANCE': 'OTHER REFINANCE',
'REFINANCE - NOT SPECIFIED': 'OTHER REFINANCE',
'Other REFINANCE': 'OTHER REFINANCE',
}
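# Schema of the raw mortgage records, combining acquisition (origination) and monthly performance fields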
rawSchema = StructType([
StructField("reference_pool_id", StringType()),
StructField("loan_id", LongType()),
StructField("monthly_reporting_period", StringType()),
StructField("orig_channel", StringType()),
StructField("seller_name", StringType()),
StructField("servicer", StringType()),
StructField("master_servicer", StringType()),
StructField("orig_interest_rate", DoubleType()),
StructField("interest_rate", DoubleType()),
StructField("orig_upb", DoubleType()),
StructField("upb_at_issuance", StringType()),
StructField("current_actual_upb", DoubleType()),
StructField("orig_loan_term", IntegerType()),
StructField("orig_date", StringType()),
StructField("first_pay_date", StringType()),
StructField("loan_age", DoubleType()),
StructField("remaining_months_to_legal_maturity", DoubleType()),
StructField("adj_remaining_months_to_maturity", DoubleType()),
StructField("maturity_date", StringType()),
StructField("orig_ltv", DoubleType()),
StructField("orig_cltv", DoubleType()),
StructField("num_borrowers", DoubleType()),
StructField("dti", DoubleType()),
StructField("borrower_credit_score", DoubleType()),
StructField("coborrow_credit_score", DoubleType()),
StructField("first_home_buyer", StringType()),
StructField("loan_purpose", StringType()),
StructField("property_type", StringType()),
StructField("num_units", IntegerType()),
StructField("occupancy_status", StringType()),
StructField("property_state", StringType()),
StructField("msa", DoubleType()),
StructField("zip", IntegerType()),
StructField("mortgage_insurance_percent", DoubleType()),
StructField("product_type", StringType()),
StructField("prepayment_penalty_indicator", StringType()),
StructField("interest_only_loan_indicator", StringType()),
StructField("interest_only_first_principal_and_interest_payment_date", StringType()),
StructField("months_to_amortization", StringType()),
StructField("current_loan_delinquency_status", IntegerType()),
StructField("loan_payment_history", StringType()),
StructField("mod_flag", StringType()),
StructField("mortgage_insurance_cancellation_indicator", StringType()),
StructField("zero_balance_code", StringType()),
StructField("zero_balance_effective_date", StringType()),
StructField("upb_at_the_time_of_removal", StringType()),
StructField("repurchase_date", StringType()),
StructField("scheduled_principal_current", StringType()),
StructField("total_principal_current", StringType()),
StructField("unscheduled_principal_current", StringType()),
StructField("last_paid_installment_date", StringType()),
StructField("foreclosed_after", StringType()),
StructField("disposition_date", StringType()),
StructField("foreclosure_costs", DoubleType()),
StructField("prop_preservation_and_repair_costs", DoubleType()),
StructField("asset_recovery_costs", DoubleType()),
StructField("misc_holding_expenses", DoubleType()),
StructField("holding_taxes", DoubleType()),
StructField("net_sale_proceeds", DoubleType()),
StructField("credit_enhancement_proceeds", DoubleType()),
StructField("repurchase_make_whole_proceeds", StringType()),
StructField("other_foreclosure_proceeds", DoubleType()),
StructField("non_interest_bearing_upb", DoubleType()),
StructField("principal_forgiveness_upb", StringType()),
StructField("original_list_start_date", StringType()),
StructField("original_list_price", StringType()),
StructField("current_list_start_date", StringType()),
StructField("current_list_price", StringType()),
StructField("borrower_credit_score_at_issuance", StringType()),
StructField("co-borrower_credit_score_at_issuance", StringType()),
StructField("borrower_credit_score_current", StringType()),
StructField("co-Borrower_credit_score_current", StringType()),
StructField("mortgage_insurance_type", DoubleType()),
StructField("servicing_activity_indicator", StringType()),
StructField("current_period_modification_loss_amount", StringType()),
StructField("cumulative_modification_loss_amount", StringType()),
StructField("current_period_credit_event_net_gain_or_loss", StringType()),
StructField("cumulative_credit_event_net_gain_or_loss", StringType()),
StructField("homeready_program_indicator", StringType()),
StructField("foreclosure_principal_write_off_amount", StringType()),
StructField("relocation_mortgage_indicator", StringType()),
StructField("zero_balance_code_change_date", StringType()),
StructField("loan_holdback_indicator", StringType()),
StructField("loan_holdback_effective_date", StringType()),
StructField("delinquent_accrued_interest", StringType()),
StructField("property_valuation_method", StringType()),
StructField("high_balance_loan_indicator", StringType()),
StructField("arm_initial_fixed-rate_period_lt_5_yr_indicator", StringType()),
StructField("arm_product_type", StringType()),
StructField("initial_fixed-rate_period", StringType()),
StructField("interest_rate_adjustment_frequency", StringType()),
StructField("next_interest_rate_adjustment_date", StringType()),
StructField("next_payment_change_date", StringType()),
StructField("index", StringType()),
StructField("arm_cap_structure", StringType()),
StructField("initial_interest_rate_cap_up_percent", StringType()),
StructField("periodic_interest_rate_cap_up_percent", StringType()),
StructField("lifetime_interest_rate_cap_up_percent", StringType()),
StructField("mortgage_margin", StringType()),
StructField("arm_balloon_indicator", StringType()),
StructField("arm_plan_number", StringType()),
StructField("borrower_assistance_plan", StringType()),
StructField("hltv_refinance_option_indicator", StringType()),
StructField("deal_name", StringType()),
StructField("repurchase_make_whole_proceeds_flag", StringType()),
StructField("alternative_delinquency_resolution", StringType()),
StructField("alternative_delinquency_resolution_count", StringType()),
StructField("total_deferral_amount", StringType())
])
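# Feature column groups: categoricals are hash-encoded during ETL, numerics are passed through unchanged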
categorical_columns = [
'orig_channel',
'first_home_buyer',
'loan_purpose',
'property_type',
'occupancy_status',
'property_state',
'product_type',
'relocation_mortgage_indicator',
'seller_name',
'mod_flag',
]
numeric_columns = [
'orig_interest_rate',
'orig_upb',
'orig_loan_term',
'orig_ltv',
'orig_cltv',
'num_borrowers',
'dti',
'borrower_credit_score',
'num_units',
'zip',
'mortgage_insurance_percent',
'current_loan_delinquency_status',
'current_actual_upb',
'interest_rate',
'loan_age',
'msa',
'non_interest_bearing_upb',
'delinquency_12',
]
| spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/mortgage/consts.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from xgboost.spark import SparkXGBClassifier, SparkXGBClassifierModel
from .consts import *
from com.nvidia.spark.examples.utility.utils import *
from pyspark.sql import SparkSession
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
train_data, eval_data, trans_data = valid_input_data(spark, args, '', schema)
if args.mode in ['all', 'train']:
if train_data is None:
print('-' * 80)
print('Usage: training data path required when mode is all or train')
exit(1)
train_data, features = transform_data(train_data, label, args.use_gpu)
xgboost_args['features_col'] = features
xgboost_args['label_col'] = label
classifier = SparkXGBClassifier(**xgboost_args)
if eval_data:
            # TODO: eval_data is not used by this example yet
            pass
model = with_benchmark('Training', lambda: classifier.fit(train_data))
if args.modelPath:
writer = model.write().overwrite() if args.overwrite else model
writer.save(args.modelPath)
else:
model = SparkXGBClassifierModel.load(args.modelPath)
if args.mode in ['all', 'transform']:
        if not trans_data:
            print('-' * 80)
            print('Usage: trans data path required when mode is all or transform')
            exit(1)
        trans_data, _ = transform_data(trans_data, label, args.use_gpu)
        def transform():
            result = model.transform(trans_data).cache()
            result.foreachPartition(lambda _: None)
            return result
result = with_benchmark('Transformation', transform)
show_sample(args, result, label)
with_benchmark('Evaluation', lambda: check_classification_accuracy(result, label))
spark.stop()
| spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/mortgage/main.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from .consts import *
from com.nvidia.spark.examples.utility.utils import *
from pyspark.sql import SparkSession
from xgboost.spark import SparkXGBClassifier, SparkXGBClassifierModel
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
train_data, eval_data, trans_data = valid_input_data(spark, args, '', schema)
if args.mode in ['all', 'train']:
if train_data is None:
print('-' * 80)
print('Usage: training data path required when mode is all or train')
exit(1)
train_data, features = transform_data(train_data, label, args.use_gpu)
xgboost_args['features_col'] = features
xgboost_args['label_col'] = label
classifier = SparkXGBClassifier(**xgboost_args)
evaluator = (MulticlassClassificationEvaluator()
.setLabelCol(label))
param_grid = (ParamGridBuilder()
.addGrid(classifier.max_depth, [6, 8])
.addGrid(classifier.n_estimators, [20, 40])
.build())
cross_validator = (CrossValidator()
.setEstimator(classifier)
.setEvaluator(evaluator)
.setEstimatorParamMaps(param_grid)
.setNumFolds(3))
if not train_data:
print('-' * 80)
print('Usage: training data path required when mode is all or train')
exit(1)
model = with_benchmark('Training', lambda: cross_validator.fit(train_data))
# get the best model to do transform
model = model.bestModel
if args.modelPath:
writer = model.write().overwrite() if args.overwrite else model
writer.save(args.modelPath)
else:
model = SparkXGBClassifierModel.load(args.modelPath)
if args.mode in ['all', 'transform']:
if not trans_data:
print('-' * 80)
print('Usage: trans data path required when mode is all or transform')
exit(1)
trans_data, _ = transform_data(trans_data, label, args.use_gpu)
def transform():
result = model.transform(trans_data).cache()
result.foreachPartition(lambda _: None)
return result
result = with_benchmark('Transformation', transform)
show_sample(args, result, label)
with_benchmark('Evaluation', lambda: check_classification_accuracy(result, label))
spark.stop()
| spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/mortgage/cross_validator_main.py |
#
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .etl import etl, extract_paths
from com.nvidia.spark.examples.utility.utils import *
from pyspark.sql import SparkSession
def main(args, xgboost_args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
etled_df = etl(spark, args)
    # the 'out::' prefix should supply exactly one output path
outPath = extract_paths(args.dataPaths, 'out::')[0]
etled_df.write.mode("overwrite").parquet(outPath)
| spark-rapids-examples-main | examples/XGBoost-Examples/mortgage/python/com/nvidia/spark/examples/mortgage/etl_main.py |
spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/spark/__init__.py |
|
spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/spark/examples/__init__.py |